Dataset schema — one row per source file; ranges are min to max as reported by the dataset viewer:

    repo_name       string   lengths 5 to 100
    path            string   lengths 4 to 299
    copies          string   990 distinct values
    size            string   lengths 4 to 7
    content         string   lengths 666 to 1.03M
    license         string   15 distinct values
    hash            int64    -9,223,351,895,964,839,000 to 9,223,297,778B
    line_mean       float64  3.17 to 100
    line_max        int64    7 to 1k
    alpha_frac      float64  0.25 to 0.98
    autogenerated   bool     1 class
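The records below can be worked with directly once exported to JSON Lines. A minimal sketch, assuming the dump has been saved as `files.jsonl` with one record per line and the field names above; the file name and the numeric cast of `size` are assumptions, not part of the original dump:

```python
import pandas as pd

# Load one record per line; column names follow the schema above.
df = pd.read_json("files.jsonl", lines=True)

# 'size' is stored as a string in this schema, so cast before comparing.
df["size"] = df["size"].astype(int)

# Example filter: permissively licensed, hand-written, small files.
subset = df[df["license"].isin(["mit", "bsd-3-clause"])
            & ~df["autogenerated"]
            & (df["size"] < 10000)]
print(subset[["repo_name", "path", "size", "license"]])
```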
repo_name: ingadhoc/openerp-travel
path: travel/tests/test_travel.py
copies: 2
size: 7477
content:
```python
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    This module copyright (C) 2013 Savoir-faire Linux
#    (<http://www.savoirfairelinux.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tests.common import TransactionCase
from openerp.osv.orm import except_orm
import time


class test_travel(TransactionCase):

    def setUp(self):
        super(test_travel, self).setUp()
        # Clean up registries
        self.registry('ir.model').clear_caches()
        self.registry('ir.model.data').clear_caches()
        # Get registries
        self.user_model = self.registry("res.users")
        self.travel_model = self.registry("travel.travel")
        # Get context
        self.context = self.user_model.context_get(self.cr, self.uid)
        # Create values for test, travel and partner also created
        self.year = str(time.localtime(time.time())[0])
        self.vals = {
            'name': 'This is a test travel name',
            'date_start': self.year + '-01-01',
            'date_stop': self.year + '-01-14',
        }

    def test_create_travel(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        self.assertTrue(self.travel_model.create(
            cr, uid, vals, context=context))

    def test_write_travel(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        travel_id = self.travel_model.create(cr, uid, vals, context=context)
        self.travel_model.write(cr, uid, travel_id, {
            'date_stop': self.year + '-01-21',
        }, context=context)

    def test_unlink_travel(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        travel_id = self.travel_model.create(cr, uid, vals, context=context)
        self.travel_model.unlink(cr, uid, travel_id, context=context)

    def test_change_state_travel(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        states = {
            'open': self.travel_model.travel_open,
            'booking': self.travel_model.travel_book,
            'reserved': self.travel_model.travel_reserve,
            'confirmed': self.travel_model.travel_confirm,
            'done': self.travel_model.travel_close,
        }
        travel_id = self.travel_model.create(cr, uid, vals, context=context)
        for state, func in states.iteritems():
            func(cr, uid, travel_id, context=context)
            travel_obj = self.travel_model.browse(
                cr, uid, travel_id, context=context)
            self.assertEqual(travel_obj.state, state)

    def test_create_travel_too_many_passengers(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        partner_model = self.registry("res.partner")
        group_id = self.registry("ir.model.data").get_object_reference(
            cr, uid, 'travel', 'group_basic_travel_user')[1]
        user_id = self.user_model.create(cr, uid, {
            'login': 'test',
            'name': 'test',
            'groups_id': [(4, group_id)],
        }, context=context)
        vals = vals.copy()
        vals["passenger_ids"] = []
        for i in xrange(12):
            partner_id = partner_model.create(
                cr, uid, {'name': 'test_partner_%d' % i}, context=context)
            vals["passenger_ids"].append(tuple([0, 0, {
                "partner_id": partner_id,
            }]))
        self.assertRaises(
            except_orm,
            self.travel_model.create,
            cr, user_id, vals, context=context
        )

    def test_write_travel_too_many_passengers(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        travel_id = self.travel_model.create(cr, uid, vals, context=context)
        partner_model = self.registry("res.partner")
        group_id = self.registry("ir.model.data").get_object_reference(
            cr, uid, 'travel', 'group_basic_travel_user')[1]
        user_id = self.user_model.create(cr, uid, {
            'login': 'test',
            'name': 'test',
            'groups_id': [(4, group_id)],
        }, context=context)
        vals = {
            "passenger_ids": [],
        }
        for i in xrange(12):
            partner_id = partner_model.create(
                cr, uid, {'name': 'test_partner_%d' % i}, context=context)
            vals["passenger_ids"].append(tuple([0, 0, {
                "partner_id": partner_id,
            }]))
        self.assertRaises(
            except_orm,
            self.travel_model.write,
            cr, user_id, travel_id, vals, context=context
        )
        self.travel_model.write(cr, uid, travel_id, vals, context=context)
        self.assertRaises(
            except_orm,
            self.travel_model.write,
            cr, user_id, travel_id, vals={'name': 'changed'}, context=context
        )

    def test_unlink_travel_too_many_passengers(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        partner_model = self.registry("res.partner")
        group_id = self.registry("ir.model.data").get_object_reference(
            cr, uid, 'travel', 'group_basic_travel_user')[1]
        user_id = self.user_model.create(cr, uid, {
            'login': 'test',
            'name': 'test',
            'groups_id': [(4, group_id)],
        }, context=context)
        vals = vals.copy()
        vals["passenger_ids"] = []
        for i in xrange(12):
            partner_id = partner_model.create(
                cr, uid, {'name': 'test_partner_%d' % i}, context=context)
            vals["passenger_ids"].append(tuple([0, 0, {
                "partner_id": partner_id,
            }]))
        travel_id = self.travel_model.create(cr, uid, vals, context=context)
        self.assertRaises(
            except_orm,
            self.travel_model.unlink,
            cr, user_id, travel_id, context=context
        )

    def test_create_travel_bad_date(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        self.assertRaises(
            except_orm,
            self.travel_model.create,
            cr, uid, dict(vals, date_start=self.year + '-01-21'),
            context=context
        )

    def test_write_travel_bad_date(self):
        cr, uid, vals, context = self.cr, self.uid, self.vals, self.context
        travel_id = self.travel_model.create(cr, uid, vals, context=context)
        self.assertRaises(
            except_orm,
            self.travel_model.write,
            cr, uid, travel_id,
            dict(vals, date_start=self.year + '-01-21'),
            context=context
        )
```
license: agpl-3.0
hash: 5,107,808,570,240,683,000
line_mean: 40.309392
line_max: 78
alpha_frac: 0.566537
autogenerated: false
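The derived columns can be recomputed from `content` alone. A minimal sketch, assuming `line_mean` and `line_max` are the mean and maximum line length in characters and `alpha_frac` is the fraction of alphabetic characters — plausible definitions, not confirmed by the dump (the record above reports line_mean 40.31, line_max 78, alpha_frac 0.57 for its content):

```python
def content_stats(content):
    """Recompute per-file statistics (definitions assumed, see above)."""
    lengths = [len(line) for line in content.splitlines()]
    return {
        "line_mean": sum(lengths) / len(lengths),  # mean line length
        "line_max": max(lengths),                  # longest line
        # fraction of alphabetic characters in the whole file
        "alpha_frac": sum(ch.isalpha() for ch in content) / len(content),
    }
```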
repo_name: DougFirErickson/neon
path: examples/babi/util.py
copies: 4
size: 3310
content:
```python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Utility functions for bAbI example and demo.
"""
from neon.data import BABI
from neon.initializers import GlorotUniform, Uniform, Orthonormal
from neon.layers import Affine, GRU, LookupTable, MergeMultistream, LSTM
from neon.models import Model
from neon.transforms import Logistic, Softmax, Tanh

# list of bAbI tasks
subset = 'en'
task_list = [
    'qa1_single-supporting-fact',
    'qa2_two-supporting-facts',
    'qa3_three-supporting-facts',
    'qa4_two-arg-relations',
    'qa5_three-arg-relations',
    'qa6_yes-no-questions',
    'qa7_counting',
    'qa8_lists-sets',
    'qa9_simple-negation',
    'qa10_indefinite-knowledge',
    'qa11_basic-coreference',
    'qa12_conjunction',
    'qa13_compound-coreference',
    'qa14_time-reasoning',
    'qa15_basic-deduction',
    'qa16_basic-induction',
    'qa17_positional-reasoning',
    'qa18_size-reasoning',
    'qa19_path-finding',
    'qa20_agents-motivations',
]


def babi_handler(data_dir, task_number):
    """
    Handle for bAbI task.

    Args:
        data_dir (string) : Path to bAbI data directory.
        task_number (int) : The task ID from the bAbI dataset (1-20).

    Returns:
        BABI : Handler for bAbI task.
    """
    task = task_list[task_number - 1]
    return BABI(path=data_dir, task=task, subset=subset)


def create_model(vocab_size, rlayer_type):
    """
    Create LSTM/GRU model for bAbI dataset.

    Args:
        vocab_size (int) : Size of the vocabulary of the bAbI data.
        rlayer_type (string) : Type of recurrent layer to use (gru or lstm).

    Returns:
        Model : Model of the created network
    """
    # recurrent layer parameters (default gru)
    rlayer_obj = GRU if rlayer_type == 'gru' else LSTM
    rlayer_params = dict(output_size=100, reset_cells=True,
                         init=GlorotUniform(), init_inner=Orthonormal(0.5),
                         activation=Tanh(), gate_activation=Logistic())

    # if using lstm, swap the activation functions
    if rlayer_type == 'lstm':
        rlayer_params.update(dict(activation=Logistic(),
                                  gate_activation=Tanh()))

    # lookup layer parameters
    lookup_params = dict(vocab_size=vocab_size, embedding_dim=50,
                         init=Uniform(-0.05, 0.05))

    # Model construction
    story_path = [LookupTable(**lookup_params), rlayer_obj(**rlayer_params)]
    query_path = [LookupTable(**lookup_params), rlayer_obj(**rlayer_params)]

    layers = [MergeMultistream(layers=[story_path, query_path], merge="stack"),
              Affine(vocab_size, init=GlorotUniform(), activation=Softmax())]

    return Model(layers=layers)
```
license: apache-2.0
hash: -5,692,681,496,997,404,000
line_mean: 33.479167
line_max: 92
alpha_frac: 0.645015
autogenerated: false
repo_name: MakarenaLabs/Orator-Google-App-Engine
path: orator/dbal/sqlite_schema_manager.py
copies: 1
size: 2801
content:
```python
# -*- coding: utf-8 -*-

import re

from .schema_manager import SchemaManager
from .platforms.sqlite_platform import SQLitePlatform
from .column import Column


class SQLiteSchemaManager(SchemaManager):

    def _get_portable_table_column_definition(self, table_column):
        parts = table_column['type'].split('(')
        table_column['type'] = parts[0]
        if len(parts) > 1:
            length = parts[1].strip(')')
            table_column['length'] = length

        db_type = table_column['type'].lower()
        length = table_column.get('length', None)
        unsigned = False

        if ' unsigned' in db_type:
            db_type = db_type.replace(' unsigned', '')
            unsigned = True

        fixed = False

        type = self._platform.get_type_mapping(db_type)
        default = table_column['dflt_value']
        if default == 'NULL':
            default = None

        if default is not None:
            # SQLite returns strings wrapped in single quotes,
            # so we need to strip them
            default = re.sub("^'(.*)'$", '\\1', default)

        notnull = bool(table_column['notnull'])

        if 'name' not in table_column:
            table_column['name'] = ''

        precision = None
        scale = None

        if db_type in ['char']:
            fixed = True
        elif db_type in ['float', 'double', 'real', 'decimal', 'numeric']:
            if 'length' in table_column:
                if ',' not in table_column['length']:
                    table_column['length'] += ',0'

                precision, scale = tuple(
                    map(lambda x: x.strip(),
                        table_column['length'].split(',')))

            length = None

        options = {
            'length': length,
            'unsigned': bool(unsigned),
            'fixed': fixed,
            'notnull': notnull,
            'default': default,
            'precision': precision,
            'scale': scale,
            'autoincrement': False
        }

        column = Column(table_column['name'], type, options)
        column.set_platform_option('pk', table_column['pk'])

        return column

    def list_table_indexes(self, table):
        sql = self._platform.get_list_table_indexes_sql(table)

        cursor = self._connection.get_connection().cursor()
        table_indexes = cursor.execute(sql).fetchall()

        indexes = []
        for index in table_indexes:
            table_index = dict(index.items())
            index_info = cursor.execute(
                'PRAGMA index_info(%s)' % index['name']).fetchall()

            columns = []
            for column in index_info:
                columns.append(column['name'])

            table_index['columns'] = columns

            indexes.append(table_index)

        return indexes

    def get_database_platform(self):
        return SQLitePlatform()
```
license: mit
hash: 4,539,920,076,825,593,300
line_mean: 29.11828
line_max: 101
alpha_frac: 0.546233
autogenerated: false
repo_name: tjsavage/djangononrel-starter
path: django/db/backends/oracle/base.py
copies: 12
size: 28766
content:
```python
"""
Oracle database backend for Django.

Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""

import datetime
import os
import sys
import time
from decimal import Decimal

# Oracle takes client-side character set encoding from the environment.
os.environ['NLS_LANG'] = '.UTF8'
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
os.environ['ORA_NCHAR_LITERAL_REPLACE'] = 'TRUE'

try:
    import cx_Oracle as Database
except ImportError, e:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)

from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import smart_str, force_unicode

DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError

# Check whether cx_Oracle was compiled with the WITH_UNICODE option. This will
# also be True in Python 3.0.
if int(Database.version.split('.', 1)[0]) >= 5 and not hasattr(Database, 'UNICODE'):
    convert_unicode = force_unicode
else:
    convert_unicode = smart_str


class DatabaseFeatures(BaseDatabaseFeatures):
    empty_fetchmany_value = ()
    needs_datetime_string_cast = False
    interprets_empty_strings_as_nulls = True
    uses_savepoints = True
    can_return_id_from_insert = True
    allow_sliced_subqueries = False
    supports_subqueries_in_group_by = False
    supports_timezones = False
    supports_bitwise_or = False
    can_defer_constraint_checks = True


class DatabaseOperations(BaseDatabaseOperations):
    compiler_module = "django.db.backends.oracle.compiler"

    def autoinc_sql(self, table, column):
        # To simulate auto-incrementing primary keys in Oracle, we have to
        # create a sequence and a trigger.
        sq_name = get_sequence_name(table)
        tr_name = get_trigger_name(table)
        tbl_name = self.quote_name(table)
        col_name = self.quote_name(column)
        sequence_sql = """
DECLARE
    i INTEGER;
BEGIN
    SELECT COUNT(*) INTO i FROM USER_CATALOG
        WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
    IF i = 0 THEN
        EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
    END IF;
END;
/""" % locals()
        trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
    BEGIN
        SELECT "%(sq_name)s".nextval
        INTO :new.%(col_name)s FROM dual;
    END;
/""" % locals()
        return sequence_sql, trigger_sql

    def date_extract_sql(self, lookup_type, field_name):
        # http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
        if lookup_type == 'week_day':
            # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
            return "TO_CHAR(%s, 'D')" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        # Oracle uses TRUNC() for both dates and numbers.
        # http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
        if lookup_type == 'day':
            sql = 'TRUNC(%s)' % field_name
        else:
            sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
        return sql

    def convert_values(self, value, field):
        if isinstance(value, Database.LOB):
            value = value.read()
            if field and field.get_internal_type() == 'TextField':
                value = force_unicode(value)

        # Oracle stores empty strings as null. We need to undo this in
        # order to adhere to the Django convention of using the empty
        # string instead of null, but only if the field accepts the
        # empty string.
        if value is None and field and field.empty_strings_allowed:
            value = u''
        # Convert 1 or 0 to True or False
        elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
            value = bool(value)
        # Force floats to the correct type
        elif value is not None and field and field.get_internal_type() == 'FloatField':
            value = float(value)
        # Convert floats to decimals
        elif value is not None and field and field.get_internal_type() == 'DecimalField':
            value = util.typecast_decimal(field.format_number(value))
        # cx_Oracle always returns datetime.datetime objects for
        # DATE and TIMESTAMP columns, but Django wants to see a
        # python datetime.date, .time, or .datetime. We use the type
        # of the Field to determine which to cast to, but it's not
        # always available.
        # As a workaround, we cast to date if all the time-related
        # values are 0, or to time if the date is 1/1/1900.
        # This could be cleaned a bit by adding a method to the Field
        # classes to normalize values from the database (the to_python
        # method is used for validation and isn't what we want here).
        elif isinstance(value, Database.Timestamp):
            # In Python 2.3, the cx_Oracle driver returns its own
            # Timestamp object that we must convert to a datetime class.
            if not isinstance(value, datetime.datetime):
                value = datetime.datetime(value.year, value.month,
                                          value.day, value.hour,
                                          value.minute, value.second,
                                          value.fsecond)
            if field and field.get_internal_type() == 'DateTimeField':
                pass
            elif field and field.get_internal_type() == 'DateField':
                value = value.date()
            elif field and field.get_internal_type() == 'TimeField' or (
                    value.year == 1900 and value.month == value.day == 1):
                value = value.time()
            elif value.hour == value.minute == value.second == value.microsecond == 0:
                value = value.date()
        return value

    def datetime_cast_sql(self):
        return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def drop_sequence_sql(self, table):
        return "DROP SEQUENCE %s;" % self.quote_name(get_sequence_name(table))

    def fetch_returned_insert_id(self, cursor):
        return long(cursor._insert_id_var.getvalue())

    def field_cast_sql(self, db_type):
        if db_type and db_type.endswith('LOB'):
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = get_sequence_name(table_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type):
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"

    def max_in_list_size(self):
        return 1000

    def max_name_length(self):
        return 30

    def prep_for_iexact_query(self, x):
        return x

    def process_clob(self, value):
        if value is None:
            return u''
        return force_unicode(value.read())

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers,
        # but always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % util.truncate_name(name.upper(),
                                               self.max_name_length())
        return name.upper()

    def random_function_sql(self):
        return "DBMS_RANDOM.RANDOM"

    def regex_lookup_9(self, lookup_type):
        raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")

    def regex_lookup_10(self, lookup_type):
        if lookup_type == 'regex':
            match_option = "'c'"
        else:
            match_option = "'i'"
        return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option

    def regex_lookup(self, lookup_type):
        # If regex_lookup is called before it's been initialized, then create
        # a cursor to initialize it and recur.
        from django.db import connection
        connection.cursor()
        return connection.ops.regex_lookup(lookup_type)

    def return_insert_id(self):
        return "RETURNING %s INTO %%s", (InsertIdVar(),)

    def savepoint_create_sql(self, sid):
        return convert_unicode("SAVEPOINT " + self.quote_name(sid))

    def savepoint_rollback_sql(self, sid):
        return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))

    def sql_flush(self, style, tables, sequences):
        # Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
        # 'TRUNCATE z;'... style SQL statements
        if tables:
            # Oracle does support TRUNCATE, but it seems to get us into
            # FK referential trouble, whereas DELETE FROM table works.
            sql = ['%s %s %s;' % \
                    (style.SQL_KEYWORD('DELETE'),
                     style.SQL_KEYWORD('FROM'),
                     style.SQL_FIELD(self.quote_name(table)))
                    for table in tables]
            # Since we've just deleted all the rows, running our sequence
            # ALTER code will reset the sequence to 0.
            for sequence_info in sequences:
                sequence_name = get_sequence_name(sequence_info['table'])
                table_name = self.quote_name(sequence_info['table'])
                column_name = self.quote_name(sequence_info['column'] or 'id')
                query = _get_sequence_reset_sql() % {
                    'sequence': sequence_name,
                    'table': table_name,
                    'column': column_name}
                sql.append(query)
            return sql
        else:
            return []

    def sequence_reset_sql(self, style, model_list):
        from django.db import models
        output = []
        query = _get_sequence_reset_sql()
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    table_name = self.quote_name(model._meta.db_table)
                    sequence_name = get_sequence_name(model._meta.db_table)
                    column_name = self.quote_name(f.column)
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
                    # Only one AutoField is allowed per model, so don't
                    # continue to loop
                    break
            for f in model._meta.many_to_many:
                if not f.rel.through:
                    table_name = self.quote_name(f.m2m_db_table())
                    sequence_name = get_sequence_name(f.m2m_db_table())
                    column_name = self.quote_name('id')
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
        return output

    def start_transaction_sql(self):
        return ''

    def tablespace_sql(self, tablespace, inline=False):
        return "%sTABLESPACE %s" % ((inline and "USING INDEX " or ""),
                                    self.quote_name(tablespace))

    def value_to_db_datetime(self, value):
        # Oracle doesn't support tz-aware datetimes
        if getattr(value, 'tzinfo', None) is not None:
            raise ValueError("Oracle backend does not support timezone-aware datetimes.")
        return super(DatabaseOperations, self).value_to_db_datetime(value)

    def value_to_db_time(self, value):
        if value is None:
            return None
        if isinstance(value, basestring):
            return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
        # Oracle doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("Oracle backend does not support timezone-aware datetimes.")
        return datetime.datetime(1900, 1, 1, value.hour, value.minute,
                                 value.second, value.microsecond)

    def year_lookup_bounds_for_date_field(self, value):
        first = '%s-01-01'
        second = '%s-12-31'
        return [first % value, second % value]

    def combine_expression(self, connector, sub_expressions):
        "Oracle requires special cases for %% and & operators in query expressions"
        if connector == '%%':
            return 'MOD(%s)' % ','.join(sub_expressions)
        elif connector == '&':
            return 'BITAND(%s)' % ','.join(sub_expressions)
        elif connector == '|':
            raise NotImplementedError("Bit-wise or is not supported in Oracle.")
        return super(DatabaseOperations, self).combine_expression(connector,
                                                                  sub_expressions)


class DatabaseWrapper(BaseDatabaseWrapper):
    vendor = 'oracle'
    operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
    }

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)

        self.oracle_version = None
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)

    def _valid_connection(self):
        return self.connection is not None

    def _connect_string(self):
        settings_dict = self.settings_dict
        if not settings_dict['HOST'].strip():
            settings_dict['HOST'] = 'localhost'
        if settings_dict['PORT'].strip():
            dsn = Database.makedsn(settings_dict['HOST'],
                                   int(settings_dict['PORT']),
                                   settings_dict['NAME'])
        else:
            dsn = settings_dict['NAME']
        return "%s/%s@%s" % (settings_dict['USER'],
                             settings_dict['PASSWORD'], dsn)

    def _cursor(self):
        cursor = None
        if not self._valid_connection():
            conn_string = convert_unicode(self._connect_string())
            self.connection = Database.connect(conn_string,
                                               **self.settings_dict['OPTIONS'])
            cursor = FormatStylePlaceholderCursor(self.connection)
            # Set oracle date to ansi date format. This only needs to execute
            # once when we create a new connection. We also set the Territory
            # to 'AMERICA' which forces Sunday to evaluate to a '1' in
            # TO_CHAR().
            cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' "
                           "NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF' "
                           "NLS_TERRITORY = 'AMERICA'")
            try:
                self.oracle_version = int(self.connection.version.split('.')[0])
                # There's no way for the DatabaseOperations class to know the
                # currently active Oracle version, so we do some setups here.
                # TODO: Multi-db support will need a better solution (a way to
                # communicate the current version).
                if self.oracle_version <= 9:
                    self.ops.regex_lookup = self.ops.regex_lookup_9
                else:
                    self.ops.regex_lookup = self.ops.regex_lookup_10
            except ValueError:
                pass
            try:
                self.connection.stmtcachesize = 20
            except:
                # Django docs specify cx_Oracle version 4.3.1 or higher, but
                # stmtcachesize is available only in 4.3.2 and up.
                pass
            connection_created.send(sender=self.__class__, connection=self)
        if not cursor:
            cursor = FormatStylePlaceholderCursor(self.connection)
        return cursor

    # Oracle doesn't support savepoint commits. Ignore them.
    def _savepoint_commit(self, sid):
        pass

    def _commit(self):
        if self.connection is not None:
            try:
                return self.connection.commit()
            except Database.IntegrityError, e:
                # In case cx_Oracle implements (now or in a future version)
                # raising this specific exception
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            except Database.DatabaseError, e:
                # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
                # with the following attributes and values:
                #  code = 2091
                #  message = 'ORA-02091: transaction rolled back
                #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
                #               _C00102056) violated - parent key not found'
                # We convert that particular case to our IntegrityError exception
                x = e.args[0]
                if hasattr(x, 'code') and hasattr(x, 'message') \
                   and x.code == 2091 and 'ORA-02291' in x.message:
                    raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
                raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]


class OracleParam(object):
    """
    Wrapper object for formatting parameters for Oracle. If the string
    representation of the value is large enough (greater than 4000 characters)
    the input size needs to be set as CLOB. Alternatively, if the parameter
    has an `input_size` attribute, then the value of the `input_size` attribute
    will be used instead. Otherwise, no input size will be set for the
    parameter when executing the query.
    """

    def __init__(self, param, cursor, strings_only=False):
        if hasattr(param, 'bind_parameter'):
            self.smart_str = param.bind_parameter(cursor)
        else:
            self.smart_str = convert_unicode(param, cursor.charset,
                                             strings_only)
        if hasattr(param, 'input_size'):
            # If parameter has `input_size` attribute, use that.
            self.input_size = param.input_size
        elif isinstance(param, basestring) and len(param) > 4000:
            # Mark any string param greater than 4000 characters as a CLOB.
            self.input_size = Database.CLOB
        else:
            self.input_size = None


class VariableWrapper(object):
    """
    An adapter class for cursor variables that prevents the wrapped object
    from being converted into a string when used to instantiate an
    OracleParam. This can be used generally for any other object that should
    be passed into Cursor.execute as-is.
    """

    def __init__(self, var):
        self.var = var

    def bind_parameter(self, cursor):
        return self.var

    def __getattr__(self, key):
        return getattr(self.var, key)

    def __setattr__(self, key, value):
        if key == 'var':
            self.__dict__[key] = value
        else:
            setattr(self.var, key, value)


class InsertIdVar(object):
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """

    def bind_parameter(self, cursor):
        param = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = param
        return param


class FormatStylePlaceholderCursor(object):
    """
    Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
    style. This fixes it -- but note that if you want to use a literal "%s" in
    a query, you'll need to use "%%s".

    We also do automatic conversion between Unicode on the Python side and
    UTF-8 -- for talking to Oracle -- in here.
    """
    charset = 'utf-8'

    def __init__(self, connection):
        self.cursor = connection.cursor()
        # Necessary to retrieve decimal values without rounding error.
        self.cursor.numbersAsStrings = True
        # Default arraysize of 1 is highly sub-optimal.
        self.cursor.arraysize = 100

    def _format_params(self, params):
        return tuple([OracleParam(p, self, True) for p in params])

    def _guess_input_sizes(self, params_list):
        sizes = [None] * len(params_list[0])
        for params in params_list:
            for i, value in enumerate(params):
                if value.input_size:
                    sizes[i] = value.input_size
        self.setinputsizes(*sizes)

    def _param_generator(self, params):
        return [p.smart_str for p in params]

    def execute(self, query, params=None):
        if params is None:
            params = []
        else:
            params = self._format_params(params)
        args = [(':arg%d' % i) for i in range(len(params))]
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        self._guess_input_sizes([params])
        try:
            return self.cursor.execute(query, self._param_generator(params))
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def executemany(self, query, params=None):
        try:
            args = [(':arg%d' % i) for i in range(len(params[0]))]
        except (IndexError, TypeError):
            # No params given, nothing to do
            return None
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        formatted = [self._format_params(i) for i in params]
        self._guess_input_sizes(formatted)
        try:
            return self.cursor.executemany(query,
                                           [self._param_generator(p)
                                            for p in formatted])
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def fetchone(self):
        row = self.cursor.fetchone()
        if row is None:
            return row
        return _rowfactory(row, self.cursor)

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchmany(size)])

    def fetchall(self):
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchall()])

    def var(self, *args):
        return VariableWrapper(self.cursor.var(*args))

    def arrayvar(self, *args):
        return VariableWrapper(self.cursor.arrayvar(*args))

    def __getattr__(self, attr):
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return CursorIterator(self.cursor)


class CursorIterator(object):
    """Cursor iterator wrapper that invokes our custom row factory."""

    def __init__(self, cursor):
        self.cursor = cursor
        self.iter = iter(cursor)

    def __iter__(self):
        return self

    def next(self):
        return _rowfactory(self.iter.next(), self.cursor)


def _rowfactory(row, cursor):
    # Cast numeric values as the appropriate Python type based upon the
    # cursor description, and convert strings to unicode.
    casted = []
    for value, desc in zip(row, cursor.description):
        if value is not None and desc[1] is Database.NUMBER:
            precision, scale = desc[4:6]
            if scale == -127:
                if precision == 0:
                    # NUMBER column: decimal-precision floating point
                    # This will normally be an integer from a sequence,
                    # but it could be a decimal value.
                    if '.' in value:
                        value = Decimal(value)
                    else:
                        value = int(value)
                else:
                    # FLOAT column: binary-precision floating point.
                    # This comes from FloatField columns.
                    value = float(value)
            elif precision > 0:
                # NUMBER(p,s) column: decimal-precision fixed point.
                # This comes from IntField and DecimalField columns.
                if scale == 0:
                    value = int(value)
                else:
                    value = Decimal(value)
            elif '.' in value:
                # No type information. This normally comes from a
                # mathematical expression in the SELECT list. Guess int
                # or Decimal based on whether it has a decimal point.
                value = Decimal(value)
            else:
                value = int(value)
        elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
                         Database.LONG_STRING):
            value = to_unicode(value)
        casted.append(value)
    return tuple(casted)


def to_unicode(s):
    """
    Convert strings to Unicode objects (and return all other data types
    unchanged).
    """
    if isinstance(s, basestring):
        return force_unicode(s)
    return s


def _get_sequence_reset_sql():
    # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
    return """
DECLARE
    table_value integer;
    seq_value integer;
BEGIN
    SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
    SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
           WHERE sequence_name = '%(sequence)s';
    WHILE table_value > seq_value LOOP
        SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
    END LOOP;
END;
/"""


def get_sequence_name(table):
    name_length = DatabaseOperations().max_name_length() - 3
    return '%s_SQ' % util.truncate_name(table, name_length).upper()


def get_trigger_name(table):
    name_length = DatabaseOperations().max_name_length() - 3
    return '%s_TR' % util.truncate_name(table, name_length).upper()
```
license: bsd-3-clause
hash: -2,027,966,672,640,528,000
line_mean: 39.802837
line_max: 126
alpha_frac: 0.593826
autogenerated: false
repo_name: daviswr/ZenPacks.daviswr.ZFS
path: ZenPacks/daviswr/ZFS/__init__.py
copies: 1
size: 2528
content:
```python
import re
import sys

from ZenPacks.zenoss.ZenPackLib import zenpacklib
# https://www.skills-1st.co.uk/papers/jane/zpdevguide/ZenPack_DevGuide_V1.0.1_publish1_20161013.pdf  # noqa
# https://github.com/cluther/ZenPacks.example.EvaluatedCommandModeler
from Products.ZenUtils.Utils import monkeypatch
from Products.ZenUtils.ZenTales import talesEvalStr

CFG = zenpacklib.load_yaml()
schema = CFG.zenpack_module.schema

# SshClient does a relative import of CollectorClient from
# /opt/zenoss/Products/DataCollector/CollectorClient.py.
# The standard CollectorClient class has an __init__ like:
#   def __init__(self, hostname, ip, port, plugins=None, options=None,
#                device=None, datacollector=None, alog=None):
# Note the first 3 parameters are mandatory ( args[0] to args[2] ), plugins
# is the first optional at args[3]. device may be args[5]
#
# Normally one cannot pass TALES expressions to a command. This code
# does a monkeypatch to the relative CollectorClient module already in
# sys.modules to check for ${ syntax and performs a TALES evaluation.

if 'CollectorClient' in sys.modules:
    CollectorClient = sys.modules['CollectorClient']

    @monkeypatch(CollectorClient.CollectorClient)
    def __init__(self, *args, **kwargs):
        # original is injected into locals by the monkeypatch decorator.
        original(self, *args, **kwargs)

        # Reset cmdmap and _commands.
        self.cmdmap = {}
        self._commands = []

        # Get plugins from args or kwargs.
        plugins = kwargs.get('plugins')
        if plugins is None:
            if len(args) > 3:
                plugins = args[3]
            else:
                plugins = []

        # Get device from args or kwargs.
        device = kwargs.get('device')
        if device is None:
            if len(args) > 5:
                device = args[5]
            else:
                device = None

        # Do TALES evaluation of each plugin's command.
        for plugin in plugins:
            if re.search(r'\$\{\S+\/\S+\}', plugin.command):
                try:
                    command = talesEvalStr(plugin.command, device)
                except Exception:
                    CollectorClient.log.exception(
                        '%s - command TALES evaluation failed, proceeding',
                        device.id)
                    command = plugin.command
            else:
                command = plugin.command

            self.cmdmap[command] = plugin
            self._commands.append(command)
```
license: mit
hash: -3,321,025,650,586,362,000
line_mean: 36.731343
line_max: 107
alpha_frac: 0.620253
autogenerated: false
repo_name: pizzathief/scipy
path: scipy/odr/tests/test_odr.py
copies: 2
size: 17853
content:
```python
# SciPy imports.
import numpy as np
from numpy import pi
from numpy.testing import (assert_array_almost_equal,
                           assert_equal, assert_warns)
from pytest import raises as assert_raises

from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning,
                       multilinear, exponential, unilinear, quadratic,
                       polynomial)


class TestODR(object):

    # Bad Data for 'x'

    def test_bad_data(self):
        assert_raises(ValueError, Data, 2, 1)
        assert_raises(ValueError, RealData, 2, 1)

    # Empty Data for 'x'
    def empty_data_func(self, B, x):
        return B[0]*x + B[1]

    def test_empty_data(self):
        beta0 = [0.02, 0.0]
        linear = Model(self.empty_data_func)

        empty_dat = Data([], [])
        assert_warns(OdrWarning, ODR, empty_dat, linear, beta0=beta0)

        empty_dat = RealData([], [])
        assert_warns(OdrWarning, ODR, empty_dat, linear, beta0=beta0)

    # Explicit Example

    def explicit_fcn(self, B, x):
        ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
        return ret

    def explicit_fjd(self, B, x):
        eBx = np.exp(B[2]*x)
        ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
        return ret

    def explicit_fjb(self, B, x):
        eBx = np.exp(B[2]*x)
        res = np.vstack([np.ones(x.shape[-1]),
                         np.power(eBx-1.0, 2),
                         B[1]*2.0*(eBx-1.0)*eBx*x])
        return res

    def test_explicit(self):
        explicit_mod = Model(
            self.explicit_fcn,
            fjacb=self.explicit_fjb,
            fjacd=self.explicit_fjd,
            meta=dict(name='Sample Explicit Model',
                      ref='ODRPACK UG, pg. 39'),
        )
        explicit_dat = Data(
            [0., 0., 5., 7., 7.5, 10., 16., 26., 30., 34., 34.5, 100.],
            [1265., 1263.6, 1258., 1254., 1253., 1249.8, 1237., 1218., 1220.6,
             1213.8, 1215.5, 1212.])
        explicit_odr = ODR(explicit_dat, explicit_mod,
                           beta0=[1500.0, -50.0, -0.1],
                           ifixx=[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0])
        explicit_odr.set_job(deriv=2)
        explicit_odr.set_iprint(init=0, iter=0, final=0)

        out = explicit_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
                      -8.7849712165253724e-02]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([1.0349270280543437, 1.583997785262061,
                      0.0063321988657267]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
                       -8.0978217468468912e-04],
                      [-3.7421976890364739e-01, 1.0529686462751804e+00,
                       -1.9453521827942002e-03],
                      [-8.0978217468468912e-04, -1.9453521827942002e-03,
                       1.6827336938454476e-05]]),
        )

    # Implicit Example

    def implicit_fcn(self, B, x):
        return (B[2]*np.power(x[0]-B[0], 2) +
                2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
                B[4]*np.power(x[1]-B[1], 2) - 1.0)

    def test_implicit(self):
        implicit_mod = Model(
            self.implicit_fcn,
            implicit=1,
            meta=dict(name='Sample Implicit Model',
                      ref='ODRPACK UG, pg. 49'),
        )
        implicit_dat = Data([
            [0.5, 1.2, 1.6, 1.86, 2.12, 2.36, 2.44, 2.36, 2.06, 1.74, 1.34,
             0.9, -0.28, -0.78, -1.36, -1.9, -2.5, -2.88, -3.18, -3.44],
            [-0.12, -0.6, -1., -1.4, -2.54, -3.36, -4., -4.75, -5.25, -5.64,
             -5.97, -6.32, -6.44, -6.44, -6.41, -6.25, -5.88, -5.5, -5.24,
             -4.86]],
            1,
        )
        implicit_odr = ODR(implicit_dat, implicit_mod,
                           beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])

        out = implicit_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([-0.9993809167281279, -2.9310484652026476,
                      0.0875730502693354, 0.0162299708984738,
                      0.0797537982976416]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.1113840353364371, 0.1097673310686467,
                      0.0041060738314314, 0.0027500347539902,
                      0.0034962501532468]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
                       7.0263550868344446e-02, -4.7175267373474862e-02,
                       5.2515575927380355e-02],
                      [-1.9437686411979040e+00, 2.0481509222414456e+00,
                       -6.1600515853057307e-02, 4.6268827806232933e-02,
                       -5.8822307501391467e-02],
                      [7.0263550868344446e-02, -6.1600515853057307e-02,
                       2.8659542561579308e-03, -1.4628662260014491e-03,
                       1.4528860663055824e-03],
                      [-4.7175267373474862e-02, 4.6268827806232933e-02,
                       -1.4628662260014491e-03, 1.2855592885514335e-03,
                       -1.2692942951415293e-03],
                      [5.2515575927380355e-02, -5.8822307501391467e-02,
                       1.4528860663055824e-03, -1.2692942951415293e-03,
                       2.0778813389755596e-03]]),
        )

    # Multi-variable Example

    def multi_fcn(self, B, x):
        if (x < 0.0).any():
            raise OdrStop
        theta = pi*B[3]/2.
        ctheta = np.cos(theta)
        stheta = np.sin(theta)
        omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
        phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
        r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
                                             np.power(omega*stheta, 2)), -B[4])
        ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
                         r*np.sin(B[4]*phi)])
        return ret

    def test_multi(self):
        multi_mod = Model(
            self.multi_fcn,
            meta=dict(name='Sample Multi-Response Model',
                      ref='ODRPACK UG, pg. 56'),
        )

        multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0,
                            500.0, 700.0, 1000.0, 1500.0, 2000.0, 3000.0,
                            5000.0, 7000.0, 10000.0, 15000.0, 20000.0,
                            30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
        multi_y = np.array([
            [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
             3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
             2.934, 2.876, 2.838, 2.798, 2.759],
            [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
             0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
             0.202, 0.182, 0.168, 0.153, 0.139],
        ])
        n = len(multi_x)
        multi_we = np.zeros((2, 2, n), dtype=float)
        multi_ifixx = np.ones(n, dtype=int)
        multi_delta = np.zeros(n, dtype=float)

        multi_we[0, 0, :] = 559.6
        multi_we[1, 0, :] = multi_we[0, 1, :] = -1634.0
        multi_we[1, 1, :] = 8397.0

        for i in range(n):
            if multi_x[i] < 100.0:
                multi_ifixx[i] = 0
            elif multi_x[i] <= 150.0:
                pass  # defaults are fine
            elif multi_x[i] <= 1000.0:
                multi_delta[i] = 25.0
            elif multi_x[i] <= 10000.0:
                multi_delta[i] = 560.0
            elif multi_x[i] <= 100000.0:
                multi_delta[i] = 9500.0
            else:
                multi_delta[i] = 144000.0
            if multi_x[i] == 100.0 or multi_x[i] == 150.0:
                multi_we[:, :, i] = 0.0

        multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
                         we=multi_we)
        multi_odr = ODR(multi_dat, multi_mod, beta0=[4., 2., 7., .4, .5],
                        delta0=multi_delta, ifixx=multi_ifixx)
        multi_odr.set_job(deriv=1, del_init=1)

        out = multi_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([4.3799880305938963, 2.4333057577497703,
                      8.0028845899503978, 0.5101147161764654,
                      0.5173902330489161]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.0130625231081944, 0.0130499785273277,
                      0.1167085962217757, 0.0132642749596149,
                      0.0288529201353984]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0064918418231375, 0.0036159705923791,
                       0.0438637051470406, -0.0058700836512467,
                       0.011281212888768],
                      [0.0036159705923791, 0.0064793789429006,
                       0.0517610978353126, -0.0051181304940204,
                       0.0130726943624117],
                      [0.0438637051470406, 0.0517610978353126,
                       0.5182263323095322, -0.0563083340093696,
                       0.1269490939468611],
                      [-0.0058700836512467, -0.0051181304940204,
                       -0.0563083340093696, 0.0066939246261263,
                       -0.0140184391377962],
                      [0.011281212888768, 0.0130726943624117,
                       0.1269490939468611, -0.0140184391377962,
                       0.0316733013820852]]),
        )

    # Pearson's Data
    # K. Pearson, Philosophical Magazine, 2, 559 (1901)

    def pearson_fcn(self, B, x):
        return B[0] + B[1]*x

    def test_pearson(self):
        p_x = np.array([0., .9, 1.8, 2.6, 3.3, 4.4, 5.2, 6.1, 6.5, 7.4])
        p_y = np.array([5.9, 5.4, 4.4, 4.6, 3.5, 3.7, 2.8, 2.8, 2.4, 1.5])
        p_sx = np.array([.03, .03, .04, .035, .07, .11, .13, .22, .74, 1.])
        p_sy = np.array([1., .74, .5, .35, .22, .22, .12, .12, .1, .04])

        p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)

        # Reverse the data to test invariance of results
        pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)

        p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))

        p_odr = ODR(p_dat, p_mod, beta0=[1., 1.])
        pr_odr = ODR(pr_dat, p_mod, beta0=[1., 1.])

        out = p_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([5.4767400299231674, -0.4796082367610305]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.3590121690702467, 0.0706291186037444]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0854275622946333, -0.0161807025443155],
                      [-0.0161807025443155, 0.003306337993922]]),
        )

        rout = pr_odr.run()
        assert_array_almost_equal(
            rout.beta,
            np.array([11.4192022410781231, -2.0850374506165474]),
        )
        assert_array_almost_equal(
            rout.sd_beta,
            np.array([0.9820231665657161, 0.3070515616198911]),
        )
        assert_array_almost_equal(
            rout.cov_beta,
            np.array([[0.6391799462548782, -0.1955657291119177],
                      [-0.1955657291119177, 0.0624888159223392]]),
        )

    # Lorentz Peak
    # The data is taken from one of the undergraduate physics labs I
    # performed.

    def lorentz(self, beta, x):
        return (beta[0]*beta[1]*beta[2] /
                np.sqrt(np.power(x*x - beta[2]*beta[2], 2.0) +
                        np.power(beta[1]*x, 2.0)))

    def test_lorentz(self):
        l_sy = np.array([.29]*18)
        l_sx = np.array([.000972971, .000948268, .000707632, .000706679,
                         .000706074, .000703918, .000698955, .000456856,
                         .000455207, .000662717, .000654619, .000652694,
                         .000000859202, .00106589, .00106378, .00125483,
                         .00140818, .00241839])

        l_dat = RealData(
            [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
             3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
             3.6562, 3.62498, 3.55525, 3.41886],
            [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
             957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
            sx=l_sx,
            sy=l_sy,
        )
        l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
        l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))

        out = l_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
                      3.7798193600109009e+00]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
                      2.4451209281408992e-04]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
                       -3.1236953270424990e-05],
                      [-6.9067261911110836e-05, 5.6077531517333009e-08,
                       3.6133261832722601e-08],
                      [-3.1236953270424990e-05, 3.6133261832722601e-08,
                       2.7261220025171730e-08]]),
        )

    def test_ticket_1253(self):
        def linear(c, x):
            return c[0]*x + c[1]

        c = [2.0, 3.0]
        x = np.linspace(0, 10)
        y = linear(c, x)

        model = Model(linear)
        data = Data(x, y, wd=1.0, we=1.0)
        job = ODR(data, model, beta0=[1.0, 1.0])
        result = job.run()
        assert_equal(result.info, 2)

    # Verify fix for gh-9140

    def test_ifixx(self):
        x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
        x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
        fix = np.vstack((np.zeros_like(x1, dtype=int),
                         np.ones_like(x2, dtype=int)))
        data = Data(np.vstack((x1, x2)), y=1, fix=fix)
        model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2.,
                      implicit=True)

        odr1 = ODR(data, model, beta0=np.array([1.]))
        sol1 = odr1.run()
        odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
        sol2 = odr2.run()
        assert_equal(sol1.beta, sol2.beta)

    # verify bugfix for #11800 in #11802

    def test_ticket_11800(self):
        # parameters
        beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
        nr_measurements = 10

        std_dev_x = 0.01
        x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
                             -0.01624845, 0.00482652, 0.00275988, -0.00714734,
                             -0.00929201, -0.00687301],
                            [-0.00831623, -0.00821211, -0.00203459,
                             0.00938266, -0.00701829, 0.0032169, 0.00259194,
                             -0.00581017, -0.0030283, 0.01014164]])

        std_dev_y = 0.05
        y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
                             0.04745194, 0.03806645, 0.07061601, -0.00753604,
                             -0.02592543, -0.02394929],
                            [0.03632366, 0.06642266, 0.08373122, 0.03988822,
                             -0.0092536, -0.03750469, -0.03198903, 0.01642066,
                             0.01293648, -0.05627085]])

        beta_solution = np.array([
            2.62920235756665876536e+00, -1.26608484996299608838e+02,
            1.29703572775403074502e+02, -1.88560985401185465804e+00,
            7.83834160771274923718e+01, -7.64124076838087091801e+01])

        # model's function and Jacobians
        def func(beta, x):
            y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
            y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]
            return np.vstack((y0, y1))

        def df_dbeta_odr(beta, x):
            nr_meas = np.shape(x)[1]
            zeros = np.zeros(nr_meas)
            ones = np.ones(nr_meas)
            dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
            dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])
            return np.stack((dy0, dy1))

        def df_dx_odr(beta, x):
            nr_meas = np.shape(x)[1]
            ones = np.ones(nr_meas)
            dy0 = np.array([beta[1] * ones, beta[2] * ones])
            dy1 = np.array([beta[4] * ones, beta[5] * ones])
            return np.stack((dy0, dy1))

        # do measurements with errors in independent and dependent variables
        x0_true = np.linspace(1, 10, nr_measurements)
        x1_true = np.linspace(1, 10, nr_measurements)
        x_true = np.array([x0_true, x1_true])

        y_true = func(beta_true, x_true)

        x_meas = x_true + x_error
        y_meas = y_true + y_error

        # estimate model's parameters
        model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)
        data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)

        odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
        # odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
        odr_obj.set_job(deriv=3)

        odr_out = odr_obj.run()

        # check results
        assert_equal(odr_out.info, 1)
        assert_array_almost_equal(odr_out.beta, beta_solution)

    def test_multilinear_model(self):
        x = np.linspace(0.0, 5.0)
        y = 10.0 + 5.0 * x
        data = Data(x, y)
        odr_obj = ODR(data, multilinear)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [10.0, 5.0])

    def test_exponential_model(self):
        x = np.linspace(0.0, 5.0)
        y = -10.0 + np.exp(0.5*x)
        data = Data(x, y)
        odr_obj = ODR(data, exponential)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [-10.0, 0.5])

    def test_polynomial_model(self):
        x = np.linspace(0.0, 5.0)
        y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
        poly_model = polynomial(3)
        data = Data(x, y)
        odr_obj = ODR(data, poly_model)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])

    def test_unilinear_model(self):
        x = np.linspace(0.0, 5.0)
        y = 1.0 * x + 2.0
        data = Data(x, y)
        odr_obj = ODR(data, unilinear)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [1.0, 2.0])

    def test_quadratic_model(self):
        x = np.linspace(0.0, 5.0)
        y = 1.0 * x ** 2 + 2.0 * x + 3.0
        data = Data(x, y)
        odr_obj = ODR(data, quadratic)
        output = odr_obj.run()
        assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
```
license: bsd-3-clause
hash: -4,048,994,963,251,669,000
line_mean: 36.664557
line_max: 98
alpha_frac: 0.522545
autogenerated: false
repo_name: openstack/watcher-dashboard
path: watcher_dashboard/content/audit_templates/views.py
copies: 1
size: 4844
content:
```python
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging

from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _

import horizon.exceptions
from horizon import forms
import horizon.tables
import horizon.tabs
import horizon.workflows

from watcher_dashboard.api import watcher
from watcher_dashboard.content.audit_templates import forms as wforms
from watcher_dashboard.content.audit_templates import tables
from watcher_dashboard.content.audit_templates import tabs as wtabs

LOG = logging.getLogger(__name__)


class IndexView(horizon.tables.DataTableView):
    table_class = tables.AuditTemplatesTable
    template_name = 'infra_optim/audit_templates/index.html'
    page_title = _("Audit Templates")

    def get_context_data(self, **kwargs):
        context = super(IndexView, self).get_context_data(**kwargs)
        context['audit_templates_count'] = self.get_count()
        return context

    def get_data(self):
        audit_templates = []
        search_opts = self.get_filters()
        try:
            audit_templates = watcher.AuditTemplate.list(
                self.request, **search_opts)
        except Exception as exc:
            LOG.exception(exc)
            horizon.exceptions.handle(
                self.request,
                _("Unable to retrieve audit template information."))

        return audit_templates

    def get_count(self):
        return len(self.get_data())

    def get_filters(self):
        filters = {}
        filter_action = self.table._meta._filter_action
        if filter_action:
            filter_field = self.table.get_filter_field()
            if filter_action.is_api_filter(filter_field):
                filter_string = self.table.get_filter_string()
                if filter_field and filter_string:
                    filters[filter_field] = filter_string
        return filters


class CreateView(forms.ModalFormView):
    form_class = wforms.CreateForm
    form_id = "create_audit_templates_form"
    modal_header = _("Create Audit Template")
    template_name = 'infra_optim/audit_templates/create.html'
    success_url = reverse_lazy("horizon:admin:audit_templates:index")
    page_title = _("Create an Audit Template")
    submit_label = _("Create Audit Template")
    submit_url = reverse_lazy("horizon:admin:audit_templates:create")

    def get_object_id(self, obj):
        return obj.uuid


class DetailView(horizon.tabs.TabbedTableView):
    tab_group_class = wtabs.AuditTemplateDetailTabs
    template_name = 'infra_optim/audit_templates/details.html'
    redirect_url = 'horizon:admin:audit_templates:index'
    page_title = _("Audit Template Details: {{ audit_template.name }}")

    def _get_data(self):
        audit_template_uuid = None
        try:
            LOG.info(self.kwargs)
            audit_template_uuid = self.kwargs['audit_template_uuid']
            audit_template = watcher.AuditTemplate.get(
                self.request, audit_template_uuid)
            if audit_template.scope:
                audit_template.scope = json.dumps(audit_template.scope)
        except Exception as exc:
            LOG.exception(exc)
            msg = _('Unable to retrieve details for audit template "%s".') \
                % audit_template_uuid
            horizon.exceptions.handle(
                self.request, msg,
                redirect=self.redirect_url)
        return audit_template

    def get_related_audits_data(self):
        try:
            audit_template = self._get_data()
            audits = watcher.Audit.list(
                self.request, audit_template=audit_template.uuid)
        except Exception as exc:
            LOG.exception(exc)
            audits = []
            msg = _('Audits list cannot be retrieved.')
            horizon.exceptions.handle(self.request, msg)
        return audits

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        audit_template = self._get_data()
        context["audit_template"] = audit_template
        return context

    def get_tabs(self, request, *args, **kwargs):
        audit_template = self._get_data()
        # ports = self._get_ports()
        return self.tab_group_class(
            request, audit_template=audit_template, **kwargs)
```
license: apache-2.0
hash: -4,823,441,813,257,277,000
line_mean: 35.149254
line_max: 76
alpha_frac: 0.651941
autogenerated: false
Robpol86/general
tests/convert_music/test_find_empty_dirs.py
1
2409
from convert_music import find_empty_dirs def test_no_dirs(tmpdir): """Test when mp3 dir has nothing at all in it.""" mp3_dir = tmpdir.mkdir('mp3') expected = [] actual = find_empty_dirs(str(mp3_dir.realpath())) assert expected == actual def test_files_only(tmpdir): """Test when mp3 dir just files in it.""" mp3_dir = tmpdir.mkdir('mp3') expected = [] mp3_dir.join('file01').ensure(file=True) mp3_dir.join('file02').ensure(file=True) actual = find_empty_dirs(str(mp3_dir.realpath())) assert expected == actual def test_no_empty_dirs(tmpdir): """Test when mp3 dir has non-empty dirs in it.""" mp3_dir = tmpdir.mkdir('mp3') expected = [] mp3_dir.mkdir('dir01').join('file01').ensure(file=True) mp3_dir.mkdir('dir02').join('file02').ensure(file=True) actual = find_empty_dirs(str(mp3_dir.realpath())) assert expected == actual def test_dirs_of_empty_dirs(tmpdir): """Test to make sure empty subdirectories are detected.""" mp3_dir = tmpdir.mkdir('mp3') # Generate. dir01 = str(mp3_dir.join('dir01').ensure(dir=True).realpath()) dir02 = str(mp3_dir.join('dir02').ensure(dir=True).realpath()) dir03 = str(mp3_dir.join('dir02').join('dir03').ensure(dir=True).realpath()) dir04 = str(mp3_dir.join('dir04').ensure(dir=True).realpath()) expected = [dir04, dir03, dir02, dir01] # Test. actual = find_empty_dirs(str(mp3_dir.realpath())) assert expected == actual def test_some_empty_dirs(tmpdir): """Test when a directory has an empty sub directory, and another subdirectory with a file somewhere in it.""" mp3_dir = tmpdir.mkdir('mp3') # Long empty dir path. dir01 = str(mp3_dir.join('dir01').ensure(dir=True).realpath()) dir02 = str(mp3_dir.join('dir01').join('dir02').ensure(dir=True).realpath()) dir03 = str(mp3_dir.join('dir01').join('dir02').join('dir03').ensure(dir=True).realpath()) dir04 = str(mp3_dir.join('dir01').join('dir02').join('dir03').join('dir04').ensure(dir=True).realpath()) expected = [dir04, dir03, dir02, dir01] # Dir with a file somewhere in it. dir06 = str(mp3_dir.mkdir('dir05').join('dir06').ensure(dir=True).realpath()) mp3_dir.join('dir05').mkdir('dir07').join('file01').ensure(file=True) expected.insert(0, dir06) # Test. actual = find_empty_dirs(str(mp3_dir.realpath())) assert expected == actual
mit
-5,558,701,239,900,072,000
38.491803
113
0.651723
false
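These tests pin down both the membership rule (a directory counts as empty when no file exists anywhere beneath it) and the ordering (reverse-sorted paths, so children precede their parents and can be deleted first). A minimal candidate implementation that satisfies them is sketched below; convert_music's real implementation may differ.

import os

# Minimal sketch satisfying the tests above; the real convert_music
# implementation may differ.
def find_empty_dirs(root):
    empty = []
    for dirpath, dirnames, filenames in os.walk(root):
        if dirpath == root:
            continue  # the root itself is never reported
        # "Empty" means no file anywhere in this directory's subtree.
        if not any(files for _, _, files in os.walk(dirpath)):
            empty.append(dirpath)
    # Reverse-sorted so deeper directories come before their parents,
    # matching expected = [dir04, dir03, dir02, dir01] above.
    return sorted(empty, reverse=True)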
neubot/neubot-client
neubot/negotiate/server_speedtest.py
2
3664
# neubot/negotiate/server_speedtest.py # # Copyright (c) 2011 Simone Basso <bassosimone@gmail.com>, # NEXA Center for Internet & Society at Politecnico di Torino # # This file is part of Neubot <http://www.neubot.org/>. # # Neubot is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Neubot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Neubot. If not, see <http://www.gnu.org/licenses/>. # ''' Speedtest negotiator ''' import logging from neubot.negotiate.server import NegotiateServerModule from neubot.backend import BACKEND from neubot import privacy class NegotiateServerSpeedtest(NegotiateServerModule): ''' Negotiator for Speedtest ''' def __init__(self): ''' Initialize Speedtest negotiator ''' NegotiateServerModule.__init__(self) self.clients = set() def unchoke(self, stream, request_body): ''' Invoked when we must unchoke a session ''' ident = str(hash(stream)) if ident not in self.clients: # Create record for this session self.clients.add(ident) stream.atclose(self._update_clients) return {'authorization': ident} else: raise RuntimeError('Multiple unchoke') def collect_legacy(self, stream, request_body, request): ''' Invoked when we must save the result of a session ''' ident = str(hash(stream)) if ident not in self.clients: # # Before Neubot 0.4.2 we were using multiple connections # for speedtest, which were used both for testing and for # negotiating/collecting. Sometimes the connection used # to collect is not the one used to negotiate: the code # uses the one that terminates the upload first. # When this happens we inspect the Authorization header # before deciding the collect request is an abuse. # authorization = request['Authorization'] if authorization not in self.clients: raise RuntimeError('Not authorized to collect') else: logging.warning('speedtest: working around multiple conns ' 'issue') ident = authorization # Note: no more than one collect per session self.clients.remove(ident) # # Backward compatibility: the variable name changed from # can_share to can_publish after Neubot 0.4.5 # if 'privacy_can_share' in request_body: request_body['privacy_can_publish'] = request_body[ 'privacy_can_share'] del request_body['privacy_can_share'] if privacy.collect_allowed(request_body): BACKEND.speedtest_store(request_body) else: logging.warning('* bad privacy settings: %s', str(stream)) return {} # Note: if collect is successful ident is not in self.clients def _update_clients(self, stream, ignored): ''' Invoked when a session has been closed ''' ident = str(hash(stream)) if ident in self.clients: self.clients.remove(ident) NEGOTIATE_SERVER_SPEEDTEST = NegotiateServerSpeedtest()
gpl-3.0
7,397,734,455,039,038,000
36.010101
75
0.644378
false
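The unchoke/collect pairing above is easiest to see with the stream plumbing stripped away. Below is a distilled, hypothetical sketch of just the ident bookkeeping: plain string idents stand in for str(hash(stream)), and the backend/privacy calls are omitted.

# Distilled sketch of the session bookkeeping above (hypothetical: plain
# strings stand in for stream hashes; backend and privacy logic omitted).
class SessionTracker(object):
    def __init__(self):
        self.clients = set()

    def unchoke(self, ident):
        if ident in self.clients:
            raise RuntimeError('Multiple unchoke')
        self.clients.add(ident)
        return {'authorization': ident}

    def collect(self, ident, authorization):
        if ident not in self.clients:
            # The pre-0.4.2 workaround: trust the Authorization header
            # when the collecting connection differs from the
            # negotiating one.
            if authorization not in self.clients:
                raise RuntimeError('Not authorized to collect')
            ident = authorization
        self.clients.remove(ident)  # no more than one collect per session

tracker = SessionTracker()
auth = tracker.unchoke('conn-1')['authorization']
tracker.collect('conn-2', auth)  # different conn, same session: allowed once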
takaakiaoki/PyFoam
PyFoam/Applications/APoMaFoXiiQt.py
3
10853
""" Application-class that implements pyFoamAPoMaFoX.py (A Poor Man's FoamX) """ from os import path import sys from PyFoam.Applications.PyFoamApplication import PyFoamApplication from PyFoam.Applications.CaseBuilderBackend import CaseBuilderFile,CaseBuilderDescriptionList from PyFoam.Applications.CommonCaseBuilder import CommonCaseBuilder from PyFoam import configuration as config from PyFoam.Error import error,warning from PyFoam.ThirdParty.six import print_ try: import PyQt4 except ImportError: error("This application needs an installed PyQt4-library") from PyQt4 import QtCore, QtGui class APoMaFoXiiQt(PyFoamApplication, CommonCaseBuilder): def __init__(self,args=None): description="""\ APoMaFoX is "A Poor Mans FoamX". A small text interface to the CaseBuilder-Functionality """ PyFoamApplication.__init__(self, args=args, description=description, usage="%prog <caseBuilderFile>", interspersed=True, nr=0, exactNr=False) def addOptions(self): CommonCaseBuilder.addOptions(self) def run(self): if self.pathInfo(): return app = QtGui.QApplication(self.parser.getArgs()) fName=None if len(self.parser.getArgs())==0: dialog=CaseBuilderBrowser() if len(dialog.descriptions)==1: fName=dialog.descriptions[0][1] self.warning("Automatically choosing the only description",fName) elif len(self.parser.getArgs())==1: fName=self.searchDescriptionFile(self.parser.getArgs()[0]) if not path.exists(fName): error("The description file",fName,"does not exist") else: error("Too many arguments") if fName!=None: dialog=CaseBuilderDialog(fName) dialog.show() sys.exit(app.exec_()) class ComboWrapper(QtGui.QComboBox): def __init__(self): super(ComboWrapper,self).__init__() def text(self): return str(self.currentText()) class FilenameWrapper(QtGui.QWidget): def __init__(self,parent=None): super(FilenameWrapper,self).__init__(parent) layout=QtGui.QHBoxLayout() self.name=QtGui.QLineEdit() layout.addWidget(self.name) button=QtGui.QPushButton("File ...") layout.addWidget(button) self.connect(button,QtCore.SIGNAL("clicked()"),self.pushed) self.setLayout(layout) def pushed(self): try: theDir=path.dirname(self.text()) except AttributeError: theDir=path.abspath(path.curdir) fName=QtGui.QFileDialog.getOpenFileName(self, # parent "Select File", # caption theDir) if fName!="": self.setText(str(fName)) return False def setText(self,txt): self.name.setText(txt) def text(self): return path.abspath(str(self.name.text())) class CaseBuilderQt(QtGui.QDialog): """The common denominator for the windows""" def __init__(self,parent=None): super(CaseBuilderQt,self).__init__(parent) self.status=None def setStatus(self,text): print_(text) if not self.status: self.status=QtGui.QStatusBar(self) self.layout().addWidget(self.status) self.status.showMessage(text) class CaseBuilderBrowser(CaseBuilderQt): """A browser of all the available descriptions""" def __init__(self): CaseBuilderQt.__init__(self) self.descriptions=CaseBuilderDescriptionList() if len(self.descriptions)==0: error("No description-files (.pfcb) found in path",config().get("CaseBuilder","descriptionpath")) mainLayout = QtGui.QVBoxLayout() self.setLayout(mainLayout) self.descriptsList = QtGui.QListWidget() self.descriptsList.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) mainLayout.addWidget(self.descriptsList) self.itemlist=[] for d in self.descriptions: item=QtGui.QListWidgetItem(d[2]) item.setToolTip(d[3]) self.descriptsList.addItem(item) self.itemlist.append((item,d)) buttons=QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Cancel) mainLayout.addWidget(buttons) 
selectButton=QtGui.QPushButton("Select") selectButton.setToolTip("Select the case description that we want to work with") buttons.addButton(selectButton,QtGui.QDialogButtonBox.AcceptRole) try: buttons.accepted.connect(self.selectPressed) buttons.rejected.connect(self.reject) except AttributeError: warning("Old QT-version where QDialogButtonBox doesn't have the accepted/rejected-attributes") self.connect(buttons,QtCore.SIGNAL("accepted()"),self.selectPressed) self.connect(buttons,QtCore.SIGNAL("rejected()"),self.reject) def selectPressed(self): self.setStatus("Pressed selected") selected=self.descriptsList.selectedItems() if len(selected)!=1: self.setStatus("Nothing selected") return desc=None for it,d in self.itemlist: if it==selected[0]: desc=d break if desc==None: self.setStatus("Did not find the selection") return self.setStatus("") sub=CaseBuilderDialog(desc[1],parent=self) sub.show() class CaseBuilderDialog(CaseBuilderQt): """A dialog for a CaswBuilder-dialog""" def __init__(self,fName,parent=None): CaseBuilderQt.__init__(self,parent=parent) self.desc=CaseBuilderFile(fName) # print_("Read case description",self.desc.name()) mainLayout = QtGui.QVBoxLayout() self.setLayout(mainLayout) mainLayout.addWidget(QtGui.QLabel("Builder Template: " + self.desc.name() +"\n"+self.desc.description())) mainLayout.addWidget(QtGui.QLabel("Data Template: " + self.desc.templatePath())) try: caseLayout=QtGui.QFormLayout() except AttributeError: warning("Qt-version without QFormLayout") caseLayout=QtGui.QVBoxLayout() mainLayout.addLayout(caseLayout) self.caseName=QtGui.QLineEdit() self.caseName.setToolTip("The name under which the case will be saved") try: caseLayout.addRow("Case name",self.caseName) except AttributeError: caseLayout.addWidget(QtGui.QLabel("Case name")) caseLayout.addWidget(self.caseName) args=self.desc.arguments() mLen=max(*list(map(len,args))) aDesc=self.desc.argumentDescriptions() aDef=self.desc.argumentDefaults() allArgs=self.desc.argumentDict() self.argfields={} groups=[None]+self.desc.argumentGroups() gDesc=self.desc.argumentGroupDescription() theGroupTabs=QtGui.QTabWidget() mainLayout.addWidget(theGroupTabs) for g in groups: if g==None: name="Default" desc="All the arguments that did not fit into another group" else: name=g desc=gDesc[g] gWidget=QtGui.QWidget() try: gLayout=QtGui.QFormLayout() except AttributeError: gLayout=QtGui.QVBoxLayout() gWidget.setLayout(gLayout) idx=theGroupTabs.addTab(gWidget,name) theGroupTabs.setTabToolTip(idx,desc) for a in self.desc.groupArguments(g): theType=allArgs[a].type if theType=="file": print_("File",a) aWidget=FilenameWrapper(self) aWidget.setText(aDef[a]) elif theType=="selection": aWidget=ComboWrapper() aWidget.addItems(allArgs[a].values) aWidget.setCurrentIndex(allArgs[a].values.index(aDef[a])) else: aWidget=QtGui.QLineEdit() aWidget.setText(aDef[a]) aWidget.setToolTip(aDesc[a]) self.argfields[a]=aWidget try: gLayout.addRow(a,aWidget) except AttributeError: gLayout.addWidget(QtGui.QLabel(a)) gLayout.addWidget(aWidget) bottomLayout=QtGui.QHBoxLayout() mainLayout.addLayout(bottomLayout) self.noClose=QtGui.QCheckBox("Don't close") self.noClose.setToolTip("Do not close after 'Generate'") bottomLayout.addWidget(self.noClose) buttons=QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Cancel) bottomLayout.addWidget(buttons) generateButton=QtGui.QPushButton("Generate") generateButton.setToolTip("Copy the template case and modify it according to the settings") buttons.addButton(generateButton,QtGui.QDialogButtonBox.AcceptRole) try: 
buttons.accepted.connect(self.generatePressed) buttons.rejected.connect(self.reject) except AttributeError: self.connect(buttons,QtCore.SIGNAL("accepted()"),self.generatePressed) self.connect(buttons,QtCore.SIGNAL("rejected()"),self.reject) def generatePressed(self): self.setStatus("Pressed generate") ok=False caseName=str(self.caseName.text()) if len(caseName)==0: self.setStatus("Casename empty") return if path.exists(caseName): self.setStatus("Directory "+caseName+" already existing") return self.setStatus("Generating the case "+caseName) args={} for i,a in enumerate(self.desc.arguments()): args[a]=str(self.argfields[a].text()) if len(args[a])==0: self.setStatus("No argument "+a+" was given") return msg=self.desc.verifyArguments(args) if msg: self.setStatus(msg) return self.setStatus("With the arguments "+str(args)) self.desc.buildCase(caseName,args) ok=True if ok: self.setStatus("") if not self.noClose.isChecked(): self.accept() else: self.setStatus("Generated "+caseName) # Should work with Python3 and Python2
gpl-2.0
7,947,026,444,969,829,000
32.915625
109
0.592186
false
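PyFoam application classes typically execute when constructed, so launching the Qt case builder from Python should reduce to something like the sketch below. PyQt4 must be installed, and 'myCase.pfcb' is a placeholder for a real CaseBuilder description file.

# Hypothetical launch sketch; assumes PyQt4 is installed and 'myCase.pfcb'
# is a CaseBuilder description reachable on the search path.
from PyFoam.Applications.APoMaFoXiiQt import APoMaFoXiiQt

APoMaFoXiiQt(args=["myCase.pfcb"])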
KanoComputing/make-snake
snake-editor/stage.py
1
1596
#!/usr/bin/env python # stage.py # # Copyright (C) 2014 Kano Computing Ltd. # License: http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2 # import console import math import time import os from gtk import gdk from kano.window import _get_window_by_child_pid, gdk_window_settings def init(): global size, width, height, padding, boundaries, chosen_theme, resolution # Get containing terminal window and set it to maximised pid = os.getpid() win = _get_window_by_child_pid(pid) gdk_window_settings(win, maximized=True) time.sleep(0.1) available_size = (width, height) = console.getTerminalSize() # Check for screen resolution resolution = gdk.screen_height() # Select a set of sizes depending on the screen resolution if resolution > 768: chosen_size = (20, 15) else: chosen_size = (10, 5) # Calculate width if chosen_size[0] > available_size[0] / 2: width = available_size[0] / 2 else: width = chosen_size[0] # Calculate height if chosen_size[1] > available_size[1]: height = available_size[1] else: height = chosen_size[1] size = (width, height) padding_x = int(math.floor(available_size[0] - width) / 4) padding_y = int(math.floor(available_size[1] - height) / 2) padding = (padding_y, padding_x, padding_y, padding_x) boundaries = { "bottom": int(math.floor(height / 2)), "left": int(math.floor(-width / 2)), "right": int(math.floor(width / 2)), "top": int(math.floor(-height / 2)), }
gpl-2.0
-1,772,000,586,387,063,000
25.6
80
0.631579
false
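The sizing arithmetic in init() is easier to check with concrete numbers. Below is a standalone rerun of just the math, under assumed inputs: an 80x24 terminal on a screen taller than 768 px, so chosen_size == (20, 15) and both dimensions fit.

import math

# Standalone rerun of the sizing math above with assumed inputs
# (80x24 terminal, resolution > 768, so chosen_size == (20, 15)).
available = (80, 24)
width, height = 20, 15          # both fit, so chosen_size wins
padding_x = int(math.floor((available[0] - width) / 4))   # 15
padding_y = int(math.floor((available[1] - height) / 2))  # 4
padding = (padding_y, padding_x, padding_y, padding_x)    # (4, 15, 4, 15)
boundaries = {
    "bottom": int(math.floor(height / 2)),   # 7
    "left": int(math.floor(-width / 2)),     # -10
    "right": int(math.floor(width / 2)),     # 10
    "top": int(math.floor(-height / 2)),     # -8
}
print(padding, boundaries)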
ABaldwinHunter/django-clone-classic
django/contrib/gis/geos/libgeos.py
6
6956
""" This module houses the ctypes initialization procedures, as well as the notice and error handler function callbacks (get called when an error occurs in GEOS). This module also houses GEOS Pointer utilities, including get_pointer_arr(), and GEOM_PTR. """ import logging import os import re import threading from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p from ctypes.util import find_library from django.contrib.gis.geos.error import GEOSException from django.core.exceptions import ImproperlyConfigured from django.utils.functional import SimpleLazyObject, cached_property logger = logging.getLogger('django.contrib.gis') def load_geos(): # Custom library path set? try: from django.conf import settings lib_path = settings.GEOS_LIBRARY_PATH except (AttributeError, EnvironmentError, ImportError, ImproperlyConfigured): lib_path = None # Setting the appropriate names for the GEOS-C library. if lib_path: lib_names = None elif os.name == 'nt': # Windows NT libraries lib_names = ['geos_c', 'libgeos_c-1'] elif os.name == 'posix': # *NIX libraries lib_names = ['geos_c', 'GEOS'] else: raise ImportError('Unsupported OS "%s"' % os.name) # Using the ctypes `find_library` utility to find the path to the GEOS # shared library. This is better than manually specifying each library name # and extension (e.g., libgeos_c.[so|so.1|dylib].). if lib_names: for lib_name in lib_names: lib_path = find_library(lib_name) if lib_path is not None: break # No GEOS library could be found. if lib_path is None: raise ImportError( 'Could not find the GEOS library (tried "%s"). ' 'Try setting GEOS_LIBRARY_PATH in your settings.' % '", "'.join(lib_names) ) # Getting the GEOS C library. The C interface (CDLL) is used for # both *NIX and Windows. # See the GEOS C API source code for more details on the library function calls: # http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html _lgeos = CDLL(lib_path) # Here we set up the prototypes for the initGEOS_r and finishGEOS_r # routines. These functions aren't actually called until they are # attached to a GEOS context handle. _lgeos.initGEOS_r.restype = CONTEXT_PTR _lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR] # Ensures compatibility across 32 and 64-bit platforms. _lgeos.GEOSversion.restype = c_char_p return _lgeos # The notice and error handler C function callback definitions. # Supposed to mimic the GEOS message handler (C below): # typedef void (*GEOSMessageHandler)(const char *fmt, ...); NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p) def notice_h(fmt, lst): fmt, lst = fmt.decode(), lst.decode() try: warn_msg = fmt % lst except TypeError: warn_msg = fmt logger.warning('GEOS_NOTICE: %s\n', warn_msg) notice_h = NOTICEFUNC(notice_h) ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p) def error_h(fmt, lst): fmt, lst = fmt.decode(), lst.decode() try: err_msg = fmt % lst except TypeError: err_msg = fmt logger.error('GEOS_ERROR: %s\n', err_msg) error_h = ERRORFUNC(error_h) # #### GEOS Geometry C data structures, and utility functions. #### # Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR class GEOSGeom_t(Structure): pass class GEOSPrepGeom_t(Structure): pass class GEOSCoordSeq_t(Structure): pass class GEOSContextHandle_t(Structure): pass # Pointers to opaque GEOS geometry structures. 
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)


# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
    "Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
    GeomArr = GEOM_PTR * n
    return GeomArr()


lgeos = SimpleLazyObject(load_geos)


class GEOSContextHandle(object):
    def __init__(self):
        # Initializing the context handle for this thread with
        # the notice and error handler.
        self.ptr = lgeos.initGEOS_r(notice_h, error_h)

    def __del__(self):
        if self.ptr and lgeos:
            lgeos.finishGEOS_r(self.ptr)


class GEOSContext(threading.local):
    @cached_property
    def ptr(self):
        # Assign the handle so it will be garbage collected when the
        # thread is finished.
        self.handle = GEOSContextHandle()
        return self.handle.ptr


class GEOSFuncFactory(object):
    """
    Lazy loading of GEOS functions.
    """
    argtypes = None
    restype = None
    errcheck = None
    thread_context = GEOSContext()

    def __init__(self, func_name, *args, **kwargs):
        self.func_name = func_name
        self.restype = kwargs.pop('restype', self.restype)
        self.errcheck = kwargs.pop('errcheck', self.errcheck)
        self.argtypes = kwargs.pop('argtypes', self.argtypes)
        self.args = args
        self.kwargs = kwargs
        self.func = None

    def __call__(self, *args, **kwargs):
        if self.func is None:
            self.func = self.get_func(*self.args, **self.kwargs)

        # Call the threaded GEOS routine with the pointer of the context
        # handle as the first argument.
        return self.func(self.thread_context.ptr, *args)

    def get_func(self, *args, **kwargs):
        # GEOS thread-safe function signatures end with '_r' and
        # take an additional context handle parameter.
        func = getattr(lgeos, self.func_name + '_r')
        func.argtypes = [CONTEXT_PTR] + (self.argtypes or [])
        func.restype = self.restype
        if self.errcheck:
            func.errcheck = self.errcheck
        return func


# Returns the string version of the GEOS library.
geos_version = lambda: lgeos.GEOSversion()

# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
    r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
    r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)


def geos_version_info():
    """
    Returns a dictionary containing the various version metadata parsed from
    the GEOS version string, including the version number, whether the version
    is a release candidate (and what number release candidate), and the C API
    version.
    """
    ver = geos_version().decode()
    m = version_regex.match(ver)
    if not m:
        raise GEOSException('Could not parse version info string "%s"' % ver)
    return {key: m.group(key) for key in (
        'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
bsd-3-clause
3,881,259,770,439,313,000
30.762557
94
0.655549
false
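GEOSFuncFactory resolves the thread-safe '_r' variant lazily and prepends the context handle on every call, so declaring a prototype with it should look roughly like the sketch below. GEOSArea_r is a real GEOS C API routine, but this particular declaration is illustrative, not one Django ships.

# Illustrative prototype built with GEOSFuncFactory; GEOSArea_r exists in
# the GEOS C API, but this exact declaration is an assumption, not the one
# Django defines in its prototype modules.
from ctypes import byref, c_double, c_int

geos_area = GEOSFuncFactory(
    'GEOSArea', argtypes=[GEOM_PTR, POINTER(c_double)], restype=c_int)

# Called as geos_area(some_geom_ptr, byref(result)); the factory injects
# the calling thread's context handle as the hidden first argument.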
chrislit/abydos
abydos/phonetic/_phonex.py
1
5895
# Copyright 2014-2020 by Christopher C. Little. # This file is part of Abydos. # # Abydos is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Abydos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Abydos. If not, see <http://www.gnu.org/licenses/>. """abydos.phonetic._phonex. Phonex """ from unicodedata import normalize as unicode_normalize from ._phonetic import _Phonetic __all__ = ['Phonex'] class Phonex(_Phonetic): """Phonex code. Phonex is an algorithm derived from Soundex, defined in :cite:`Lait:1996`. .. versionadded:: 0.3.6 """ _alphabetic = dict(zip((ord(_) for _ in '123456'), 'PSTLNR')) def __init__(self, max_length: int = 4, zero_pad: bool = True) -> None: """Initialize Phonex instance. Parameters ---------- max_length : int The length of the code returned (defaults to 4) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string .. versionadded:: 0.4.0 """ # Clamp max_length to [4, 64] if max_length != -1: self._max_length = min(max(4, max_length), 64) else: self._max_length = 64 self._zero_pad = zero_pad def encode_alpha(self, word: str) -> str: """Return the alphabetic Phonex code for a word. Parameters ---------- word : str The word to transform Returns ------- str The alphabetic Phonex value Examples -------- >>> pe = Phonex() >>> pe.encode_alpha('Christopher') 'CRST' >>> pe.encode_alpha('Niall') 'NL' >>> pe.encode_alpha('Smith') 'SNT' >>> pe.encode_alpha('Schmidt') 'SSNT' .. versionadded:: 0.4.0 """ code = self.encode(word).rstrip('0') return code[:1] + code[1:].translate(self._alphabetic) def encode(self, word: str) -> str: """Return the Phonex code for a word. Parameters ---------- word : str The word to transform Returns ------- str The Phonex value Examples -------- >>> pe = Phonex() >>> pe.encode('Christopher') 'C623' >>> pe.encode('Niall') 'N400' >>> pe.encode('Schmidt') 'S253' >>> pe.encode('Smith') 'S530' .. versionadded:: 0.1.0 .. versionchanged:: 0.3.6 Encapsulated in class """ name = unicode_normalize('NFKD', word.upper()) name_code = last = '' # Deletions effected by replacing with next letter which # will be ignored due to duplicate handling of Soundex code. # This is faster than 'moving' all subsequent letters. # Remove any trailing Ss while name[-1:] == 'S': name = name[:-1] # Phonetic equivalents of first 2 characters # Works since duplicate letters are ignored if name[:2] == 'KN': name = 'N' + name[2:] # KN.. == N.. elif name[:2] == 'PH': name = 'F' + name[2:] # PH.. == F.. (H ignored anyway) elif name[:2] == 'WR': name = 'R' + name[2:] # WR.. == R.. 
if name: # Special case, ignore H first letter (subsequent Hs ignored # anyway) # Works since duplicate letters are ignored if name[0] == 'H': name = name[1:] if name: # Phonetic equivalents of first character if name[0] in self._uc_vy_set: name = 'A' + name[1:] elif name[0] in {'B', 'P'}: name = 'B' + name[1:] elif name[0] in {'V', 'F'}: name = 'F' + name[1:] elif name[0] in {'C', 'K', 'Q'}: name = 'C' + name[1:] elif name[0] in {'G', 'J'}: name = 'G' + name[1:] elif name[0] in {'S', 'Z'}: name = 'S' + name[1:] name_code = last = name[0] # Modified Soundex code for i in range(1, len(name)): code = '0' if name[i] in {'B', 'F', 'P', 'V'}: code = '1' elif name[i] in {'C', 'G', 'J', 'K', 'Q', 'S', 'X', 'Z'}: code = '2' elif name[i] in {'D', 'T'}: if name[i + 1 : i + 2] != 'C': code = '3' elif name[i] == 'L': if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len( name ): code = '4' elif name[i] in {'M', 'N'}: if name[i + 1 : i + 2] in {'D', 'G'}: name = name[: i + 1] + name[i] + name[i + 2 :] code = '5' elif name[i] == 'R': if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len( name ): code = '6' if code != last and code != '0' and i != 0: name_code += code last = name_code[-1] if self._zero_pad: name_code += '0' * self._max_length if not name_code: name_code = '0' return name_code[: self._max_length] if __name__ == '__main__': import doctest doctest.testmod()
gpl-3.0
6,297,073,751,688,029,000
27.205742
78
0.472943
false
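The docstrings above double as a smoke test; assuming abydos is installed, they reproduce directly:

# Reproduces the docstring examples above (assumes abydos is installed).
from abydos.phonetic import Phonex

pe = Phonex()
assert pe.encode('Christopher') == 'C623'
assert pe.encode('Niall') == 'N400'
assert pe.encode('Smith') == 'S530'
assert pe.encode_alpha('Schmidt') == 'SSNT'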
billiob/papyon
papyon/gnet/io/sock.py
2
3728
# -*- coding: utf-8 -*- # # Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com> # Copyright (C) 2006-2007 Ali Sabil <ali.sabil@gmail.com> # Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # from papyon.gnet.constants import * from papyon.gnet.errors import * from iochannel import GIOChannelClient import gobject import socket __all__ = ['SocketClient'] class SocketClient(GIOChannelClient): """Asynchronous Socket client class. @sort: __init__, open, send, close @undocumented: do_*, _watch_*, __io_*, _connect_done_handler @since: 0.1""" def __init__(self, host, port, domain=AF_INET, type=SOCK_STREAM): GIOChannelClient.__init__(self, host, port, domain, type) def _pre_open(self, sock=None): if sock is None: sock = socket.socket(self._domain, self._type) try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) except AttributeError: pass GIOChannelClient._pre_open(self, sock) def _post_open(self): GIOChannelClient._post_open(self) opts = self._transport.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if opts == 0: self._watch_set_cond(gobject.IO_IN | gobject.IO_PRI | gobject.IO_ERR | gobject.IO_HUP) self._status = IoStatus.OPEN else: self.emit("error", IoConnectionFailed(self, str(opts))) self._status = IoStatus.CLOSED return False def _io_channel_handler(self, chan, cond): if self._status == IoStatus.CLOSED: return False if cond & (gobject.IO_IN | gobject.IO_PRI): buf = "" try: buf = self._channel.read(2048) except gobject.GError: pass if buf == "": self.close() return False self.emit("received", buf, len(buf)) # Check for error/EOF if cond & (gobject.IO_ERR | gobject.IO_HUP): self.close() return False if cond & gobject.IO_OUT: if len(self._outgoing_queue) > 0: # send next item item = self._outgoing_queue[0] # Deal with broken pipe from the socket. try: item.sent(self._channel.write(item.read())) except gobject.GError, err: self.emit("error", IoConnectionFailed(self, str(err))) return True if item.is_complete(): # sent item self.emit("sent", item.buffer, item.size) item.callback() del self._outgoing_queue[0] del item if len(self._outgoing_queue) == 0: self._watch_remove_cond(gobject.IO_OUT) else: self._watch_remove_cond(gobject.IO_OUT) return True gobject.type_register(SocketClient)
gpl-2.0
8,719,866,632,957,982,000
34.150943
77
0.584004
false
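A hypothetical driver for the class above: the 'received'/'error' signal names come from the emit() calls, and open()/send()/close() from the GIOChannelClient base (per the class docstring); the host, payload, and main loop are illustrative.

# Hypothetical driver; signal names and open()/send() come from the code
# above, everything else (host, payload, main loop) is illustrative.
import sys
import gobject

loop = gobject.MainLoop()
client = SocketClient("example.com", 80)
client.connect("received", lambda c, buf, length: sys.stdout.write(buf))
client.connect("error", lambda c, err: loop.quit())
client.open()
client.send("GET / HTTP/1.0\r\n\r\n")
loop.run()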
lixiangning888/whole_project
modules/signatures/antivm_vmware_keys.py
3
1147
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Accuvant, Inc. (bspengler@accuvant.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from lib.cuckoo.common.abstracts import Signature


class VMwareDetectKeys(Signature):
    name = "antivm_vmware_keys"
    description = "Detects VMware systems through registry keys"
    severity = 3
    categories = ["anti-vm"]
    authors = ["Accuvant"]
    minimum = "1.2"

    def run(self):
        return self.check_key(pattern=".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?VMWare,\\ Inc\..*", regex=True)
lgpl-3.0
-5,280,191,833,947,633,000
37.862069
94
0.693878
false
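The doubled backslashes in the signature exist because the pattern is a plain (non-raw) Python string. Written raw and tested standalone it behaves as below; treating check_key's matching as case-insensitive is an assumption here.

import re

# The signature's pattern as a raw string; case-insensitive matching is an
# assumption about how check_key applies it.
pattern = r".*\\SOFTWARE\\(Wow6432Node\\)?VMWare,\ Inc\..*"
keys = [
    r"HKEY_LOCAL_MACHINE\SOFTWARE\VMware, Inc.\VMware Tools",
    r"HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\VMware, Inc.\VMware Tools",
    r"HKEY_LOCAL_MACHINE\SOFTWARE\Oracle\VirtualBox",
]
for key in keys:
    print(key, bool(re.match(pattern, key, re.IGNORECASE)))
# True, True, False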
cyberark-bizdev/ansible
lib/ansible/modules/network/ios/ios_vrf.py
13
16264
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}


DOCUMENTATION = """
---
module: ios_vrf
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the collection of VRF definitions on Cisco IOS devices
description:
  - This module provides declarative management of VRF definitions on
    Cisco IOS devices. It allows playbooks to manage individual or
    the entire VRF collection. It also supports purging VRF definitions from
    the configuration that are not explicitly defined.
extends_documentation_fragment: ios
notes:
  - Tested against IOS 15.6
options:
  vrfs:
    description:
      - The set of VRF definition objects to be configured on the remote
        IOS device. The list entries can either be the VRF name or a hash
        of VRF definitions and attributes. This argument is mutually
        exclusive with the C(name) argument.
  name:
    description:
      - The name of the VRF definition to be managed on the remote IOS
        device. The VRF definition name is an ASCII string name used
        to uniquely identify the VRF. This argument is mutually exclusive
        with the C(vrfs) argument.
  description:
    description:
      - Provides a short description of the VRF definition in the
        current active configuration. The VRF definition value accepts
        alphanumeric characters used to provide additional information
        about the VRF.
  rd:
    description:
      - The router-distinguisher value uniquely identifies the VRF to
        routing processes on the remote IOS system. The RD value takes
        the form of C(A:B) where C(A) and C(B) are both numeric values.
  interfaces:
    description:
      - Identifies the set of interfaces that should be configured in
        the VRF. Interfaces must be routed interfaces in order to be
        placed into a VRF.
  delay:
    description:
      - Time in seconds to wait before checking for the operational state
        on remote device.
    version_added: "2.4"
    default: 10
  purge:
    description:
      - Instructs the module to consider the VRF definition absolute.
        It will remove any previously configured VRFs on the device.
    default: false
  state:
    description:
      - Configures the state of the VRF definition as it relates to the
        device operational configuration. When set to I(present), the
        VRF should be configured in the device active configuration and
        when set to I(absent) the VRF should not be in the device active
        configuration.
    default: present
    choices: ['present', 'absent']
  route_both:
    description:
      - Adds an export and import list of extended route target communities to the VRF.
    version_added: "2.5"
  route_export:
    description:
      - Adds an export list of extended route target communities to the VRF.
    version_added: "2.5"
  route_import:
    description:
      - Adds an import list of extended route target communities to the VRF.
version_added: "2.5" """ EXAMPLES = """ - name: configure a vrf named management ios_vrf: name: management description: oob mgmt vrf interfaces: - Management1 - name: remove a vrf named test ios_vrf: name: test state: absent - name: configure set of VRFs and purge any others ios_vrf: vrfs: - red - blue - green purge: yes - name: Creates a list of import RTs for the VRF with the same parameters ios_vrf: name: test_import rd: 1:100 route_import: - 1:100 - 3:100 - name: Creates a list of export RTs for the VRF with the same parameters ios_vrf: name: test_export rd: 1:100 route_export: - 1:100 - 3:100 - name: Creates a list of import and export route targets for the VRF with the same parameters ios_vrf: name: test_both rd: 1:100 route_both: - 1:100 - 3:100 """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always type: list sample: - vrf definition ansible - description management vrf - rd: 1:100 start: description: The time the job started returned: always type: str sample: "2016-11-16 10:38:15.126146" end: description: The time the job ended returned: always type: str sample: "2016-11-16 10:38:25.595612" delta: description: The time elapsed to perform all operations returned: always type: str sample: "0:00:10.469466" """ import re import time from functools import partial from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import exec_command from ansible.module_utils.network.ios.ios import load_config, get_config from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args from ansible.module_utils.network.common.config import NetworkConfig from ansible.module_utils.six import iteritems def get_interface_type(interface): if interface.upper().startswith('ET'): return 'ethernet' elif interface.upper().startswith('VL'): return 'svi' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('MG'): return 'management' elif interface.upper().startswith('MA'): return 'management' elif interface.upper().startswith('PO'): return 'portchannel' elif interface.upper().startswith('NV'): return 'nve' else: return 'unknown' def add_command_to_vrf(name, cmd, commands): if 'vrf definition %s' % name not in commands: commands.extend([ 'vrf definition %s' % name, 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', ]) commands.append(cmd) def map_obj_to_commands(updates, module): commands = list() state = module.params['state'] # FIXME NOT USED for update in updates: want, have = update def needs_update(want, have, x): return want.get(x) and (want.get(x) != have.get(x)) if want['state'] == 'absent': commands.append('no vrf definition %s' % want['name']) continue if not have.get('state'): commands.extend([ 'vrf definition %s' % want['name'], 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', ]) if needs_update(want, have, 'description'): cmd = 'description %s' % want['description'] add_command_to_vrf(want['name'], cmd, commands) if needs_update(want, have, 'rd'): cmd = 'rd %s' % want['rd'] add_command_to_vrf(want['name'], cmd, commands) if needs_update(want, have, 'route_import'): for route in want['route_import']: cmd = 'route-target import %s' % route add_command_to_vrf(want['name'], cmd, commands) if needs_update(want, have, 'route_export'): for route in want['route_export']: cmd = 'route-target export %s' % route add_command_to_vrf(want['name'], cmd, commands) if needs_update(want, have, 'route_both'): for route in want['route_both']: 
cmd = 'route-target both %s' % route add_command_to_vrf(want['name'], cmd, commands) if want['interfaces'] is not None: # handle the deletes for intf in set(have.get('interfaces', [])).difference(want['interfaces']): commands.extend(['interface %s' % intf, 'no vrf forwarding %s' % want['name']]) # handle the adds for intf in set(want['interfaces']).difference(have.get('interfaces', [])): cfg = get_config(module) configobj = NetworkConfig(indent=1, contents=cfg) children = configobj['interface %s' % intf].children intf_config = '\n'.join(children) commands.extend(['interface %s' % intf, 'vrf forwarding %s' % want['name']]) match = re.search('ip address .+', intf_config, re.M) if match: commands.append(match.group()) return commands def parse_description(configobj, name): cfg = configobj['vrf definition %s' % name] cfg = '\n'.join(cfg.children) match = re.search(r'description (.+)$', cfg, re.M) if match: return match.group(1) def parse_rd(configobj, name): cfg = configobj['vrf definition %s' % name] cfg = '\n'.join(cfg.children) match = re.search(r'rd (.+)$', cfg, re.M) if match: return match.group(1) def parse_interfaces(configobj, name): vrf_cfg = 'vrf forwarding %s' % name interfaces = list() for intf in re.findall('^interface .+', str(configobj), re.M): if vrf_cfg in '\n'.join(configobj[intf].children): interfaces.append(intf.split(' ')[1]) return interfaces def parse_import(configobj, name): cfg = configobj['vrf definition %s' % name] cfg = '\n'.join(cfg.children) matches = re.findall(r'route-target\s+import\s+(.+)', cfg, re.M) return matches def parse_export(configobj, name): cfg = configobj['vrf definition %s' % name] cfg = '\n'.join(cfg.children) matches = re.findall(r'route-target\s+export\s+(.+)', cfg, re.M) return matches def parse_both(configobj, name): matches = list() export_match = parse_export(configobj, name) import_match = parse_import(configobj, name) matches.extend(export_match) matches.extend(import_match) return matches def map_config_to_obj(module): config = get_config(module) configobj = NetworkConfig(indent=1, contents=config) match = re.findall(r'^vrf definition (\S+)', config, re.M) if not match: return list() instances = list() for item in set(match): obj = { 'name': item, 'state': 'present', 'description': parse_description(configobj, item), 'rd': parse_rd(configobj, item), 'interfaces': parse_interfaces(configobj, item), 'route_import': parse_import(configobj, item), 'route_export': parse_export(configobj, item), 'route_both': parse_both(configobj, item) } instances.append(obj) return instances def get_param_value(key, item, module): # if key doesn't exist in the item, get it from module.params if not item.get(key): value = module.params[key] # if key does exist, do a type check on it to validate it else: value_type = module.argument_spec[key].get('type', 'str') type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type] type_checker(item[key]) value = item[key] # validate the param value (if validator func exists) validator = globals().get('validate_%s' % key) if validator: validator(value, module) return value def map_params_to_obj(module): vrfs = module.params.get('vrfs') if not vrfs: if not module.params['name'] and module.params['purge']: return list() elif not module.params['name']: module.fail_json(msg='name is required') collection = [{'name': module.params['name']}] else: collection = list() for item in vrfs: if not isinstance(item, dict): collection.append({'name': item}) elif 'name' not in item: module.fail_json(msg='name is required') else: 
collection.append(item) objects = list() for item in collection: get_value = partial(get_param_value, item=item, module=module) item['description'] = get_value('description') item['rd'] = get_value('rd') item['interfaces'] = get_value('interfaces') item['state'] = get_value('state') item['route_import'] = get_value('route_import') item['route_export'] = get_value('route_export') item['route_both'] = get_value('route_both') objects.append(item) return objects def update_objects(want, have): updates = list() for entry in want: item = next((i for i in have if i['name'] == entry['name']), None) if all((item is None, entry['state'] == 'present')): updates.append((entry, {})) else: for key, value in iteritems(entry): if value: if isinstance(value, list): if sorted(value) != sorted(item[key]): if (entry, item) not in updates: updates.append((entry, item)) elif value != item[key]: if (entry, item) not in updates: updates.append((entry, item)) return updates def check_declarative_intent_params(want, module): if module.params['interfaces']: name = module.params['name'] rc, out, err = exec_command(module, 'show vrf | include {0}'.format(name)) if rc == 0: data = out.strip().split() # data will be empty if the vrf was just added if not data: return vrf = data[0] interface = data[-1] for w in want: if w['name'] == vrf: for i in w['interfaces']: if get_interface_type(i) is not get_interface_type(interface): module.fail_json(msg="Interface %s not configured on vrf %s" % (interface, name)) def main(): """ main entry point for module execution """ argument_spec = dict( vrfs=dict(type='list'), name=dict(), description=dict(), rd=dict(), route_export=dict(type='list'), route_import=dict(type='list'), route_both=dict(type='list'), interfaces=dict(type='list'), delay=dict(default=10, type='int'), purge=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']) ) argument_spec.update(ios_argument_spec) mutually_exclusive = [('name', 'vrfs'), ('route_import', 'route_both'), ('route_export', 'route_both')] module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) result = {'changed': False} warnings = list() check_args(module, warnings) result['warnings'] = warnings want = map_params_to_obj(module) have = map_config_to_obj(module) commands = map_obj_to_commands(update_objects(want, have), module) if module.params['purge']: want_vrfs = [x['name'] for x in want] have_vrfs = [x['name'] for x in have] for item in set(have_vrfs).difference(want_vrfs): cmd = 'no vrf definition %s' % item if cmd not in commands: commands.append(cmd) result['commands'] = commands if commands: if not module.check_mode: load_config(module, commands) result['changed'] = True if result['changed']: time.sleep(module.params['delay']) check_declarative_intent_params(want, module) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
5,526,029,364,138,630,000
31.015748
109
0.606062
false
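Because map_obj_to_commands only reads module.params['state'] (and consults the device config solely when interfaces change), its pure-dict path can be exercised with a stub. A dry-run sketch, not a real Ansible invocation:

# Dry-run sketch of map_obj_to_commands with a stub module; with
# 'interfaces' left at None the device configuration is never consulted.
class StubModule(object):
    params = {'state': 'present'}

want = {'name': 'red', 'state': 'present', 'description': 'engineering',
        'rd': '1:100', 'interfaces': None, 'route_import': None,
        'route_export': None, 'route_both': None}

print(map_obj_to_commands([(want, {})], StubModule()))
# ['vrf definition red', 'address-family ipv4', 'exit',
#  'address-family ipv6', 'exit', 'description engineering', 'rd 1:100']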
oVirt/ovirt-node
src/ovirt/node/setup/core/ping.py
2
4719
#!/usr/bin/python # -*- coding: utf-8 -*- # # ping_page.py - Copyright (C) 2012 Red Hat, Inc. # Written by Fabian Deutsch <fabiand@redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. A copy of the GNU General Public License is # also available at http://www.gnu.org/copyleft/gpl.html. from ovirt.node import plugins, valid, ui from ovirt.node.utils import process import network_page import threading """ A ping tool page """ class Plugin(plugins.NodePlugin): _model = None def name(self): return _("Networking/Ping") def rank(self): return 999 def has_ui(self): return False def model(self): """Returns the model of this plugin This is expected to parse files and all stuff to build up the model. """ if not self._model: self._model = { # The target address "ping.address": "127.0.0.1", "ping.count": "3", "ping.progress": "0", # The result field "ping.result": "", } return self._model def validators(self): """Validators validate the input on change and give UI feedback """ # The address must be fqdn, ipv4 or ipv6 address return {"ping.address": valid.FQDNOrIPAddress(), "ping.count": valid.Number(bounds=[1, 20]), } def ui_content(self): """Describes the UI this plugin requires This is an ordered list of (path, widget) tuples. 
""" ws = [ui.Header("ping.header", _("Ping a remote host")), ui.Entry("ping.address", _("Address:")), ui.Entry("ping.count", _("Count:")), ui.Divider("divider[1]"), ui.Row("row[0]", [ui.SaveButton("ping.do_ping", _("Ping")), ui.Button("ping.close", _("Close")) ] ), ui.Divider("divider[2]"), ui.Label("ping.result", _("Result:")), ] page = ui.Page("page", ws) page.buttons = [] self.widgets.add(page) return page def on_change(self, changes): """Applies the changes to the plugins model, will do all required logic """ self.logger.debug("New (valid) address: %s" % changes) if "ping.address" in changes: self._model.update(changes) if "ping.count" in changes: self._model.update(changes) def on_merge(self, effective_changes): """Applies the changes to the plugins model, will do all required logic Normally on_merge is called by pushing the SaveButton instance, in this case it is called by on_change """ if "ping.close" in effective_changes: self.application.switch_to_plugin( network_page.Plugin) return elif "ping.address" in self._model: addr = self._model["ping.address"] count = self._model["ping.count"] self.logger.debug("Pinging %s" % addr) cmd = "ping" if valid.IPv6Address().validate(addr): cmd = "ping6" cmd = "%s -c %s %s" % (cmd, count, addr) ping = PingThread(self, cmd, count) ping.start() class PingThread(threading.Thread): def __init__(self, plugin, cmd, count): self.p = plugin self.cmd = cmd self.count = count super(PingThread, self).__init__() def run(self): try: ui_thread = self.p.application.ui.thread_connection() stdoutdump = self.p.widgets["ping.result"] self.p.widgets["ping.do_ping"].enabled(False) ui_thread.call(lambda: stdoutdump.text("Pinging ...")) out = process.pipe(self.cmd, shell=True) ui_thread.call(lambda: stdoutdump.text(out)) except: self.p.logger.exception("Exception while pinging") finally: self.p.widgets["ping.do_ping"].enabled(True)
gpl-2.0
80,697,676,256,242,960
32.468085
79
0.57046
false
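The only branch in on_merge that affects the command line is the IPv6 check. A standalone sketch of that selection, with socket.inet_pton standing in for ovirt's valid.IPv6Address:

import socket

# Standalone sketch of the command selection above; socket.inet_pton
# stands in for ovirt.node.valid.IPv6Address.
def build_ping_cmd(addr, count):
    cmd = "ping"
    try:
        socket.inet_pton(socket.AF_INET6, addr)
        cmd = "ping6"  # IPv6 targets need ping6
    except socket.error:
        pass
    return "%s -c %s %s" % (cmd, count, addr)

print(build_ping_cmd("127.0.0.1", "3"))  # ping -c 3 127.0.0.1
print(build_ping_cmd("::1", "3"))        # ping6 -c 3 ::1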
spencerpomme/coconuts-on-fire
intSet.py
1
1398
class intSet(object):
    """An intSet is a set of integers
    The value is represented by a list of ints, self.vals.
    Each int in the set occurs in self.vals exactly once."""

    def __init__(self):
        """Create an empty set of integers"""
        self.vals = []

    def insert(self, e):
        """Assumes e is an integer and inserts e into self"""
        if e not in self.vals:
            self.vals.append(e)

    def member(self, e):
        """Assumes e is an integer
           Returns True if e is in self, and False otherwise"""
        return e in self.vals

    def remove(self, e):
        """Assumes e is an integer and removes e from self
           Raises ValueError if e is not in self"""
        try:
            self.vals.remove(e)
        except ValueError:
            raise ValueError(str(e) + ' not found')

    def __str__(self):
        """Returns a string representation of self"""
        self.vals.sort()
        return '{' + ','.join([str(e) for e in self.vals]) + '}'

    def __len__(self):
        """Returns the number of integers in self"""
        return len(self.vals)

    def intersect(self, other):
        """Returns a new intSet that contains the common elements
           of self and other"""
        newSet = intSet()
        for item in self.vals:
            if other.member(item):
                newSet.insert(item)
        return newSet
apache-2.0
1,552,189,489,668,418,600
30.772727
86
0.565808
false
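A quick usage run of the class above (the values are arbitrary):

# Quick usage run of intSet; duplicates are ignored on insert.
s, t = intSet(), intSet()
for e in (3, 1, 4, 1, 5):
    s.insert(e)              # the duplicate 1 is ignored
for e in (1, 5, 9):
    t.insert(e)
print(s)                     # {1,3,4,5}
print(len(s))                # 4
print(s.member(9))           # False
print(s.intersect(t))        # {1,5}
s.remove(4)
print(s)                     # {1,3,5}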
lidavidm/sympy
sympy/functions/elementary/tests/test_trigonometric.py
2
31879
from sympy import (symbols, Symbol, nan, oo, zoo, I, sinh, sin, acot, pi, atan, acos, Rational, sqrt, asin, acot, cot, coth, E, S, tan, tanh, cos, cosh, atan2, exp, log, asinh, acoth, atanh, O, cancel, Matrix, re, im, Float, Pow, gcd, sec, csc, cot, diff, simplify, Heaviside, arg, conjugate) from sympy.utilities.pytest import XFAIL, slow, raises from sympy.core.compatibility import xrange x, y, z = symbols('x y z') r = Symbol('r', real=True) k = Symbol('k', integer=True) def test_sin(): x, y = symbols('x y') assert sin(nan) == nan assert sin(oo*I) == oo*I assert sin(-oo*I) == -oo*I assert sin(oo).args[0] == oo assert sin(0) == 0 assert sin(asin(x)) == x assert sin(atan(x)) == x / sqrt(1 + x**2) assert sin(acos(x)) == sqrt(1 - x**2) assert sin(acot(x)) == 1 / (sqrt(1 + 1 / x**2) * x) assert sin(atan2(y, x)) == y / sqrt(x**2 + y**2) assert sin(pi*I) == sinh(pi)*I assert sin(-pi*I) == -sinh(pi)*I assert sin(-2*I) == -sinh(2)*I assert sin(pi) == 0 assert sin(-pi) == 0 assert sin(2*pi) == 0 assert sin(-2*pi) == 0 assert sin(-3*10**73*pi) == 0 assert sin(7*10**103*pi) == 0 assert sin(pi/2) == 1 assert sin(-pi/2) == -1 assert sin(5*pi/2) == 1 assert sin(7*pi/2) == -1 n = symbols('n', integer=True) assert sin(pi*n/2) == (-1)**(n/2 - S.Half) assert sin(pi/3) == S.Half*sqrt(3) assert sin(-2*pi/3) == -S.Half*sqrt(3) assert sin(pi/4) == S.Half*sqrt(2) assert sin(-pi/4) == -S.Half*sqrt(2) assert sin(17*pi/4) == S.Half*sqrt(2) assert sin(-3*pi/4) == -S.Half*sqrt(2) assert sin(pi/6) == S.Half assert sin(-pi/6) == -S.Half assert sin(7*pi/6) == -S.Half assert sin(-5*pi/6) == -S.Half assert sin(1*pi/5) == sqrt((5 - sqrt(5)) / 8) assert sin(2*pi/5) == sqrt((5 + sqrt(5)) / 8) assert sin(3*pi/5) == sin(2*pi/5) assert sin(4*pi/5) == sin(1*pi/5) assert sin(6*pi/5) == -sin(1*pi/5) assert sin(8*pi/5) == -sin(2*pi/5) assert sin(-1273*pi/5) == -sin(2*pi/5) assert sin(pi/8) == sqrt((2 - sqrt(2))/4) assert sin(104*pi/105) == sin(pi/105) assert sin(106*pi/105) == -sin(pi/105) assert sin(-104*pi/105) == -sin(pi/105) assert sin(-106*pi/105) == sin(pi/105) assert sin(x*I) == sinh(x)*I assert sin(k*pi) == 0 assert sin(17*k*pi) == 0 assert sin(k*pi*I) == sinh(k*pi)*I assert sin(r).is_real is True assert isinstance(sin( re(x) - im(y)), sin) is True assert isinstance(sin(-re(x) + im(y)), sin) is False for d in list(range(1, 22)) + [60, 85]: for n in xrange(0, d*2 + 1): x = n*pi/d e = abs( float(sin(x)) - sin(float(x)) ) assert e < 1e-12 def test_sin_cos(): for d in [1, 2, 3, 4, 5, 6, 10, 12]: # list is not exhaustive... 
for n in xrange(-2*d, d*2): x = n*pi/d assert sin(x + pi/2) == cos(x), "fails for %d*pi/%d" % (n, d) assert sin(x - pi/2) == -cos(x), "fails for %d*pi/%d" % (n, d) assert sin(x) == cos(x - pi/2), "fails for %d*pi/%d" % (n, d) assert -sin(x) == cos(x + pi/2), "fails for %d*pi/%d" % (n, d) def test_sin_series(): assert sin(x).series(x, 0, 9) == \ x - x**3/6 + x**5/120 - x**7/5040 + O(x**9) def test_sin_rewrite(): assert sin(x).rewrite(exp) == -I*(exp(I*x) - exp(-I*x))/2 assert sin(x).rewrite(tan) == 2*tan(x/2)/(1 + tan(x/2)**2) assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2) assert sin(sinh(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sinh(3)).n() assert sin(cosh(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cosh(3)).n() assert sin(tanh(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tanh(3)).n() assert sin(coth(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, coth(3)).n() assert sin(sin(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sin(3)).n() assert sin(cos(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cos(3)).n() assert sin(tan(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tan(3)).n() assert sin(cot(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cot(3)).n() assert sin(log(x)).rewrite(Pow) == I*x**-I / 2 - I*x**I /2 def test_sin_expansion(): # Note: these formulas are not unique. The ones here come from the # Chebyshev formulas. assert sin(x + y).expand(trig=True) == sin(x)*cos(y) + cos(x)*sin(y) assert sin(x - y).expand(trig=True) == sin(x)*cos(y) - cos(x)*sin(y) assert sin(y - x).expand(trig=True) == cos(x)*sin(y) - sin(x)*cos(y) assert sin(2*x).expand(trig=True) == 2*sin(x)*cos(x) assert sin(3*x).expand(trig=True) == -4*sin(x)**3 + 3*sin(x) assert sin(4*x).expand(trig=True) == -8*sin(x)**3*cos(x) + 4*sin(x)*cos(x) assert sin(2).expand(trig=True) == 2*sin(1)*cos(1) assert sin(3).expand(trig=True) == -4*sin(1)**3 + 3*sin(1) def test_trig_symmetry(): assert sin(-x) == -sin(x) assert cos(-x) == cos(x) assert tan(-x) == -tan(x) assert cot(-x) == -cot(x) assert sin(x + pi) == -sin(x) assert sin(x + 2*pi) == sin(x) assert sin(x + 3*pi) == -sin(x) assert sin(x + 4*pi) == sin(x) assert sin(x - 5*pi) == -sin(x) assert cos(x + pi) == -cos(x) assert cos(x + 2*pi) == cos(x) assert cos(x + 3*pi) == -cos(x) assert cos(x + 4*pi) == cos(x) assert cos(x - 5*pi) == -cos(x) assert tan(x + pi) == tan(x) assert tan(x - 3*pi) == tan(x) assert cot(x + pi) == cot(x) assert cot(x - 3*pi) == cot(x) assert sin(pi/2 - x) == cos(x) assert sin(3*pi/2 - x) == -cos(x) assert sin(5*pi/2 - x) == cos(x) assert cos(pi/2 - x) == sin(x) assert cos(3*pi/2 - x) == -sin(x) assert cos(5*pi/2 - x) == sin(x) assert tan(pi/2 - x) == cot(x) assert tan(3*pi/2 - x) == cot(x) assert tan(5*pi/2 - x) == cot(x) assert cot(pi/2 - x) == tan(x) assert cot(3*pi/2 - x) == tan(x) assert cot(5*pi/2 - x) == tan(x) assert sin(pi/2 + x) == cos(x) assert cos(pi/2 + x) == -sin(x) assert tan(pi/2 + x) == -cot(x) assert cot(pi/2 + x) == -tan(x) def test_cos(): x, y = symbols('x y') assert cos(nan) == nan assert cos(oo*I) == oo assert cos(-oo*I) == oo assert cos(0) == 1 assert cos(acos(x)) == x assert cos(atan(x)) == 1 / sqrt(1 + x**2) assert cos(asin(x)) == sqrt(1 - x**2) assert cos(acot(x)) == 1 / sqrt(1 + 1 / x**2) assert cos(atan2(y, x)) == x / sqrt(x**2 + y**2) assert cos(pi*I) == cosh(pi) assert cos(-pi*I) == cosh(pi) assert cos(-2*I) == cosh(2) assert cos(pi/2) == 0 assert cos(-pi/2) == 0 assert 
cos((-3*10**73 + 1)*pi/2) == 0
    assert cos((7*10**103 + 1)*pi/2) == 0

    n = symbols('n', integer=True)
    assert cos(pi*n/2) == 0

    assert cos(pi) == -1
    assert cos(-pi) == -1
    assert cos(2*pi) == 1
    assert cos(5*pi) == -1
    assert cos(8*pi) == 1

    assert cos(pi/3) == S.Half
    assert cos(-2*pi/3) == -S.Half

    assert cos(pi/4) == S.Half*sqrt(2)
    assert cos(-pi/4) == S.Half*sqrt(2)
    assert cos(11*pi/4) == -S.Half*sqrt(2)
    assert cos(-3*pi/4) == -S.Half*sqrt(2)

    assert cos(pi/6) == S.Half*sqrt(3)
    assert cos(-pi/6) == S.Half*sqrt(3)
    assert cos(7*pi/6) == -S.Half*sqrt(3)
    assert cos(-5*pi/6) == -S.Half*sqrt(3)

    assert cos(1*pi/5) == (sqrt(5) + 1)/4
    assert cos(2*pi/5) == (sqrt(5) - 1)/4
    assert cos(3*pi/5) == -cos(2*pi/5)
    assert cos(4*pi/5) == -cos(1*pi/5)
    assert cos(6*pi/5) == -cos(1*pi/5)
    assert cos(8*pi/5) == cos(2*pi/5)
    assert cos(-1273*pi/5) == -cos(2*pi/5)

    assert cos(pi/8) == sqrt((2 + sqrt(2))/4)

    assert cos(104*pi/105) == -cos(pi/105)
    assert cos(106*pi/105) == -cos(pi/105)

    assert cos(-104*pi/105) == -cos(pi/105)
    assert cos(-106*pi/105) == -cos(pi/105)

    assert cos(x*I) == cosh(x)
    assert cos(k*pi*I) == cosh(k*pi)

    assert cos(r).is_real is True

    assert cos(k*pi) == (-1)**k
    assert cos(2*k*pi) == 1

    for d in list(range(1, 22)) + [60, 85]:
        for n in xrange(0, 2*d + 1):
            x = n*pi/d
            e = abs( float(cos(x)) - cos(float(x)) )
            assert e < 1e-12


def test_issue_3091():
    c = Float('123456789012345678901234567890.25', '')
    for cls in [sin, cos, tan, cot]:
        assert cls(c*pi) == cls(pi/4)
        assert cls(4.125*pi) == cls(pi/8)
        assert cls(4.7*pi) == cls((4.7 % 2)*pi)


def test_cos_series():
    assert cos(x).series(x, 0, 9) == \
        1 - x**2/2 + x**4/24 - x**6/720 + x**8/40320 + O(x**9)


def test_cos_rewrite():
    assert cos(x).rewrite(exp) == exp(I*x)/2 + exp(-I*x)/2
    assert cos(x).rewrite(tan) == (1 - tan(x/2)**2)/(1 + tan(x/2)**2)
    assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
    assert cos(sinh(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sinh(3)).n()
    assert cos(cosh(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cosh(3)).n()
    assert cos(tanh(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tanh(3)).n()
    assert cos(coth(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, coth(3)).n()
    assert cos(sin(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sin(3)).n()
    assert cos(cos(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cos(3)).n()
    assert cos(tan(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tan(3)).n()
    assert cos(cot(x)).rewrite(
        exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cot(3)).n()
    assert cos(log(x)).rewrite(Pow) == x**I/2 + x**-I/2


def test_cos_expansion():
    assert cos(x + y).expand(trig=True) == cos(x)*cos(y) - sin(x)*sin(y)
    assert cos(x - y).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
    assert cos(y - x).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
    assert cos(2*x).expand(trig=True) == 2*cos(x)**2 - 1
    assert cos(3*x).expand(trig=True) == 4*cos(x)**3 - 3*cos(x)
    assert cos(4*x).expand(trig=True) == 8*cos(x)**4 - 8*cos(x)**2 + 1
    assert cos(2).expand(trig=True) == 2*cos(1)**2 - 1
    assert cos(3).expand(trig=True) == 4*cos(1)**3 - 3*cos(1)


def test_tan():
    assert tan(nan) == nan

    assert tan(oo*I) == I
    assert tan(-oo*I) == -I

    assert tan(0) == 0

    assert tan(atan(x)) == x
    assert tan(asin(x)) == x / sqrt(1 - x**2)
    assert tan(acos(x)) == sqrt(1 - x**2) / x
    assert tan(acot(x)) == 1 / x
    assert tan(atan2(y, x)) == y/x

    assert tan(pi*I) == tanh(pi)*I
    assert tan(-pi*I) == -tanh(pi)*I
assert tan(-2*I) == -tanh(2)*I assert tan(pi) == 0 assert tan(-pi) == 0 assert tan(2*pi) == 0 assert tan(-2*pi) == 0 assert tan(-3*10**73*pi) == 0 assert tan(pi/2) == zoo assert tan(3*pi/2) == zoo assert tan(pi/3) == sqrt(3) assert tan(-2*pi/3) == sqrt(3) assert tan(pi/4) == S.One assert tan(-pi/4) == -S.One assert tan(17*pi/4) == S.One assert tan(-3*pi/4) == S.One assert tan(pi/6) == 1/sqrt(3) assert tan(-pi/6) == -1/sqrt(3) assert tan(7*pi/6) == 1/sqrt(3) assert tan(-5*pi/6) == 1/sqrt(3) assert tan(x*I) == tanh(x)*I assert tan(k*pi) == 0 assert tan(17*k*pi) == 0 assert tan(k*pi*I) == tanh(k*pi)*I assert tan(r).is_real is True assert tan(10*pi/7) == tan(3*pi/7) assert tan(11*pi/7) == -tan(3*pi/7) assert tan(-11*pi/7) == tan(3*pi/7) def test_tan_series(): assert tan(x).series(x, 0, 9) == \ x + x**3/3 + 2*x**5/15 + 17*x**7/315 + O(x**9) def test_tan_rewrite(): neg_exp, pos_exp = exp(-x*I), exp(x*I) assert tan(x).rewrite(exp) == I*(neg_exp - pos_exp)/(neg_exp + pos_exp) assert tan(x).rewrite(sin) == 2*sin(x)**2/sin(2*x) assert tan(x).rewrite(cos) == -cos(x + S.Pi/2)/cos(x) assert tan(x).rewrite(cot) == 1/cot(x) assert tan(sinh(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sinh(3)).n() assert tan(cosh(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cosh(3)).n() assert tan(tanh(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tanh(3)).n() assert tan(coth(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, coth(3)).n() assert tan(sin(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sin(3)).n() assert tan(cos(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cos(3)).n() assert tan(tan(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tan(3)).n() assert tan(cot(x)).rewrite( exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cot(3)).n() assert tan(log(x)).rewrite(Pow) == I*(x**-I - x**I)/(x**-I + x**I) assert 0 == (cos(pi/15)*tan(pi/15) - sin(pi/15)).rewrite(pow) assert tan(pi/19).rewrite(pow) == tan(pi/19) assert tan(8*pi/19).rewrite(sqrt) == tan(8*pi/19) def test_tan_subs(): assert tan(x).subs(tan(x), y) == y assert tan(x).subs(x, y) == tan(y) assert tan(x).subs(x, S.Pi/2) == zoo assert tan(x).subs(x, 3*S.Pi/2) == zoo def test_tan_expansion(): assert tan(x + y).expand(trig=True) == ((tan(x) + tan(y))/(1 - tan(x)*tan(y))).expand() assert tan(x - y).expand(trig=True) == ((tan(x) - tan(y))/(1 + tan(x)*tan(y))).expand() assert tan(x + y + z).expand(trig=True) == ( (tan(x) + tan(y) + tan(z) - tan(x)*tan(y)*tan(z))/ (1 - tan(x)*tan(y) - tan(x)*tan(z) - tan(y)*tan(z))).expand() assert 0 == tan(2*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 7))])*24 - 7 assert 0 == tan(3*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*55 - 37 assert 0 == tan(4*x - pi/4).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*239 - 1 def test_cot(): assert cot(nan) == nan assert cot(oo*I) == -I assert cot(-oo*I) == I assert cot(0) == zoo assert cot(2*pi) == zoo assert cot(acot(x)) == x assert cot(atan(x)) == 1 / x assert cot(asin(x)) == sqrt(1 - x**2) / x assert cot(acos(x)) == x / sqrt(1 - x**2) assert cot(atan2(y, x)) == x/y assert cot(pi*I) == -coth(pi)*I assert cot(-pi*I) == coth(pi)*I assert cot(-2*I) == coth(2)*I assert cot(pi) == cot(2*pi) == cot(3*pi) assert cot(-pi) == cot(-2*pi) == cot(-3*pi) assert cot(pi/2) == 0 assert cot(-pi/2) == 0 assert cot(5*pi/2) == 0 assert cot(7*pi/2) == 0 assert cot(pi/3) == 1/sqrt(3) assert cot(-2*pi/3) == 1/sqrt(3) assert cot(pi/4) == 
S.One assert cot(-pi/4) == -S.One assert cot(17*pi/4) == S.One assert cot(-3*pi/4) == S.One assert cot(pi/6) == sqrt(3) assert cot(-pi/6) == -sqrt(3) assert cot(7*pi/6) == sqrt(3) assert cot(-5*pi/6) == sqrt(3) assert cot(x*I) == -coth(x)*I assert cot(k*pi*I) == -coth(k*pi)*I assert cot(r).is_real is True assert cot(10*pi/7) == cot(3*pi/7) assert cot(11*pi/7) == -cot(3*pi/7) assert cot(-11*pi/7) == cot(3*pi/7) def test_cot_series(): assert cot(x).series(x, 0, 9) == \ 1/x - x/3 - x**3/45 - 2*x**5/945 - x**7/4725 + O(x**9) # issue 3111: assert cot(x**20 + x**21 + x**22).series(x, 0, 4) == \ x**(-20) - 1/x**19 + x**(-17) - 1/x**16 + x**(-14) - 1/x**13 + \ x**(-11) - 1/x**10 + x**(-8) - 1/x**7 + x**(-5) - 1/x**4 + \ x**(-2) - 1/x + x - x**2 + O(x**4) def test_cot_rewrite(): neg_exp, pos_exp = exp(-x*I), exp(x*I) assert cot(x).rewrite(exp) == I*(pos_exp + neg_exp)/(pos_exp - neg_exp) assert cot(x).rewrite(sin) == 2*sin(2*x)/sin(x)**2 assert cot(x).rewrite(cos) == -cos(x)/cos(x + S.Pi/2) assert cot(x).rewrite(tan) == 1/tan(x) assert cot(sinh(x)).rewrite( exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sinh(3)).n() assert cot(cosh(x)).rewrite( exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, cosh(3)).n() assert cot(tanh(x)).rewrite( exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tanh(3)).n() assert cot(coth(x)).rewrite( exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, coth(3)).n() assert cot(sin(x)).rewrite( exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sin(3)).n() assert cot(tan(x)).rewrite( exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tan(3)).n() assert cot(log(x)).rewrite(Pow) == -I*(x**-I + x**I)/(x**-I - x**I) assert cot(4*pi/15).rewrite(pow) == (cos(4*pi/15)/sin(4*pi/15)).rewrite(pow) assert cot(pi/19).rewrite(pow) == cot(pi/19) assert cot(pi/19).rewrite(sqrt) == cot(pi/19) def test_cot_subs(): assert cot(x).subs(cot(x), y) == y assert cot(x).subs(x, y) == cot(y) assert cot(x).subs(x, 0) == zoo assert cot(x).subs(x, S.Pi) == zoo def test_cot_expansion(): assert cot(x + y).expand(trig=True) == ((cot(x)*cot(y) - 1)/(cot(x) + cot(y))).expand() assert cot(x - y).expand(trig=True) == (-(cot(x)*cot(y) + 1)/(cot(x) - cot(y))).expand() assert cot(x + y + z).expand(trig=True) == ( (cot(x)*cot(y)*cot(z) - cot(x) - cot(y) - cot(z))/ (-1 + cot(x)*cot(y) + cot(x)*cot(z) + cot(y)*cot(z))).expand() assert cot(3*x).expand(trig=True) == ((cot(x)**3 - 3*cot(x))/(3*cot(x)**2 - 1)).expand() assert 0 == cot(2*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 3))])*3 + 4 assert 0 == cot(3*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 5))])*55 - 37 assert 0 == cot(4*x - pi/4).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 7))])*863 + 191 def test_asin(): assert asin(nan) == nan assert asin(oo) == -I*oo assert asin(-oo) == I*oo # Note: asin(-x) = - asin(x) assert asin(0) == 0 assert asin(1) == pi/2 assert asin(-1) == -pi/2 assert asin(sqrt(3)/2) == pi/3 assert asin(-sqrt(3)/2) == -pi/3 assert asin(sqrt(2)/2) == pi/4 assert asin(-sqrt(2)/2) == -pi/4 assert asin(sqrt((5 - sqrt(5))/8)) == pi/5 assert asin(-sqrt((5 - sqrt(5))/8)) == -pi/5 assert asin(Rational(1, 2)) == pi/6 assert asin(-Rational(1, 2)) == -pi/6 assert asin((sqrt(2 - sqrt(2)))/2) == pi/8 assert asin(-(sqrt(2 - sqrt(2)))/2) == -pi/8 assert asin((sqrt(5) - 1)/4) == pi/10 assert asin(-(sqrt(5) - 1)/4) == -pi/10 assert asin((sqrt(3) - 1)/sqrt(2**3)) == pi/12 assert asin(-(sqrt(3) - 1)/sqrt(2**3)) == -pi/12 assert asin(x).diff(x) == 1/sqrt(1 - x**2) assert asin(0.2).is_real is True assert 
asin(-2).is_real is False assert asin(-2*I) == -I*asinh(2) def test_asin_series(): assert asin(x).series(x, 0, 9) == \ x + x**3/6 + 3*x**5/40 + 5*x**7/112 + O(x**9) t5 = asin(x).taylor_term(5, x) assert t5 == 3*x**5/40 assert asin(x).taylor_term(7, x, t5, 0) == 5*x**7/112 def test_asin_rewrite(): assert asin(x).rewrite(log) == -I*log(I*x + sqrt(1 - x**2)) assert asin(x).rewrite(atan) == 2*atan(x/(1 + sqrt(1 - x**2))) assert asin(x).rewrite(acos) == S.Pi/2 - acos(x) def test_acos(): assert acos(nan) == nan assert acos(oo) == I*oo assert acos(-oo) == -I*oo # Note: acos(-x) = pi - acos(x) assert acos(0) == pi/2 assert acos(Rational(1, 2)) == pi/3 assert acos(-Rational(1, 2)) == (2*pi)/3 assert acos(1) == 0 assert acos(-1) == pi assert acos(sqrt(2)/2) == pi/4 assert acos(-sqrt(2)/2) == (3*pi)/4 assert acos(x).diff(x) == -1/sqrt(1 - x**2) assert acos(0.2).is_real is True assert acos(-2).is_real is False def test_acos_series(): assert acos(x).series(x, 0, 8) == \ pi/2 - x - x**3/6 - 3*x**5/40 - 5*x**7/112 + O(x**8) assert acos(x).series(x, 0, 8) == pi/2 - asin(x).series(x, 0, 8) t5 = acos(x).taylor_term(5, x) assert t5 == -3*x**5/40 assert acos(x).taylor_term(7, x, t5, 0) == -5*x**7/112 def test_acos_rewrite(): assert acos(x).rewrite(log) == pi/2 + I*log(I*x + sqrt(1 - x**2)) assert acos(x).rewrite(atan) == \ atan(sqrt(1 - x**2)/x) + (pi/2)*(1 - x*sqrt(1/x**2)) assert acos(0).rewrite(atan) == S.Pi/2 assert acos(0.5).rewrite(atan) == acos(0.5).rewrite(log) assert acos(x).rewrite(asin) == S.Pi/2 - asin(x) def test_atan(): assert atan(nan) == nan assert atan(oo) == pi/2 assert atan(-oo) == -pi/2 assert atan(0) == 0 assert atan(1) == pi/4 assert atan(sqrt(3)) == pi/3 assert atan(oo) == pi/2 assert atan(x).diff(x) == 1/(1 + x**2) assert atan(r).is_real is True assert atan(-2*I) == -I*atanh(2) def test_atan_rewrite(): assert atan(x).rewrite(log) == I*log((1 - I*x)/(1 + I*x))/2 def test_atan2(): assert atan2(0, 0) == S.NaN assert atan2(0, 1) == 0 assert atan2(1, 1) == pi/4 assert atan2(1, 0) == pi/2 assert atan2(1, -1) == 3*pi/4 assert atan2(0, -1) == pi assert atan2(-1, -1) == -3*pi/4 assert atan2(-1, 0) == -pi/2 assert atan2(-1, 1) == -pi/4 u = Symbol("u", positive=True) assert atan2(0, u) == 0 u = Symbol("u", negative=True) assert atan2(0, u) == pi assert atan2(y, oo) == 0 assert atan2(y, -oo)== 2*pi*Heaviside(re(y)) - pi assert atan2(y, x).rewrite(log) == -I*log((x + I*y)/sqrt(x**2 + y**2)) assert atan2(y, x).rewrite(atan) == 2*atan(y/(x + sqrt(x**2 + y**2))) ex = atan2(y, x) - arg(x + I*y) assert ex.subs({x:2, y:3}).rewrite(arg) == 0 assert ex.subs({x:2, y:3*I}).rewrite(arg) == 0 assert ex.subs({x:2*I, y:3}).rewrite(arg) == 0 assert ex.subs({x:2*I, y:3*I}).rewrite(arg) == 0 assert conjugate(atan2(x, y)) == atan2(conjugate(x), conjugate(y)) assert diff(atan2(y, x), x) == -y/(x**2 + y**2) assert diff(atan2(y, x), y) == x/(x**2 + y**2) assert simplify(diff(atan2(y, x).rewrite(log), x)) == -y/(x**2 + y**2) assert simplify(diff(atan2(y, x).rewrite(log), y)) == x/(x**2 + y**2) assert isinstance(atan2(2, 3*I).n(), atan2) def test_acot(): assert acot(nan) == nan assert acot(-oo) == 0 assert acot(oo) == 0 assert acot(1) == pi/4 assert acot(0) == pi/2 assert acot(sqrt(3)/3) == pi/3 assert acot(1/sqrt(3)) == pi/3 assert acot(-1/sqrt(3)) == -pi/3 assert acot(x).diff(x) == -1/(1 + x**2) assert acot(r).is_real is True assert acot(I*pi) == -I*acoth(pi) assert acot(-2*I) == I*acoth(2) def test_acot_rewrite(): assert acot(x).rewrite(log) == I*log((x - I)/(x + I))/2 def test_attributes(): assert sin(x).args == (x,) 
def test_sincos_rewrite(): assert sin(pi/2 - x) == cos(x) assert sin(pi - x) == sin(x) assert cos(pi/2 - x) == sin(x) assert cos(pi - x) == -cos(x) def _check_even_rewrite(func, arg): """Checks that the expr has been rewritten using f(-x) -> f(x) arg : -x """ return func(arg).args[0] == -arg def _check_odd_rewrite(func, arg): """Checks that the expr has been rewritten using f(-x) -> -f(x) arg : -x """ return func(arg).func.is_Mul def _check_no_rewrite(func, arg): """Checks that the expr is not rewritten""" return func(arg).args[0] == arg def test_evenodd_rewrite(): a = cos(2) # negative b = sin(1) # positive even = [cos] odd = [sin, tan, cot, asin, atan, acot] with_minus = [-1, -2**1024 * E, -pi/105, -x*y, -x - y] for func in even: for expr in with_minus: assert _check_even_rewrite(func, expr) assert _check_no_rewrite(func, a*b) assert func( x - y) == func(y - x) # it doesn't matter which form is canonical for func in odd: for expr in with_minus: assert _check_odd_rewrite(func, expr) assert _check_no_rewrite(func, a*b) assert func( x - y) == -func(y - x) # it doesn't matter which form is canonical def test_issue1448(): assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2) assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2) assert tan(x).rewrite(cot) == 1/cot(x) assert cot(x).fdiff() == -1 - cot(x)**2 def test_as_leading_term_issue2173(): assert sin(x).as_leading_term(x) == x assert cos(x).as_leading_term(x) == 1 assert tan(x).as_leading_term(x) == x assert cot(x).as_leading_term(x) == 1/x assert asin(x).as_leading_term(x) == x assert acos(x).as_leading_term(x) == x assert atan(x).as_leading_term(x) == x assert acot(x).as_leading_term(x) == x def test_leading_terms(): for func in [sin, cos, tan, cot, asin, acos, atan, acot]: for arg in (1/x, S.Half): eq = func(arg) assert eq.as_leading_term(x) == eq def test_atan2_expansion(): assert cancel(atan2(x**2, x + 1).diff(x) - atan(x**2/(x + 1)).diff(x)) == 0 assert cancel(atan(y/x).series(y, 0, 5) - atan2(y, x).series(y, 0, 5) + atan2(0, x) - atan(0)) == O(y**5) assert cancel(atan(y/x).series(x, 1, 4) - atan2(y, x).series(x, 1, 4) + atan2(y, 1) - atan(y)) == O(x**4) assert cancel(atan((y + x)/x).series(x, 1, 3) - atan2(y + x, x).series(x, 1, 3) + atan2(1 + y, 1) - atan(1 + y)) == O(x**3) assert Matrix([atan2(y, x)]).jacobian([y, x]) == \ Matrix([[x/(y**2 + x**2), -y/(y**2 + x**2)]]) def test_aseries(): def t(n, v, d, e): assert abs( n(1/v).evalf() - n(1/x).series(x, dir=d).removeO().subs(x, v)) < e t(atan, 0.1, '+', 1e-5) t(atan, -0.1, '-', 1e-5) t(acot, 0.1, '+', 1e-5) t(acot, -0.1, '-', 1e-5) def test_issue_1321(): i = Symbol('i', integer=True) e = Symbol('e', even=True) o = Symbol('o', odd=True) # unknown parity for variable assert cos(4*i*pi) == 1 assert sin(4*i*pi) == 0 assert tan(4*i*pi) == 0 assert cot(4*i*pi) == zoo assert cos(3*i*pi) == cos(pi*i) # +/-1 assert sin(3*i*pi) == 0 assert tan(3*i*pi) == 0 assert cot(3*i*pi) == zoo assert cos(4.0*i*pi) == 1 assert sin(4.0*i*pi) == 0 assert tan(4.0*i*pi) == 0 assert cot(4.0*i*pi) == zoo assert cos(3.0*i*pi) == cos(pi*i) # +/-1 assert sin(3.0*i*pi) == 0 assert tan(3.0*i*pi) == 0 assert cot(3.0*i*pi) == zoo assert cos(4.5*i*pi) == cos(0.5*pi*i) assert sin(4.5*i*pi) == sin(0.5*pi*i) assert tan(4.5*i*pi) == tan(0.5*pi*i) assert cot(4.5*i*pi) == cot(0.5*pi*i) # parity of variable is known assert cos(4*e*pi) == 1 assert sin(4*e*pi) == 0 assert tan(4*e*pi) == 0 assert cot(4*e*pi) == zoo assert cos(3*e*pi) == 1 assert sin(3*e*pi) == 0 assert tan(3*e*pi) == 0 assert cot(3*e*pi) == zoo 
assert cos(4.0*e*pi) == 1 assert sin(4.0*e*pi) == 0 assert tan(4.0*e*pi) == 0 assert cot(4.0*e*pi) == zoo assert cos(3.0*e*pi) == 1 assert sin(3.0*e*pi) == 0 assert tan(3.0*e*pi) == 0 assert cot(3.0*e*pi) == zoo assert cos(4.5*e*pi) == cos(0.5*pi*e) assert sin(4.5*e*pi) == sin(0.5*pi*e) assert tan(4.5*e*pi) == tan(0.5*pi*e) assert cot(4.5*e*pi) == cot(0.5*pi*e) assert cos(4*o*pi) == 1 assert sin(4*o*pi) == 0 assert tan(4*o*pi) == 0 assert cot(4*o*pi) == zoo assert cos(3*o*pi) == -1 assert sin(3*o*pi) == 0 assert tan(3*o*pi) == 0 assert cot(3*o*pi) == zoo assert cos(4.0*o*pi) == 1 assert sin(4.0*o*pi) == 0 assert tan(4.0*o*pi) == 0 assert cot(4.0*o*pi) == zoo assert cos(3.0*o*pi) == -1 assert sin(3.0*o*pi) == 0 assert tan(3.0*o*pi) == 0 assert cot(3.0*o*pi) == zoo assert cos(4.5*o*pi) == cos(0.5*pi*o) assert sin(4.5*o*pi) == sin(0.5*pi*o) assert tan(4.5*o*pi) == tan(0.5*pi*o) assert cot(4.5*o*pi) == cot(0.5*pi*o) # x could be imaginary assert cos(4*x*pi) == cos(4*pi*x) assert sin(4*x*pi) == sin(4*pi*x) assert tan(4*x*pi) == tan(4*pi*x) assert cot(4*x*pi) == cot(4*pi*x) assert cos(3*x*pi) == cos(3*pi*x) assert sin(3*x*pi) == sin(3*pi*x) assert tan(3*x*pi) == tan(3*pi*x) assert cot(3*x*pi) == cot(3*pi*x) assert cos(4.0*x*pi) == cos(4.0*pi*x) assert sin(4.0*x*pi) == sin(4.0*pi*x) assert tan(4.0*x*pi) == tan(4.0*pi*x) assert cot(4.0*x*pi) == cot(4.0*pi*x) assert cos(3.0*x*pi) == cos(3.0*pi*x) assert sin(3.0*x*pi) == sin(3.0*pi*x) assert tan(3.0*x*pi) == tan(3.0*pi*x) assert cot(3.0*x*pi) == cot(3.0*pi*x) assert cos(4.5*x*pi) == cos(4.5*pi*x) assert sin(4.5*x*pi) == sin(4.5*pi*x) assert tan(4.5*x*pi) == tan(4.5*pi*x) assert cot(4.5*x*pi) == cot(4.5*pi*x) def test_inverses(): raises(AttributeError, lambda: sin(x).inverse()) raises(AttributeError, lambda: cos(x).inverse()) assert tan(x).inverse() == atan assert cot(x).inverse() == acot raises(AttributeError, lambda: csc(x).inverse()) raises(AttributeError, lambda: sec(x).inverse()) assert asin(x).inverse() == sin assert acos(x).inverse() == cos assert atan(x).inverse() == tan assert acot(x).inverse() == cot def test_real_imag(): a, b = symbols('a b', real=True) z = a + b*I for deep in [True, False]: assert sin( z).as_real_imag(deep=deep) == (sin(a)*cosh(b), cos(a)*sinh(b)) assert cos( z).as_real_imag(deep=deep) == (cos(a)*cosh(b), -sin(a)*sinh(b)) assert tan(z).as_real_imag(deep=deep) == (sin(a)*cos( a)/(cos(a)**2 + sinh(b)**2), sinh(b)*cosh(b)/(cos(a)**2 + sinh(b)**2)) assert cot(z).as_real_imag(deep=deep) == (sin(a)*cos(a)/( sin(a)**2 + sinh(b)**2), -sinh(b)*cosh(b)/(sin(a)**2 + sinh(b)**2)) assert sin(a).as_real_imag(deep=deep) == (sin(a), 0) assert cos(a).as_real_imag(deep=deep) == (cos(a), 0) assert tan(a).as_real_imag(deep=deep) == (tan(a), 0) assert cot(a).as_real_imag(deep=deep) == (cot(a), 0) @XFAIL def test_sin_cos_with_infinity(): # Test for issue 2097 # http://code.google.com/p/sympy/issues/detail?id=2097 assert sin(oo) == S.NaN assert cos(oo) == S.NaN @slow def test_sincos_rewrite_sqrt(): # equivalent to testing rewrite(pow) for p in [1, 3, 5, 17]: for t in [1, 8]: n = t*p for i in xrange(1, (n + 1)//2 + 1): if 1 == gcd(i, n): x = i*pi/n s1 = sin(x).rewrite(sqrt) c1 = cos(x).rewrite(sqrt) assert not s1.has(cos, sin), "fails for %d*pi/%d" % (i, n) assert not c1.has(cos, sin), "fails for %d*pi/%d" % (i, n) assert 1e-3 > abs(sin(x.evalf(5)) - s1.evalf(2)), "fails for %d*pi/%d" % (i, n) assert 1e-3 > abs(cos(x.evalf(5)) - c1.evalf(2)), "fails for %d*pi/%d" % (i, n) @slow def test_tancot_rewrite_sqrt(): # equivalent to testing rewrite(pow) 
for p in [1, 3, 5, 17]: for t in [1, 8]: n = t*p for i in xrange(1, (n + 1)//2 + 1): if 1 == gcd(i, n): x = i*pi/n if 2*i != n and 3*i != 2*n: t1 = tan(x).rewrite(sqrt) assert not t1.has(cot, tan), "fails for %d*pi/%d" % (i, n) assert 1e-3 > abs( tan(x.evalf(7)) - t1.evalf(4) ), "fails for %d*pi/%d" % (i, n) if i != 0 and i != n: c1 = cot(x).rewrite(sqrt) assert not c1.has(cot, tan), "fails for %d*pi/%d" % (i, n) assert 1e-3 > abs( cot(x.evalf(7)) - c1.evalf(4) ), "fails for %d*pi/%d" % (i, n) def test_sec(): assert sec(x).diff(x) == tan(x)*sec(x) def test_csc(): assert csc(x).diff(x) == -cot(x)*csc(x)
bsd-3-clause
-4,128,740,282,095,562,000
31.69641
105
0.517896
false
neopenx/Dragon
Dragon/python/dragon/memonger.py
1
1374
# -------------------------------------------------------- # Dragon # Copyright(c) 2017 SeetaTech # Written by Ting Pan # -------------------------------------------------------- def ShareGrads(enabled=True): """Enable gradients sharing globally. Parameters ---------- enabled : boolean Whether to share grads. Returns ------- None Examples -------- >>> import dragon.memonger as opt >>> opt.ShareGrads() """ from dragon.config import option option['share_grads'] = enabled def Drop(op_func, *args, **kwargs): """Drop(Share) the inputs for outputs. Parameters ---------- op_func : lambda The function of any operator. args : list The args of this operator. kwargs : dict The kwargs of this operator. Returns ------- Tensor or list of Tensor As the ``op_func`` returns. Examples -------- >>> from dragon.core.tensor import Tensor >>> import dragon.ops as ops >>> import dragon.memonger as opt >>> data = Tensor().Variable() >>> conv_1 = ops.Conv2D(data, num_output=8) >>> conv_1_bn = opt.Drop(ops.BatchNorm, [conv_1, Tensor().Variable(), Tensor().Variable()]) >>> conv_1_relu = opt.Drop(ops.Relu, conv_1_bn) """ kwargs['mirror_stage'] = True return op_func(*args, **kwargs)
bsd-2-clause
7,309,461,154,045,384,000
22.706897
93
0.529112
false
sanyi/awsutils
awsutils/sqs/message.py
1
1549
# awsutils/sqs/message.py # Copyright 2013 Sandor Attila Gerendi (Sanyi) # # This module is part of awsutils and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import time from awsutils.exceptions.aws import UserInputException class SQSMessage: def __init__(self, messageBody=None, queue=None): self.messageBody = messageBody self.receiptHandle = None self.queue = queue def getBody(self): return self.messageBody def setBody(self, messageBody): self.messageBody = messageBody def delete(self): if self.queue is None: raise UserInputException('This message does not belong to any queue') self.queue.sqsclient.deleteMessage(self.queue.qName, self.receiptHandle) def visibilityTimeoutLeft(self): if self.queue is None: raise UserInputException('This message does not belong to any queue') return self.VisibilityTimeout - (time.time() - self.receptionTimestamp) def changeVisibility(self, visibilityTimeout): if self.queue is None: raise UserInputException('This message does not belong to any queue') if self.receiptHandle is None: raise UserInputException('This message does not have a receipt handle') self.queue.sqsclient.changeMessageVisibility(self.queue.qName, self.receiptHandle, visibilityTimeout) self.VisibilityTimeout = visibilityTimeout def __repr__(self): return 'SQSMessage: ' + repr(self.__dict__)
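A minimal usage sketch for SQSMessage (not part of awsutils): it assumes a hypothetical queue object that exposes the sqsclient and qName attributes the methods above rely on, stamps receptionTimestamp and VisibilityTimeout on messages when they are received, and offers an invented receiveMessage() helper.

def process_one(queue, handle):
    # receiveMessage() is an invented helper; the real awsutils receive API may differ.
    message = queue.receiveMessage()
    if message.visibilityTimeoutLeft() < 10:
        # Buy more processing time before the message becomes visible again.
        message.changeVisibility(60)
    handle(message.getBody())  # caller-supplied processing function
    message.delete()  # remove the message from the queue once handled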
mit
-1,632,423,533,953,449,500
35.880952
110
0.699161
false
pygeek/django
tests/regressiontests/localflavor/de/tests.py
14
1885
from __future__ import unicode_literals from django.contrib.localflavor.de.forms import (DEZipCodeField, DEStateSelect, DEIdentityCardNumberField) from django.test import SimpleTestCase class DELocalFlavorTests(SimpleTestCase): def test_DEStateSelect(self): f = DEStateSelect() out = '''<select name="states"> <option value="BW">Baden-Wuerttemberg</option> <option value="BY">Bavaria</option> <option value="BE">Berlin</option> <option value="BB">Brandenburg</option> <option value="HB">Bremen</option> <option value="HH">Hamburg</option> <option value="HE">Hessen</option> <option value="MV">Mecklenburg-Western Pomerania</option> <option value="NI">Lower Saxony</option> <option value="NW">North Rhine-Westphalia</option> <option value="RP">Rhineland-Palatinate</option> <option value="SL">Saarland</option> <option value="SN">Saxony</option> <option value="ST">Saxony-Anhalt</option> <option value="SH">Schleswig-Holstein</option> <option value="TH" selected="selected">Thuringia</option> </select>''' self.assertHTMLEqual(f.render('states', 'TH'), out) def test_DEZipCodeField(self): error_format = ['Enter a zip code in the format XXXXX.'] valid = { '99423': '99423', } invalid = { ' 99423': error_format, } self.assertFieldOutput(DEZipCodeField, valid, invalid) def test_DEIdentityCardNumberField(self): error_format = ['Enter a valid German identity card number in XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.'] valid = { '7549313035D-6004103-0903042-0': '7549313035D-6004103-0903042-0', '9786324830D 6104243 0910271 2': '9786324830D-6104243-0910271-2', } invalid = { '0434657485D-6407276-0508137-9': error_format, } self.assertFieldOutput(DEIdentityCardNumberField, valid, invalid)
bsd-3-clause
-552,282,568,406,641,340
35.960784
109
0.679576
false
davidsanchez/CTAtools
Simulations/simulation.py
1
4640
# ------ Imports --------------- # from ebltable.tau_from_model import OptDepth as OD import os,sys,numpy from os.path import join import ctools from ctoolsAnalysis.config import get_config,get_default_config from ctoolsAnalysis.LikeFit import CTA_ctools_analyser from Script.Common_Functions import * from Script.Utils import LiMa import ctoolsAnalysis.xml_generator as xml import pyfits import random # ------------------------------ # try: get_ipython().magic(u'pylab') except : pass catalog = os.getcwd()+"/AGN_Monitoring_list.dat" #Model = "PL" #Model = "LPEBL" Model = "PLEBL" #Model = "PLECEBL" try: #conf file provided config = get_config(sys.argv[-2]) redshift = float(sys.argv[-1]) except : print "usage : python "+sys.argv[0]+" config_file redshift" exit() # #------------------- Analyse = CTA_ctools_analyser.fromConfig(config) #------------------ Value of the EBL ETeV = numpy.logspace(-2,2.5,200) EBL_mod = config["simulation"]["EBL_model"] tau = OD.readmodel(model = EBL_mod) Tau_values = tau.opt_depth(redshift,ETeV) #------------------ Make the XML model for simulations lib,doc = xml.CreateLib() srcname = config["target"]["name"] Model = config["simulation"]["spectrum_model"] #SOURCE SPECTRUM if Model == "PL": spec = xml.addPowerLaw1(lib,srcname,"PointSource", eflux=1e6*numpy.sqrt(Analyse.config["energy"]["emax"]*Analyse.config["energy"]["emin"]),flux_value=1e-10,flux_max=1000.0, flux_min=1e-5) elif Model == "PLEBL": #### EBL filename = config["out"]+"/tau_"+srcname+".txt" filefun = open(filename,"w") for j in xrange(len(ETeV)): filefun.write(str(ETeV[j]*1e6)+" "+str(max(1e-10,numpy.exp(-Tau_values)[j]))+"\n") #------------------ Make the XML model spec = xml.PowerLawEBL(lib,srcname,filename,eflux=1e6,flux_value=1.0e-16,index_value=-2.0,flux_free=0, index_free=0) #### EBL : end elif Model == "LPEBL": #### EBL filename = config["out"]+"/tau_"+srcname+".txt" filefun = open(filename,"w") for j in xrange(len(ETeV)): filefun.write(str(ETeV[j]*1e6)+" "+str(max(1e-10,numpy.exp(-Tau_values)[j]))+"\n") #------------------ Make the XML model spec = xml.LogParabolaEBL(lib,srcname,filename,eflux=2000,flux_value=1.465e-10,flux_max=1.665e-10, flux_min=1.275e-10,index_value=-2.184, index_min=-2.3334, index_max=-2.0346) #### EBL : end elif Model == "PLECEBL": #### EBL filename = config["out"]+"/tau_"+srcname+".txt" filefun = open(filename,"w") for j in xrange(len(ETeV)): filefun.write(str(ETeV[j]*1e6)+" "+str(max(1e-10,numpy.exp(-Tau_values)[j]))+"\n") #------------------ Make the XML model Ecut = 3./(1+redshift)*1e6 spec = xml.PowerLawExpCutoffEBL(lib,srcname,filename,eflux=2000,flux_value=1.465e-10,flux_max=1.665e-10, flux_min=1.275e-10,index_value=-2.184, index_min=-2.3334, index_max=-2.0346) #### EBL : end ra = config["target"]["ra"] dec = config["target"]["dec"] spatial = xml.AddPointLike(doc,ra,dec) spec.appendChild(spatial) lib.appendChild(spec) #CTA BACKGROUND bkg = xml.addCTAIrfBackground(lib) lib.appendChild(bkg) # save the model into an xml file open(config["file"]["inmodel_sim"], 'w').write(doc.toprettyxml(' ')) #---------------------------- Simulations n_hours = 1 #hours for simulations n_simulations = 1 # produce different seeds for simulations #seed_index = 0 #while seed_index != 1: # seeds = random.sample(range(1, 1000000), n_simulations) # sor_seed = numpy.sort(seeds) # equal_el = 0 # for j in xrange(len(sor_seed)-1): # if sor_seed[j] != sor_seed[j+1]: # equal_el = equal_el+1 # if equal_el == len(sor_seed)-1: # seed_index = 1 start_time_MET = 662774400 end_time_MET = 662774400 + 
3600*n_hours print 'starting simulation...' #for i in range(1, n_simulations+1): from ctools import ctobssim sm = ctools.ctobssim() sm["inmodel"]=config["file"]["inmodel_sim"] sm["outevents"]= srcname+"_sim"+".fits" sm["caldb"]= config["irfs"]["caldb"] sm["irf"]= config["irfs"]["irf"] #sm["edisp"]=yes #sm["seed"]=seeds[i-1] sm["ra"]= config["target"]["ra"] sm["dec"]= config["target"]["dec"] sm["rad"]= 3 sm["tmin"]= start_time_MET sm["tmax"]= end_time_MET #sm["emin"]= config["simulation"]["emin_sim"] #sm["emax"]= config["simulation"]["emax_sim"] sm["emin"]= 0.03 sm["emax"]= 12.5 #sm["logfile"]="/home/biasuzzi/Desktop/DC1_Analysis/test_ctobssim/PKS0507+17/ctobssim_"+str(i)+"log.txt" sm["logfile"]=srcname+"_ctobssim.log" sm.execute() # create a list #bashCommand = "rm /home/biasuzzi/Desktop/DC1_Analysis/test_ctobssim/PKS0507+17/test.dat" #import subprocess #process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
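For reference, the file function dumped above is just the EBL attenuation factor exp(-tau(E, z)), floored so the ctools XML model stays strictly positive. A self-contained sketch of that computation, reusing the same ebltable calls the script itself makes (the 'dominguez' model name is an assumption; the script reads the model from config["simulation"]["EBL_model"]):

from ebltable.tau_from_model import OptDepth as OD
import numpy

tau = OD.readmodel(model='dominguez')  # assumed EBL model name
z = 0.2  # example redshift
ETeV = numpy.logspace(-2, 2.5, 200)  # same energy grid as the script
# Attenuation exp(-tau), floored at 1e-10 as in the file-function dump above.
attenuation = numpy.maximum(1e-10, numpy.exp(-tau.opt_depth(z, ETeV)))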
gpl-3.0
3,105,693,044,302,928,000
31
189
0.646767
false
jtk54/spinnaker
dev/validate_bom__config.py
1
39074
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This is the "config" module for the validate_bom script. It is responsible for configuring spinnaker via Halyard. The Configurator interface is used internally to implement the public interface, which is provided via free functions. * Each configurable aspect has its own Configurator class. * The class has the following methods: init_argument_parser Adds the configuration parameters for that aspect to the argparser. validate_options Performs a quick validation of the options to fail fast. add_config Adds script commands used to configure this component [via hal]. add_files_to_upload Adds paths to files referenced by config options that should be uploaded with the script that will be referencing them. * The configurator may add other implicit parameters. <service>_account_enabled is set if it is configured. The flag is used to make test filtering easier. """ import os from validate_bom__deploy import write_data_to_secure_path class AzsStorageConfiguratorHelper(object): """Helper class for StorageConfigurator to handle AZS.""" @classmethod def init_argument_parser(cls, parser): """Implements interface.""" parser.add_argument( '--storage_azs_account_name', default=None, help='The name for the Azure Storage Account to use.' ' This is only used if --spinnaker_storage=azs.') parser.add_argument( '--storage_azs_credentials', default=None, help='Path to Azure Storage Account credentials to configure' ' spinnaker storage. This is only used if --spinnaker_storage=azs.') @classmethod def validate_options(cls, options): """Implements interface.""" if not options.storage_azs_credentials: raise ValueError('Specified --spinnaker_storage="azs"' ' but not --storage_azs_credentials') @classmethod def add_files_to_upload(cls, options, file_set): """Implements interface.""" file_set.add(options.storage_azs_credentials) @classmethod def add_config(cls, options, script): """Implements interface.""" script.append( 'AZS_PASSWORD=$(cat {file})' .format(file=os.path.basename(options.storage_azs_credentials))) hal = ( 'hal -q --log=info config storage azs edit' ' --storage-account-name {name}' ' --storage-account-key "$AZS_PASSWORD"' .format(name=options.storage_azs_account_name)) script.append(hal) class S3StorageConfiguratorHelper(object): """Helper class for StorageConfigurator to handle S3.""" REGIONS = ['us-east-2', 'us-east-1', 'us-west-1', 'us-west-2', 'ca-central-1', 'ap-south-1', 'ap-northeast-2', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'sa-east-1'] @classmethod def init_argument_parser(cls, parser): """Implements interface.""" parser.add_argument( '--storage_s3_bucket', default=None, help='The name for the AWS S3 bucket to use.'
' This is only used if --spinnaker_storage=s3.') parser.add_argument( '--storage_s3_assume_role', default='role/spinnakerManaged', help='Use AWS SecurityToken Service to assume this role.') parser.add_argument( '--storage_s3_region', choices=cls.REGIONS, help='The name for the AWS region to create the bucket in.' ' This is only used if the bucket does not already exist.') parser.add_argument( '--storage_s3_endpoint', help='The s3 endpoint.') parser.add_argument( '--storage_s3_access_key_id', default=None, help='AWS Access Key ID for AWS account owning s3 storage.') parser.add_argument( '--storage_s3_credentials', default=None, help='Path to file containing the secret access key for the S3 account') @classmethod def validate_options(cls, options): """Implements interface.""" if not options.storage_s3_credentials: raise ValueError('--storage_s3_credentials is required.') if not options.storage_s3_access_key_id: raise ValueError('--storage_s3_access_key_id is required.') if not options.storage_s3_region: raise ValueError('--storage_s3_region is required.') @classmethod def add_files_to_upload(cls, options, file_set): """Implements interface.""" if options.storage_s3_credentials: file_set.add(options.storage_s3_credentials) @classmethod def add_config(cls, options, script): """Implements interface.""" command = ['hal -q --log=info config storage s3 edit'] if options.storage_s3_access_key_id: command.extend(['--access-key-id', options.storage_s3_access_key_id]) if options.storage_s3_bucket: command.extend(['--bucket', options.storage_s3_bucket]) if options.storage_s3_assume_role: command.extend(['--assume-role', options.storage_s3_assume_role]) if options.storage_s3_region: command.extend(['--region', options.storage_s3_region]) if options.storage_s3_endpoint: command.extend(['--endpoint', options.storage_s3_endpoint]) if options.storage_s3_credentials: command.extend(['--secret-access-key < {file}'.format( file=os.path.basename(options.storage_s3_credentials))]) script.append(' '.join(command)) class GcsStorageConfiguratorHelper(object): """Helper class for StorageConfigurator to handle GCS.""" LOCATIONS = [ # multi-regional bucket 'us', 'eu', 'ap', # regional bucket 'us-central1', 'us-east1', 'us-west1', 'us-east4', 'europe-west1', 'asia-east1', 'asia-northeast1', 'asia-southeast1' ] @classmethod def init_argument_parser(cls, parser): """Implements interface.""" parser.add_argument( '--storage_gcs_bucket', default=None, help=('URI for specific Google Storage bucket to use.' ' This is suggested if using gcs storage, though can be left' ' empty to let Halyard create one.')) parser.add_argument( '--storage_gcs_location', choices=cls.LOCATIONS, default='us-central1', help=('Location for the bucket if it needs to be created.')) parser.add_argument( '--storage_gcs_project', default=None, help=('URI for specific Google Storage bucket project to use.' ' If empty, use the --deploy_google_project.')) parser.add_argument( '--storage_gcs_credentials', default=None, help='Path to google credentials file to configure spinnaker storage.' ' This is only used if --spinnaker_storage=gcs.' 
' If left empty then use application default credentials.') @classmethod def validate_options(cls, options): """Implements interface.""" if not options.storage_gcs_bucket: raise ValueError('Specified --spinnaker_storage="gcs"' ' but not --storage_gcs_bucket') @classmethod def add_files_to_upload(cls, options, file_set): """Implements interface.""" if options.storage_gcs_credentials: file_set.add(options.storage_gcs_credentials) @classmethod def add_config(cls, options, script): """Implements interface.""" project = options.storage_gcs_project or options.deploy_google_project hal = ( 'hal -q --log=info config storage gcs edit' ' --project {project}' ' --bucket {bucket}' ' --bucket-location {location}' .format(project=project, bucket=options.storage_gcs_bucket, location=options.storage_gcs_location)) if options.storage_gcs_credentials: hal += (' --json-path ./{filename}' .format(filename=os.path.basename( options.storage_gcs_credentials))) script.append(hal) class StorageConfigurator(object): """Controls hal config storage for Spinnaker Storage .""" HELPERS = { 'azs': AzsStorageConfiguratorHelper, 'gcs': GcsStorageConfiguratorHelper, 's3': S3StorageConfiguratorHelper } def init_argument_parser(self, parser): """Implements interface.""" parser.add_argument( '--spinnaker_storage', required=True, choices=self.HELPERS.keys(), help='The storage type to configure.') for helper in self.HELPERS.values(): helper.init_argument_parser(parser) def validate_options(self, options): """Implements interface.""" helper = self.HELPERS.get(options.spinnaker_storage, None) if helper is None: raise ValueError('Unknown --spinnaker_storage="{0}"' .format(options.spinnaker_storage)) helper.validate_options(options) def add_files_to_upload(self, options, file_set): """Implements interface.""" helper = self.HELPERS.get(options.spinnaker_storage, None) if helper is None: raise ValueError('Unknown --spinnaker_storage="{0}"' .format(options.spinnaker_storage)) helper.add_files_to_upload(options, file_set) def add_config(self, options, script): """Implements interface.""" helper = self.HELPERS.get(options.spinnaker_storage, None) if helper is None: raise ValueError('Unknown --spinnaker_storage="{0}"' .format(options.spinnaker_storage)) helper.add_config(options, script) script.append('hal -q --log=info config storage edit --type {type}' .format(type=options.spinnaker_storage)) class AwsConfigurator(object): """Controls hal config provider aws.""" def init_argument_parser(self, parser): """Implements interface.""" # pylint: disable=line-too-long parser.add_argument( '--aws_access_key_id', default=None, help='The AWS ACCESS_KEY_ID.') parser.add_argument( '--aws_credentials', default=None, help='A path to a file containing the AWS SECRET_ACCESS_KEY') parser.add_argument( '--aws_account_name', default='my-aws-account', help='The name of the primary AWS account to configure.') parser.add_argument( '--aws_account_id', default=None, help='The AWS account id for the account.' ' See http://docs.aws.amazon.com/IAM/latest/UserGuide/console_account-alias.html') parser.add_argument( '--aws_account_role', default='role/spinnakerManaged', help=' The account will assume this role.') parser.add_argument( '--aws_account_regions', default='us-east-1,us-west-2', help='The AWS account regions the account will manage.') parser.add_argument( '--aws_account_pem_path', default=None, help='The path to the PEM file for the keypair to use.' 
' The basename minus suffix will be the name of the keypair.') def validate_options(self, options): """Implements interface.""" options.aws_account_enabled = options.aws_access_key_id is not None if options.aws_account_enabled and not options.aws_credentials: raise ValueError( '--aws_access_key_id given, but not --aws_credentials') if options.aws_account_enabled and not options.aws_account_id: raise ValueError( '--aws_access_key_id given, but not --aws_account_id') if options.aws_account_enabled and not options.aws_account_role: raise ValueError( '--aws_access_key_id given, but not --aws_account_role') def add_config(self, options, script): """Implements interface.""" if not options.aws_access_key_id: return account_params = [options.aws_account_name, '--assume-role', options.aws_account_role, '--account-id', options.aws_account_id] if options.aws_account_pem_path: basename = os.path.basename(options.aws_account_pem_path) script.append('mv {file} .ssh/'.format(file=basename)) account_params.extend( ['--default-key-pair', os.path.splitext(basename)[0]]) if options.aws_account_regions: account_params.extend(['--regions', options.aws_account_regions]) script.append('hal -q --log=info config provider aws enable') script.append( 'hal -q --log=info config provider aws edit ' ' --access-key-id {id} --secret-access-key < {file}' .format(id=options.aws_access_key_id, file=os.path.basename(options.aws_credentials))) script.append( 'hal -q --log=info config provider aws account add {params}' .format(params=' '.join(account_params))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.aws_credentials: file_set.add(options.aws_credentials) if options.aws_account_pem_path: file_set.add(options.aws_account_pem_path) class AppengineConfigurator(object): """Controls hal config provider for appengine.""" def init_argument_parser(self, parser): """Implements interface.""" # pylint: disable=line-too-long parser.add_argument( '--appengine_account_project', default=None, help='The Google Cloud Platform project this Spinnaker account will manage.') parser.add_argument( '--appengine_account_name', default='my-appengine-account', help='The name of the primary Appengine account to configure.') parser.add_argument( '--appengine_account_credentials', default=None, help='Path to file containing the JSON Oauth credentials for the AppEngine account.') parser.add_argument( '--appengine_account_git_username', default=None, help='The name of the remote git user.') parser.add_argument( '--appengine_account_git_https_credentials', default=None, help='Path to file containing the password for the remote git repository.') parser.add_argument( '--appengine_account_git_oauth_credentials', default=None, help='Path to file containing the OAuth access token for the remote git repository.') parser.add_argument( '--appengine_account_ssh_private_key_path', default=None) parser.add_argument( '--appengine_account_ssh_private_key_passphrase', default=None) parser.add_argument( '--appengine_account_local_repository_directory', default=None)
if options.appengine_account_credentials: account_params.extend( ['--json-path', os.path.basename(options.appengine_account_credentials)]) if options.appengine_account_local_repository_directory: account_params.extend( ['--local-repository-directory', options.appengine_account_local_repository_directory]) script.append( 'hal -q --log=info config provider appengine account add {params}' .format(params=' '.join(account_params))) hal_edit = ('hal -q --log=info' ' config provider appengine account edit {name}' .format(name=options.appengine_account_name)) # Maybe config github if options.appengine_account_git_username: git_params = ['--git-https-username', options.appengine_account_git_username] if options.appengine_account_git_oauth_credentials: git_params.append( '--github-oauth-access-token < {file}' .format( file=os.path.basename( options.appengine_account_git_oauth_credentials))) elif options.appengine_account_git_https_credentials: git_params.append( '--git-https-password < {path}' .format( path=os.path.basename( options.appengine_account_git_https_credentials))) script.append( '{hal} {params}'.format(hal=hal_edit, params=' '.join(git_params))) # Maybe config ssh if options.appengine_account_ssh_private_key_path: ssh_params = ['--ssh-private-key-file-path', options.appengine_account_ssh_private_key_path] if options.appengine_account_ssh_private_key_passphrase: ssh_params.append('--ssh-private-key-passphrase < {path}'.format( path=os.path.basename( options.appengine_account_ssh_private_key_passphrase))) script.append( '{hal} {params}'.format(hal=hal_edit, params=' '.join(ssh_params))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.appengine_account_credentials: file_set.add(options.appengine_account_credentials) if options.appengine_account_git_https_credentials: file_set.add(options.appengine_account_git_https_credentials) if options.appengine_account_git_oauth_credentials: file_set.add(options.appengine_account_git_oauth_credentials) if options.appengine_account_ssh_private_key_passphrase: file_set.add(options.appengine_account_ssh_private_key_passphrase) class AzureConfigurator(object): """Controls hal config provider azure.""" def init_argument_parser(self, parser): """Implements interface.""" # pylint: disable=line-too-long parser.add_argument( '--azure_account_credentials', default=None, help='Path to Azure credentials file containing the appKey' ' for the service principal.') parser.add_argument( '--azure_account_name', default='my-azure-account', help='The name of the primary Azure account to configure.') parser.add_argument( '--azure_account_client_id', default=None, help='The Azure clientId for the service principal.') parser.add_argument( '--azure_account_subscription_id', default=None, help='The subscriptionId for the service principal.') parser.add_argument( '--azure_account_tenant_id', default=None, help='The tenantId for the service principal.') parser.add_argument( '--azure_account_object_id', default=None, help='The objectId of the service principal.'
' Needed to bake Windows images.') parser.add_argument( '--azure_account_default_key_vault', default=None, help='The name of the KeyVault containing the default user/password' ' to create VMs.') parser.add_argument( '--azure_account_default_resource_group', default=None, help='The default for non-application specific resources.') parser.add_argument( '--azure_account_packer_resource_group', default=None, help='Used by packer when baking images.') parser.add_argument( '--azure_account_packer_storage_account', default=None, help='The storage account to use if baking images with packer.') def validate_options(self, options): """Implements interface.""" options.azure_account_enabled = (options.azure_account_subscription_id is not None) if not options.azure_account_enabled: return if ((options.azure_account_packer_resource_group is not None) != (options.azure_account_packer_storage_account is not None)): raise ValueError( '--azure_account_packer_resource_group' ' and --azure_account_packer_storage_account' ' must either both be set or neither be set.') for name in ['client_id', 'credentials', 'subscription_id', 'tenant_id', 'default_key_vault', 'default_resource_group']: key = 'azure_account_' + name if not getattr(options, key): raise ValueError( '--{0} is required with --azure_account_subscription_id.' .format(key)) def add_config(self, options, script): """Implements interface.""" if not options.azure_account_credentials: return account_params = [ options.azure_account_name, '--client-id', options.azure_account_client_id, '--default-key-vault', options.azure_account_default_key_vault, '--default-resource-group', options.azure_account_default_resource_group, '--subscription-id', options.azure_account_subscription_id, '--tenant-id', options.azure_account_tenant_id ] if options.azure_account_object_id: account_params.extend(['--object-id', options.azure_account_object_id]) if options.azure_account_packer_resource_group: account_params.extend(['--packer-resource-group', options.azure_account_packer_resource_group]) if options.azure_account_packer_storage_account: account_params.extend(['--packer-storage-account', options.azure_account_packer_storage_account]) script.append('hal -q --log=info config provider azure enable') script.append( 'hal -q --log=info config provider azure account add {params}' ' --app-key < {creds}' .format(params=' '.join(account_params), creds=os.path.basename(options.azure_account_credentials))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.azure_account_credentials: file_set.add(options.azure_account_credentials) class GoogleConfigurator(object): """Controls hal config provider google.""" def init_argument_parser(self, parser): """Implements interface.""" parser.add_argument( '--google_account_project', default=None, help='Google project to deploy to if --host_platform is gce.') parser.add_argument( '--google_account_credentials', default=None, help='Path to google credentials file for the google account.'
' Adding credentials enables the account.') parser.add_argument( '--google_account_name', default='my-google-account', help='The name of the primary google account to configure.') def validate_options(self, options): """Implements interface.""" options.google_account_enabled = ( options.google_account_credentials is not None) if options.google_account_credentials: if not options.google_account_project: raise ValueError('--google_account_project was not specified.') def add_config(self, options, script): """Implements interface.""" if not options.google_account_credentials: return if not options.google_account_project: raise ValueError( '--google_account_credentials without --google_account_project') account_params = [options.google_account_name] account_params.extend([ '--project', options.google_account_project, '--json-path', os.path.basename(options.google_account_credentials)]) script.append('hal -q --log=info config provider google enable') if options.deploy_google_zone: script.append('hal -q --log=info config provider google bakery edit' ' --zone {zone}'.format(zone=options.deploy_google_zone)) script.append( 'hal -q --log=info config provider google account add {params}' .format(params=' '.join(account_params))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.google_account_credentials: file_set.add(options.google_account_credentials) class KubernetesConfigurator(object): """Controls hal config provider kubernetes.""" def init_argument_parser(self, parser): """Implements interface.""" parser.add_argument( '--k8s_account_credentials', default=None, help='Path to k8s credentials file.') parser.add_argument( '--k8s_account_name', default='my-kubernetes-account', help='The name of the primary Kubernetes account to configure.') parser.add_argument( '--k8s_account_context', help='The kubernetes context for the primary Kubernetes account.') parser.add_argument( '--k8s_account_namespaces', default='validate-bom', help='The kubernetes namespaces for the primary Kubernetes account.') parser.add_argument( '--k8s_account_docker_account', default=None, help='The docker registry account to use with the Kubernetes account.') def validate_options(self, options): """Implements interface.""" options.k8s_account_enabled = options.k8s_account_credentials is not None if options.k8s_account_credentials: if not options.k8s_account_docker_account: raise ValueError('--k8s_account_docker_account was not specified.') def add_config(self, options, script): """Implements interface.""" if not options.k8s_account_credentials: return if not options.k8s_account_docker_account: raise ValueError( '--k8s_account_credentials without --k8s_account_docker_account') account_params = [options.k8s_account_name] account_params.extend([ '--docker-registries', options.k8s_account_docker_account, '--kubeconfig-file', os.path.basename(options.k8s_account_credentials) ]) if options.k8s_account_context: account_params.extend(['--context', options.k8s_account_context]) if options.k8s_account_namespaces: account_params.extend(['--namespaces', options.k8s_account_namespaces]) script.append('hal -q --log=info config provider kubernetes enable') script.append('hal -q --log=info config provider kubernetes account' ' add {params}' .format(params=' '.join(account_params))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.k8s_account_credentials: file_set.add(options.k8s_account_credentials) class DockerConfigurator(object): """Controls hal config provider docker."""
def init_argument_parser(self, parser): """Implements interface.""" parser.add_argument( '--docker_account_address', default=None, help='Registry address to pull and deploy images from.') parser.add_argument( '--docker_account_name', default='my-docker-account', help='The name of the primary Docker account to configure.') parser.add_argument( '--docker_account_registry_username', default=None, help='The username for the docker registry.') parser.add_argument( '--docker_account_credentials', default=None, help='Path to plain-text password file.') parser.add_argument( '--docker_account_repositories', default=None, help='Additional list of repositories to cache images from.') def validate_options(self, options): """Implements interface.""" options.docker_account_enabled = options.docker_account_address is not None def add_config(self, options, script): """Implements interface.""" if not options.docker_account_address: return account_params = [options.docker_account_name, '--address', options.docker_account_address] if options.docker_account_credentials: cred_basename = os.path.basename(options.docker_account_credentials) account_params.extend( ['--password-file', cred_basename]) if options.docker_account_registry_username: account_params.extend( ['--username', options.docker_account_registry_username]) if options.docker_account_repositories: account_params.extend( ['--repositories', options.docker_account_repositories]) script.append('hal -q --log=info config provider docker-registry enable') script.append('hal -q --log=info config provider docker-registry account' ' add {params}' .format(params=' '.join(account_params))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.docker_account_credentials: file_set.add(options.docker_account_credentials) class JenkinsConfigurator(object): """Controls hal config ci.""" def init_argument_parser(self, parser): """Implements interface.""" parser.add_argument( '--jenkins_master_name', default=None, help='The name of the jenkins master to configure.' ' If provided, this also needs --jenkins_master_address, ' ' --jenkins_master_user, and --jenkins_master_credentials' ' or an environment variable JENKINS_MASTER_PASSWORD') parser.add_argument( '--jenkins_master_address', default=None, help='The network address of the jenkins master to configure.' ' If provided, this also needs --jenkins_master_name, ' ' --jenkins_master_user, and --jenkins_master_credentials' ' or an environment variable JENKINS_MASTER_PASSWORD') parser.add_argument( '--jenkins_master_user', default=None, help='The user for the jenkins master to configure.' ' If provided, this also needs --jenkins_master_address, ' ' --jenkins_master_name, and --jenkins_master_credentials' ' or an environment variable JENKINS_MASTER_PASSWORD') parser.add_argument( '--jenkins_master_credentials', default=None, help='Path to a file containing the password for the jenkins master to configure.'
' If provided, this takes precedence over' ' any JENKINS_MASTER_PASSWORD environment variable value.') def validate_options(self, options): """Implements interface.""" if ((options.jenkins_master_name is None) != (options.jenkins_master_address is None) or ((options.jenkins_master_name is None) != (options.jenkins_master_user is None))): raise ValueError('Inconsistent jenkins_master specification: ' ' --jenkins_master_name="{0}"' ' --jenkins_master_address="{1}"' ' --jenkins_master_user="{2}"' .format(options.jenkins_master_name, options.jenkins_master_address, options.jenkins_master_user)) if (options.jenkins_master_name and os.environ.get('JENKINS_MASTER_PASSWORD') is None): raise ValueError('--jenkins_master_name was provided,' ' but no JENKINS_MASTER_PASSWORD environment variable') options.jenkins_master_enabled = options.jenkins_master_name is not None def add_config(self, options, script): """Implements interface.""" name = options.jenkins_master_name or None address = options.jenkins_master_address or None user = options.jenkins_master_user or None if options.jenkins_master_credentials: password_file = os.path.basename(options.jenkins_master_credentials) elif os.environ.get('JENKINS_MASTER_PASSWORD', None): password_file = 'jenkins_{name}_password'.format( name=options.jenkins_master_name) else: password_file = None if ((name is None) != (address is None) or (name is None) != (user is None)): raise ValueError('Either all of --jenkins_master_name,' ' --jenkins_master_address, --jenkins_master_user' ' or none of them must be supplied.') if name is None: return if password_file is None: raise ValueError( 'No --jenkins_master_credentials or JENKINS_MASTER_PASSWORD' ' environment variable was supplied.') script.append('hal -q --log=info config ci jenkins enable') script.append('hal -q --log=info config ci jenkins master' ' add {name}' ' --address {address}' ' --username {user}' ' --password < {password_file}' .format(name=options.jenkins_master_name, address=options.jenkins_master_address, user=options.jenkins_master_user, password_file=os.path.basename(password_file))) def add_files_to_upload(self, options, file_set): """Implements interface.""" if options.jenkins_master_credentials: file_set.add(options.jenkins_master_credentials) elif os.environ.get('JENKINS_MASTER_PASSWORD', None): path = write_data_to_secure_path( os.environ.get('JENKINS_MASTER_PASSWORD'), 'jenkins_{0}_password'.format(options.jenkins_master_name)) file_set.add(path) class MonitoringConfigurator(object): """Controls hal config monitoring.""" def init_argument_parser(self, parser): """Implements interface.""" parser.add_argument( '--monitoring_prometheus_gateway', default=None, help='If provided, and --monitoring_install_which is "prometheus",' ' configure it to use the gateway server at this URL.') parser.add_argument( '--monitoring_install_which', default=None, help='If provided, install monitoring with these params.') def validate_options(self, options): """Implements interface.""" if (options.monitoring_prometheus_gateway and (options.monitoring_install_which != 'prometheus')): raise ValueError('gateway is only applicable to ' ' --monitoring_install_which="prometheus"') def __inject_prometheus_node_exporter(self, options, script): """Add installation instructions for node_exporter. Add these to the start of the script so we can monitor installation.
""" version = '0.14.0' node_version = 'node_exporter-{0}.linux-amd64'.format(version) install_node_exporter = [ 'curl -s -S -L -o /tmp/node_exporter.gz' ' https://github.com/prometheus/node_exporter/releases/download' '/v{version}/{node_version}.tar.gz' .format(version=version, node_version=node_version), 'sudo tar xzf /tmp/node_exporter.gz -C /opt', 'sudo ln -fs /opt/{node_version}/node_exporter' ' /usr/bin/node_exporter' .format(node_version=node_version), 'rm /tmp/node_exporter.gz', 'echo "start on filesystem or runlevel [2345]"' ' | sudo tee /etc/init/node_exporter.conf', 'echo "exec /usr/bin/node_exporter 2>&1 /var/log/node_exporter.log"' ' | sudo tee -a /etc/init/node_exporter.conf', 'sudo chmod 644 /etc/init/node_exporter.conf', 'sudo service node_exporter restart' ] # Prepend install_node_exporter to the beginning of the list. # This is so we can monitor installation process itself, # at least from this point in the script execution (before halyard install). # # There is no prepend, only individual element insert. # But there is a reverse, so we'll do a bit of manipulation here script.reverse() install_node_exporter.reverse() script.extend(install_node_exporter) script.reverse() def add_config(self, options, script): """Implements interface.""" if not options.monitoring_install_which: return # Start up monitoring now so we can monitor these VMs if (options.monitoring_install_which == 'prometheus' and options.deploy_spinnaker_type == 'localdebian'): self.__inject_prometheus_node_exporter(options, script) script.append('mkdir -p ~/.hal/default/service-settings') script.append('echo "host: 0.0.0.0"' ' > ~/.hal/default/service-settings/monitoring-daemon.yml') script.append('hal -q --log=info config metric-stores {which} enable' .format(which=options.monitoring_install_which)) if options.monitoring_prometheus_gateway: script.append('hal -q --log=info config metric-stores prometheus edit' ' --push-gateway {gateway}' .format(gateway=options.monitoring_prometheus_gateway)) def add_files_to_upload(self, options, file_set): """Implements interface.""" pass class NotificationConfigurator(object): """Controls hal config notification.""" def init_argument_parser(self, parser): """Implements interface.""" pass def validate_options(self, options): """Implements interface.""" pass def add_config(self, options, script): """Implements interface.""" pass def add_files_to_upload(self, options, file_set): """Implements interface.""" pass class SecurityConfigurator(object): """Controls hal config security.""" def init_argument_parser(self, parser): """Implements interface.""" pass def validate_options(self, options): """Implements interface.""" pass def add_config(self, options, script): """Implements interface.""" pass def add_files_to_upload(self, options, file_set): """Implements interface.""" pass CONFIGURATOR_LIST = [ MonitoringConfigurator(), StorageConfigurator(), AwsConfigurator(), AppengineConfigurator(), AzureConfigurator(), DockerConfigurator(), GoogleConfigurator(), KubernetesConfigurator(), JenkinsConfigurator(), NotificationConfigurator(), SecurityConfigurator(), ] def init_argument_parser(parser): """Initialize the argument parser with configuration options. Args: parser: [ArgumentParser] The argument parser to add the options to. """ for configurator in CONFIGURATOR_LIST: configurator.init_argument_parser(parser) def validate_options(options): """Validate supplied options to ensure basic idea is ok. 
  This doesn't perform a fine-grained check, just whether or not
  the arguments seem consistent or complete, so we can fail fast.
  """
  for configurator in CONFIGURATOR_LIST:
    configurator.validate_options(options)


def make_script(options):
  """Creates the bash script for configuring Spinnaker.

  Returns a list of bash statement strings.
  """
  script = []
  for configurator in CONFIGURATOR_LIST:
    configurator.add_config(options, script)
  return script


def get_files_to_upload(options):
  """Collects the paths to files that the configuration script will reference.

  Returns:
    A set of path strings.
  """
  file_set = set([])
  for configurator in CONFIGURATOR_LIST:
    configurator.add_files_to_upload(options, file_set)
  return file_set
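# --- Usage sketch (added for illustration; not part of the original script) ---
# A minimal driver for the configurator pipeline above. It assumes the
# remaining configurator flags all carry argparse defaults, and it sets
# deploy_spinnaker_type by hand because that option is registered by another
# part of the real script, not by these configurators.
if __name__ == '__main__':
  import argparse

  parser = argparse.ArgumentParser()
  init_argument_parser(parser)
  options = parser.parse_args(['--monitoring_install_which', 'prometheus'])
  options.deploy_spinnaker_type = 'localdebian'  # normally set elsewhere

  validate_options(options)              # fail fast on inconsistent flags
  for statement in make_script(options):
    print(statement)                     # the generated bash statements
  print(get_files_to_upload(options))    # credential files to copy first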
apache-2.0
1,387,266,455,685,102,300
37.763889
95
0.656088
false
crobby/sahara
sahara/utils/remote.py
10
5045
# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2013 Hortonworks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from oslo_config import cfg import six from sahara import exceptions as ex from sahara.i18n import _ # These options are for SSH remote only ssh_opts = [ cfg.IntOpt('global_remote_threshold', default=100, help='Maximum number of remote operations that will ' 'be running at the same time. Note that each ' 'remote operation requires its own process to ' 'run.'), cfg.IntOpt('cluster_remote_threshold', default=70, help='The same as global_remote_threshold, but for ' 'a single cluster.'), cfg.StrOpt('proxy_command', default='', help='Proxy command used to connect to instances. If set, this ' 'command should open a netcat socket, that Sahara will use for ' 'SSH and HTTP connections. Use {host} and {port} to describe ' 'the destination. Other available keywords: {tenant_id}, ' '{network_id}, {router_id}.'), ] CONF = cfg.CONF CONF.register_opts(ssh_opts) DRIVER = None @six.add_metaclass(abc.ABCMeta) class RemoteDriver(object): @abc.abstractmethod def setup_remote(self, engine): """Performs driver initialization.""" @abc.abstractmethod def get_remote(self, instance): """Returns driver specific Remote.""" @abc.abstractmethod def get_userdata_template(self): """Returns userdata template preparing instance to work with driver.""" @abc.abstractmethod def get_type_and_version(self): """Returns engine type and version Result should be in the form 'type.major.minor'. """ @six.add_metaclass(abc.ABCMeta) class Remote(object): @abc.abstractmethod def get_neutron_info(self): """Returns dict which later could be passed to get_http_client.""" @abc.abstractmethod def get_http_client(self, port, info=None): """Returns HTTP client for a given instance's port.""" @abc.abstractmethod def close_http_session(self, port): """Closes cached HTTP session for a given instance's port.""" @abc.abstractmethod def execute_command(self, cmd, run_as_root=False, get_stderr=False, raise_when_error=True, timeout=300): """Execute specified command remotely using existing ssh connection. Return exit code, stdout data and stderr data of the executed command. """ @abc.abstractmethod def write_file_to(self, remote_file, data, run_as_root=False, timeout=120): """Create remote file and write the given data to it. Uses existing ssh connection. """ @abc.abstractmethod def append_to_file(self, r_file, data, run_as_root=False, timeout=120): """Append the given data to remote file. Uses existing ssh connection. 
""" @abc.abstractmethod def write_files_to(self, files, run_as_root=False, timeout=120): """Copy file->data dictionary in a single ssh connection.""" @abc.abstractmethod def append_to_files(self, files, run_as_root=False, timeout=120): """Copy file->data dictionary in a single ssh connection.""" @abc.abstractmethod def read_file_from(self, remote_file, run_as_root=False, timeout=120): """Read remote file from the specified host and return given data.""" @abc.abstractmethod def replace_remote_string(self, remote_file, old_str, new_str, timeout=120): """Replaces strings in remote file using sed command.""" def setup_remote(driver, engine): global DRIVER DRIVER = driver DRIVER.setup_remote(engine) def get_remote_type_and_version(): return DRIVER.get_type_and_version() def _check_driver_is_loaded(): if not DRIVER: raise ex.SystemError(_('Remote driver is not loaded. Most probably ' 'you see this error because you are running ' 'Sahara in distributed mode and it is broken.' 'Try running sahara-all instead.')) def get_remote(instance): """Returns Remote for a given instance.""" _check_driver_is_loaded() return DRIVER.get_remote(instance) def get_userdata_template(): """Returns userdata template as a string.""" _check_driver_is_loaded() return DRIVER.get_userdata_template()
apache-2.0
-1,376,785,825,670,762,200
31.548387
79
0.647374
false
ipdcode/skydns
kdns/dpdk-17.02/usertools/cpu_layout.py
5
3591
#!/usr/bin/env python # # BSD LICENSE # # Copyright(c) 2010-2014 Intel Corporation. All rights reserved. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # from __future__ import print_function import sys sockets = [] cores = [] core_map = {} fd = open("/proc/cpuinfo") lines = fd.readlines() fd.close() core_details = [] core_lines = {} for line in lines: if len(line.strip()) != 0: name, value = line.split(":", 1) core_lines[name.strip()] = value.strip() else: core_details.append(core_lines) core_lines = {} for core in core_details: for field in ["processor", "core id", "physical id"]: if field not in core: print("Error getting '%s' value from /proc/cpuinfo" % field) sys.exit(1) core[field] = int(core[field]) if core["core id"] not in cores: cores.append(core["core id"]) if core["physical id"] not in sockets: sockets.append(core["physical id"]) key = (core["physical id"], core["core id"]) if key not in core_map: core_map[key] = [] core_map[key].append(core["processor"]) print("============================================================") print("Core and Socket Information (as reported by '/proc/cpuinfo')") print("============================================================\n") print("cores = ", cores) print("sockets = ", sockets) print("") max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1)) max_core_map_len = max_processor_len * 2 + len('[, ]') + len('Socket ') max_core_id_len = len(str(max(cores))) output = " ".ljust(max_core_id_len + len('Core ')) for s in sockets: output += " Socket %s" % str(s).ljust(max_core_map_len - len('Socket ')) print(output) output = " ".ljust(max_core_id_len + len('Core ')) for s in sockets: output += " --------".ljust(max_core_map_len) output += " " print(output) for c in cores: output = "Core %s" % str(c).ljust(max_core_id_len) for s in sockets: output += " " + str(core_map[(s, c)]).ljust(max_core_map_len) print(output)
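# --- Self-contained sketch (added for illustration; not part of DPDK) ---
# The same parsing idea as above, folded into a function so the
# (socket, core) -> [processors] mapping can be exercised without a real
# /proc/cpuinfo. SAMPLE describes one socket, one core, two hyper-threads.
SAMPLE = ("processor : 0\ncore id : 0\nphysical id : 0\n\n"
          "processor : 1\ncore id : 0\nphysical id : 0\n\n")

def build_core_map(cpuinfo_text):
    result, block = {}, {}
    for raw in cpuinfo_text.splitlines():
        if raw.strip():
            name, value = raw.split(":", 1)
            block[name.strip()] = value.strip()
        elif block:
            # blank line closes one processor record
            key = (int(block["physical id"]), int(block["core id"]))
            result.setdefault(key, []).append(int(block["processor"]))
            block = {}
    return result

assert build_core_map(SAMPLE) == {(0, 0): [0, 1]}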
mit
-1,481,177,172,915,420,400
35.642857
76
0.642718
false
cedadev/cis
cis/test/unit/test_hyperpoint.py
3
9132
""" Module to test the collocation routines """ from cis.data_io.hyperpoint import HyperPoint from nose.tools import assert_almost_equal, istest, eq_ @istest def test_same_point_in_space_and_time_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, val=14.4).same_point_in_space_and_time( HyperPoint(lat=10.0, lon=50.0, val=15.1))) @istest def test_is_not_same_point_in_space_and_time_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=51.0, val=14.4).same_point_in_space_and_time( HyperPoint(lat=10.0, lon=50.0, val=15.1))) @istest def test_same_point_in_space_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, alt=10.0, val=14.4).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=10.0, val=15.1))) @istest def test_is_notsame_point_in_space_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=10.0, val=14.4).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=11.0, val=15.1))) @istest def test_same_point_in_space_with_pressure_not_altitude_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, pres=10.0, val=14.4).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, pres=10.0, val=15.1))) @istest def test_is_not_same_point_in_space_with_pressure_not_altitude_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=50.0, pres=10.0, val=14.4).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, pres=11.0, val=15.1))) @istest def test_same_point_in_space_with_pressure_and_altitude_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=10.0, val=14.4).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=10.0, val=15.1))) @istest def test_is_not_same_point_in_space_with_pressure_and_altitude_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=10.0, val=14.4).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=6.0, pres=11.0, val=15.1))) @istest def test_same_point_in_space_with_points_with_different_times(): assert (HyperPoint(lat=10.0, lon=50.0, alt=10.0, t=500.0).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=10.0, t=501.0))) @istest def test_is_not_same_point_in_space_with_points_with_different_times(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=10.0, t=500.0).same_point_in_space( HyperPoint(lat=11.0, lon=51.0, alt=10.0, t=501.0))) @istest def test_same_point_in_space_with_pressure_not_altitude_with_points_with_different_times(): assert (HyperPoint(lat=10.0, lon=50.0, pres=10.0, t=500.0).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, pres=10.0, t=501.0))) @istest def test_is_not_same_point_in_space_with_pressure_not_altitude_with_points_with_different_times(): assert (not HyperPoint(lat=10.0, lon=50.0, pres=10.0, t=500.0).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, pres=11.0, t=501.0))) @istest def test_same_point_in_space_with_pressure_and_altitude_with_points_with_different_times(): assert (HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=10.0, t=500.0).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=10.0, t=501.0))) @istest def test_is_not_same_point_in_space_with_pressure_and_altitude_with_points_with_different_times(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=10.0, t=500.0).same_point_in_space( HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=11.0, t=501.0))) @istest def test_same_point_in_time_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, alt=5.0, t=500.0, 
val=14.4).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=5.0, t=500.0, val=15.1))) @istest def test_is_not_same_point_in_time_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=5.0, t=500.0, val=14.4).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=5.0, t=501.0, val=15.1))) @istest def test_same_point_in_time_with_pressure_not_altitude_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, pres=5.0, t=500.0, val=14.4).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, pres=5.0, t=500.0, val=15.1))) @istest def test_is_not_same_point_in_time_with_pressure_not_altitude_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=50.0, pres=5.0, t=500.0, val=14.4).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, pres=5.0, t=501.0, val=15.1))) @istest def test_same_point_in_time_with_pressure_and_altitude_with_points_with_different_values(): assert (HyperPoint(lat=10.0, lon=50.0, alt=10.0, pres=5.0, t=500.0, val=14.4).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=10.0, pres=5.0, t=500.0, val=15.1))) @istest def test_is_not_same_point_in_time_with_pressure_and_altitude_with_points_with_different_values(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=10.0, pres=5.0, t=500.0, val=14.4).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=10.0, pres=5.0, t=501.0, val=15.1))) @istest def test_same_point_in_time_with_points_with_different_spatial_coords(): assert (HyperPoint(lat=10.0, lon=50.0, alt=5.0, t=500.0).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=10.0, t=500.0))) @istest def test_is_not_same_point_in_time_with_points_with_different_spatial_coords(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=5.0, t=500.0).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=10.0, t=501.0))) @istest def test_same_point_in_time_with_pressure_not_altitude_with_points_with_different_spatial_coords(): assert (HyperPoint(lat=10.0, lon=50.0, pres=5.0, t=500.0).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, pres=10.0, t=500.0))) @istest def test_is_not_same_point_in_time_with_pressure_not_altitude_with_points_with_different_spatial_coords(): assert (not HyperPoint(lat=10.0, lon=50.0, pres=5.0, t=500.0).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, pres=10.0, t=501.0))) @istest def test_same_point_in_time_with_pressure_and_altitude_with_points_with_different_spatial_coords(): assert (HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=4.0, t=500.0).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=10.0, pres=9.0, t=500.0))) @istest def test_is_not_same_point_in_time_with_pressure_and_altitude_with_points_with_different_spatial_coords(): assert (not HyperPoint(lat=10.0, lon=50.0, alt=5.0, pres=4.0, t=500.0).same_point_in_time( HyperPoint(lat=10.0, lon=50.0, alt=10.0, pres=9.0, t=501.0))) @istest def can_get_valid_coord_tuple_lat(): eq_(HyperPoint(10).get_coord_tuple(), [('latitude', 10)]) @istest def can_get_valid_coord_tuple_lat_lon(): eq_(HyperPoint(10, 12).get_coord_tuple(), [('latitude', 10), ('longitude', 12)]) @istest def check_furthest_point_on_equator(): eq_(HyperPoint(0, 0).furthest_point_from(), HyperPoint(0, 180)) @istest def check_furthest_point_on_pole(): eq_(HyperPoint(90, 0).furthest_point_from(), HyperPoint(-90, 180)) @istest def check_furthest_point(): eq_(HyperPoint(51, 0).furthest_point_from(), HyperPoint(-51, 180)) @istest def check_dist_between_2d_points_on_equator(): assert_almost_equal(HyperPoint(0, 0).haversine_dist(HyperPoint(0, 1)), 111.317, places=3) @istest def 
check_dist_between_2d_points_at_pole():
    assert_almost_equal(HyperPoint(90, 0).haversine_dist(HyperPoint(90, 1)), 0.0000, places=5)


@istest
def check_dist_between_opposite_2d_points():
    """
        The distance between two points on opposite sides of the globe
        should be half the circumference of the globe
    """
    import math
    R_E = 6378  # Radius of the earth in km
    max_dist = math.pi * R_E
    assert_almost_equal(HyperPoint(0, 0).haversine_dist(HyperPoint(0, 180)), max_dist, places=3)
    assert_almost_equal(HyperPoint(90, 0).haversine_dist(HyperPoint(-90, 0)), max_dist, places=3)
    assert_almost_equal(HyperPoint(51, 0).haversine_dist(HyperPoint(-51, 180)), max_dist, places=3)


@istest
def check_compdist_returns_true_when_p2_is_closer_than_p1():
    assert (HyperPoint(0, 0).compdist(HyperPoint(4, 4), HyperPoint(3, 3)))


@istest
def check_compdist_returns_false_when_p1_is_closer_than_p2():
    assert (not HyperPoint(0, 0).compdist(HyperPoint(3, 3), HyperPoint(4, 4)))


@istest
def check_compdist_returns_false_when_p1_is_the_same_as_p2():
    assert (not HyperPoint(0, 0).compdist(HyperPoint(3, 3), HyperPoint(3, 3)))
    assert (not HyperPoint(0, 0).compdist(HyperPoint(-3, 3), HyperPoint(3, 3)))
    assert (not HyperPoint(0, 0).compdist(HyperPoint(3, -3), HyperPoint(3, 3)))
    assert (not HyperPoint(0, 0).compdist(HyperPoint(-3, -3), HyperPoint(3, 3)))
    assert (not HyperPoint(-2.5, 0).compdist(HyperPoint(-5, 0), HyperPoint(0, 0)))
    assert (not HyperPoint(0, -2.5).compdist(HyperPoint(0, -5), HyperPoint(0, 0)))
    assert (not HyperPoint(-2.5, -2.5).compdist(HyperPoint(-5, -5), HyperPoint(0, 0)))
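# --- Numeric cross-check (added for illustration; not part of CIS) ---
# Where the 111.317 km constant in check_dist_between_2d_points_on_equator
# comes from: one degree of arc on a sphere of radius R_E = 6378 km, the same
# radius the opposite-points test above uses.
def check_one_degree_constant():
    import math
    R_E = 6378.0
    one_degree_km = 2 * math.pi * R_E / 360.0
    assert_almost_equal(one_degree_km, 111.317, places=3)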
lgpl-3.0
8,069,184,961,615,374,000
38.704348
115
0.681669
false
Acidburn0zzz/readthedocs.org
readthedocs/urls.py
2
5810
from django.conf.urls.defaults import url, patterns, include from django.contrib import admin from django.conf import settings from django.views.generic.simple import direct_to_template from tastypie.api import Api from api.base import (ProjectResource, UserResource, BuildResource, VersionResource, FileResource) from builds.filters import VersionFilter from core.forms import UserProfileForm from core.views import SearchView from projects.feeds import LatestProjectsFeed, NewProjectsFeed from projects.filters import ProjectFilter from projects.constants import LANGUAGES_REGEX v1_api = Api(api_name='v1') v1_api.register(BuildResource()) v1_api.register(UserResource()) v1_api.register(ProjectResource()) v1_api.register(VersionResource()) v1_api.register(FileResource()) admin.autodiscover() handler500 = 'core.views.server_error' handler404 = 'core.views.server_error_404' urlpatterns = patterns( '', # base view, flake8 complains if it is on the previous line. url(r'^$', 'core.views.homepage'), url(r'^security/', direct_to_template, {'template': 'security.html'}), # For serving docs locally and when nginx isn't url((r'^docs/(?P<project_slug>[-\w]+)/(?P<lang_slug>%s)/(?P<version_slug>' r'[-._\w]+?)/(?P<filename>.*)$') % LANGUAGES_REGEX, 'core.views.serve_docs', name='docs_detail'), # Redirect to default version, if only lang_slug is set. url((r'^docs/(?P<project_slug>[-\w]+)/(?P<lang_slug>%s)/$') % LANGUAGES_REGEX, 'core.views.redirect_lang_slug', name='docs_detail'), # Redirect to default version, if only version_slug is set. url(r'^docs/(?P<project_slug>[-\w]+)/(?P<version_slug>[-._\w]+)/$', 'core.views.redirect_version_slug', name='docs_detail'), # Redirect to default version. url(r'^docs/(?P<project_slug>[-\w]+)/$', 'core.views.redirect_project_slug', name='docs_detail'), # Handle /page/<path> redirects for explicit "latest" version goodness. 
url(r'^docs/(?P<project_slug>[-\w]+)/page/(?P<filename>.*)$',
        'core.views.redirect_page_with_filename',
        name='docs_detail'),

    # Handle single version URLs
    url(r'^docs/(?P<project_slug>[-\w]+)/(?P<filename>.*)$',
        'core.views.serve_single_version_docs',
        name='docs_detail'),

    # Handle fallbacks
    url(r'^user_builds/(?P<project_slug>[-\w]+)/rtd-builds/(?P<version_slug>[-._\w]+?)/(?P<filename>.*)$',
        'core.views.server_helpful_404',
        name='user_builds_fallback'),
    url(r'^user_builds/(?P<project_slug>[-\w]+)/translations/(?P<lang_slug>%s)/(?P<version_slug>[-._\w]+?)/(?P<filename>.*)$' % LANGUAGES_REGEX,
        'core.views.server_helpful_404',
        name='user_builds_fallback_translations'),

    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^projects/', include('projects.urls.public')),
    url(r'^builds/', include('builds.urls')),
    url(r'^flagging/', include('basic.flagging.urls')),
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^search/project/', SearchView.as_view(), name='haystack_project'),
    url(r'^search/', include('haystack.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^dashboard/', include('projects.urls.private')),
    url(r'^github', 'core.views.github_build', name='github_build'),
    url(r'^bitbucket', 'core.views.bitbucket_build', name='bitbucket_build'),
    url(r'^build/(?P<pk>[-\w]+)', 'core.views.generic_build',
        name='generic_build'),
    url(r'^random/(?P<project>[\w-]+)', 'core.views.random_page',
        name='random_page'),
    url(r'^random/$', 'core.views.random_page', name='random_page'),
    url(r'^depth/$', 'core.views.queue_depth', name='queue_depth'),
    url(r'^queue_info/$', 'core.views.queue_info', name='queue_info'),
    url(r'^live/$', 'core.views.live_builds', name='live_builds'),
    url(r'^500/$', 'core.views.divide_by_zero', name='divide_by_zero'),
    url(r'^filter/version/$', 'django_filters.views.object_filter',
        {'filter_class': VersionFilter, 'template_name': 'filter.html'},
        name='filter_version'),
    url(r'^filter/project/$', 'django_filters.views.object_filter',
        {'filter_class': ProjectFilter, 'template_name': 'filter.html'},
        name='filter_project'),
    url(r'^wipe/(?P<project_slug>[-\w]+)/(?P<version_slug>[\w]{1}[-\w\.]+)/$',
        'core.views.wipe_version', name='wipe_version'),
    url(r'^profiles/create/', 'profiles.views.create_profile',
        {'form_class': UserProfileForm},
        name='profiles_profile_create'),
    url(r'^profiles/edit/', 'profiles.views.edit_profile',
        {'form_class': UserProfileForm},
        name='profiles_profile_edit'),
    url(r'^profiles/', include('profiles.urls')),
    url(r'^api/', include(v1_api.urls)),
    url(r'^api/v2/', include('restapi.urls')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^feeds/new/$', NewProjectsFeed(), name="new_feed"),
    url(r'^feeds/latest/$', LatestProjectsFeed(), name="latest_feed"),
    url(r'^mlt/(?P<project_slug>[-\w]+)/(?P<filename>.*)$',
        'core.views.morelikethis', name='morelikethis'),
    url(r'^websupport/', include('websupport.urls')),
)

if settings.DEBUG:
    urlpatterns += patterns(
        '',  # base view, flake8 complains if it is on the previous line.
        url('style-catalog/$',
            'django.views.generic.simple.direct_to_template',
            {'template': 'style_catalog.html'}),
        url(regex='^%s/(?P<path>.*)$' % settings.MEDIA_URL.strip('/'),
            view='django.views.static.serve',
            kwargs={'document_root': settings.MEDIA_ROOT}),
    )
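# --- Illustration only (added; not part of the project) ---
# How the main docs-serving pattern above decomposes a URL. LANGUAGES_REGEX is
# narrowed to two codes here to keep the snippet self-contained.
def _demo_docs_detail_pattern():
    import re
    langs = 'en|es'  # simplified stand-in for projects.constants.LANGUAGES_REGEX
    pattern = re.compile(
        r'^docs/(?P<project_slug>[-\w]+)/(?P<lang_slug>%s)/'
        r'(?P<version_slug>[-._\w]+?)/(?P<filename>.*)$' % langs)
    match = pattern.match('docs/pip/en/latest/installing.html')
    assert match.group('project_slug') == 'pip'
    assert match.group('lang_slug') == 'en'
    assert match.group('version_slug') == 'latest'
    assert match.group('filename') == 'installing.html'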
mit
-6,009,901,939,163,871,000
41.101449
144
0.624441
false
soarpenguin/ansible
lib/ansible/modules/network/netvisor/pn_vlag.py
59
10765
#!/usr/bin/python """ PN CLI vlag-create/vlag-delete/vlag-modify """ # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: pn_vlag author: "Pluribus Networks (@amitsi)" version_added: "2.2" short_description: CLI command to create/delete/modify vlag. description: - Execute vlag-create/vlag-delete/vlag-modify command. - A virtual link aggregation group (VLAG) allows links that are physically connected to two different Pluribus Networks devices to appear as a single trunk to a third device. The third device can be a switch, server, or any Ethernet device. A VLAG can provide Layer 2 multipathing, which allows you to create redundancy by increasing bandwidth, enabling multiple parallel paths between nodes and loadbalancing traffic where alternative paths exist. options: pn_cliusername: description: - Provide login username if user is not root. required: False pn_clipassword: description: - Provide login password if user is not root. required: False pn_cliswitch: description: - Target switch(es) to run this command on. state: description: - State the action to perform. Use 'present' to create vlag, 'absent' to delete vlag and 'update' to modify vlag. required: True choices: ['present', 'absent', 'update'] pn_name: description: - The C(pn_name) takes a valid name for vlag configuration. required: true pn_port: description: - Specify the local VLAG port. - Required for vlag-create. pn_peer_port: description: - Specify the peer VLAG port. - Required for vlag-create. pn_mode: description: - Specify the mode for the VLAG. Active-standby indicates one side is active and the other side is in standby mode. Active-active indicates that both sides of the vlag are up by default. choices: ['active-active', 'active-standby'] pn_peer_switch: description: - Specify the fabric-name of the peer switch. pn_failover_action: description: - Specify the failover action as move or ignore. choices: ['move', 'ignore'] pn_lacp_mode: description: - Specify the LACP mode. choices: ['off', 'passive', 'active'] pn_lacp_timeout: description: - Specify the LACP timeout as slow(30 seconds) or fast(4 seconds). choices: ['slow', 'fast'] pn_lacp_fallback: description: - Specify the LACP fallback mode as bundles or individual. choices: ['bundle', 'individual'] pn_lacp_fallback_timeout: description: - Specify the LACP fallback timeout in seconds. The range is between 30 and 60 seconds with a default value of 50 seconds. """ EXAMPLES = """ - name: create a VLAG pn_vlag: state: 'present' pn_name: spine-to-leaf pn_port: 'spine01-to-leaf' pn_peer_port: 'spine02-to-leaf' pn_peer_switch: spine02 pn_mode: 'active-active' - name: delete VLAGs pn_vlag: state: 'absent' pn_name: spine-to-leaf """ RETURN = """ command: description: The CLI command run on the target node(s). 
  returned: always
  type: str
stdout:
  description: The set of responses from the vlag command.
  returned: always
  type: list
stderr:
  description: The set of error responses from the vlag command.
  returned: on error
  type: list
changed:
  description: Indicates whether the CLI caused changes on the target.
  returned: always
  type: bool
"""

import shlex

VLAG_EXISTS = None


def pn_cli(module):
    """
    This method is to generate the cli portion to launch the Netvisor cli.
    It parses the username, password, switch parameters from module.
    :param module: The Ansible module to fetch username, password and switch
    :return: returns the cli string for further processing
    """
    username = module.params['pn_cliusername']
    password = module.params['pn_clipassword']
    cliswitch = module.params['pn_cliswitch']

    if username and password:
        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
    else:
        cli = '/usr/bin/cli --quiet '

    if cliswitch == 'local':
        cli += ' switch-local '
    else:
        cli += ' switch ' + cliswitch
    return cli


def check_cli(module, cli):
    """
    This method checks for idempotency using the vlag-show command.
    If a vlag with the given name exists, return VLAG_EXISTS as True,
    else False.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Global Booleans: VLAG_EXISTS
    """
    name = module.params['pn_name']

    show = cli + ' vlag-show format name no-show-headers'
    show = shlex.split(show)
    out = module.run_command(show)[1]

    out = out.split()
    # Global flags
    global VLAG_EXISTS
    if name in out:
        VLAG_EXISTS = True
    else:
        VLAG_EXISTS = False


def run_cli(module, cli):
    """
    This method executes the cli command on the target node(s) and returns the
    output. The module then exits based on the output.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    command = get_command_from_state(state)

    cmd = shlex.split(cli)

    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(cmd)

    print_cli = cli.split(cliswitch)[1]

    # Response in JSON format
    if result != 0:
        module.exit_json(
            command=print_cli,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )

    if out:
        module.exit_json(
            command=print_cli,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )

    else:
        module.exit_json(
            command=print_cli,
            msg="%s operation completed" % command,
            changed=True
        )


def get_command_from_state(state):
    """
    This method gets the appropriate command name for the state specified.
    It returns the command name for the specified state.
    :param state: The state for which the respective command name is required.
""" command = None if state == 'present': command = 'vlag-create' if state == 'absent': command = 'vlag-delete' if state == 'update': command = 'vlag-modify' return command def main(): """ This section is for argument parsing """ module = AnsibleModule( argument_spec=dict( pn_cliusername=dict(required=False, type='str'), pn_clipassword=dict(required=False, type='str', no_log=True), pn_cliswitch=dict(required=False, type='str', default='local'), state =dict(required=True, type='str', choices=['present', 'absent', 'update']), pn_name=dict(required=True, type='str'), pn_port=dict(type='str'), pn_peer_port=dict(type='str'), pn_mode=dict(type='str', choices=[ 'active-standby', 'active-active']), pn_peer_switch=dict(type='str'), pn_failover_action=dict(type='str', choices=['move', 'ignore']), pn_lacp_mode=dict(type='str', choices=[ 'off', 'passive', 'active']), pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']), pn_lacp_fallback=dict(type='str', choices=[ 'individual', 'bundled']), pn_lacp_fallback_timeout=dict(type='str') ), required_if=( ["state", "present", ["pn_name", "pn_port", "pn_peer_port", "pn_peer_switch"]], ["state", "absent", ["pn_name"]], ["state", "update", ["pn_name"]] ) ) # Argument accessing state = module.params['state'] name = module.params['pn_name'] port = module.params['pn_port'] peer_port = module.params['pn_peer_port'] mode = module.params['pn_mode'] peer_switch = module.params['pn_peer_switch'] failover_action = module.params['pn_failover_action'] lacp_mode = module.params['pn_lacp_mode'] lacp_timeout = module.params['pn_lacp_timeout'] lacp_fallback = module.params['pn_lacp_fallback'] lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout'] command = get_command_from_state(state) # Building the CLI command string cli = pn_cli(module) if command == 'vlag-delete': check_cli(module, cli) if VLAG_EXISTS is False: module.exit_json( skipped=True, msg='VLAG with name %s does not exist' % name ) cli += ' %s name %s ' % (command, name) else: if command == 'vlag-create': check_cli(module, cli) if VLAG_EXISTS is True: module.exit_json( skipped=True, msg='VLAG with name %s already exists' % name ) cli += ' %s name %s ' % (command, name) if port: cli += ' port %s peer-port %s ' % (port, peer_port) if mode: cli += ' mode ' + mode if peer_switch: cli += ' peer-switch ' + peer_switch if failover_action: cli += ' failover-' + failover_action + '-L2 ' if lacp_mode: cli += ' lacp-mode ' + lacp_mode if lacp_timeout: cli += ' lacp-timeout ' + lacp_timeout if lacp_fallback: cli += ' lacp-fallback ' + lacp_fallback if lacp_fallback_timeout: cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout run_cli(module, cli) # AnsibleModule boilerplate from ansible.module_utils.basic import AnsibleModule if __name__ == '__main__': main()
gpl-3.0
-5,425,393,068,046,331,000
30.023055
80
0.613562
false
django-leonardo/django-leonardo
leonardo/module/devel/widget/clientinfo/models.py
2
1628
# -#- coding: utf-8 -#- from django.template import RequestContext from django.template.loader import render_to_string from django.utils.translation import ugettext_lazy as _ from leonardo import messages from leonardo.forms import MultiSelectField from leonardo.module.web.models import Widget from .utils import add_client_type, get_client_ip INFO_CHOICES = ( ('browser', _('browser information')), ('os', _('operating system')), ('ip', _('IP address')), ('hostname', _('hostname')), ) class ClientInfoWidget(Widget): show_info = MultiSelectField( max_length=255, default='browser', verbose_name=_("show info"), choices=INFO_CHOICES) class Meta: abstract = True verbose_name = _("client info") verbose_name_plural = _('client info') def render_content(self, options): request = options.get('request') uas = None user_agent = '' ip = get_client_ip(request) host = request.get_host() if 'browser' in self.show_info or 'os' in self.show_info: try: import httpagentparser except ImportError: messages.error(request, _('Please install httpagentparser')) else: uas = request.META.get('HTTP_USER_AGENT') user_agent = httpagentparser.detect(uas) request = add_client_type(request) context = RequestContext(request, { 'user_agent': user_agent, 'ip': ip, 'host': host, 'widget': self, }) return render_to_string(self.get_template, context)
bsd-3-clause
5,043,508,662,671,122,000
30.921569
93
0.603808
false
jessrosenfield/pants
src/python/pants/goal/task_registrar.py
5
2751
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import sys
import traceback
from textwrap import dedent

from pants.goal.goal import Goal


class TaskRegistrar(object):
  def __init__(self, name, action, dependencies=None, serialize=True):
    """
    :param name: the name of the task.
    :param action: the Task action object to invoke this task.
    :param dependencies: DEPRECATED
      the names of other goals which must be achieved before invoking this task's goal.
    :param serialize: a flag indicating whether or not the action to achieve this goal requires
      the global lock. If true, the action will block until it can acquire the lock.
    """
    self.serialize = serialize
    self.name = name
    self._task = action
    if dependencies:
      # TODO(John Sirois): kill this warning and the kwarg after a deprecation cycle.
      print(dedent('''
          WARNING: Registered dependencies are now ignored and only `Task.product_types`
          and product requirements as expressed in `Task.prepare` are used to infer Task
          dependencies.

          Please fix this registration:
            {reg}
            {location}
          ''').format(reg=self,
                      location=traceback.format_list([traceback.extract_stack()[-2]])[0]),
            file=sys.stderr)

  def __repr__(self):
    return 'TaskRegistrar({name}, {action} serialize={serialize})'.format(name=self.name,
                                                                          action=self._task,
                                                                          serialize=self.serialize)

  @property
  def task_type(self):
    return self._task

  def install(self, goal=None, first=False, replace=False, before=None, after=None):
    """Install the task in the specified goal (or a new goal with the same name as the task).

    The placement of the task in the execution list of the goal defaults to the end but can be
    influenced by specifying exactly one of the following arguments:

    :param first: Places this task 1st in the goal's execution list.
    :param replace: Replaces any existing tasks in the goal with this task.
    :param before: Places this task before the named task in the goal's execution list.
    :param after: Places this task after the named task in the goal's execution list.
    :returns: The goal with the task installed.
    """
    goal = Goal.by_name(goal or self.name)
    goal.install(self, first, replace, before, after)
    return goal
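# --- Hedged usage sketch (added; not from the pants codebase) ---
# How a plugin's register.py typically wires a task in. `HelloTask` is
# hypothetical, and the exact Task import path varies across pants versions.
def _example_register_goals():
  from pants.backend.core.tasks.task import Task  # path is version-dependent

  class HelloTask(Task):  # hypothetical task
    def execute(self):
      print('hello from the hello goal')

  TaskRegistrar(name='hello', action=HelloTask).install()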
apache-2.0
-7,935,429,179,296,964,000
40.681818
99
0.653581
false
openstack/neutron
neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py
2
3496
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron.plugins.ml2.drivers import type_vxlan from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api VXLAN_UDP_PORT_ONE = 9999 VXLAN_UDP_PORT_TWO = 8888 class VxlanTypeTest(base_type_tunnel.TunnelTypeTestMixin, base_type_tunnel.TunnelTypeNetworkSegmentRangeTestMixin, testlib_api.SqlTestCase): DRIVER_MODULE = type_vxlan DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN def add_endpoint(self, ip=base_type_tunnel.TUNNEL_IP_ONE, host=base_type_tunnel.HOST_ONE): if ip == base_type_tunnel.TUNNEL_IP_ONE: port = VXLAN_UDP_PORT_ONE else: port = VXLAN_UDP_PORT_TWO return self.driver.add_endpoint(ip, host, port) def test_add_endpoint(self): endpoint = super(VxlanTypeTest, self).test_add_endpoint() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_host(self): endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_host() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_ip(self): endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_ip() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoints(self): self.add_endpoint() self.add_endpoint(base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) class VxlanTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver class VxlanTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN class VxlanTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN ENCAP_OVERHEAD = p_const.VXLAN_ENCAP_OVERHEAD
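# --- Illustration (added; not part of this test module) ---
# The mixin pattern above generalizes to other tunnel types: point
# DRIVER_CLASS at a different type driver and the inherited tests run against
# it. GRE is used as the stand-in here; the vxlan-specific add_endpoint
# override is unnecessary because GRE endpoints carry no UDP port.
from neutron.plugins.ml2.drivers import type_gre


class GreTypeExampleTest(base_type_tunnel.TunnelTypeTestMixin,
                         testlib_api.SqlTestCase):
    DRIVER_MODULE = type_gre
    DRIVER_CLASS = type_gre.GreTypeDriver
    TYPE = p_const.TYPE_GRE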
apache-2.0
8,049,927,088,233,817,000
39.651163
78
0.673341
false
Antonio-Team/enigma2
lib/python/Screens/ServiceStopScreen.py
6
1493
from Screens.MessageBox import MessageBox class ServiceStopScreen: def __init__(self): try: self.session except: print "[ServiceStopScreen] ERROR: no self.session set" self.oldref = None self.onClose.append(self.__onClose) def pipAvailable(self): # pip isn't available in every state of e2 try: self.session.pipshown pipavailable = True except: pipavailable = False return pipavailable def stopService(self): if not self.oldref: ref = self.session.nav.getCurrentlyPlayingServiceOrGroup() path = ref and ref.getPath() if not path: self.oldref = ref self.session.nav.stopService() if self.pipAvailable(): if self.session.pipshown: # try to disable pip if hasattr(self.session, 'infobar'): if self.session.infobar.servicelist and self.session.infobar.servicelist.dopipzap: self.session.infobar.servicelist.togglePipzap() if hasattr(self.session, 'pip'): del self.session.pip self.session.pipshown = False def __onClose(self): if self.oldref: self.session.nav.playService(self.oldref) def restoreService(self, msg = _("Zap back to previously tuned service?")): if self.oldref: self.session.openWithCallback(self.restartPrevService, MessageBox, msg, MessageBox.TYPE_YESNO) else: self.restartPrevService(False) def restartPrevService(self, yesno=True, close=True): if not yesno: self.oldref = None if close: self.close() else: self.__onClose() self.oldref = None
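# --- Hedged usage sketch (added; not part of Enigma2) ---
# ServiceStopScreen is a mixin for real Screen subclasses; a consumer looks
# roughly like this (ScanSetupDemo is hypothetical).
from Screens.Screen import Screen

class ScanSetupDemo(Screen, ServiceStopScreen):
	def __init__(self, session):
		Screen.__init__(self, session)
		ServiceStopScreen.__init__(self)
		self.stopService()  # free the tuner before scanning

	def keyCancel(self):
		self.restoreService()  # offer to zap back when leaving the screen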
gpl-2.0
-9,218,145,415,870,601,000
26.648148
97
0.712659
false
killerstorm/ngcccbase
ngcccbase/deterministic.py
4
9293
import hashlib import hmac import os from pycoin.ecdsa.secp256k1 import generator_secp256k1 as BasePoint from pycoin.encoding import from_bytes_32, public_pair_to_bitcoin_address from address import AddressRecord, LooseAddressRecord from asset import AssetDefinition from coloredcoinlib import ColorSet class DeterministicAddressRecord(AddressRecord): """Subclass of AddressRecord which is entirely deterministic. DeterministicAddressRecord will use a single master key to create addresses for specific colors and bitcoin addresses. """ def __init__(self, **kwargs): """Create an address for this color <color_set> and index <index> with the master key <master_key>. The address record returned for the same three variables will be the same every time, hence "deterministic". """ super(DeterministicAddressRecord, self).__init__(**kwargs) if len(self.color_set.get_data()) == 0: color_string = "genesis block" else: color_string = self.color_set.get_hash_string() self.index = kwargs.get('index') h = hmac.new(str(kwargs['master_key']), "%s|%s" % (color_string, self.index), hashlib.sha256) string = h.digest() self.rawPrivKey = from_bytes_32(string) self.publicPoint = BasePoint * self.rawPrivKey self.address = public_pair_to_bitcoin_address( self.publicPoint.pair(), compressed=False, address_prefix=self.prefix ) class DWalletAddressManager(object): """This class manages the creation of new AddressRecords. Specifically, it keeps track of which colors have been created in this wallet and how many addresses of each color have been created in this wallet. """ def __init__(self, colormap, config): """Create a deterministic wallet address manager given a colormap <colormap> and a configuration <config>. Note address manager configuration is in the key "dwam". """ self.config = config self.testnet = config.get('testnet', False) self.colormap = colormap self.addresses = [] # initialize the wallet manager if this is the first time # this will generate a master key. params = config.get('dwam', None) if params is None: params = self.init_new_wallet() # master key is stored in a separate config entry self.master_key = config['dw_master_key'] self.genesis_color_sets = params['genesis_color_sets'] self.color_set_states = params['color_set_states'] # import the genesis addresses for i, color_desc_list in enumerate(self.genesis_color_sets): addr = self.get_genesis_address(i) addr.color_set = ColorSet(self.colormap, color_desc_list) self.addresses.append(addr) # now import the specific color addresses for color_set_st in self.color_set_states: color_desc_list = color_set_st['color_set'] max_index = color_set_st['max_index'] color_set = ColorSet(self.colormap, color_desc_list) params = { 'testnet': self.testnet, 'master_key': self.master_key, 'color_set': color_set } for index in xrange(max_index + 1): params['index'] = index self.addresses.append(DeterministicAddressRecord(**params)) # import the one-off addresses from the config for addr_params in config.get('addresses', []): addr_params['testnet'] = self.testnet addr_params['color_set'] = ColorSet(self.colormap, addr_params['color_set']) address = LooseAddressRecord(**addr_params) self.addresses.append(address) def init_new_wallet(self): """Initialize the configuration if this is the first time we're creating addresses in this wallet. Returns the "dwam" part of the configuration. 
""" if not 'dw_master_key' in self.config: master_key = os.urandom(64).encode('hex') self.config['dw_master_key'] = master_key dwam_params = { 'genesis_color_sets': [], 'color_set_states': [] } self.config['dwam'] = dwam_params return dwam_params def increment_max_index_for_color_set(self, color_set): """Given a color <color_set>, record that there is one more new address for that color. """ # TODO: speed up, cache(?) for color_set_st in self.color_set_states: color_desc_list = color_set_st['color_set'] max_index = color_set_st['max_index'] cur_color_set = ColorSet(self.colormap, color_desc_list) if cur_color_set.equals(color_set): max_index += 1 color_set_st['max_index'] = max_index return max_index self.color_set_states.append({"color_set": color_set.get_data(), "max_index": 0}) return 0 def get_new_address(self, asset_or_color_set): """Given an asset or color_set <asset_or_color_set>, Create a new DeterministicAddressRecord and return it. The DWalletAddressManager will keep that tally and persist it in storage, so the address will be available later. """ if isinstance(asset_or_color_set, AssetDefinition): color_set = asset_or_color_set.get_color_set() else: color_set = asset_or_color_set index = self.increment_max_index_for_color_set(color_set) na = DeterministicAddressRecord(master_key=self.master_key, color_set=color_set, index=index, testnet=self.testnet) self.addresses.append(na) self.update_config() return na def get_genesis_address(self, genesis_index): """Given the index <genesis_index>, will return the Deterministic Address Record associated with that index. In general, that index corresponds to the nth color created by this wallet. """ return DeterministicAddressRecord( master_key=self.master_key, color_set=ColorSet(self.colormap, []), index=genesis_index, testnet=self.testnet) def get_new_genesis_address(self): """Create a new genesis address and return it. This will necessarily increment the number of genesis addresses from this wallet. """ index = len(self.genesis_color_sets) self.genesis_color_sets.append([]) self.update_config() address = self.get_genesis_address(index) address.index = index self.addresses.append(address) return address def update_genesis_address(self, address, color_set): """Updates the genesis address <address> to have a different color set <color_set>. """ assert address.color_set.color_id_set == set([]) address.color_set = color_set self.genesis_color_sets[address.index] = color_set.get_data() self.update_config() def get_some_address(self, color_set): """Returns an address associated with color <color_set>. This address will be essentially a random address in the wallet. No guarantees to what will come out. If there is not address corresponding to the color_set, thhis method will create one and return it. """ acs = self.get_addresses_for_color_set(color_set) if acs: # reuse return acs[0] else: return self.get_new_address(color_set) def get_change_address(self, color_set): """Returns an address that can receive the change amount for a color <color_set> """ return self.get_some_address(color_set) def get_all_addresses(self): """Returns the list of all AddressRecords in this wallet. """ return self.addresses def find_address_record(self, address): for address_rec in self.addresses: if address_rec.get_address() == address: return address_rec return None def get_addresses_for_color_set(self, color_set): """Given a color <color_set>, returns all AddressRecords that have that color. 
""" return [addr for addr in self.addresses if color_set.intersects(addr.get_color_set())] def update_config(self): """Updates the configuration for the address manager. The data will persist in the key "dwam" and consists of this data: genesis_color_sets - Colors created by this wallet color_set_states - How many addresses of each color """ dwam_params = { 'genesis_color_sets': self.genesis_color_sets, 'color_set_states': self.color_set_states } self.config['dwam'] = dwam_params
mit
-5,803,803,244,992,301,000
38.88412
75
0.599914
false
fly19890211/edx-platform
common/lib/capa/capa/inputtypes.py
11
64641
# # File: courseware/capa/inputtypes.py # """ Module containing the problem elements which render into input objects - textline - textbox (aka codeinput) - schematic - choicegroup (aka radiogroup, checkboxgroup) - javascriptinput - imageinput (for clickable image) - optioninput (for option list) - filesubmission (upload a file) - crystallography - vsepr_input - drag_and_drop - formulaequationinput - chemicalequationinput These are matched by *.html files templates/*.html which are mako templates with the actual html. Each input type takes the xml tree as 'element', the previous answer as 'value', and the graded status as'status' """ # TODO: make hints do something # TODO: make all inputtypes actually render msg # TODO: remove unused fields (e.g. 'hidden' in a few places) # TODO: add validators so that content folks get better error messages. # Possible todo: make inline the default for textlines and other "one-line" inputs. It probably # makes sense, but a bunch of problems have markup that assumes block. Bigger TODO: figure out a # general css and layout strategy for capa, document it, then implement it. import time import json import logging from lxml import etree import re import shlex # for splitting quoted strings import sys import pyparsing import html5lib import bleach from .util import sanitize_html from .registry import TagRegistry from chem import chemcalc from calc.preview import latex_preview import xqueue_interface from xqueue_interface import XQUEUE_TIMEOUT from datetime import datetime from xmodule.stringify import stringify_children log = logging.getLogger(__name__) ######################################################################### registry = TagRegistry() # pylint: disable=invalid-name class Status(object): """ Problem status attributes: classname, display_name, display_tooltip """ css_classes = { # status: css class 'unsubmitted': 'unanswered', 'incomplete': 'incorrect', 'queued': 'processing', } __slots__ = ('classname', '_status', 'display_name', 'display_tooltip') def __init__(self, status, gettext_func=unicode): self.classname = self.css_classes.get(status, status) _ = gettext_func names = { 'correct': _('correct'), 'incorrect': _('incorrect'), 'partially-correct': _('partially correct'), 'incomplete': _('incomplete'), 'unanswered': _('unanswered'), 'unsubmitted': _('unanswered'), 'queued': _('processing'), } tooltips = { # Translators: these are tooltips that indicate the state of an assessment question 'correct': _('This is correct.'), 'incorrect': _('This is incorrect.'), 'partially-correct': _('This is partially correct.'), 'unanswered': _('This is unanswered.'), 'unsubmitted': _('This is unanswered.'), 'queued': _('This is being processed.'), } self.display_name = names.get(status, unicode(status)) self.display_tooltip = tooltips.get(status, u'') self._status = status or '' def __str__(self): return self._status def __unicode__(self): return self._status.decode('utf8') def __repr__(self): return 'Status(%r)' % self._status def __eq__(self, other): return self._status == str(other) class Attribute(object): """ Allows specifying required and optional attributes for input types. """ # want to allow default to be None, but also allow required objects _sentinel = object() def __init__(self, name, default=_sentinel, transform=None, validate=None, render=True): """ Define an attribute name (str): then name of the attribute--should be alphanumeric (valid for an XML attribute) default (any type): If not specified, this attribute is required. 
If specified, use this as the default value if the attribute is not specified. Note that this value will not be transformed or validated. transform (function str -> any type): If not None, will be called to transform the parsed value into an internal representation. validate (function str-or-return-type-of-tranform -> unit or exception): If not None, called to validate the (possibly transformed) value of the attribute. Should raise ValueError with a helpful message if the value is invalid. render (bool): if False, don't include this attribute in the template context. """ self.name = name self.default = default self.validate = validate self.transform = transform self.render = render def parse_from_xml(self, element): """ Given an etree xml element that should have this attribute, do the obvious thing: - look for it. raise ValueError if not found and required. - transform and validate. pass through any exceptions from transform or validate. """ val = element.get(self.name) if self.default == self._sentinel and val is None: raise ValueError( 'Missing required attribute {0}.'.format(self.name) ) if val is None: # not required, so return default return self.default if self.transform is not None: val = self.transform(val) if self.validate is not None: self.validate(val) return val class InputTypeBase(object): """ Abstract base class for input types. """ template = None def __init__(self, system, xml, state): """ Instantiate an InputType class. Arguments: - system : LoncapaModule instance which provides OS, rendering, and user context. Specifically, must have a render_template function. - xml : Element tree of this Input element - state : a dictionary with optional keys: * 'value' -- the current value of this input (what the student entered last time) * 'id' -- the id of this input, typically "{problem-location}_{response-num}_{input-num}" * 'status' (answered, unanswered, unsubmitted) * 'input_state' -- dictionary containing any inputtype-specific state that has been preserved * 'feedback' (dictionary containing keys for hints, errors, or other feedback from previous attempt. Specifically 'message', 'hint', 'hintmode'. If 'hintmode' is 'always', the hint is always displayed.) """ self.xml = xml self.tag = xml.tag self.capa_system = system # NOTE: ID should only come from one place. If it comes from multiple, # we use state first, XML second (in case the xml changed, but we have # existing state with an old id). Since we don't make this guarantee, # we can swap this around in the future if there's a more logical # order. self.input_id = state.get('id', xml.get('id')) if self.input_id is None: raise ValueError( "input id state is None. xml is {0}".format(etree.tostring(xml)) ) self.value = state.get('value', '') feedback = state.get('feedback', {}) self.msg = feedback.get('message', '') self.hint = feedback.get('hint', '') self.hintmode = feedback.get('hintmode', None) self.input_state = state.get('input_state', {}) self.answervariable = state.get("answervariable", None) # put hint above msg if it should be displayed if self.hintmode == 'always': self.msg = self.hint + ('<br/>' if self.msg else '') + self.msg self.status = state.get('status', 'unanswered') try: # Pre-parse and process all the declared requirements. self.process_requirements() # Call subclass "constructor" -- means they don't have to worry about calling # super().__init__, and are isolated from changes to the input # constructor interface. 
self.setup() except Exception as err: # Something went wrong: add xml to message, but keep the traceback msg = u"Error in xml '{x}': {err} ".format( x=etree.tostring(xml), err=err.message) raise Exception, msg, sys.exc_info()[2] @classmethod def get_attributes(cls): """ Should return a list of Attribute objects (see docstring there for details). Subclasses should override. e.g. return [Attribute('unicorn', True), Attribute('num_dragons', 12, transform=int), ...] """ return [] def process_requirements(self): """ Subclasses can declare lists of required and optional attributes. This function parses the input xml and pulls out those attributes. This isolates most simple input types from needing to deal with xml parsing at all. Processes attributes, putting the results in the self.loaded_attributes dictionary. Also creates a set self.to_render, containing the names of attributes that should be included in the context by default. """ # Use local dicts and sets so that if there are exceptions, we don't # end up in a partially-initialized state. loaded = {} to_render = set() for attribute in self.get_attributes(): loaded[attribute.name] = attribute.parse_from_xml(self.xml) if attribute.render: to_render.add(attribute.name) self.loaded_attributes = loaded self.to_render = to_render def setup(self): """ InputTypes should override this to do any needed initialization. It is called after the constructor, so all base attributes will be set. If this method raises an exception, it will be wrapped with a message that includes the problem xml. """ pass def handle_ajax(self, dispatch, data): """ InputTypes that need to handle specialized AJAX should override this. Input: dispatch: a string that can be used to determine how to handle the data passed in data: a dictionary containing the data that was sent with the ajax call Output: a dictionary object that can be serialized into JSON. This will be sent back to the Javascript. """ pass def _get_render_context(self): """ Should return a dictionary of keys needed to render the template for the input type. (Separate from get_html to faciliate testing of logic separately from the rendering) The default implementation gets the following rendering context: basic things like value, id, status, and msg, as well as everything in self.loaded_attributes, and everything returned by self._extra_context(). This means that input types that only parse attributes and pass them to the template get everything they need, and don't need to override this method. """ context = { 'id': self.input_id, 'value': self.value, 'status': Status(self.status, self.capa_system.i18n.ugettext), 'msg': self.msg, 'STATIC_URL': self.capa_system.STATIC_URL, } context.update( (a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render ) context.update(self._extra_context()) if self.answervariable: context.update({'answervariable': self.answervariable}) return context def _extra_context(self): """ Subclasses can override this to return extra context that should be passed to their templates for rendering. This is useful when the input type requires computing new template variables from the parsed attributes. """ return {} def get_html(self): """ Return the html for this input, as an etree element. 
""" if self.template is None: raise NotImplementedError("no rendering template specified for class {0}" .format(self.__class__)) context = self._get_render_context() html = self.capa_system.render_template(self.template, context) try: output = etree.XML(html) except etree.XMLSyntaxError as ex: # If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>, # XML parser will raise exception, so wee fallback to html5parser, which will set empty "" values for such attrs. try: output = html5lib.parseFragment(html, treebuilder='lxml', namespaceHTMLElements=False)[0] except IndexError: raise ex return output def get_user_visible_answer(self, internal_answer): """ Given the internal representation of the answer provided by the user, return the representation of the answer as the user saw it. Subclasses should override this method if and only if the internal represenation of the answer is different from the answer that is displayed to the user. """ return internal_answer #----------------------------------------------------------------------------- @registry.register class OptionInput(InputTypeBase): """ Input type for selecting and Select option input type. Example: <optioninput options="('Up','Down')" label="Where is the sky?" correct="Up"/><text>The location of the sky</text> # TODO: allow ordering to be randomized """ template = "optioninput.html" tags = ['optioninput'] @staticmethod def parse_options(options): """ Given options string, convert it into an ordered list of (option_id, option_description) tuples, where id==description for now. TODO: make it possible to specify different id and descriptions. """ # convert single quotes inside option values to html encoded string options = re.sub(r"([a-zA-Z])('|\\')([a-zA-Z])", r"\1&#39;\3", options) options = re.sub(r"\\'", r"&#39;", options) # replace already escaped single quotes # parse the set of possible options lexer = shlex.shlex(options[1:-1].encode('utf8')) lexer.quotes = "'" # Allow options to be separated by whitespace as well as commas lexer.whitespace = ", " # remove quotes # convert escaped single quotes (html encoded string) back to single quotes tokens = [x[1:-1].decode('utf8').replace("&#39;", "'") for x in lexer] # make list of (option_id, option_description), with description=id return [(t, t) for t in tokens] @classmethod def get_attributes(cls): """ Convert options to a convenient format. """ return [Attribute('options', transform=cls.parse_options), Attribute('label', ''), Attribute('inline', False)] #----------------------------------------------------------------------------- # TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of # desired semantics. @registry.register class ChoiceGroup(InputTypeBase): """ Radio button or checkbox inputs: multiple choice or true/false TODO: allow order of choices to be randomized, following lon-capa spec. Use "location" attribute, ie random, top, bottom. Example: <choicegroup label="Which foil?"> <choice correct="false" name="foil1"> <text>This is foil One.</text> </choice> <choice correct="false" name="foil2"> <text>This is foil Two.</text> </choice> <choice correct="true" name="foil3"> <text>This is foil Three.</text> </choice> </choicegroup> """ template = "choicegroup.html" tags = ['choicegroup', 'radiogroup', 'checkboxgroup'] def setup(self): i18n = self.capa_system.i18n # suffix is '' or [] to change the way the input is handled in --as a scalar or vector # value. (VS: would be nice to make this less hackish). 
if self.tag == 'choicegroup': self.suffix = '' self.html_input_type = "radio" elif self.tag == 'radiogroup': self.html_input_type = "radio" self.suffix = '[]' elif self.tag == 'checkboxgroup': self.html_input_type = "checkbox" self.suffix = '[]' else: _ = i18n.ugettext # Translators: 'ChoiceGroup' is an input type and should not be translated. msg = _("ChoiceGroup: unexpected tag {tag_name}").format(tag_name=self.tag) raise Exception(msg) self.choices = self.extract_choices(self.xml, i18n) self._choices_map = dict(self.choices,) # pylint: disable=attribute-defined-outside-init @classmethod def get_attributes(cls): _ = lambda text: text return [Attribute("show_correctness", "always"), Attribute('label', ''), Attribute("submitted_message", _("Answer received."))] def _extra_context(self): return {'input_type': self.html_input_type, 'choices': self.choices, 'name_array_suffix': self.suffix} @staticmethod def extract_choices(element, i18n): """ Extracts choices for a few input types, such as ChoiceGroup, RadioGroup and CheckboxGroup. returns list of (choice_name, choice_text) tuples TODO: allow order of choices to be randomized, following lon-capa spec. Use "location" attribute, ie random, top, bottom. """ choices = [] _ = i18n.ugettext for choice in element: if choice.tag == 'choice': choices.append((choice.get("name"), stringify_children(choice))) else: if choice.tag != 'compoundhint': msg = u'[capa.inputtypes.extract_choices] {error_message}'.format( # Translators: '<choice>' and '<compoundhint>' are tag names and should not be translated. error_message=_('Expected a <choice> or <compoundhint> tag; got {given_tag} instead').format( given_tag=choice.tag ) ) raise Exception(msg) return choices def get_user_visible_answer(self, internal_answer): if isinstance(internal_answer, basestring): return self._choices_map[internal_answer] return [self._choices_map[i] for i in internal_answer] #----------------------------------------------------------------------------- @registry.register class JavascriptInput(InputTypeBase): """ Hidden field for javascript to communicate via; also loads the required scripts for rendering the problem and passes data to the problem. TODO (arjun?): document this in detail. Initial notes: - display_class is a subclass of XProblemClassDisplay (see xmodule/xmodule/js/src/capa/display.coffee), - display_file is the js script to be in /static/js/ where display_class is defined. """ template = "javascriptinput.html" tags = ['javascriptinput'] @classmethod def get_attributes(cls): """ Register the attributes. """ return [Attribute('params', None), Attribute('problem_state', None), Attribute('display_class', None), Attribute('display_file', None), ] def setup(self): # Need to provide a value that JSON can parse if there is no # student-supplied value yet. if self.value == "": self.value = 'null' #----------------------------------------------------------------------------- @registry.register class JSInput(InputTypeBase): """ Inputtype for general javascript inputs. Intended to be used with customresponse. Loads in a sandboxed iframe to help prevent css and js conflicts between frame and top-level window. iframe sandbox whitelist: - allow-scripts - allow-popups - allow-forms - allow-pointer-lock This in turn means that the iframe cannot directly access the top-level window elements. Example: <jsinput html_file="/static/test.html" gradefn="grade" height="500" width="400"/> See the documentation in docs/data/source/course_data_formats/jsinput.rst for more information. 
""" template = "jsinput.html" tags = ['jsinput'] @classmethod def get_attributes(cls): """ Register the attributes. """ return [ Attribute('params', None), # extra iframe params Attribute('html_file', None), Attribute('gradefn', "gradefn"), Attribute('get_statefn', None), # Function to call in iframe # to get current state. Attribute('initial_state', None), # JSON string to be used as initial state Attribute('set_statefn', None), # Function to call iframe to # set state Attribute('width', "400"), # iframe width Attribute('height', "300"), # iframe height Attribute('sop', None) # SOP will be relaxed only if this # attribute is set to false. ] def _extra_context(self): context = { 'jschannel_loader': '{static_url}js/capa/src/jschannel.js'.format( static_url=self.capa_system.STATIC_URL), 'jsinput_loader': '{static_url}js/capa/src/jsinput.js'.format( static_url=self.capa_system.STATIC_URL), 'saved_state': self.value } return context #----------------------------------------------------------------------------- @registry.register class TextLine(InputTypeBase): """ A text line input. Can do math preview if "math"="1" is specified. If "trailing_text" is set to a value, then the textline will be shown with the value after the text input, and before the checkmark or any input-specific feedback. HTML will not work, but properly escaped HTML characters will. This feature is useful if you would like to specify a specific type of units for the text input. If the hidden attribute is specified, the textline is hidden and the input id is stored in a div with name equal to the value of the hidden attribute. This is used e.g. for embedding simulations turned into questions. Example: <textline math="1" trailing_text="m/s" label="How fast is a cheetah?" /> This example will render out a text line with a math preview and the text 'm/s' after the end of the text line. """ template = "textline.html" tags = ['textline'] @classmethod def get_attributes(cls): """ Register the attributes. """ return [ Attribute('size', None), Attribute('label', ''), Attribute('hidden', False), Attribute('inline', False), # Attributes below used in setup(), not rendered directly. Attribute('math', None, render=False), # TODO: 'dojs' flag is temporary, for backwards compatibility with # 8.02x Attribute('dojs', None, render=False), Attribute('preprocessorClassName', None, render=False), Attribute('preprocessorSrc', None, render=False), Attribute('trailing_text', ''), ] def setup(self): self.do_math = bool(self.loaded_attributes['math'] or self.loaded_attributes['dojs']) # TODO: do math checking using ajax instead of using js, so # that we only have one math parser. self.preprocessor = None if self.do_math: # Preprocessor to insert between raw input and Mathjax self.preprocessor = { 'class_name': self.loaded_attributes['preprocessorClassName'], 'script_src': self.loaded_attributes['preprocessorSrc'], } if None in self.preprocessor.values(): self.preprocessor = None def _extra_context(self): return {'do_math': self.do_math, 'preprocessor': self.preprocessor, } #----------------------------------------------------------------------------- @registry.register class FileSubmission(InputTypeBase): """ Upload some files (e.g. for programming assignments) """ template = "filesubmission.html" tags = ['filesubmission'] @staticmethod def parse_files(files): """ Given a string like 'a.py b.py c.out', split on whitespace and return as a json list. 
""" return json.dumps(files.split()) @classmethod def get_attributes(cls): """ Convert the list of allowed files to a convenient format. """ return [Attribute('allowed_files', '[]', transform=cls.parse_files), Attribute('label', ''), Attribute('required_files', '[]', transform=cls.parse_files), ] def setup(self): """ Do some magic to handle queueing status (render as "queued" instead of "incomplete"), pull queue_len from the msg field. (TODO: get rid of the queue_len hack). """ _ = self.capa_system.i18n.ugettext submitted_msg = _("Your files have been submitted. As soon as your submission is" " graded, this message will be replaced with the grader's feedback.") self.submitted_msg = submitted_msg # Check if problem has been queued self.queue_len = 0 # Flag indicating that the problem has been queued, 'msg' is length of # queue if self.status == 'incomplete': self.status = 'queued' self.queue_len = self.msg self.msg = self.submitted_msg def _extra_context(self): return {'queue_len': self.queue_len, } #----------------------------------------------------------------------------- @registry.register class CodeInput(InputTypeBase): """ A text area input for code--uses codemirror, does syntax highlighting, special tab handling, etc. """ template = "codeinput.html" tags = [ 'codeinput', 'textbox', # Another (older) name--at some point we may want to make it use a # non-codemirror editor. ] @classmethod def get_attributes(cls): """ Convert options to a convenient format. """ return [ Attribute('rows', '30'), Attribute('cols', '80'), Attribute('hidden', ''), # For CodeMirror Attribute('mode', 'python'), Attribute('linenumbers', 'true'), # Template expects tabsize to be an int it can do math with Attribute('tabsize', 4, transform=int), ] def setup_code_response_rendering(self): """ Implement special logic: handle queueing state, and default input. """ # if no student input yet, then use the default input given by the # problem if not self.value and self.xml.text: self.value = self.xml.text.strip() # Check if problem has been queued self.queue_len = 0 # Flag indicating that the problem has been queued, 'msg' is length of # queue if self.status == 'incomplete': self.status = 'queued' self.queue_len = self.msg self.msg = bleach.clean(self.submitted_msg) def setup(self): """ setup this input type """ _ = self.capa_system.i18n.ugettext submitted_msg = _("Your answer has been submitted. As soon as your submission is" " graded, this message will be replaced with the grader's feedback.") self.submitted_msg = submitted_msg self.setup_code_response_rendering() def _extra_context(self): """Defined queue_len, add it """ return {'queue_len': self.queue_len, } #----------------------------------------------------------------------------- @registry.register class MatlabInput(CodeInput): """ InputType for handling Matlab code input Example: <matlabinput rows="10" cols="80" tabsize="4"> Initial Text </matlabinput> """ template = "matlabinput.html" tags = ['matlabinput'] def setup(self): """ Handle matlab-specific parsing """ _ = self.capa_system.i18n.ugettext submitted_msg = _("Submitted. 
As soon as a response is returned, " "this message will be replaced by that feedback.") self.submitted_msg = submitted_msg self.setup_code_response_rendering() xml = self.xml self.plot_payload = xml.findtext('./plot_payload') # Check if problem has been queued self.queuename = 'matlab' self.queue_msg = '' # this is only set if we don't have a graded response # the graded response takes precedence if 'queue_msg' in self.input_state and self.status in ['queued', 'incomplete', 'unsubmitted']: self.queue_msg = sanitize_html(self.input_state['queue_msg']) if 'queuestate' in self.input_state and self.input_state['queuestate'] == 'queued': self.status = 'queued' self.queue_len = 1 self.msg = self.submitted_msg # Handle situation if no response from xqueue arrived during specified time. if ('queuetime' not in self.input_state or time.time() - self.input_state['queuetime'] > XQUEUE_TIMEOUT): self.queue_len = 0 self.status = 'unsubmitted' self.msg = _( 'No response from Xqueue within {xqueue_timeout} seconds. Aborted.' ).format(xqueue_timeout=XQUEUE_TIMEOUT) def handle_ajax(self, dispatch, data): """ Handle AJAX calls directed to this input Args: - dispatch (str) - indicates how we want this ajax call to be handled - data (dict) - dictionary of key-value pairs that contain useful data Returns: dict - 'success' - whether or not we successfully queued this submission - 'message' - message to be rendered in case of error """ if dispatch == 'plot': return self._plot_data(data) return {} def ungraded_response(self, queue_msg, queuekey): """ Handle the response from the XQueue Stores the response in the input_state so it can be rendered later Args: - queue_msg (str) - message returned from the queue. The message to be rendered - queuekey (str) - a key passed to the queue. Will be matched up to verify that this is the response we're waiting for Returns: nothing """ # check the queuekey against the saved queuekey if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued' and self.input_state['queuekey'] == queuekey): msg = self._parse_data(queue_msg) # save the queue message so that it can be rendered later self.input_state['queue_msg'] = msg self.input_state['queuestate'] = None self.input_state['queuekey'] = None def button_enabled(self): """ Return whether or not we want the 'Test Code' button visible Right now, we only want this button to show up when a problem has not been checked. """ if self.status in ['correct', 'incorrect', 'partially-correct']: return False else: return True def _extra_context(self): """ Set up additional context variables""" _ = self.capa_system.i18n.ugettext queue_msg = self.queue_msg if len(self.queue_msg) > 0: # An empty string cannot be parsed as XML but is okay to include in the template. try: etree.XML(u'<div>{0}</div>'.format(self.queue_msg)) except etree.XMLSyntaxError: try: html5lib.parseFragment(self.queue_msg, treebuilder='lxml', namespaceHTMLElements=False)[0] except (IndexError, ValueError): # If neither can parse queue_msg, it contains invalid xml. 
queue_msg = u"<span>{0}</span>".format(_("Error running code.")) extra_context = { 'queue_len': str(self.queue_len), 'queue_msg': queue_msg, 'button_enabled': self.button_enabled(), 'matlab_editor_js': '{static_url}js/vendor/CodeMirror/octave.js'.format( static_url=self.capa_system.STATIC_URL), 'msg': sanitize_html(self.msg) # sanitize msg before rendering into template } return extra_context def _parse_data(self, queue_msg): """ Parses the message out of the queue message Args: queue_msg (str) - a JSON encoded string Returns: returns the value for the the key 'msg' in queue_msg """ try: result = json.loads(queue_msg) except (TypeError, ValueError): log.error("External message should be a JSON serialized dict." " Received queue_msg = %s", queue_msg) raise msg = result['msg'] return msg def _plot_data(self, data): """ AJAX handler for the plot button Args: get (dict) - should have key 'submission' which contains the student submission Returns: dict - 'success' - whether or not we successfully queued this submission - 'message' - message to be rendered in case of error """ _ = self.capa_system.i18n.ugettext # only send data if xqueue exists if self.capa_system.xqueue is None: return {'success': False, 'message': _('Cannot connect to the queue')} # pull relevant info out of get response = data['submission'] # construct xqueue headers qinterface = self.capa_system.xqueue['interface'] qtime = datetime.utcnow().strftime(xqueue_interface.dateformat) callback_url = self.capa_system.xqueue['construct_callback']('ungraded_response') anonymous_student_id = self.capa_system.anonymous_student_id # TODO: Why is this using self.capa_system.seed when we have self.seed??? queuekey = xqueue_interface.make_hashkey(str(self.capa_system.seed) + qtime + anonymous_student_id + self.input_id) xheader = xqueue_interface.make_xheader( lms_callback_url=callback_url, lms_key=queuekey, queue_name=self.queuename) # construct xqueue body student_info = { 'anonymous_student_id': anonymous_student_id, 'submission_time': qtime } contents = { 'grader_payload': self.plot_payload, 'student_info': json.dumps(student_info), 'student_response': response, 'token': getattr(self.capa_system, 'matlab_api_key', None), 'endpoint_version': "2", 'requestor_id': anonymous_student_id, } (error, msg) = qinterface.send_to_queue(header=xheader, body=json.dumps(contents)) # save the input state if successful if error == 0: self.input_state['queuekey'] = queuekey self.input_state['queuestate'] = 'queued' self.input_state['queuetime'] = time.time() return {'success': error == 0, 'message': msg} #----------------------------------------------------------------------------- @registry.register class Schematic(InputTypeBase): """ InputType for the schematic editor """ template = "schematicinput.html" tags = ['schematic'] @classmethod def get_attributes(cls): """ Convert options to a convenient format. """ return [ Attribute('height', None), Attribute('width', None), Attribute('parts', None), Attribute('analyses', None), Attribute('initial_value', None), Attribute('submit_analyses', None), Attribute('label', ''), ] def _extra_context(self): context = { 'setup_script': '{static_url}js/capa/schematicinput.js'.format( static_url=self.capa_system.STATIC_URL), } return context #----------------------------------------------------------------------------- @registry.register class ImageInput(InputTypeBase): """ Clickable image as an input field. Element should specify the image source, height, and width, e.g. 
<imageinput src="/static/Figures/Skier-conservation-of-energy.jpg" width="388" height="560" /> TODO: showanswer for imageinput does not work yet - need javascript to put rectangle over acceptable area of image. """ template = "imageinput.html" tags = ['imageinput'] @classmethod def get_attributes(cls): """ Note: src, height, and width are all required. """ return [Attribute('src'), Attribute('height'), Attribute('label', ''), Attribute('width'), ] def setup(self): """ if value is of the form [x,y] then parse it and send along coordinates of previous answer """ m = re.match(r'\[([0-9]+),([0-9]+)]', self.value.strip().replace(' ', '')) if m: # Note: we subtract 15 to compensate for the size of the dot on the screen. # (is a 30x30 image--lms/static/images/green-pointer.png). (self.gx, self.gy) = [int(x) - 15 for x in m.groups()] else: (self.gx, self.gy) = (0, 0) def _extra_context(self): return {'gx': self.gx, 'gy': self.gy} #----------------------------------------------------------------------------- @registry.register class Crystallography(InputTypeBase): """ An input for crystallography -- user selects 3 points on the axes, and we get a plane. TODO: what's the actual value format? """ template = "crystallography.html" tags = ['crystallography'] @classmethod def get_attributes(cls): """ Note: height, width are required. """ return [Attribute('height'), Attribute('width'), ] # ------------------------------------------------------------------------- @registry.register class VseprInput(InputTypeBase): """ Input for molecular geometry--show possible structures, let student pick structure and label positions with atoms or electron pairs. """ template = 'vsepr_input.html' tags = ['vsepr_input'] @classmethod def get_attributes(cls): """ Note: height, width, molecules and geometries are required. """ return [Attribute('height'), Attribute('width'), Attribute('molecules'), Attribute('geometries'), ] #------------------------------------------------------------------------- @registry.register class ChemicalEquationInput(InputTypeBase): """ An input type for entering chemical equations. Supports live preview. Example: <chemicalequationinput size="50"/> options: size -- width of the textbox. """ template = "chemicalequationinput.html" tags = ['chemicalequationinput'] @classmethod def get_attributes(cls): """ Can set size of text field. """ return [Attribute('size', '20'), Attribute('label', ''), ] def _extra_context(self): """ TODO (vshnayder): Get rid of this once we have a standard way of requiring js to be loaded. """ return { 'previewer': '{static_url}js/capa/chemical_equation_preview.js'.format( static_url=self.capa_system.STATIC_URL), } def handle_ajax(self, dispatch, data): """ Since chemcalc preview is the only AJAX handler for this input, check to see if the dispatch matches and send it through if it does """ if dispatch == 'preview_chemcalc': return self.preview_chemcalc(data) return {} def preview_chemcalc(self, data): """ Render an html preview of a chemical formula or equation. data should contain a key 'formula' and value 'some formula string'.
Returns a json dictionary: { 'preview' : 'the-preview-html' or '' 'error' : 'the-error' or '' } """ _ = self.capa_system.i18n.ugettext result = {'preview': '', 'error': ''} try: formula = data['formula'] except KeyError: result['error'] = _("No formula specified.") return result try: result['preview'] = chemcalc.render_to_html(formula) except pyparsing.ParseException as err: result['error'] = _("Couldn't parse formula: {error_msg}").format(error_msg=err.msg) except Exception: # this is unexpected, so log log.warning( "Error while previewing chemical formula", exc_info=True) result['error'] = _("Error while rendering preview") return result #------------------------------------------------------------------------- @registry.register class FormulaEquationInput(InputTypeBase): """ An input type for entering formula equations. Supports live preview. Example: <formulaequationinput size="50" label="Enter the equation for motion" /> options: size -- width of the textbox. trailing_text -- text to show after the input textbox when rendered, same as textline (useful for units) """ template = "formulaequationinput.html" tags = ['formulaequationinput'] @classmethod def get_attributes(cls): """ Can set size of text field. """ return [ Attribute('size', '20'), Attribute('inline', False), Attribute('label', ''), Attribute('trailing_text', ''), ] def _extra_context(self): """ TODO (vshnayder): Get rid of 'previewer' once we have a standard way of requiring js to be loaded. """ # `reported_status` is basically `status`, except we say 'unanswered' return { 'previewer': '{static_url}js/capa/src/formula_equation_preview.js'.format( static_url=self.capa_system.STATIC_URL), } def handle_ajax(self, dispatch, get): """ Since formcalc preview is the only AJAX handler for this input, check to see if the dispatch matches and send it through if it does """ if dispatch == 'preview_formcalc': return self.preview_formcalc(get) return {} def preview_formcalc(self, get): """ Render a preview of a formula or equation. `get` should contain a key 'formula' with a math expression. Returns a json dictionary: { 'preview' : '<some latex>' or '' 'error' : 'the-error' or '' 'request_start' : <time sent with request> } """ _ = self.capa_system.i18n.ugettext result = {'preview': '', 'error': ''} try: formula = get['formula'] except KeyError: result['error'] = _("No formula specified.") return result result['request_start'] = int(get.get('request_start', 0)) try: # TODO add references to valid variables and functions # At some point, we might want to mark invalid variables as red # or something, and this is where we would need to pass those in. result['preview'] = latex_preview(formula) except pyparsing.ParseException as err: result['error'] = _("Sorry, couldn't parse formula") result['formula'] = formula except Exception: # this is unexpected, so log log.warning( "Error while previewing formula", exc_info=True ) result['error'] = _("Error while rendering preview") return result #----------------------------------------------------------------------------- @registry.register class DragAndDropInput(InputTypeBase): """ Input for drag and drop problems. Allows student to drag and drop images and labels to base image. """ template = 'drag_and_drop_input.html' tags = ['drag_and_drop_input'] def setup(self): def parse(tag, tag_type): """Parses <tag ... /> xml element to dictionary. Stores the attributes of 'draggable' and 'target' tags in a dictionary and returns it.
Args: tag: xml etree element <tag...> with attributes tag_type: 'draggable' or 'target'. If tag_type is 'draggable': all attributes except id (name or label or icon or can_reuse) are optional If tag_type is 'target' all attributes (name, x, y, w, h) are required. (x, y) - coordinates of center of target, w, h - width and height of target. Returns: Dictionary of values of attributes: dict{'name': smth, 'label': smth, 'icon': smth, 'can_reuse': smth}. """ tag_attrs = dict() tag_attrs['draggable'] = { 'id': Attribute._sentinel, 'label': "", 'icon': "", 'can_reuse': "" } tag_attrs['target'] = { 'id': Attribute._sentinel, 'x': Attribute._sentinel, 'y': Attribute._sentinel, 'w': Attribute._sentinel, 'h': Attribute._sentinel } dic = dict() for attr_name in tag_attrs[tag_type].keys(): dic[attr_name] = Attribute(attr_name, default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag) if tag_type == 'draggable' and not self.no_labels: dic['label'] = dic['label'] or dic['id'] if tag_type == 'draggable': dic['target_fields'] = [parse(target, 'target') for target in tag.iterchildren('target')] return dic # add labels to images?: self.no_labels = Attribute('no_labels', default="False").parse_from_xml(self.xml) to_js = dict() # base image to drag and drop onto to_js['base_image'] = Attribute('img').parse_from_xml(self.xml) # outline places on image where to drag and drop to_js['target_outline'] = Attribute('target_outline', default="False").parse_from_xml(self.xml) # one draggable per target? to_js['one_per_target'] = Attribute('one_per_target', default="True").parse_from_xml(self.xml) # list of draggables to_js['draggables'] = [parse(draggable, 'draggable') for draggable in self.xml.iterchildren('draggable')] # list of targets to_js['targets'] = [parse(target, 'target') for target in self.xml.iterchildren('target')] # custom background color for labels: label_bg_color = Attribute('label_bg_color', default=None).parse_from_xml(self.xml) if label_bg_color: to_js['label_bg_color'] = label_bg_color self.loaded_attributes['drag_and_drop_json'] = json.dumps(to_js) self.to_render.add('drag_and_drop_json') #------------------------------------------------------------------------- @registry.register class EditAMoleculeInput(InputTypeBase): """ An input type for edit-a-molecule. Integrates with the molecule editor java applet. Example: <editamolecule size="50"/> options: size -- width of the textbox. """ template = "editamolecule.html" tags = ['editamoleculeinput'] @classmethod def get_attributes(cls): """ Can set size of text field. """ return [Attribute('file'), Attribute('missing', None)] def _extra_context(self): context = { 'applet_loader': '{static_url}js/capa/editamolecule.js'.format( static_url=self.capa_system.STATIC_URL), } return context #----------------------------------------------------------------------------- @registry.register class DesignProtein2dInput(InputTypeBase): """ An input type for design of a protein in 2D. Integrates with the Protex java applet. Example: <designprotein2d width="800" height="500" target_shape="E;NE;NW;W;SW;E;none" /> """ template = "designprotein2dinput.html" tags = ['designprotein2dinput'] @classmethod def get_attributes(cls): """ Note: width, height, and target_shape are required.
""" return [Attribute('width'), Attribute('height'), Attribute('target_shape') ] def _extra_context(self): context = { 'applet_loader': '{static_url}js/capa/design-protein-2d.js'.format( static_url=self.capa_system.STATIC_URL), } return context #----------------------------------------------------------------------------- @registry.register class EditAGeneInput(InputTypeBase): """ An input type for editing a gene. Integrates with the genex GWT application. Example: <editagene genex_dna_sequence="CGAT" genex_problem_number="1"/> """ template = "editageneinput.html" tags = ['editageneinput'] @classmethod def get_attributes(cls): """ Note: width, height, and dna_sequencee are required. """ return [Attribute('genex_dna_sequence'), Attribute('genex_problem_number') ] def _extra_context(self): context = { 'applet_loader': '{static_url}js/capa/edit-a-gene.js'.format( static_url=self.capa_system.STATIC_URL), } return context #--------------------------------------------------------------------- @registry.register class AnnotationInput(InputTypeBase): """ Input type for annotations: students can enter some notes or other text (currently ungraded), and then choose from a set of tags/optoins, which are graded. Example: <annotationinput> <title>Annotation Exercise</title> <text> They are the ones who, at the public assembly, had put savage derangement [ate] into my thinking [phrenes] |89 on that day when I myself deprived Achilles of his honorific portion [geras] </text> <comment>Agamemnon says that ate or 'derangement' was the cause of his actions: why could Zeus say the same thing?</comment> <comment_prompt>Type a commentary below:</comment_prompt> <tag_prompt>Select one tag:</tag_prompt> <options> <option choice="correct">ate - both a cause and an effect</option> <option choice="incorrect">ate - a cause</option> <option choice="partially-correct">ate - an effect</option> </options> </annotationinput> # TODO: allow ordering to be randomized """ template = "annotationinput.html" tags = ['annotationinput'] def setup(self): xml = self.xml self.debug = False # set to True to display extra debug info with input self.return_to_annotation = True # return only works in conjunction with annotatable xmodule self.title = xml.findtext('./title', 'Annotation Exercise') self.text = xml.findtext('./text') self.comment = xml.findtext('./comment') self.comment_prompt = xml.findtext( './comment_prompt', 'Type a commentary below:') self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:') self.options = self._find_options() # Need to provide a value that JSON can parse if there is no # student-supplied value yet. if self.value == '': self.value = 'null' self._validate_options() def _find_options(self): """ Returns an array of dicts where each dict represents an option. """ elements = self.xml.findall('./options/option') return [{ 'id': index, 'description': option.text, 'choice': option.get('choice') } for (index, option) in enumerate(elements)] def _validate_options(self): """ Raises a ValueError if the choice attribute is missing or invalid. """ valid_choices = ('correct', 'partially-correct', 'incorrect') for option in self.options: choice = option['choice'] if choice is None: raise ValueError('Missing required choice attribute.') elif choice not in valid_choices: raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format( choice, ', '.join(valid_choices))) def _unpack(self, json_value): """ Unpacks the json input state into a dict. 
""" d = json.loads(json_value) if not isinstance(d, dict): d = {} comment_value = d.get('comment', '') if not isinstance(comment_value, basestring): comment_value = '' options_value = d.get('options', []) if not isinstance(options_value, list): options_value = [] return { 'options_value': options_value, 'has_options_value': len(options_value) > 0, # for convenience 'comment_value': comment_value, } def _extra_context(self): extra_context = { 'title': self.title, 'text': self.text, 'comment': self.comment, 'comment_prompt': self.comment_prompt, 'tag_prompt': self.tag_prompt, 'options': self.options, 'return_to_annotation': self.return_to_annotation, 'debug': self.debug } extra_context.update(self._unpack(self.value)) return extra_context @registry.register class ChoiceTextGroup(InputTypeBase): """ Groups of radiobutton/checkboxes with text inputs. Examples: RadioButton problem <problem> <startouttext/> A person rolls a standard die 100 times and records the results. On the first roll they received a "1". Given this information select the correct choice and fill in numbers to make it accurate. <endouttext/> <choicetextresponse> <radiotextgroup label="What is the correct choice?"> <choice correct="false">The lowest number rolled was: <decoy_input/> and the highest number rolled was: <decoy_input/> .</choice> <choice correct="true">The lowest number rolled was <numtolerance_input answer="1"/> and there is not enough information to determine the highest number rolled. </choice> <choice correct="false">There is not enough information to determine the lowest number rolled, and the highest number rolled was: <decoy_input/> . </choice> </radiotextgroup> </choicetextresponse> </problem> CheckboxProblem: <problem> <startouttext/> A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\) , 2, 3, 4 ,5 ,6 and records the results. The first number they pick is \(\sqrt{2}\) Given this information select the correct choices and fill in numbers to make them accurate. <endouttext/> <choicetextresponse> <checkboxtextgroup label="What is the answer?"> <choice correct="true"> The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/> </choice> <choice correct="false"> The highest number selected was <decoy_input/> . </choice> <choice correct="true">There is not enough information given to determine the highest number which was selected. </choice> <choice correct="false">There is not enough information given to determine the lowest number selected. </choice> </checkboxtextgroup> </choicetextresponse> </problem> In the preceding examples the <decoy_input/> is used to generate a textinput html element in the problem's display. Since it is inside of an incorrect choice, no answer given for it will be correct, and thus specifying an answer for it is not needed. """ template = "choicetext.html" tags = ['radiotextgroup', 'checkboxtextgroup'] def setup(self): """ Performs setup for the initial rendering of the problem. `self.html_input_type` determines whether this problem is displayed with radiobuttons or checkboxes If the initial value of `self.value` is '' change it to {} so that the template has an empty dictionary to work with. 
sets the value of self.choices to be equal to the return value of `self.extract_choices` """ self.text_input_values = {} if self.tag == 'radiotextgroup': self.html_input_type = "radio" elif self.tag == 'checkboxtextgroup': self.html_input_type = "checkbox" else: _ = self.capa_system.i18n.ugettext msg = _("{input_type}: unexpected tag {tag_name}").format( input_type="ChoiceTextGroup", tag_name=self.tag ) raise Exception(msg) if self.value == '': # Make `value` an empty dictionary, if it currently has an empty # value. This is necessary because the template expects a # dictionary. self.value = {} self.choices = self.extract_choices(self.xml, self.capa_system.i18n) @classmethod def get_attributes(cls): """ Returns a list of `Attribute` for this problem type """ _ = lambda text: text return [ Attribute("show_correctness", "always"), Attribute("submitted_message", _("Answer received.")), Attribute("label", ""), ] def _extra_context(self): """ Returns a dictionary of extra content necessary for rendering this InputType. `input_type` is either 'radio' or 'checkbox' indicating whether the choices for this problem will have radiobuttons or checkboxes. """ return { 'input_type': self.html_input_type, 'choices': self.choices } @staticmethod def extract_choices(element, i18n): """ Extracts choices from the xml for this problem type. If we have xml that is as follows (choice names will have been assigned by now) <radiotextgroup> <choice correct = "true" name ="1_2_1_choiceinput_0bc"> The number <numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0" answer="5"/> Is the mean of the list. </choice> <choice correct = "false" name = "1_2_1_choiceinput_1bc"> False demonstration choice </choice> </radiotextgroup> Choices are used for rendering the problem properly. The function will set up choices as follows: choices =[ ("1_2_1_choiceinput_0bc", [{'type': 'text', 'contents': "The number", 'tail_text': '', 'value': '' }, {'type': 'textinput', 'contents': "1_2_1_choiceinput0_numtolerance_input_0", 'tail_text': 'Is the mean of the list', 'value': '' } ] ), ("1_2_1_choiceinput_1bc", [{'type': 'text', 'contents': "False demonstration choice", 'tail_text': '', 'value': '' } ] ) ] """ _ = i18n.ugettext choices = [] for choice in element: if choice.tag != 'choice': msg = u"[capa.inputtypes.extract_choices] {0}".format( # Translators: a "tag" is an XML element, such as "<b>" in HTML _("Expected a {expected_tag} tag; got {given_tag} instead").format( expected_tag=u"<choice>", given_tag=choice.tag, ) ) raise Exception(msg) components = [] choice_text = '' if choice.text is not None: choice_text += choice.text # Initialize our dict for the next content adder = { 'type': 'text', 'contents': choice_text, 'tail_text': '', 'value': '' } components.append(adder) for elt in choice: # for elements in the choice e.g. <text> <numtolerance_input> adder = { 'type': 'text', 'contents': '', 'tail_text': '', 'value': '' } tag_type = elt.tag # If the current `elt` is a <numtolerance_input> set the # `adder` type to 'numtolerance_input', and 'contents' to # the `elt`'s name. # Treat decoy_inputs and numtolerance_inputs the same in order # to prevent students from reading the Html and figuring out # which inputs are valid if tag_type in ('numtolerance_input', 'decoy_input'): # We set this to textinput, so that we get a textinput html # element.
adder['type'] = 'textinput' adder['contents'] = elt.get('name') else: adder['contents'] = elt.text # Add any tail text("is the mean" in the example) adder['tail_text'] = elt.tail if elt.tail else '' components.append(adder) # Add the tuple for the current choice to the list of choices choices.append((choice.get("name"), components)) return choices
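#-----------------------------------------------------------------------------
# Illustrative sketch (hypothetical; not a real capa input type): how a new
# input type would plug into the Attribute/InputTypeBase machinery above.
# The class name, tag, template, and attribute names are all invented here.

@registry.register
class SliderInput(InputTypeBase):
    """
    Hypothetical input type: a numeric slider.
    """
    template = "sliderinput.html"   # hypothetical template
    tags = ['sliderinput']          # hypothetical xml tag

    @classmethod
    def get_attributes(cls):
        """
        Declared attributes are parsed and validated by process_requirements().
        """
        return [Attribute('min', 0, transform=int),
                Attribute('max', 100, transform=int),
                Attribute('label', '')]

    def _extra_context(self):
        # Anything returned here is merged into the template render context.
        return {'range': self.loaded_attributes['max'] - self.loaded_attributes['min']}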
agpl-3.0
-1,700,529,865,447,046,400
34.400329
136
0.56178
false
sho-h/ruby_env
devkit/mingw/bin/lib/json/__init__.py
3
14173
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data interchange format. :mod:`json` exposes an API familiar to users of the standard library :mod:`marshal` and :mod:`pickle` modules. It is the externally maintained version of the :mod:`json` library contained in Python 2.6, but maintains compatibility with Python 2.4 and Python 2.5 and (currently) has significant performance advantages, even without using the optional C extension for speedups. Encoding basic Python object hierarchies:: >>> import json >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) '["foo", {"bar": ["baz", null, 1.0, 2]}]' >>> print json.dumps("\"foo\bar") "\"foo\bar" >>> print json.dumps(u'\u1234') "\u1234" >>> print json.dumps('\\') "\\" >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) {"a": 0, "b": 0, "c": 0} >>> from StringIO import StringIO >>> io = StringIO() >>> json.dump(['streaming API'], io) >>> io.getvalue() '["streaming API"]' Compact encoding:: >>> import json >>> json.dumps([1,2,3,{'4': 5, '6': 7}], sort_keys=True, separators=(',',':')) '[1,2,3,{"4":5,"6":7}]' Pretty printing:: >>> import json >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4) >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) { "4": 5, "6": 7 } Decoding JSON:: >>> import json >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj True >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' True >>> from StringIO import StringIO >>> io = StringIO('["streaming API"]') >>> json.load(io)[0] == 'streaming API' True Specializing JSON object decoding:: >>> import json >>> def as_complex(dct): ... if '__complex__' in dct: ... return complex(dct['real'], dct['imag']) ... return dct ... >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', ... object_hook=as_complex) (1+2j) >>> from decimal import Decimal >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') True Specializing JSON object encoding:: >>> import json >>> def encode_complex(obj): ... if isinstance(obj, complex): ... return [obj.real, obj.imag] ... raise TypeError(repr(o) + " is not JSON serializable") ... >>> json.dumps(2 + 1j, default=encode_complex) '[2.0, 1.0]' >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) '[2.0, 1.0]' >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) '[2.0, 1.0]' Using json.tool from the shell to validate and pretty-print:: $ echo '{"json":"obj"}' | python -m json.tool { "json": "obj" } $ echo '{ 1.2:3.4}' | python -m json.tool Expecting property name: line 1 column 2 (char 2) """ __version__ = '2.0.9' __all__ = [ 'dump', 'dumps', 'load', 'loads', 'JSONDecoder', 'JSONEncoder', ] __author__ = 'Bob Ippolito <bob@redivi.com>' from .decoder import JSONDecoder from .encoder import JSONEncoder _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, ) def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is true then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. 
If ``ensure_ascii`` is false, then some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. If ``check_circular`` is false, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is false, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance with the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg; otherwise ``JSONEncoder`` is used. """ # cached encoder if (not skipkeys and ensure_ascii and check_circular and allow_nan and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): iterable = _default_encoder.iterencode(obj) else: if cls is None: cls = JSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk) def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` to a JSON formatted ``str``. If ``skipkeys`` is true then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is false, then the return value will be a ``unicode`` instance subject to normal Python ``str`` to ``unicode`` coercion rules instead of being escaped to an ASCII ``str``. If ``check_circular`` is false, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is false, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance with the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation.
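For example (an illustrative doctest)::

    >>> json.dumps({'a': 1}, indent=2)
    '{\n  "a": 1\n}'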
If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg; otherwise ``JSONEncoder`` is used. """ # cached encoder if (not skipkeys and ensure_ascii and check_circular and allow_nan and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): return _default_encoder.encode(obj) if cls is None: cls = JSONEncoder return cls( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).encode(obj) _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. If the contents of ``fp`` is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed, and should be wrapped with ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` object and passed to ``loads()`` ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). ``object_pairs_hook`` is an optional function that will be called with the result of any object literal decoded with an ordered list of pairs. The return value of ``object_pairs_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, collections.OrderedDict will remember the order of insertion). If ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg; otherwise ``JSONDecoder`` is used. """ return loads(fp.read(), encoding=encoding, cls=cls, object_hook=object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw) def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). 
The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). ``object_pairs_hook`` is an optional function that will be called with the result of any object literal decoded with an ordered list of pairs. The return value of ``object_pairs_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, collections.OrderedDict will remember the order of insertion). If ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority. ``parse_float``, if specified, will be called with the string of every JSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for JSON floats (e.g. decimal.Decimal). ``parse_int``, if specified, will be called with the string of every JSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for JSON integers (e.g. float). ``parse_constant``, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN, null, true, false. This can be used to raise an exception if invalid JSON numbers are encountered. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg; otherwise ``JSONDecoder`` is used. """ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: kw['object_hook'] = object_hook if object_pairs_hook is not None: kw['object_pairs_hook'] = object_pairs_hook if parse_float is not None: kw['parse_float'] = parse_float if parse_int is not None: kw['parse_int'] = parse_int if parse_constant is not None: kw['parse_constant'] = parse_constant return cls(encoding=encoding, **kw).decode(s)
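# Illustrative note (descriptive comment, not original module code): the
# module-level _default_encoder/_default_decoder fast paths above are taken
# only when every keyword argument is left at its default, e.g.:
#
#     json.dumps({'a': 1})            # uses the cached _default_encoder
#     json.dumps({'a': 1}, indent=2)  # constructs a fresh JSONEncoder
#     json.loads('{"a": 1}')          # uses the cached _default_decoder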
mit
-3,087,991,006,439,876,000
39.80826
82
0.630142
false
bohdon/maya-quickmenus
src/quickmenus/scripts/quickmenus/qmenus/menus.py
1
16772
import logging

import pymel.core as pm

try:
    import resetter
except ImportError:
    resetter = None

import quickmenus

__all__ = [
    'CameraQuickSwitchMenu',
    'ComponentSelectionMaskingMenu',
    'DisplayMaskingMenu',
    'ResetterMenu',
    'SelectionMaskingMenu',
]

LOG = logging.getLogger('quickmenus')


class SelectionMaskingMenu(quickmenus.MarkingMenu):
    """
    A radial menu for quickly changing selection masking settings.
    Only displays on model viewport panels.
    """

    allkeys = [
        'handle', 'ikHandle', 'joint', 'nurbsCurve', 'cos', 'stroke',
        'nurbsSurface', 'polymesh', 'subdiv', 'plane', 'lattice', 'cluster',
        'sculpt', 'nonlinear', 'particleShape', 'emitter', 'field', 'spring',
        'rigidBody', 'fluid', 'hairSystem', 'follicle', 'rigidConstraint',
        'collisionModel', 'light', 'camera', 'texture', 'ikEndEffector',
        'locator', 'dimension', 'nCloth', 'nRigid', 'dynamicConstraint',
    ]

    def __init__(self):
        super(self.__class__, self).__init__()
        self.popupMenuId = 'QuickMenus_SelectionMaskingMenu'
        self.mouseButton = 1
        self.buildItemsOnShow = True

    def shouldBuild(self):
        return self.panelType == 'modelPanel'

    def buildMenuItems(self):
        pm.menuItem(rp='NW', l='Reset', ecr=False, ann='Reset all selection masks', c=pm.Callback(self.resetSelectionMasking))
        pm.menuItem(rp='NE', l='All Off', ecr=False, c=pm.Callback(self.setObjectSelectType, enabled=False, keys=self.allkeys))
        pm.menuItem(rp='SE', l='Clear Selection', ecr=True, c=pm.Callback(pm.select, cl=True))
        pm.menuItem(rp='S', l='Use Selected', c=pm.Callback(self.setMaskingToSelection))

        selType = lambda x: pm.selectType(q=True, **{x: True})

        # common masking
        pm.menuItem(rp='N', l='Polys', ecr=False, cb=selType('p'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['polymesh']))
        pm.menuItem(rp='E', l='Curves', ecr=False, cb=selType('nurbsCurve'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['nurbsCurve', 'cos', 'stroke']))
        pm.menuItem(rp='SW', l='Joints', ecr=False, cb=selType('joint'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['joint']))
        pm.menuItem(rp='W', l='Surfaces', ecr=False, cb=selType('nurbsSurface'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['nurbsSurface', 'subdiv', 'plane']))

        # extended menu
        pm.menuItem(l='Selection Masking', en=False)
        pm.menuItem(d=True)
        pm.menuItem(l='Render', ecr=False, cb=selType('light'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['light', 'camera', 'texture']))
        pm.menuItem(l='Deformers', ecr=False, cb=selType('lattice'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['lattice', 'cluster', 'sculpt', 'nonlinear']))
        pm.menuItem(l='Dynamics', ecr=False, cb=selType('particleShape'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['particleShape', 'emitter', 'field', 'spring', 'rigidBody', 'fluid', 'hairSystem', 'follicle', 'rigidConstraint']))
        pm.menuItem(l='Misc', ecr=False, cb=selType('ikEndEffector'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['ikEndEffector', 'locator', 'dimension']))

    def setObjectSelectType(self, enabled, keys):
        pm.selectMode(object=True)
        kwargs = {}
        for k in keys:
            kwargs[k] = enabled
        pm.selectType(**kwargs)

    def resetSelectionMasking(self):
        pm.selectMode(component=True)
        pm.selectMode(object=True)
        pm.mel.selectionMaskResetAll()

    def setMaskingToSelection(self):
        self.setObjectSelectType(enabled=False, keys=self.allkeys)
        sel = pm.selected()
        keys = set()
        for obj in sel:
            if obj.nodeType() == 'transform':
                shapes = obj.getShapes()
                if len(shapes):
                    selType = shapes[0].nodeType()
                    if selType in ['nurbsSurface', 'subdiv', 'joint', 'camera', 'locator']:
                        keys.add(str(selType))
                    elif selType == 'mesh':
                        keys.add('polymesh')
                    elif 'light' in selType.lower():
                        keys.add('light')
                    elif selType == 'nurbsCurve':
                        # use the selectType flag name here; selectType has no 'curve' flag
                        keys.add('nurbsCurve')
        if not len(keys):
            return
        LOG.info('Set selection masking to {0}'.format(', '.join(keys)))
        self.setObjectSelectType(enabled=True, keys=keys)


class DisplayMaskingMenu(quickmenus.MarkingMenu):
    """
    A radial menu for quickly changing display masking settings.
    Only displays on model viewport panels.
    """

    def __init__(self):
        super(self.__class__, self).__init__()
        self.popupMenuId = 'QuickMenus_DisplayMaskingMenu'
        self.mouseButton = 2
        self.buildItemsOnShow = True

    def shouldBuild(self):
        return self.panelType == 'modelPanel'

    def buildMenuItems(self):
        pm.menuItem(rp='NW', l='Show All', ecr=False, c=pm.Callback(self.setDisplay, enabled=True, keys=['allObjects']))
        pm.menuItem(rp='NE', l='Hide All', ecr=False, c=pm.Callback(self.setDisplay, enabled=False, keys=['allObjects']))
        pm.menuItem(rp='S', l='Hide Selected', ecr=True, c=pm.Callback(self.hideSelected))

        query = lambda x: pm.modelEditor(self.panel, q=True, **{x: True})

        # common masking
        pm.menuItem(rp='N', l='Polys', ecr=False, cb=query('polymeshes'), c=pm.CallbackWithArgs(self.setDisplay, keys=['polymeshes']))
        pm.menuItem(rp='E', l='Curves', ecr=False, cb=query('nurbsCurves'), c=pm.CallbackWithArgs(self.setDisplay, keys=['nurbsCurves']))
        pm.menuItem(rp='W', l='Surfaces', ecr=False, cb=query('nurbsSurfaces'), c=pm.CallbackWithArgs(self.setDisplay, keys=['nurbsSurfaces', 'subdivSurfaces']))
        pm.menuItem(rp='SW', l='Joints', ecr=False, cb=query('joints'), c=pm.CallbackWithArgs(self.setDisplay, keys=['joints']))
        pm.menuItem(rp='SE', l='Lights', ecr=False, cb=query('lights'), c=pm.CallbackWithArgs(self.setDisplay, keys=['lights']))

        # extended menu
        pm.menuItem(l='Display Masking', en=False)
        pm.menuItem(d=True)
        pm.menuItem(l='Cameras', ecr=False, cb=query('cameras'), c=pm.CallbackWithArgs(self.setDisplay, keys=['cameras']))
        pm.menuItem(l='Locators', ecr=False, cb=query('locators'), c=pm.CallbackWithArgs(self.setDisplay, keys=['locators']))
        pm.menuItem(l='Deformers', ecr=False, cb=query('deformers'), c=pm.CallbackWithArgs(self.setDisplay, keys=['deformers']))
        pm.menuItem(l='Dynamics', ecr=False, cb=query('dynamics'), c=pm.CallbackWithArgs(self.setDisplay, keys=['dynamics']))
        pm.menuItem(l='Misc', ecr=False, cb=query('planes'), c=pm.CallbackWithArgs(self.setDisplay, keys=['planes', 'ikHandles', 'fluids', 'hairSystems', 'follicles', 'dynamicConstraints', 'pivots', 'handles', 'textures', 'strokes']))
        # pm.menuItem(l='GUI', ecr=False, cb=query('planes'), c=pm.CallbackWithArgs(self.setObjectSelectType, keys=['ikEndEffector', 'locator', 'dimension']))

    def setDisplay(self, enabled, keys):
        kwargs = {}
        for k in keys:
            kwargs[k] = enabled
        pm.modelEditor(self.panel, e=True, **kwargs)

    def hideSelected(self):
        sel = pm.selected()
        keys = set()
        for obj in sel:
            objkeys = self.getDisplayKeys(obj)
            for k in objkeys:
                keys.add(k)
        if not len(keys):
            return
        LOG.info('Hiding {0}'.format(', '.join(keys)))
        self.setDisplay(False, keys)

    def getDisplayKeys(self, obj):
        # conversion of node type -> display flag
        nodeTypeDict = {
            'nurbsCurve': 'nurbsCurves',
            'nurbsSurface': 'nurbsSurfaces',
            'mesh': 'polymeshes',
            'subdiv': 'subdivSurfaces',
            'plane': 'planes',
            'light': 'lights',
            'camera': 'cameras',
            'controlVertices': 'controlVertices',
            'grid': 'grid',
            'hulls': 'hulls',
            'joint': 'joints',
            'ikHandle': 'ikHandles',
            'lattice': 'deformers',
            'clusterHandle': 'deformers',
            'softModHandle': 'deformers',
            'deformFunc': 'deformers',
            'implicitSphere': 'deformers',
            'particle': 'dynamics',
            'pointEmitter': 'dynamics',
            'rigidBody': 'dynamics',
            'field': 'dynamics',
            'rigidConstraint': 'dynamics',
            'fluidShape': 'fluids',
            'hairSystem': 'hairSystems',
            'follicle': 'follicles',
            'nCloth': 'nCloths',
            'nParticle': 'nParticles',
            'nRigid': 'nRigids',
            'dynamicConstraint': 'dynamicConstraints',
            'locator': 'locators',
            'manipulators': 'manipulators',
            'dimensionShape': 'dimensions',
            'handle': 'handles',
            'pivot': 'pivots',
            'place3dTexture': 'textures',
            'place2dTexture': 'textures',
            'pfxGeometry': 'strokes',
        }
        types = self.getShapeTypes(obj, nodeTypeDict.keys())
        keys = [nodeTypeDict[t] for t in types if t in nodeTypeDict]
        return keys

    def getShapeTypes(self, obj, options):
        """
        Return all shape types represented by the given object
        that are in the given list of options.
        Climbs the inheritance tree to find values if needed
        """
        objtype = obj.nodeType()
        if objtype in options:
            return [objtype]
        # get shapes
        if objtype == 'transform':
            shapes = obj.getShapes()
        else:
            # the node itself is already a shape
            shapes = [obj]
        # analyze shape types
        types = []
        for s in shapes:
            inheritypes = s.nodeType(i=True)
            for t in reversed(inheritypes):
                if t in options:
                    types.append(t)
                    break
        return types


class CameraQuickSwitchMenu(quickmenus.RMBMarkingMenu):
    """
    A radial menu that displays all cameras in the scene for easy switching.
    """

    def buildMenuItems(self):
        # find camera
        try:
            camUnderPointer = pm.PyNode(pm.modelPanel(self.panel, q=True, cam=True))
            if isinstance(camUnderPointer, pm.nt.Camera):
                camera = camUnderPointer
            else:
                camera = camUnderPointer.getShape()
        except:
            LOG.warning('could not find camera for panel: {0}'.format(self.panel))
            return

        menuItemCol = pm.radioMenuItemCollection()
        isOrtho = camera.isOrtho()
        # list same type cameras in radial positions
        similar = sorted([c for c in pm.ls(typ='camera') if c.isOrtho() == isOrtho])
        rps = quickmenus.getRadialMenuPositions(len(similar))
        for cam, rp in zip(similar, rps):
            kw = {}
            if rp is not None:
                kw['rp'] = rp
            if cam == camera:
                kw['rb'] = True
                kw['cl'] = menuItemCol
            pm.menuItem(l=cam.getParent(), c=pm.Callback(pm.mel.lookThroughModelPanel, str(cam), str(self.panel)), **kw)
        if len(rps) > 8:
            pm.menuItem(d=True)
        # list other cameras
        dissimilar = sorted([c for c in pm.ls(typ='camera') if c.isOrtho() != isOrtho])
        for cam in dissimilar:
            pm.menuItem(l=cam.getParent(), c=pm.Callback(pm.mel.lookThroughModelPanel, str(cam), str(self.panel)))


class ComponentSelectionMaskingMenu(quickmenus.MarkingMenu):

    allkeys = [
        'cv', 'vertex', 'subdivMeshPoint', 'latticePoint', 'particle',
        'editPoint', 'curveParameterPoint', 'surfaceParameterPoint', 'puv',
        'polymeshEdge', 'subdivMeshEdge', 'isoparm', 'surfaceEdge',
        'surfaceFace', 'springComponent', 'facet', 'subdivMeshFace', 'hull',
        'rotatePivot', 'scalePivot', 'jointPivot', 'selectHandle',
        'localRotationAxis', 'imagePlane', 'surfaceUV',
    ]

    def __init__(self):
        super(self.__class__, self).__init__()
        self.popupMenuId = 'QuickMenus_ComponentSelectionMaskingMenu'
        self.mouseButton = 1
        self.buildItemsOnShow = True

    def shouldBuild(self):
        return self.panelType == 'modelPanel'

    def buildMenuItems(self):
        pm.menuItem(rp='N', l='Points', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['cv', 'vertex', 'subdivMeshPoint', 'latticePoint', 'particle']))
        pm.menuItem(rp='NE', l='Handles', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['selectHandle']))
        pm.menuItem(rp='E', l='Lines', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['polymeshEdge', 'subdivMeshEdge', 'isoparm', 'surfaceEdge', 'springComponent']))
        pm.menuItem(rp='SE', l='Hulls', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['hull']))
        pm.menuItem(rp='S', l='Faces', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['surfaceFace', 'facet', 'subdivMeshFace']))
        pm.menuItem(rp='SW', l='Pivots', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['rotatePivot', 'scalePivot', 'jointPivot']))
        pm.menuItem(rp='W', l='Param', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['editPoint', 'curveParameterPoint', 'surfaceParameterPoint', 'surfaceUV', 'puv']))
        pm.menuItem(rp='NW', l='Misc', ecr=False, c=pm.Callback(self.setComponentSelectType, keys=['localRotationAxis', 'imagePlane']))

    def setComponentSelectType(self, enabled=True, keys={}):
        pm.selectMode(component=True)
        kwargs = {}
        for k in keys:
            kwargs[k] = enabled
        for k in self.allkeys:
            if k not in kwargs:
                kwargs[k] = not enabled
        pm.selectType(**kwargs)


class ResetterMenu(quickmenus.MarkingMenu):

    def __init__(self):
        super(self.__class__, self).__init__()
        self.popupMenuId = 'QuickMenus_ResetterMenu'
        self.mouseButton = 2

    def shouldBuild(self):
        return self.panelType == 'modelPanel'

    def buildMenuItems(self):
        self.buildSimpleItems()
        self.buildResetterItems()

    def buildSimpleItems(self):
        pm.menuItem(rp='W', l='Rotate', ecr=True, c=pm.Callback(self.simpleReset, rot=True), ann='Reset the rotation of the selected objects')
        pm.menuItem(rp='S', l='Translate', ecr=True, c=pm.Callback(self.simpleReset, trans=True), ann='Reset the position of the selected objects')
        pm.menuItem(rp='E', l='Scale', ecr=True, c=pm.Callback(self.simpleReset, scale=True), ann='Reset the scale of the selected objects')
        if not resetter:
            # add fallback menu item if resetter is not available
            pm.menuItem(rp='N', l='TRS', ecr=True, c=pm.Callback(self.simpleReset, trans=True, rot=True, scale=True), ann='Reset the selected objects\' transformations to identity, even if defaults are set')

    def buildResetterItems(self):
        if not resetter:
            return
        pm.menuItem(rp='N', l='Smart', ecr=True, c=pm.Callback(resetter.reset), ann='Reset the selected objects\' attributes to the defaults, or identity if defaults are not set')
        pm.menuItem(rp='NE', l='Defaults', ecr=True, c=pm.CallbackWithArgs(resetter.reset, useBasicDefaults=False), ann='Reset the selected objects\' attributes to their defaults, does nothing if no defaults are set')
        pm.menuItem(rp='SE', l='All Defaults', ecr=True, c=pm.Callback(resetter.resetAll), ann='Reset all objects\' attributes with defaults set to their default values')
        pm.menuItem(l='Resetter', ecr=False, c=pm.Callback(resetter.GUI), ann='Open the Resetter GUI')
        pm.menuItem(d=True)
        pm.menuItem(l='Select Objects', ecr=True, c=pm.Callback(self.selectObjectsWithDefaults), ann='Select all objects in the scene that have attribute defaults')

    def selectObjectsWithDefaults(self):
        if resetter:
            pm.select(resetter.getObjectsWithDefaults())

    def simpleReset(self, trans=False, rot=False, scale=False):
        for obj in pm.selected(typ='transform'):
            if trans:
                obj.t.set([0, 0, 0])
            if rot:
                obj.r.set([0, 0, 0])
            if scale:
                obj.s.set([1, 1, 1])
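The masking menus above ultimately reduce to flipping pm.selectType flags while in object selection mode. A minimal standalone sketch of that core, assuming only a Maya session with PyMEL (set_object_masking is an illustrative helper, not part of the module):

import pymel.core as pm

def set_object_masking(enabled, keys):
    """Enable or disable object-level selection masks for the given flags."""
    pm.selectMode(object=True)                   # masks only apply in object mode
    pm.selectType(**{k: enabled for k in keys})  # e.g. keys=['polymesh', 'joint']

# usage: make only meshes and joints selectable
# set_object_masking(False, SelectionMaskingMenu.allkeys)  # everything off first
# set_object_masking(True, ['polymesh', 'joint'])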
mit
4,980,898,991,851,953,000
43.136842
243
0.598974
false
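DisplayMaskingMenu in the file above drives per-panel visibility through pm.modelEditor edit flags. A hedged standalone equivalent, assuming a model panel currently has focus (hide_in_current_panel is an illustrative helper, not part of the module):

import pymel.core as pm

def hide_in_current_panel(**flags):
    """Toggle display flags on the focused model panel, e.g. joints=False."""
    panel = pm.getPanel(withFocus=True)
    if pm.getPanel(typeOf=panel) == 'modelPanel':
        pm.modelEditor(panel, e=True, **flags)

# hide_in_current_panel(joints=False, lights=False)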
drogenlied/qudi
hardware/awg/tektronix_awg5002c.py
1
57135
# -*- coding: utf-8 -*-
"""
This file contains the Qudi hardware module for AWG5000 Series.

Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.

Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""

import time
from ftplib import FTP
from socket import socket, AF_INET, SOCK_STREAM
import os
from collections import OrderedDict
from fnmatch import fnmatch

from core.base import Base
from interface.pulser_interface import PulserInterface


class AWG5002C(Base, PulserInterface):
    """ Unstable and in construction, Alex Stark """

    _modclass = 'awg5002c'
    _modtype = 'hardware'

    # declare connectors
    # _out = {'awg5002c': 'PulserInterface'}
    _out = {'pulser': 'PulserInterface'}

    def __init__(self, config, **kwargs):
        super().__init__(config=config, **kwargs)

        self.connected = False
        # AWG5002C has possibility for sequence output
        # self.use_sequencer = True
        self.sequence_mode = True

        self._marker_byte_dict = {0: b'\x00', 1: b'\x01', 2: b'\x02', 3: b'\x03'}
        self.current_loaded_asset = None

    def on_activate(self, e):
        """ Initialisation performed during activation of the module.

        @param object e: Event class object from Fysom.
                         An object created by the state machine module Fysom,
                         which is connected to a specific event (have a look in
                         the Base Class). This object contains the passed event,
                         the state before the event happened and the destination
                         of the state which should be reached after the event
                         had happened.
        """
        config = self.getConfiguration()

        if 'awg_IP_address' in config.keys():
            self.ip_address = config['awg_IP_address']
        else:
            self.log.error('No IP address parameter "awg_IP_address" found '
                           'in the config for the AWG5002C! Correct that!')

        if 'awg_port' in config.keys():
            self.port = config['awg_port']
        else:
            self.log.error('No port parameter "awg_port" found in the config '
                           'for the AWG5002C! Correct that!')

        if 'timeout' in config.keys():
            self._timeout = config['timeout']
        else:
            self.log.error('No parameter "timeout" found in the config for '
                           'the AWG5002C! Take a default value of 10s.')
            self._timeout = 10

        # connect ethernet socket and FTP
        self.soc = socket(AF_INET, SOCK_STREAM)
        self.soc.settimeout(self._timeout)  # apply the configured timeout
        self.soc.connect((self.ip_address, self.port))
        self.connected = True

        self.input_buffer = int(2 * 1024)  # buffer length for received text

        if 'default_sample_rate' in config.keys():
            self._sample_rate = self.set_sample_rate(config['default_sample_rate'])
        else:
            self.log.warning('No parameter "default_sample_rate" found in '
                             'the config for the AWG5002C! The maximum sample '
                             'rate is used instead.')
            self._sample_rate = self.get_constraints()['sample_rate'][1]

        if 'awg_ftp_path' in config.keys():
            self.ftp_path = config['awg_ftp_path']
        else:
            self.log.error('No parameter "awg_ftp_path" found in the config '
                           'for the AWG5002C! State the FTP folder of this '
                           'device!')

        # settings for remote access on the AWG PC
        self.asset_directory = '\\waves'

        if 'pulsed_file_dir' in config.keys():
            self.pulsed_file_dir = config['pulsed_file_dir']
            if not os.path.exists(self.pulsed_file_dir):
                homedir = self.get_home_dir()
                self.pulsed_file_dir = os.path.join(homedir, 'pulsed_files')
                self.log.warning('The directory defined in parameter '
                                 '"pulsed_file_dir" in the config for '
                                 'SequenceGeneratorLogic class does not exist!\n'
                                 'The default home directory\n{0}\n will be '
                                 'taken instead.'.format(self.pulsed_file_dir))
        else:
            homedir = self.get_home_dir()
            self.pulsed_file_dir = os.path.join(homedir, 'pulsed_files')
            self.log.warning('No parameter "pulsed_file_dir" was specified '
                             'in the config for SequenceGeneratorLogic as '
                             'directory for the pulsed files!\n'
                             'The default home directory\n{0}\n'
                             'will be taken instead.'.format(self.pulsed_file_dir))

        self.host_waveform_directory = self._get_dir_for_name('sampled_hardware_files')

    def on_deactivate(self, e):
        """ Deinitialisation performed during deactivation of the module.

        @param object e: Event class object from Fysom. A more detailed
                         explanation can be found in method activation.
        """
        self.connected = False
        self.soc.close()

    # =========================================================================
    # Below all the Pulser Interface routines.
    # =========================================================================

    def get_constraints(self):
        """ Retrieve the hardware constraints from the Pulsing device.

        @return dict: dict with constraints for the sequence generation and GUI

        Provides all the constraints (e.g. sample_rate, amplitude,
        total_length_bins, channel_config, ...) related to the pulse generator
        hardware to the caller.
        The keys of the returned dictionary are the str name for the
        constraints (which are set in this method).

                    NO OTHER KEYS SHOULD BE INVENTED!

        If you are not sure about the meaning, look in other hardware files to
        get an impression. If still additional constraints are needed, then
        they have to be added to all files containing this interface.

        The items of the keys are again dictionaries which have the generic
        dictionary form:
            {'min': <value>,
             'max': <value>,
             'step': <value>,
             'unit': '<value>'}

        Only the key 'activation_config' differs, since it contains the
        channel configuration/activation information.

        If the constraints cannot be set in the pulsing hardware (because it
        might e.g. have no sequence mode) then write just zero to each generic
        dict. Note that there is a difference between float input (0.0) and
        integer input (0).

        ALL THE PRESENT KEYS OF THE CONSTRAINTS DICT MUST BE ASSIGNED!
        """
        constraints = {}

        # if interleave option is available, then sample rate constraints must
        # be assigned to the output of a function called
        # _get_sample_rate_constraints()
        # which outputs the shown dictionary with the correct values depending
        # on the present mode. Then the GUI will have to check again the
        # limitations if interleave was selected.
        constraints['sample_rate'] = {'min': 10.0e6, 'max': 600.0e6,
                                      'step': 1, 'unit': 'Samples/s'}

        # The file formats are hardware specific. The sequence_generator_logic
        # will need this information to choose the proper output format for
        # waveform and sequence files.
        constraints['waveform_format'] = 'wfm'
        constraints['sequence_format'] = 'seq'

        # the stepsize will be determined by the DAC in combination with the
        # maximal output amplitude (in Vpp):
        constraints['a_ch_amplitude'] = {'min': 0.02, 'max': 4.5,
                                         'step': 0.001, 'unit': 'Vpp'}

        constraints['a_ch_offset'] = {'min': -2.25, 'max': 2.25,
                                      'step': 0.001, 'unit': 'V'}

        constraints['d_ch_low'] = {'min': -1, 'max': 2.6,
                                   'step': 0.01, 'unit': 'V'}

        constraints['d_ch_high'] = {'min': -0.9, 'max': 2.7,
                                    'step': 0.01, 'unit': 'V'}

        # for arbitrary waveform generators, these values will be used. The
        # step value corresponds to the waveform granularity.
        constraints['sampled_file_length'] = {'min': 1, 'max': 32400000,
                                              'step': 1, 'unit': 'Samples'}

        # if only digital bins can be saved, then their limitation is
        # different compared to a waveform file
        constraints['digital_bin_num'] = {'min': 0, 'max': 0,
                                          'step': 0, 'unit': '#'}

        constraints['waveform_num'] = {'min': 1, 'max': 32000,
                                       'step': 1, 'unit': '#'}

        constraints['sequence_num'] = {'min': 1, 'max': 4000,
                                       'step': 1, 'unit': '#'}

        constraints['subsequence_num'] = {'min': 1, 'max': 8000,
                                          'step': 1, 'unit': '#'}

        # If sequencer mode is enabled, then sequence_param should not be just
        # an empty dictionary.
        sequence_param = OrderedDict()
        sequence_param['repetitions'] = {'min': 0, 'max': 65536, 'step': 1,
                                         'unit': '#'}
        sequence_param['trigger_wait'] = {'min': False, 'max': True, 'step': 1,
                                          'unit': 'bool'}
        sequence_param['event_jump_to'] = {'min': -1, 'max': 8000, 'step': 1,
                                           'unit': 'row'}
        sequence_param['go_to'] = {'min': 0, 'max': 8000, 'step': 1,
                                   'unit': 'row'}
        constraints['sequence_param'] = sequence_param

        # the names a_ch<num> and d_ch<num> are generic names, which describe
        # UNAMBIGUOUSLY the channels. Here all possible channel configurations
        # are stated, where only the generic names should be used. The names
        # for the different configurations can be customarily chosen.
        activation_config = OrderedDict()
        activation_config['config1'] = ['a_ch1', 'd_ch1', 'd_ch2',
                                        'a_ch2', 'd_ch3', 'd_ch4']
        activation_config['config2'] = ['a_ch1', 'd_ch1', 'd_ch2']
        activation_config['config3'] = ['a_ch2', 'd_ch3', 'd_ch4']
        constraints['activation_config'] = activation_config

        return constraints

    def pulser_on(self):
        """ Switches the pulsing device on.

        @return int: error code (0:OK, -1:error, higher number corresponds to
                     current status of the device. Check then the
                     class variable status_dic.)
        """
        self.tell('AWGC:RUN\n')
        return self.get_status()[0]

    def pulser_off(self):
        """ Switches the pulsing device off.

        @return int: error code (0:OK, -1:error, higher number corresponds to
                     current status of the device. Check then the
                     class variable status_dic.)
        """
        self.tell('AWGC:STOP\n')
        return self.get_status()[0]

    def upload_asset(self, asset_name=None):
        """ Upload an already hardware conform file to the device.
            Does NOT load it into channels.

        @param str asset_name: name of the ensemble/sequence to be uploaded

        @return int: error code (0:OK, -1:error)

        If nothing is passed, the method will be skipped.
        """
        if asset_name is None:
            self.log.warning('No asset name provided for upload!\nCorrect '
                             'that!\nCommand will be ignored.')
            return -1

        # at first delete all files with that name, which might lead to
        # confusions in the upload procedure:
        self.delete_asset(asset_name)

        # create list of filenames to be uploaded
        upload_names = []
        filelist = os.listdir(self.host_waveform_directory)
        for filename in filelist:
            is_wfm = filename.endswith('.wfm')
            if is_wfm and (asset_name + '_ch') in filename:
                upload_names.append(filename)
            if (asset_name + '.seq') in filename:
                upload_names.append(filename)

        # upload files
        for name in upload_names:
            self._send_file(name)
        return 0

    def _send_file(self, filename):
        """ Sends an already hardware specific waveform file to the pulse
            generator's waveform directory.

        @param string filename: The file name of the source file

        @return int: error code (0:OK, -1:error)

        Unused for digital pulse generators without sequence storage
        capability (PulseBlaster, FPGA).
        """
        filepath = os.path.join(self.host_waveform_directory, filename)

        with FTP(self.ip_address) as ftp:
            ftp.login()  # login as default user anonymous, passwd anonymous@
            ftp.cwd(self.asset_directory)
            with open(filepath, 'rb') as uploaded_file:
                ftp.storbinary('STOR ' + filename, uploaded_file)

    def load_asset(self, asset_name, load_dict=None):
        """ Loads a sequence or waveform to the specified channel of the
            pulsing device.

        @param str asset_name: The name of the asset to be loaded

        @param dict load_dict: a dictionary with keys being one of the
                               available channel numbers and items being the
                               name of the already sampled
                               waveform/sequence files.
                               Examples:   {1: rabi_Ch1, 2: rabi_Ch2}
                                           {1: rabi_Ch2, 2: rabi_Ch1}
                               This parameter is optional. If none is given
                               then the channel association is invoked from
                               the sequence generation,
                               i.e. the filename appendix (_Ch1, _Ch2 etc.)

        @return int: error code (0:OK, -1:error)

        Unused for digital pulse generators without sequence storage
        capability (PulseBlaster, FPGA).
        """
        if load_dict is None:
            load_dict = {}

        path = self.ftp_path + self.get_asset_dir_on_device()

        # Find all files associated with the specified asset name
        file_list = self._get_filenames_on_device()
        filename = []

        if (asset_name + '.seq') in file_list:
            file_name = asset_name + '.seq'

            self.tell('SOUR1:FUNC:USER "{0}/{1}"\n'.format(path, file_name))
            # set the AWG to the event jump mode:
            self.tell('AWGCONTROL:EVENT:JMODE EJUMP')

            self.current_loaded_asset = asset_name
        else:
            for file in file_list:
                if file == asset_name + '_ch1.wfm':
                    self.tell('SOUR1:FUNC:USER "{0}/{1}"\n'.format(path, asset_name + '_ch1.wfm'))
                    filename.append(file)
                elif file == asset_name + '_ch2.wfm':
                    self.tell('SOUR2:FUNC:USER "{0}/{1}"\n'.format(path, asset_name + '_ch2.wfm'))
                    filename.append(file)

            if load_dict == {} and filename == []:
                self.log.warning('No file and channel provided for load!\n'
                                 'Correct that!\nCommand will be ignored.')

        for channel_num in list(load_dict):
            file_name = str(load_dict[channel_num]) + '_ch{0}.wfm'.format(int(channel_num))
            self.tell('SOUR{0}:FUNC:USER "{1}/{2}"\n'.format(channel_num, path, file_name))

        if len(list(load_dict)) > 0:
            self.current_loaded_asset = asset_name

        return 0

    def get_loaded_asset(self):
        """ Retrieve the currently loaded asset name of the device.

        @return str: Name of the current asset, that can be either a filename
                     of a waveform, a sequence etc.
        """
        return self.current_loaded_asset

    def clear_all(self):
        """ Clears the loaded waveform from the pulse generator's RAM.

        @return int: error code (0:OK, -1:error)

        Delete all waveforms and sequences from Hardware memory and clear the
        visual display. Unused for digital pulse generators without sequence
        storage capability (PulseBlaster, FPGA).
        """
        self.tell('WLIST:WAVEFORM:DELETE ALL\n')
        self.current_loaded_asset = None
        return

    def get_status(self):
        """ Retrieves the status of the pulsing hardware

        @return (int, dict): integer value of the current status with the
                             corresponding dictionary containing status
                             description for all the possible status variables
                             of the pulse generator hardware.
                0 indicates that the instrument has stopped.
                1 indicates that the instrument is running.
                2 indicates that the instrument is waiting for trigger.
               -1 indicates that the request of the status for AWG has failed.
        """
        status_dic = {}
        # the possible status of the AWG have the following meaning:
        status_dic[-1] = 'Failed Request or Failed Communication with device.'
        status_dic[0] = 'Device has stopped, but can receive commands.'
        status_dic[1] = 'Device is active and running.'
        status_dic[2] = 'Device is active and waiting for trigger.'

        # save the status dictionary in a class variable for later access.
        self.status_dic = status_dic

        # Keep in mind that the received integer number for the running status
        # is 2 for this specific AWG5000 series device. Therefore a received
        # message of 2 should be converted to an integer status variable of 1:
        try:
            message = int(self.ask('AWGC:RSTate?\n'))
        except:
            # if nothing comes back then the output should be marked as error
            # (return the tuple the interface promises, not a bare int)
            return -1, status_dic

        if message == 2:
            return 1, status_dic
        elif message == 1:
            return 2, status_dic
        else:
            return message, status_dic

    def get_sample_rate(self):
        """ Get the sample rate of the pulse generator hardware

        @return float: The current sample rate of the device (in Hz)

        Do not return a saved sample rate in a class variable, but instead
        retrieve the current sample rate directly from the device.
        """
        self._sample_rate = float(self.ask('SOURCE1:FREQUENCY?'))
        return self._sample_rate

    def set_sample_rate(self, sample_rate):
        """ Set the sample rate of the pulse generator hardware.

        @param float sample_rate: The sampling rate to be set (in Hz)

        @return float: the sample rate returned from the device.

        Note: After setting the sampling rate of the device, retrieve it again
              for obtaining the actual set value and use that information for
              further processing.
        """
        self.tell('SOURCE1:FREQUENCY {0:.4G}MHz\n'.format(sample_rate / 1e6))
        time.sleep(0.2)
        return self.get_sample_rate()

    def get_analog_level(self, amplitude=None, offset=None):
        """ Retrieve the analog amplitude and offset of the provided channels.

        @param list amplitude: optional, if a specific amplitude value (in
                               Volt peak to peak, i.e. the full amplitude) of
                               a channel is desired.
        @param list offset: optional, if a specific high value (in Volt) of a
                            channel is desired.

        @return: (dict, dict): tuple of two dicts, with keys being the channel
                               number and items being the values for those
                               channels. Amplitude is always denoted in
                               Volt-peak-to-peak and Offset in (absolute)
                               Voltage.

        Note: Do not return a saved amplitude and/or offset value but instead
              retrieve the current amplitude and/or offset directly from the
              device.

        If no entries are provided then the levels of all channels are simply
        returned. If no analog channels are provided, return just an empty
        dict.
        Example of a possible input:
            amplitude = [1,4], offset =[1,3]
        to obtain the amplitude of channel 1 and 4 and the offset
            {1: -0.5, 4: 2.0} {}
        since no high request was performed.

        The major difference to digital signals is that analog signals are
        always oscillating or changing signals, otherwise you can use just
        digital output. In contrast to digital output levels, analog output
        levels are defined by an amplitude (here total signal span, denoted in
        Voltage peak to peak) and an offset (a value around which the signal
        oscillates, denoted by an (absolute) voltage).

        In general there is no bijective correspondence between
        (amplitude, offset) and (value high, value low)!
        """
        if amplitude is None:
            amplitude = []
        if offset is None:
            offset = []

        amp = {}
        off = {}

        if (amplitude == []) and (offset == []):
            # since the available channels are not going to change for this
            # device you are asking directly:
            amp['a_ch1'] = float(self.ask('SOURCE1:VOLTAGE:AMPLITUDE?'))
            amp['a_ch2'] = float(self.ask('SOURCE2:VOLTAGE:AMPLITUDE?'))

            off['a_ch1'] = float(self.ask('SOURCE1:VOLTAGE:OFFSET?'))
            off['a_ch2'] = float(self.ask('SOURCE2:VOLTAGE:OFFSET?'))
        else:
            for a_ch in amplitude:
                if 0 <= a_ch <= self._get_num_a_ch():
                    amp[a_ch] = float(self.ask('SOURCE{0}:VOLTAGE:AMPLITUDE?'.format(a_ch)))
                else:
                    self.log.warning('The device does not have that many '
                                     'analog channels! A channel number "{0}" '
                                     'was passed, but only "{1}" channels are '
                                     'available!\nCommand will be '
                                     'ignored.'.format(a_ch, self._get_num_a_ch()))

            for a_ch in offset:
                if 0 <= a_ch <= self._get_num_a_ch():
                    off[a_ch] = float(self.ask('SOURCE{0}:VOLTAGE:OFFSET?'.format(a_ch)))
                else:
                    self.log.warning('The device does not have that many '
                                     'analog channels! A channel number "{0}" '
                                     'was passed, but only "{1}" channels are '
                                     'available!\nCommand will be '
                                     'ignored.'.format(a_ch, self._get_num_a_ch()))

        return amp, off

    def set_analog_level(self, amplitude=None, offset=None):
        """ Set amplitude and/or offset value of the provided analog channel.

        @param dict amplitude: dictionary, with key being the channel and
                               items being the amplitude values (in Volt peak
                               to peak, i.e. the full amplitude) for the
                               desired channel.
        @param dict offset: dictionary, with key being the channel and items
                            being the offset values (in absolute volt) for the
                            desired channel.

        @return (dict, dict): tuple of two dicts with the actual set values
                              for amplitude and offset.

        If nothing is passed then the command will return two empty dicts.

        Note: After setting the analog level and/or offset of the device,
              retrieve them again for obtaining the actual set value(s) and
              use that information for further processing.

        The major difference to digital signals is that analog signals are
        always oscillating or changing signals, otherwise you can use just
        digital output. In contrast to digital output levels, analog output
        levels are defined by an amplitude (here total signal span, denoted in
        Voltage peak to peak) and an offset (a value around which the signal
        oscillates, denoted by an (absolute) voltage).

        In general there is no bijective correspondence between
        (amplitude, offset) and (value high, value low)!
        """
        if amplitude is None:
            amplitude = {}
        if offset is None:
            offset = {}

        constraints = self.get_constraints()

        for a_ch in amplitude:
            if 0 <= a_ch <= self._get_num_a_ch():
                constr = constraints['a_ch_amplitude']
                if not (constr['min'] <= amplitude[a_ch] <= constr['max']):
                    self.log.warning('Not possible to set for analog channel '
                                     '{0} the amplitude value {1}Vpp, since '
                                     'it is not within the interval '
                                     '[{2},{3}]! Command will be '
                                     'ignored.'.format(a_ch, amplitude[a_ch],
                                                       constr['min'],
                                                       constr['max']))
                else:
                    self.tell('SOURCE{0}:VOLTAGE:AMPLITUDE {1}'.format(a_ch, amplitude[a_ch]))
            else:
                self.log.warning('The device does not support that many '
                                 'analog channels! A channel number "{0}" was '
                                 'passed, but only "{1}" channels are '
                                 'available!\nCommand will be '
                                 'ignored.'.format(a_ch, self._get_num_a_ch()))

        for a_ch in offset:
            if 0 <= a_ch <= self._get_num_a_ch():
                constr = constraints['a_ch_offset']
                if not (constr['min'] <= offset[a_ch] <= constr['max']):
                    self.log.warning('Not possible to set for analog channel '
                                     '{0} the offset value {1}V, since it is '
                                     'not within the interval [{2},{3}]! '
                                     'Command will be '
                                     'ignored.'.format(a_ch, offset[a_ch],
                                                       constr['min'],
                                                       constr['max']))
                else:
                    self.tell('SOURCE{0}:VOLTAGE:OFFSET {1}'.format(a_ch, offset[a_ch]))
            else:
                self.log.warning('The device does not support that many '
                                 'analog channels! A channel number "{0}" was '
                                 'passed, but only "{1}" channels are '
                                 'available!\nCommand will be '
                                 'ignored.'.format(a_ch, self._get_num_a_ch()))

        return self.get_analog_level(amplitude=list(amplitude), offset=list(offset))

    def get_digital_level(self, low=None, high=None):
        """ Retrieve the digital low and high level of the provided channels.

        @param list low: optional, if a specific low value (in Volt) of a
                         channel is desired.
        @param list high: optional, if a specific high value (in Volt) of a
                          channel is desired.

        @return: (dict, dict): tuple of two dicts, with keys being the channel
                               number and items being the values for those
                               channels. Both low and high value of a channel
                               are denoted in (absolute) Voltage.

        Note: Do not return a saved low and/or high value but instead retrieve
              the current low and/or high value directly from the device.

        If no entries are provided then the levels of all channels are simply
        returned. If no digital channels are provided, return just an empty
        dict.
        Example of a possible input:
            low = [1,4]
        to obtain the low voltage values of digital channel 1 and 4. A
        possible answer might be
            {1: -0.5, 4: 2.0} {}
        since no high request was performed.

        The major difference to analog signals is that digital signals are
        either ON or OFF, whereas analog channels have a varying amplitude
        range. In contrast to analog output levels, digital output levels are
        defined by a voltage, which corresponds to the ON status and a voltage
        which corresponds to the OFF status (both denoted in (absolute)
        voltage).

        In general there is no bijective correspondence between
        (amplitude, offset) and (value high, value low)!
        """
        if low is None:
            low = []
        if high is None:
            high = []

        low_val = {}
        high_val = {}

        if (low == []) and (high == []):
            low_val[1] = float(self.ask('SOURCE1:MARKER1:VOLTAGE:LOW?'))
            high_val[1] = float(self.ask('SOURCE1:MARKER1:VOLTAGE:HIGH?'))
            low_val[2] = float(self.ask('SOURCE1:MARKER2:VOLTAGE:LOW?'))
            high_val[2] = float(self.ask('SOURCE1:MARKER2:VOLTAGE:HIGH?'))
            low_val[3] = float(self.ask('SOURCE2:MARKER1:VOLTAGE:LOW?'))
            high_val[3] = float(self.ask('SOURCE2:MARKER1:VOLTAGE:HIGH?'))
            low_val[4] = float(self.ask('SOURCE2:MARKER2:VOLTAGE:LOW?'))
            high_val[4] = float(self.ask('SOURCE2:MARKER2:VOLTAGE:HIGH?'))
        else:
            for d_ch in low:
                if 0 <= d_ch <= self._get_num_d_ch():
                    # a fast way to map from a channel list [1, 2, 3, 4] to a
                    # list like [[1,2], [1,2]]:
                    if (d_ch - 2) <= 0:
                        # the conversion to integer is just for safety.
                        low_val[d_ch] = float(self.ask('SOURCE1:MARKER{0}:VOLTAGE:LOW?'.format(int(d_ch))))
                    else:
                        low_val[d_ch] = float(self.ask('SOURCE2:MARKER{0}:VOLTAGE:LOW?'.format(int(d_ch - 2))))
                else:
                    self.log.warning('The device does not have that many '
                                     'digital channels! A channel number '
                                     '"{0}" was passed, but only "{1}" '
                                     'channels are available!\nCommand will '
                                     'be ignored.'.format(d_ch, self._get_num_d_ch()))

            for d_ch in high:
                if 0 <= d_ch <= self._get_num_d_ch():
                    # a fast way to map from a channel list [1, 2, 3, 4] to a
                    # list like [[1,2], [1,2]]:
                    if (d_ch - 2) <= 0:
                        # the conversion to integer is just for safety.
                        high_val[d_ch] = float(self.ask('SOURCE1:MARKER{0}:VOLTAGE:HIGH?'.format(int(d_ch))))
                    else:
                        high_val[d_ch] = float(self.ask('SOURCE2:MARKER{0}:VOLTAGE:HIGH?'.format(int(d_ch - 2))))
                else:
                    self.log.warning('The device does not have that many '
                                     'digital channels! A channel number '
                                     '"{0}" was passed, but only "{1}" '
                                     'channels are available!\nCommand will '
                                     'be ignored.'.format(d_ch, self._get_num_d_ch()))

        return low_val, high_val

    def set_digital_level(self, low=None, high=None):
        """ Set low and/or high value of the provided digital channel.

        @param dict low: dictionary, with key being the channel and items
                         being the low values (in volt) for the desired
                         channel.
        @param dict high: dictionary, with key being the channel and items
                          being the high values (in volt) for the desired
                          channel.

        @return (dict, dict): tuple of two dicts where the first dict denotes
                              the current low value and the second dict the
                              high value.

        If nothing is passed then the command will return two empty dicts.

        Note: After setting the high and/or low values of the device, retrieve
              them again for obtaining the actual set value(s) and use that
              information for further processing.

        The major difference to analog signals is that digital signals are
        either ON or OFF, whereas analog channels have a varying amplitude
        range. In contrast to analog output levels, digital output levels are
        defined by a voltage, which corresponds to the ON status and a voltage
        which corresponds to the OFF status (both denoted in (absolute)
        voltage).

        In general there is no bijective correspondence between
        (amplitude, offset) and (value high, value low)!
        """
        if low is None:
            low = {}
        if high is None:
            high = {}

        constraints = self.get_constraints()

        for d_ch in low:
            if 0 <= d_ch <= self._get_num_d_ch():
                constr = constraints['d_ch_low']
                if not (constr['min'] <= low[d_ch] <= constr['max']):
                    self.log.warning('Not possible to set for digital channel '
                                     '{0} the low value {1}V, since it is not '
                                     'within the interval [{2},{3}]! Command '
                                     'will be ignored.'.format(d_ch, low[d_ch],
                                                               constr['min'],
                                                               constr['max']))
                else:
                    # a fast way to map from a channel list [1, 2, 3, 4] to a
                    # list like [[1,2], [1,2]]:
                    if (d_ch - 2) <= 0:
                        self.tell('SOURCE1:MARKER{0}:VOLTAGE:LOW {1}'.format(d_ch, low[d_ch]))
                    else:
                        self.tell('SOURCE2:MARKER{0}:VOLTAGE:LOW {1}'.format(d_ch - 2, low[d_ch]))
            else:
                self.log.warning('The device does not support that many '
                                 'digital channels! A channel number "{0}" '
                                 'was passed, but only "{1}" channels are '
                                 'available!\nCommand will be '
                                 'ignored.'.format(d_ch, self._get_num_d_ch()))

        for d_ch in high:
            if 0 <= d_ch <= self._get_num_d_ch():
                constr = constraints['d_ch_high']
                if not (constr['min'] <= high[d_ch] <= constr['max']):
                    self.log.warning('Not possible to set for digital channel '
                                     '{0} the high value {1}V, since it is '
                                     'not within the interval [{2},{3}]! '
                                     'Command will be '
                                     'ignored.'.format(d_ch, high[d_ch],
                                                       constr['min'],
                                                       constr['max']))
                else:
                    # a fast way to map from a channel list [1, 2, 3, 4] to a
                    # list like [[1,2], [1,2]]:
                    if (d_ch - 2) <= 0:
                        self.tell('SOURCE1:MARKER{0}:VOLTAGE:HIGH {1}'.format(d_ch, high[d_ch]))
                    else:
                        self.tell('SOURCE2:MARKER{0}:VOLTAGE:HIGH {1}'.format(d_ch - 2, high[d_ch]))
            else:
                self.log.warning('The device does not support that many '
                                 'digital channels! A channel number "{0}" '
                                 'was passed, but only "{1}" channels are '
                                 'available!\nCommand will be '
                                 'ignored.'.format(d_ch, self._get_num_d_ch()))

        return self.get_digital_level(low=list(low), high=list(high))

    def get_active_channels(self, ch=None):
        """ Get the active channels of the pulse generator hardware.

        @param list ch: optional, if specific analog or digital channels are
                        needed to be asked without obtaining all the channels.

        @return dict: where keys denote the channel number and items are
                      boolean expressions of whether the channel is active or
                      not.

        Example for a possible input (order is not important):
            ch = ['a_ch2', 'd_ch2', 'a_ch1', 'd_ch5', 'd_ch1']
        then the output might look like
            {'a_ch2': True, 'd_ch2': False, 'a_ch1': False,
             'd_ch5': True, 'd_ch1': False}

        If no parameters are passed to this method all channels will be asked
        for their setting.
        """
        if ch is None:
            ch = []

        active_ch = {}

        if ch == []:
            # because 0 = False and 1 = True
            active_ch['a_ch1'] = bool(int(self.ask('OUTPUT1:STATE?')))
            active_ch['a_ch2'] = bool(int(self.ask('OUTPUT2:STATE?')))

            # For the AWG5000 series, the resolution of the DAC for the analog
            # channel is fixed to 14bit. Therefore the digital channels are
            # always active and cannot be deactivated. For other AWG devices
            # the command
            #   self.ask('SOURCE1:DAC:RESOLUTION?'))
            # might be useful from which the active digital channels can be
            # obtained.
            active_ch['d_ch1'] = True
            active_ch['d_ch2'] = True
            active_ch['d_ch3'] = True
            active_ch['d_ch4'] = True
        else:
            for channel in ch:
                if 'a_ch' in channel:
                    ana_chan = int(channel[4:])
                    if 0 <= ana_chan <= self._get_num_a_ch():
                        # because 0 = False and 1 = True
                        active_ch[channel] = bool(int(self.ask('OUTPUT{0}:STATE?'.format(ana_chan))))
                    else:
                        self.log.warning('The device does not support that '
                                         'many analog channels! A channel '
                                         'number "{0}" was passed, but only '
                                         '"{1}" channels are available!\n'
                                         'Command will be ignored.'.format(
                                             ana_chan, self._get_num_a_ch()))
                elif 'd_ch' in channel:
                    digi_chan = int(channel[4:])
                    if 0 <= digi_chan <= self._get_num_d_ch():
                        active_ch[channel] = True
                    else:
                        self.log.warning('The device does not support that '
                                         'many digital channels! A channel '
                                         'number "{0}" was passed, but only '
                                         '"{1}" channels are available!\n'
                                         'Command will be ignored.'.format(
                                             digi_chan, self._get_num_d_ch()))
        return active_ch

    def set_active_channels(self, ch=None):
        """ Set the active channels for the pulse generator hardware.

        @param dict ch: dictionary with keys being the analog or digital
                        string generic names for the channels with items being
                        a boolean value.

        @return dict: with the actual set values for active channels for
                      analog and digital values.

        If nothing is passed then the command will return an empty dict.

        Note: After setting the active channels of the device, retrieve them
              again for obtaining the actual set value(s) and use that
              information for further processing.

        Example for possible input:
            ch={'a_ch2': True, 'd_ch1': False, 'd_ch3': True, 'd_ch4': True}
        to activate analog channel 2, digital channels 3 and 4, and to
        deactivate digital channel 1.

        The hardware itself has to handle whether separate channel activation
        is possible. AWG5000 Series instruments support only 14-bit
        resolution. Therefore this command will have no effect on the DAC for
        these instruments. On other devices the deactivation of digital
        channels increases the DAC resolution of the analog channels.
        """
        if ch is None:
            ch = {}

        for channel in ch:
            if 'a_ch' in channel:
                ana_chan = int(channel[4:])
                if 0 <= ana_chan <= self._get_num_a_ch():
                    if ch[channel]:
                        state = 'ON'
                    else:
                        state = 'OFF'
                    self.tell('OUTPUT{0}:STATE {1}'.format(ana_chan, state))
                else:
                    self.log.warning('The device does not support that many '
                                     'analog channels! A channel number "{0}" '
                                     'was passed, but only "{1}" channels are '
                                     'available!\nCommand will be '
                                     'ignored.'.format(ana_chan,
                                                       self._get_num_a_ch()))

        # if d_ch != {}:
        #     self.log.info('Digital Channel of the AWG5000 series will always be '
        #                   'active. This configuration cannot be changed.')

        return self.get_active_channels(ch=list(ch))

    def get_uploaded_asset_names(self):
        """ Retrieve the names of all uploaded assets on the device.

        @return list: List of all uploaded asset name strings in the current
                      device directory.

        Unused for digital pulse generators without sequence storage
        capability (PulseBlaster, FPGA).
        """
        uploaded_files = self._get_filenames_on_device()
        name_list = []
        for filename in uploaded_files:
            if fnmatch(filename, '*_ch?.wfm'):
                asset_name = filename.rsplit('_', 1)[0]
                if asset_name not in name_list:
                    name_list.append(asset_name)
            if fnmatch(filename, '*.seq'):
                name_list.append(filename[:-4])
        return name_list

    def get_saved_asset_names(self):
        """ Retrieve the names of all sampled and saved assets on the host PC.
            This is no list of the file names.

        @return list: List of all saved asset name strings in the current
                      directory of the host PC.
        """
        # list of all files in the waveform directory ending with .wfm
        file_list = self._get_filenames_on_host()
        # exclude the channel specifier for multiple analog channels and
        # create return list
        saved_assets = []
        for filename in file_list:
            if fnmatch(filename, '*_ch?.wfm'):
                asset_name = filename.rsplit('_', 1)[0]
                if asset_name not in saved_assets:
                    saved_assets.append(asset_name)
        return saved_assets

    def delete_asset(self, asset_name):
        """ Delete all files associated with an asset with the passed
            asset_name from the device memory.

        @param str asset_name: The name of the asset to be deleted
                               Optionally a list of asset names can be passed.

        @return list: a list with strings of the files which were deleted.

        Unused for digital pulse generators without sequence storage
        capability (PulseBlaster, FPGA).
        """
        if not isinstance(asset_name, list):
            asset_name = [asset_name]

        # get all uploaded files
        uploaded_files = self._get_filenames_on_device()

        # list of uploaded files to be deleted
        files_to_delete = []
        # determine files to delete
        for name in asset_name:
            for filename in uploaded_files:
                if fnmatch(filename, name + '_ch?.wfm'):
                    files_to_delete.append(filename)
                elif fnmatch(filename, name + '.seq'):
                    files_to_delete.append(filename)

        # delete files
        with FTP(self.ip_address) as ftp:
            ftp.login()  # login as default user anonymous, passwd anonymous@
            ftp.cwd(self.asset_directory)
            for filename in files_to_delete:
                ftp.delete(filename)

        # clear the AWG if the deleted asset is the currently loaded asset
        # if self.current_loaded_asset == asset_name:
        #     self.clear_all()
        return files_to_delete

    def set_asset_dir_on_device(self, dir_path):
        """ Change the directory where the assets are stored on the device.

        @param string dir_path: The target directory

        @return int: error code (0:OK, -1:error)

        Unused for digital pulse generators without changeable file structure
        (PulseBlaster, FPGA).
        """
        # check whether the desired directory exists:
        with FTP(self.ip_address) as ftp:
            ftp.login()  # login as default user anonymous, passwd anonymous@
            try:
                ftp.cwd(dir_path)
            except:
                self.log.info('Desired directory {0} not found on AWG '
                              'device.\nCreate new.'.format(dir_path))
                ftp.mkd(dir_path)

        self.asset_directory = dir_path
        return 0

    def get_asset_dir_on_device(self):
        """ Ask for the directory where the assets are stored on the device.

        @return string: The current sequence directory

        Unused for digital pulse generators without changeable file structure
        (PulseBlaster, FPGA).
        """
        return self.asset_directory

    def has_sequence_mode(self):
        """ Asks the pulse generator whether sequence mode exists.

        @return: bool, True for yes, False for no.
        """
        return self.sequence_mode

    def get_interleave(self):
        """ Check whether Interleave is on in AWG.
            Unused for pulse generator hardware other than an AWG. The AWG
            5000 Series does not have an interleave mode and this method
            exists only for compatibility reasons.

        @return bool: will be always False since no interleave functionality
        """
        return False

    def set_interleave(self, state=False):
        """ Turns the interleave of an AWG on or off.

        @param bool state: The state the interleave should be set to
                           (True: ON, False: OFF)

        @return bool: actual interleave status (True: ON, False: OFF)

        Note: After setting the interleave of the device, retrieve the
              interleave again and use that information for further
              processing.

        Unused for pulse generator hardware other than an AWG. The AWG 5000
        Series does not have an interleave mode and this method exists only
        for compatibility reasons.
        """
        self.log.warning('Interleave mode not available for the AWG 5000 '
                         'Series!\nMethod call will be ignored.')
        return self.get_interleave()

    def tell(self, command):
        """ Send a command string to the AWG.

        @param command: string containing the command

        @return int: error code (0:OK, -1:error)
        """
        # check whether the return character was placed at the end. Otherwise
        # the communication will get stuck:
        if not command.endswith('\n'):
            command += '\n'

        # In Python 3.x the socket send command only accepts byte type arrays
        # and no str
        command = bytes(command, 'UTF-8')
        self.soc.send(command)
        return 0

    def ask(self, question):
        """ Asks the device a 'question' and receive an answer from it.

        @param string question: string containing the command

        @return string: the answer of the device to the 'question'
        """
        if not question.endswith('\n'):
            question += '\n'

        # In Python 3.x the socket send command only accepts byte type arrays
        # and no str.
        question = bytes(question, 'UTF-8')
        self.soc.send(question)
        time.sleep(0.3)  # you need to wait until the AWG generates an answer.
                         # This number was determined experimentally.
        try:
            message = self.soc.recv(self.input_buffer)  # receive an answer
            message = message.decode('UTF-8')  # decode bytes into a python str
        except OSError:
            self.log.error('Most probably the timeout was reached during '
                           'querying the AWG5000 Series device with the '
                           'question:\n{0}\n'
                           'The question text must be wrong.'.format(question))
            message = str(-1)

        message = message.replace('\n', '')  # cut away the characters \r and \n.
        message = message.replace('\r', '')

        return message

    def reset(self):
        """ Reset the device.

        @return int: error code (0:OK, -1:error)
        """
        self.tell('*RST\n')
        return 0

    # =========================================================================
    # Below all the low level routines which are needed for the communication
    # and establishment of a connection.
    # =========================================================================

    def set_lowpass_filter(self, a_ch, cutoff_freq):
        """ Set a lowpass filter to the analog channels of the AWG.

        @param int a_ch: To which channel to apply, either 1 or 2.
        @param cutoff_freq: Cutoff Frequency of the lowpass filter in Hz.
        """
        if a_ch == 1:
            self.tell('OUTPUT1:FILTER:LPASS:FREQUENCY {0:f}MHz\n'.format(cutoff_freq / 1e6))
        elif a_ch == 2:
            self.tell('OUTPUT2:FILTER:LPASS:FREQUENCY {0:f}MHz\n'.format(cutoff_freq / 1e6))

    def set_jump_timing(self, synchronous=False):
        """ Sets control of the jump timing in the AWG.

        @param bool synchronous: if True the jump timing will be set to
                                 synchronous, otherwise the jump timing will
                                 be set to asynchronous.

        If the jump timing is set to asynchronous the jump occurs as quickly
        as possible after an event occurs (e.g. event jump trigger), if set to
        synchronous the jump is made after the current waveform is output. The
        default value is asynchronous.
        """
        if synchronous:
            self.tell('EVEN:JTIM SYNC\n')
        else:
            self.tell('EVEN:JTIM ASYNC\n')

    def set_mode(self, mode):
        """ Change the output mode of the AWG5000 series.

        @param str mode: Options for mode (case-insensitive):
                         continuous - 'C'
                         triggered  - 'T'
                         gated      - 'G'
                         enhanced   - 'E'
                         sequence   - 'S'
        """
        look_up = {'C': 'CONT',
                   'T': 'TRIG',
                   'G': 'GAT',
                   'E': 'ENH',
                   'S': 'SEQ'}
        self.tell('AWGC:RMOD {0!s}\n'.format(look_up[mode.upper()]))

    def get_sequencer_mode(self, output_as_int=False):
        """ Asks the AWG which sequencer mode it is using.

        @param bool output_as_int: optional boolean variable to set the output

        @return: str or int with the following meaning:
                 'HARD' or 0 indicates Hardware Mode
                 'SOFT' or 1 indicates Software Mode
                 'Error' or -1 indicates a failure of request

        It can be either in Hardware Mode or in Software Mode. The optional
        variable output_as_int sets if the returned value should be either an
        integer number or string.
        """
        message = self.ask('AWGControl:SEQuencer:TYPE?\n')
        if output_as_int == True:
            if 'HARD' in message:
                return 0
            elif 'SOFT' in message:
                return 1
            else:
                return -1
        else:
            if 'HARD' in message:
                return 'Hardware-Sequencer'
            elif 'SOFT' in message:
                return 'Software-Sequencer'
            else:
                return 'Request-Error'

    # =========================================================================
    # Below all the higher level routines are situated which use the
    # wrapped routines as a basis to perform the desired task.
    # =========================================================================

    def _get_dir_for_name(self, name):
        """ Get the path to the pulsed sub-directory 'name'.

        @param name: string, name of the folder
        @return: string, absolute path to the directory with folder 'name'.
        """
        path = os.path.join(self.pulsed_file_dir, name)
        if not os.path.exists(path):
            os.makedirs(os.path.abspath(path))

        return os.path.abspath(path)

    def _get_filenames_on_device(self):
        """ Get the full filenames of all assets saved on the device.

        @return: list, The full filenames of all assets saved on the device.
        """
        filename_list = []
        with FTP(self.ip_address) as ftp:
            ftp.login()  # login as default user anonymous, passwd anonymous@
            ftp.cwd(self.asset_directory)
            # get only the files from the dir and skip possible directories
            log = []
            file_list = []
            ftp.retrlines('LIST', callback=log.append)
            for line in log:
                if '<DIR>' not in line:
                    # that is how a potential line is looking like:
                    #   '05-10-16  05:22PM                  292 SSR aom adjusted.seq'
                    # One can see that the first part consists of the date
                    # information. Remove those information and separate then
                    # the first number, which indicates the size of the file,
                    # from the following. That is necessary if the filename has
                    # whitespaces in the name:
                    size_filename = line[18:].lstrip()

                    # split after the first appearing whitespace and take the
                    # rest as filename, remove for safety all trailing
                    # whitespaces:
                    actual_filename = size_filename.split(' ', 1)[1].lstrip()
                    file_list.append(actual_filename)
            for filename in file_list:
                if filename.endswith('.wfm') or filename.endswith('.seq'):
                    if filename not in filename_list:
                        filename_list.append(filename)
        return filename_list

    def _get_filenames_on_host(self):
        """ Get the full filenames of all assets saved on the host PC.

        @return: list, The full filenames of all assets saved on the host PC.
        """
        filename_list = [f for f in os.listdir(self.host_waveform_directory)
                         if f.endswith('.wfm') or f.endswith('.seq')]
        return filename_list

    def _get_num_a_ch(self):
        """ Retrieve the number of available analog channels.

        @return int: number of analog channels.
        """
        config = self.get_constraints()['activation_config']

        all_a_ch = []
        for conf in config:
            # extract all analog channels from the config
            curr_a_ch = [entry for entry in config[conf] if 'a_ch' in entry]

            # append all new analog channels to a temporary array
            for a_ch in curr_a_ch:
                if a_ch not in all_a_ch:
                    all_a_ch.append(a_ch)

        # count the number of entries in that array
        return len(all_a_ch)

    def _get_num_d_ch(self):
        """ Retrieve the number of available digital channels.

        @return int: number of digital channels.
        """
        config = self.get_constraints()['activation_config']

        all_d_ch = []
        for conf in config:
            # extract all digital channels from the config
            curr_d_ch = [entry for entry in config[conf] if 'd_ch' in entry]

            # append all new digital channels to a temporary array
            for d_ch in curr_d_ch:
                if d_ch not in all_d_ch:
                    all_d_ch.append(d_ch)

        # count the number of entries in that array
        return len(all_d_ch)
gpl-3.0
6,518,244,005,501,435,000
41.103906
121
0.545165
false
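_send_file and delete_asset in the driver above talk to the instrument's waveform directory over anonymous FTP. The upload half in isolation, as a hedged sketch (host and path names are illustrative):

import os
from ftplib import FTP

def ftp_upload(host, remote_dir, filepath):
    """Upload one file into remote_dir on an anonymous FTP server."""
    with FTP(host) as ftp:
        ftp.login()                  # anonymous login, as the driver does
        ftp.cwd(remote_dir)
        with open(filepath, 'rb') as fh:
            ftp.storbinary('STOR ' + os.path.basename(filepath), fh)

# e.g. ftp_upload('192.168.1.2', '/waves', '/tmp/rabi_ch1.wfm')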
quantmind/lux
tests/core/test_cache.py
1
3489
from unittest import skipUnless

try:
    from redis import StrictRedis
except ImportError:     # pragma nocover
    StrictRedis = None

from pulsar.api import ImproperlyConfigured
from pulsar.apps.test import check_server
from pulsar.utils.string import random_string
from pulsar.apps.data.redis.client import RedisClient

from lux.utils import test

from tests.config import redis_cache_server

REDIS_OK = check_server('redis')


class LockTests:

    @test.green
    def test_lock(self):
        key1 = random_string()
        key2 = random_string()
        lock = self.cache.lock(key1, blocking=False)
        other_lock = self.cache.lock(key2, blocking=False)
        self.assertTrue(lock.acquire())
        self.assertFalse(lock.acquire())
        self.assertTrue(other_lock.acquire())
        other_lock.release()
        lock.release()

    @test.green
    def test_lock_contextmanager(self):
        lock = self.cache.lock('test3', blocking=0.1)
        with lock:
            with self.assertRaises(TimeoutError):
                with lock:
                    pass
        self.assertTrue(lock.acquire())
        lock.release()

    @test.green
    def test_lock_timeout(self):
        lock = self.cache.lock('test4', timeout=0.1, blocking=0.2)
        self.assertTrue(lock.acquire())
        self.assertTrue(lock.acquire())
        lock.release()


class TestDummyCache(test.TestCase, LockTests):

    def setUp(self):
        self.app = self.application()
        self.cache = self.app.cache_server

    def test_dummy_cache(self):
        from lux.core.cache import Cache
        self.assertIsInstance(self.cache, Cache)
        self.assertEqual(self.cache.ping(), True)
        self.assertEqual(self.cache.hmget('bla'), None)
        self.cache.hmset('foo', {'name': 'pippo'})
        self.assertEqual(self.cache.hmget('foo'), None)
        self.cache.set('h', 56)
        self.assertEqual(self.cache.get('h'), None)

    def test_cache_name(self):
        self.assertEqual(self.cache.name, 'dummy')
        self.assertEqual(str(self.cache), 'dummy://')

    def test_bad_url(self):
        app = self.application(CACHE_SERVER='cbjhb://')
        self.assertRaises(ImproperlyConfigured, lambda: app.cache_server)


@skipUnless(REDIS_OK, 'Requires a running Redis server')
class TestRedisCache(test.AppTestCase, LockTests):
    config_params = {'CACHE_SERVER': redis_cache_server}
    ClientClass = RedisClient

    def setUp(self):
        self.cache = self.app.cache_server

    def test_client(self):
        self.assertTrue(self.cache.client, self.ClientClass)

    @test.green
    def test_redis_cache(self):
        self.assertEqual(self.cache.name, 'redis')
        key = test.randomname()
        self.assertEqual(self.cache.get_json(key), None)
        data = {'name': 'pippo', 'age': 4}
        self.assertEqual(self.cache.set_json(key, data), None)
        self.assertEqual(self.cache.get_json(key), data)

    @test.green
    def test_get_json(self):
        self.assertEqual(self.cache.name, 'redis')
        key = test.randomname()
        self.assertEqual(self.cache.set(key, '{bad-json}'), None)
        self.assertEqual(self.cache.get_json(key), None)
        self.assertEqual(self.cache.get(key), b'{bad-json}')


@skipUnless(REDIS_OK and StrictRedis,
            ('Requires a running Redis server and '
             'redis python client'))
class TestRedisCacheSync(TestRedisCache):
    ClientClass = StrictRedis
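The lock semantics exercised by LockTests above (non-blocking acquire, context-manager use, expiring timeout) mirror what the redis-py client exposes directly. A hedged sketch, assuming a Redis server on localhost (the key name is illustrative):

from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379)

lock = r.lock('resource-key', timeout=0.1, blocking_timeout=0.2)
if lock.acquire():
    try:
        pass   # critical section; the key expires after `timeout` seconds
    finally:
        lock.release()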
bsd-3-clause
-5,759,654,982,690,471,000
30.432432
77
0.639438
false
aperigault/ansible
lib/ansible/modules/source_control/bitbucket/bitbucket_access_key.py
18
8916
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community', } DOCUMENTATION = r''' --- module: bitbucket_access_key short_description: Manages Bitbucket repository access keys description: - Manages Bitbucket repository access keys (also called deploy keys). version_added: "2.8" author: - Evgeniy Krysanov (@catcombo) options: client_id: description: - The OAuth consumer key. - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. type: str client_secret: description: - The OAuth consumer secret. - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. type: str repository: description: - The repository name. type: str required: true username: description: - The repository owner. type: str required: true key: description: - The SSH public key. type: str label: description: - The key label. type: str required: true state: description: - Indicates desired state of the access key. type: str required: true choices: [ absent, present ] notes: - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. - Bitbucket OAuth consumer should have permissions to read and administrate account repositories. - Check mode is supported. ''' EXAMPLES = r''' - name: Create access key bitbucket_access_key: repository: 'bitbucket-repo' username: bitbucket_username key: '{{lookup("file", "bitbucket.pub") }}' label: 'Bitbucket' state: present - name: Delete access key bitbucket_access_key: repository: bitbucket-repo username: bitbucket_username label: Bitbucket state: absent ''' RETURN = r''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.source_control.bitbucket import BitbucketHelper error_messages = { 'required_key': '`key` is required when the `state` is `present`', 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', 'invalid_username_or_repo': 'Invalid `repository` or `username`', 'invalid_key': 'Invalid SSH key or key is already in use', } BITBUCKET_API_ENDPOINTS = { 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, } def get_existing_deploy_key(module, bitbucket): """ Search for an existing deploy key on Bitbucket with the label specified in module param `label` :param module: instance of the :class:`AnsibleModule` :param bitbucket: instance of the :class:`BitbucketHelper` :return: existing deploy key or None if not found :rtype: dict or None Return example:: { "id": 123, "label": "mykey", "created_on": "2019-03-23T10:15:21.517377+00:00", "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", "type": "deploy_key", "comment": "", "last_used": None, "repository": { "links": { "self": { "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" }, "html": { "href": "https://bitbucket.org/mleu/test" }, "avatar": { "href": "..." 
} }, "type": "repository", "name": "test", "full_name": "mleu/test", "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" }, "links": { "self": { "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" } }, } """ content = { 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( username=module.params['username'], repo_slug=module.params['repository'], ) } # Look through the all response pages in search of deploy key we need while 'next' in content: info, content = bitbucket.request( api_url=content['next'], method='GET', ) if info['status'] == 404: module.fail_json(msg=error_messages['invalid_username_or_repo']) if info['status'] == 403: module.fail_json(msg=error_messages['required_permission']) if info['status'] != 200: module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) res = next(filter(lambda v: v['label'] == module.params['label'], content['values']), None) if res is not None: return res return None def create_deploy_key(module, bitbucket): info, content = bitbucket.request( api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( username=module.params['username'], repo_slug=module.params['repository'], ), method='POST', data={ 'key': module.params['key'], 'label': module.params['label'], }, ) if info['status'] == 404: module.fail_json(msg=error_messages['invalid_username_or_repo']) if info['status'] == 403: module.fail_json(msg=error_messages['required_permission']) if info['status'] == 400: module.fail_json(msg=error_messages['invalid_key']) if info['status'] != 200: module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( label=module.params['label'], info=info, )) def delete_deploy_key(module, bitbucket, key_id): info, content = bitbucket.request( api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( username=module.params['username'], repo_slug=module.params['repository'], key_id=key_id, ), method='DELETE', ) if info['status'] == 404: module.fail_json(msg=error_messages['invalid_username_or_repo']) if info['status'] == 403: module.fail_json(msg=error_messages['required_permission']) if info['status'] != 204: module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( label=module.params['label'], info=info, )) def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( repository=dict(type='str', required=True), username=dict(type='str', required=True), key=dict(type='str'), label=dict(type='str', required=True), state=dict(type='str', choices=['present', 'absent'], required=True), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) bitbucket = BitbucketHelper(module) key = module.params['key'] state = module.params['state'] # Check parameters if (key is None) and (state == 'present'): module.fail_json(msg=error_messages['required_key']) # Retrieve access token for authorized API requests bitbucket.fetch_access_token() # Retrieve existing deploy key (if any) existing_deploy_key = get_existing_deploy_key(module, bitbucket) changed = False # Create new deploy key in case it doesn't exists if not existing_deploy_key and (state == 'present'): if not module.check_mode: create_deploy_key(module, bitbucket) changed = True # Update deploy key if the old value does not match the new one elif existing_deploy_key and (state == 'present'): if not key.startswith(existing_deploy_key.get('key')): if not module.check_mode: # Bitbucket doesn't support update key for the same label, # so we need to delete the old one first 
delete_deploy_key(module, bitbucket, existing_deploy_key['id']) create_deploy_key(module, bitbucket) changed = True # Delete deploy key elif existing_deploy_key and (state == 'absent'): if not module.check_mode: delete_deploy_key(module, bitbucket, existing_deploy_key['id']) changed = True module.exit_json(changed=changed) if __name__ == '__main__': main()
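# A minimal sketch (not part of the module) of the idempotency comparison in
# main() above: the submitted key only has to start with the stored value, so
# a trailing key comment on the submitted key does not by itself force a
# delete/create cycle. The helper name below is hypothetical.
def _key_unchanged(submitted_key, stored_key):
    return submitted_key.startswith(stored_key)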
gpl-3.0
-6,451,170,308,698,137,000
30.284211
127
0.599372
false
eckardm/archivematica
src/dashboard/src/components/ingest/views_as.py
1
8870
import ast from functools import wraps import json import sys from django.core.urlresolvers import reverse from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, HttpResponseServerError from components import advanced_search from main import models sys.path.append("/usr/lib/archivematica/archivematicaCommon") from archivesspace.client import ArchivesSpaceClient, AuthenticationError, ConnectionError from components.ingest import pair_matcher def get_as_system_client(): repl_dict = models.MicroServiceChoiceReplacementDic.objects.get(description='ArchivesSpace Config') config = ast.literal_eval(repl_dict.replacementdic) return ArchivesSpaceClient( host=config['%host%'], port=config['%port%'], user=config['%user%'], passwd=config['%passwd%'], repository=config['%repository%'] ) def _get_reset_view(uuid): if models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(dipuuid=uuid).count() > 0: return 'components.ingest.views_as.ingest_upload_as_reset' def _authenticate_to_archivesspace(func): @wraps(func) def wrapper(*args, **kwargs): try: client = get_as_system_client() except AuthenticationError: return HttpResponseServerError("Unable to authenticate to ArchivesSpace server using the default user! Check administrative settings.") except ConnectionError: return HttpResponseServerError("Unable to connect to ArchivesSpace server at the default location! Check administrative settings.") return func(client, *args, **kwargs) return wrapper @_authenticate_to_archivesspace def ingest_upload_as(client, request, uuid): query = request.GET.get('query', '').strip() identifier = request.GET.get('identifier', '').strip() page = request.GET.get('page', 1) sort = request.GET.get('sort') search_params = advanced_search.extract_url_search_params_from_request(request) return pair_matcher.list_records(client, request, query, identifier, page, sort, search_params, 'ingest/as/resource_list.html', _get_reset_view(uuid), uuid) def ingest_upload_as_save(request, uuid): return pair_matcher.pairs_saved_response(ingest_upload_as_save_to_db(request, uuid)) def ingest_upload_as_reset(request, uuid): models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(dipuuid=uuid).delete() return HttpResponseRedirect(reverse("components.ingest.views_as.ingest_upload_as", args=[uuid])) def ingest_upload_as_save_to_db(request, uuid): saved = 0 # delete existing mapping, if any, for this DIP models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(dipuuid=uuid).delete() pairs = pair_matcher.getDictArray(request.POST, 'pairs') keys = pairs.keys() keys.sort() for key in keys: models.ArchivesSpaceDIPObjectResourcePairing.objects.create( dipuuid=pairs[key]['DIPUUID'], fileuuid=pairs[key]['objectUUID'], resourceid=pairs[key]['resourceId'] ) saved = saved + 1 return saved @_authenticate_to_archivesspace def ingest_upload_as_resource(client, request, uuid, resource_id): query = request.GET.get('query', '').strip() page = request.GET.get('page', 1) sort = request.GET.get('sort') search_params = advanced_search.extract_url_search_params_from_request(request) return pair_matcher.render_resource(client, request, resource_id, query, page, sort, search_params, 'components.ingest.views_as.ingest_upload_as_match_dip_objects_to_resource_levels', 'ingest/as/resource_detail.html', _get_reset_view(uuid), uuid) @_authenticate_to_archivesspace def ingest_upload_as_resource_component(client, request, uuid, resource_component_id): query = request.GET.get('query', '').strip() page = request.GET.get('page', 1) sort = 
request.GET.get('sort') search_params = advanced_search.extract_url_search_params_from_request(request) return pair_matcher.render_resource_component(client, request, resource_component_id, query, page, sort, search_params, 'components.ingest.views_as.ingest_upload_as_match_dip_objects_to_resource_component_levels', 'ingest/as/resource_component.html', _get_reset_view(uuid), uuid) def _format_pair(client, resourceid, fileuuid): return { "resource_id": resourceid, "file_uuid": fileuuid, # Returns verbose details about the resource/component, required # in order to populate the pair matching UI. "resource": client.get_resource_component_children(resourceid) } @_authenticate_to_archivesspace def ingest_upload_as_match_dip_objects_to_resource_levels(client, request, uuid, resource_id): # Locate existing matches for display in the "Pairs" panel pairs = models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(dipuuid=uuid) matches = [_format_pair(client, pair.resourceid, pair.fileuuid) for pair in pairs] parent_type, parent_id = client.find_parent_id_for_component(resource_id) if parent_type == type(client).RESOURCE: parent_url = 'components.ingest.views_as.ingest_upload_as_resource' else: parent_url = 'components.ingest.views_as.ingest_upload_as_resource_component' return pair_matcher.match_dip_objects_to_resource_levels(client, request, resource_id, 'ingest/as/match.html', parent_id, parent_url, _get_reset_view(uuid), uuid, matches=matches) @_authenticate_to_archivesspace def ingest_upload_as_match_dip_objects_to_resource_component_levels(client, request, uuid, resource_component_id): # Locate existing matches for display in the "Pairs" panel pairs = models.ArchivesSpaceDIPObjectResourcePairing.objects.filter( dipuuid=uuid) matches = [_format_pair(client, pair.resourceid, pair.fileuuid) for pair in pairs] parent_type, parent_id = client.find_parent_id_for_component(resource_component_id) if parent_type == type(client).RESOURCE: parent_url = 'components.ingest.views_as.ingest_upload_as_resource' else: parent_url = 'components.ingest.views_as.ingest_upload_as_resource_component' return pair_matcher.match_dip_objects_to_resource_component_levels(client, request, resource_component_id, 'ingest/as/match.html', parent_id, parent_url, _get_reset_view(uuid), uuid, matches=matches) @_authenticate_to_archivesspace def ingest_upload_as_review_matches(client, request, uuid): pairs = models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(dipuuid=uuid) matches = [_format_pair(client, pair.resourceid, pair.fileuuid) for pair in pairs] return pair_matcher.review_matches(client, request, 'ingest/as/review_matches.html', uuid, matches=matches) def ingest_upload_as_match(request, uuid): try: payload = json.load(request) except ValueError: payload = {} resource_id = payload.get('resource_id') file_uuid = payload.get('file_uuid') if not resource_id or not file_uuid: return HttpResponseBadRequest("Both a resource_id and file_uuid must be specified.") if request.method == 'POST': criteria = { "dipuuid": uuid, "fileuuid": file_uuid } # Ensure that this file hasn't already been matched before saving to the DB records = models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(**criteria) if records.count() < 1: models.ArchivesSpaceDIPObjectResourcePairing.objects.create( dipuuid=uuid, resourceid=resource_id, fileuuid=file_uuid ) return HttpResponse(status=201) else: return HttpResponse(status=409) elif request.method == 'DELETE': rows = 
models.ArchivesSpaceDIPObjectResourcePairing.objects.filter(dipuuid=uuid, resourceid=resource_id, fileuuid=file_uuid) with open("/tmp/delete.log", "a") as log: print >> log, "Resource", resource_id, "File", file_uuid, "matches", rows.count() rows.delete() return HttpResponse(status=204) else: return HttpResponse(status=405)
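# Illustrative shape of the 'ArchivesSpace Config' replacement dict consumed
# by get_as_system_client() above; every value here is hypothetical:
#
#   "{'%host%': 'aspace.example.org', '%port%': 8089, '%user%': 'admin',
#     '%passwd%': 'secret', '%repository%': 2}"
#
# The string lives in MicroServiceChoiceReplacementDic.replacementdic and is
# parsed with ast.literal_eval() before its keys are mapped to client args.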
agpl-3.0
-3,347,778,996,672,821,000
41.644231
203
0.664713
false
swartn/sam-vs-jet-paper
analysis_plotting/plot_seas_trends_1951_2011.py
1
19262
""" Plots seasonal trends over 1951 to 2011 of SAM and jet properties for various reanalyses and CMIP5. Outputs: -------- This script produces plots: 1. Trends in the above variables over 1951-2011. 2. The relationship between SAM and strength trends. 3. The relationship between SAM and trends in the other variables, as well as trends in SAM and the climatology of the variables. (e.g. SAM trend vs climatological jet position). .. moduleauthor:: Neil Swart <neil.swart@ec.gc.ca> """ import numpy as np import scipy as sp import matplotlib.pyplot as plt import matplotlib as mpl from datetime import datetime from dateutil.relativedelta import relativedelta import pandas as pd import trend_ts # set plotting params. plt.close('all') font = {'size' : 12} plt.rc('font', **font) #============================================# # Define some functions def get_seasons(df): """Extract the 4 seasons and the annual mean from dataframe df, and save them as df.djf, df.mam, df.jja, df.son and df.ann and then return df. Note December is from the previous year. """ df.mam = df[ ( df.index.month >= 3 ) & ( df.index.month <= 5 )] df.jja = df[ ( df.index.month >= 6 ) & ( df.index.month <= 8 )] df.son = df[ ( df.index.month >= 9 ) & ( df.index.month <= 11 )] dfsh = df.shift(12) df.djf = pd.concat( [ dfsh[dfsh.index.month==12 ] ,\ df[ ( df.index.month >= 1 ) & ( df.index.month <= 2 )] ],axis=0) df.djf = df.djf.sort() df.ann = df.resample('A') df.mon = df return df def year_lim(df, ys, ye): """ Limits the dataframe df to between years starting in ys and ending in ye inclusive""" dfo = df[ ( df.index.year >= ys ) & ( df.index.year <= ye ) ] if ( df.index.year.min() > ys ): print 'WARNING: data record begins after requested trend start.' elif ( df.index.year.max() < ye ): print 'WARNING: data record ends before requested trend end.', df.index.year.max() return dfo def calc_trends(dfp, var, ys , ye ): """Calculate linear trend in the dataframe dfp between years (datetime indices) ys and ye inclusive. Saves the trend as dfp.slope and calculates and saves the linear prediction (dfp.yhat) for all input years. """ dfp = year_lim( dfp.resample('A') , ys, ye ) dfp.slope , conf_int , p_value, yhat, intercept =\ trend_ts.trend_ts(dfp.index.year, dfp[var]) dfp['yhat'] = dfp.slope * dfp.index.year + intercept return dfp def rean_proc(dfr, axtrend=None, tys=None, tye=None, mew=2, ms=15, ls='_'): """ Loop over the columns of dfr (corresponding to different reanalyses) and 2. if axtrend is given then compute the linear trend between years tys and tye (inclusive) and plot the trends on axis axtrends. 3. Return the trends. The data from dfr are colored columwise in the plots using colors provided in the global variable rlc, and similarly are labelled using the names listed in the global variable rean. """ seas = ['mam', 'jja', 'son', 'djf', 'ann'] rean_trends = np.zeros((len(dfr.columns), 5)) dfr.seasons = get_seasons( dfr ) ; # Loop over reanalyses and do some basic dataframe checks and adjustments. # We're assuming len(dfr.columns) == len(rean). for (i, cn) in enumerate( dfr.columns ): # check that we are not trying to use data that doesn't exist if ( dfr[cn].dropna().index.year.min() > tys ): print cn, 'WARNING: start >', str(tys) elif ( dfr[cn].dropna().index.year.max() < tye ): print cn, 'WARNING: end <', str(tye) # If axtrend was passed, plot the linear trend between tys and tye for # each season and each reanalysis. Season names are listed in the # global variable seas. 
if (axtrend): for (k, nm) in enumerate(seas): names = 'dfr.seasons.' + nm mt = calc_trends( eval( names ), cn, tys , tye ) rean_trends[i, k] = mt.slope * 10 if (nm == 'ann') & ( not np.isnan(rean_trends[i, k])): axtrend.plot(k, rean_trends[i, k], ls, ms=ms, mew=mew, label=cn) else: axtrend.plot(k, rean_trends[i, k], ls, ms=ms, mew=mew, label='') axtrend.set_xticks(np.arange(5 + 1)) axtrend.plot([-1, 5], [0, 0], 'k--') return rean_trends def mod_proc(df, axtrend, tys, tye, color='r', ls='_', label='CMIP5'): """ Loop over the columns of df calculate trends for each one, plus plot the ensemble trend stats""" seas = ['mam', 'jja', 'son', 'djf', 'ann'] num_models = len( df.columns ) mod_trends = np.empty( ( num_models , 5 ) ) df.seasons = get_seasons(df) for i, cn in enumerate(df.columns): for (k , nm) in enumerate(seas): names = 'df.seasons.' + nm mt = calc_trends( eval(names), cn, tys, tye ) mod_trends[i, k] = mt.slope * 10 if i == ( num_models - 1 ): mod_trend_mean = np.mean( mod_trends[ : , k ] ) mod_trend_std = np.std( mod_trends[ : , k ] ) c = sp.stats.t.isf(0.025, num_models - 1 ) #print c, num_models, np.sqrt(num_models) mod_95_ci = ( c * mod_trend_std ) / np.sqrt( num_models ) mod_5thp = np.percentile( mod_trends[ : , k ] , 2.5 ) mod_95thp = np.percentile( mod_trends[ : , k ] , 97.5 ) axtrend.plot( [ k , k ] , [ mod_5thp , mod_95thp ],color, linewidth=4, alpha=0.25) axtrend.plot([k, k], [mod_trend_mean - mod_95_ci, mod_trend_mean + mod_95_ci ], color, linewidth=4) if nm == 'ann': axtrend.plot(k, np.mean(mod_trends[:, k]), '_' + color, ms=15, mew=2, label=label) else: axtrend.plot(k, np.mean(mod_trends[:, k]), '_' + color, ms=15, mew=2, label='') axtrend.set_xticks(np.arange(5 + 1)) axtrend.plot([-1, 5], [0, 0], 'k--') return mod_trends def relp(xs, ys, corr=False, reg=None, label='CMIP5', color='r', alpha=1, line=True): """ plot a scatter of ys vs xs, and then compute the OLS regression line and plot on yhat. If corr=True then compute the pearson r and p-value and print in near the bottom right corner """ svj_slope , conf_int , p_value, svj_yhat, svj_intercept =\ trend_ts.trend_ts( xs , ys ) xvals = np.arange( np.min( xs ) - abs(np.max( xs )*15) ,\ abs( np.max( xs )*15) ) ypred = xvals * svj_slope + svj_intercept plt.plot( xs , ys , color + 'o', label=label, alpha=alpha) if line==True: plt.plot( xvals , ypred , color + '--' ) if corr==True: r, p = sp.stats.pearsonr(xs, ys) yrange = max(ys) - min(ys) xran = max(xs) - min(xs) plt.text(max(xs) -0.25*xran, min(ys)-0.2*yrange, "$r$: " + str(np.round(r,2)), color=color) plt.text(max(xs) -0.25*xran, min(ys) - 0.4*yrange, "$p$: " + str(np.round(p,2)), color=color ) #plt.xlim( [ min(xs) - 0.15*xran, max(xs) + 0.15*xran] ) #plt.ylim( [ min(ys) - 0.15*yrange, max(ys) + 0.15*yrange] ) #plt.autoscale(enable=True, axis='both', tight=True) if reg: plt.text(0.15, -0.4+ reg, "$b$: " + str(np.round(svj_slope,1)), color=color) def reanp( xs , ys, trend=True): """ Plot a scatter (using x's) of ys vs xs using colors in the global variable rlc. We're assuming len(xs) == len(ys) == len(rean) == len(rlc), where reana and rlc are global variables. If corr=True compute the OLS regression line and plot on yhat in black. 
""" for i in range(xs.shape[0]): if( not np.isnan(ys[i]) ): plt.plot(xs[i], ys[i], 'x', color=rlc[int(i)], markersize=10,markeredgewidth=3, label=rean[i]) if trend == True: svj_slope , conf_int , p_value, svj_yhat, svj_intercept =\ trend_ts.trend_ts( xs , ys ) xvals = np.arange( np.min( xs ) - np.max( xs )*15 ,\ np.max( xs )*15 ) ypred = xvals * svj_slope + svj_intercept plt.plot( xvals , ypred , 'k--' ) #============================================# def plot_seas_trends_1951_2011(datapath): """ Plots seasonal trends over 1951 to 2011 of SAM and jet properties """ #============================================# # Define the years for the trend analysis tys = 1951 # start (inclusive) tye = 2011 # stop (inclusive) #============================================# # # Define some global variables that we use repeatedly # # the names of the reanalyses we are using (in column-order of the dataframes) rean = ['R1', 'R2', '20CR', 'ERA', 'CFSR', 'MERRA'] # corresponding colors to use for plotting each reanalysis rlc = ['k', 'y','g', 'b', 'c', 'm'] rean = ['20CR'] rlc = ['g'] ls = ['_g'] # names of the seasons seas = ['mam', 'jja', 'son', 'djf', 'ann'] xtics = [datetime(1870,1,1) + relativedelta(years=20*jj) for jj in range(8) ] # Load the data from `datapath` which is (mostly) saved as pandas dataframes #in HDF5, and plot annual mean timeseries. # load in the reanlaysis data h5f = pd.HDFStore(datapath + 'zonmean_sam-jet_analysis_reanalysis.h5', 'r') dfr = h5f['zonmean_sam'] h5f.close() dfhadslp = dfr['HadSLP2r']/100. dfhadslp = pd.DataFrame(dfhadslp.dropna(), columns=['HadSLP2r']) # load in the 20CR ensemble data h5f_20CR = pd.HDFStore(datapath + 'zonmean_sam-jet_analysis_20CR_ensemble.h5', 'r') df_20cr_ens_sam = h5f_20CR['zonmean_sam']/100. df_20cr_ens_locmax = h5f_20CR['locmax'] df_20cr_ens_maxspd = h5f_20CR['maxspd'] df_20cr_ens_width = h5f_20CR['width'] h5f_20CR.close() # load in the CMIP5 model data h5f_c5 = pd.HDFStore(datapath + 'zonmean_sam-jet_analysis_cmip5.h5', 'r') df_c5_ens_sam = h5f_c5['zonmean_sam']/100. 
df_c5_ens_locmax = h5f_c5['locmax'] df_c5_ens_maxspd = h5f_c5['maxspd'] df_c5_ens_width = h5f_c5['width'] h5f_c5.close() #========= SAM - press ===============# f3 = plt.figure(3) plt.figure(3).set_size_inches((8,8), forward=True ) f3a = plt.subplot(421) hadslp_sam_trends = rean_proc(dfhadslp, axtrend=f3a, tys=tys, tye=tye, mew=2, ms=10, ls='.k') mod_sam_trends = mod_proc(df_c5_ens_sam, f3a, tys=tys, tye=tye, color='r') tcr_sam_trends = mod_proc(df_20cr_ens_sam, f3a, tys=tys, tye=tye, color='g', ls='_g', label='20CR') #========= Jet max speed - uspd ===============# f3b = plt.subplot(423) mod_uspd_trends = mod_proc(df_c5_ens_maxspd, f3b, tys=tys, tye=tye, color='r') tcr_uspd_trends = mod_proc(df_20cr_ens_maxspd, f3b, tys=tys, tye=tye, color='g', label='20CR') #========= Location - locmax ===============# f3c = plt.subplot(425) mod_pos_trends = mod_proc(df_c5_ens_locmax, f3c, tys=tys, tye=tye, color='r') tcr_pos_trends = mod_proc(df_20cr_ens_locmax, f3c, tys=tys, tye=tye, color='g', label='20CR') #========= Width ===============# f3d = plt.subplot(427) mod_width_trends = mod_proc(df_c5_ens_width, f3d, tys=tys, tye=tye, color='r') tcr_width_trends = mod_proc(df_20cr_ens_width, f3d, tys=tys, tye=tye, color='g', label='20CR') # ========= Do some figure beautifying and labelled etc ========= # panlab = ['a)', 'b)', 'c)', 'd)', 'e)', 'f)' ,'g)', 'h)'] f3ax = [ f3a, f3b, f3c, f3d] yaxlab = ['SAM trend \n(hPa dec$^{-1}$)', 'Strength trend \n(ms$^{-1}$ dec$^{-1}$)', 'Position trend \n($^{\circ}$ lat. dec$^{-1}$)', 'Width trend \n($^{\circ}$ lat. dec$^{-1}$)' ] yaxlab1 = ['SAM Index (hPa)' , 'Strength (m/s)', 'Position ($^{\circ}$S)', 'Width ($^{\circ}$ lat.)' ] for i, ax in enumerate(f3ax): ylim = ax.get_ylim() yrange = max(ylim) - min(ylim) ax.text( -0.35, max( ylim ) -0.15*yrange, panlab[i]) ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(5, prune='upper')) ax.set_xlim([-0.5, 5 -0.5]) if (ax != f3d): # only keep xlabels for the bottom panels ax.set_xticklabels([]) ax.set_xlabel('') ax.set_ylabel( yaxlab[i] ) f3d.set_xticklabels( [ s.upper() for s in seas] ) #f3d.yaxis.set_major_locator(mpl.ticker.MaxNLocator(5, prune='upper')) plt.figure(3).subplots_adjust(hspace=0.06, wspace=0.05, right=0.8, left=0.2) f3a.legend(ncol=3, prop={'size':12},numpoints=1, bbox_to_anchor=(1.075, 1.265), handlelength=0.01, handletextpad=0.8, frameon=False ) f3d.yaxis.set_ticks([-0.4, -0.2, 0, 0.2]) # save some pdfs plt.figure(3).savefig('../plots/seas_trends_1951-2011.pdf',format='pdf' , dpi=300, bbox_inches='tight') #for i in range(5): #print seas[i] #print sp.stats.ttest_ind(mod_sam_trends[:,i], #tcr_sam_trends[:,i], equal_var=False) # ------------------------------------------------------------------------------ # Do SAM vs SPEED trend plots # ------------------------------------------------------------------------------ # Setup up some someplots f, gs2 = plt.subplots(3,2, sharex=True, sharey=True) f.delaxes(gs2[0,1]) maxis = [-0.1, 0.35, -0.8, 1.6] plt.axis(maxis) rat = ( maxis[1] - maxis[0] ) / ( maxis[3] - maxis[2] ) plt.setp(gs2.flat, aspect = rat, adjustable='box-forced') f.subplots_adjust( hspace=0.15, wspace = -0.65) gs2[2,1].xaxis.set_major_locator( mpl.ticker.MaxNLocator(4, prune='both')) gs2[2,1].yaxis.set_major_locator( mpl.ticker.MaxNLocator(6,prune='upper')) order = [4, 0, 1, 2, 3] # order of the seasons in seas we want to use. axorder = [0, 2, 3, 4, 5] # order of the axis positions we want to use. # list over all seasons in seas and plot a scatter of uspd vs sam trends for #models and reanalysis. 
for i,ord in enumerate(order): plt.sca( gs2.flatten()[ axorder[i] ] ) relp( mod_uspd_trends[ : , ord ],mod_sam_trends[ : , ord ], reg=0.01, label='CMIP5' ) relp( tcr_uspd_trends[ : , ord ],tcr_sam_trends[ : , ord ], reg=-0.225, color='g', label='20CR') seas_label = [ ['a) ANN', '' ] , ['b) MAM','c) JJA'] , ['d) SON', 'e) DJF'] ] plt.text(maxis[0] -maxis[1],maxis[2]*1.7 , 'Strength trend (ms$^{-1}$dec$^{-1}$)') gs2[1,0].set_ylabel('SAM trend (hPa dec$^{-1}$)') [gs2[m,n].text(-0.075, 1.25, seas_label[m][n] ) for m in range(3) for n in range(2)] gs2[0,0].legend(ncol=1, prop={'size':12},numpoints=1, bbox_to_anchor=(1.8, 1.05),handlelength=0.01, handletextpad=1, borderpad=1, frameon=False ) plt.savefig('../plots/sam_v_uspd_seas_1951_2011.pdf',format='pdf',dpi=300, bbox_inches='tight' ) #============================================================================= # Look at some more relationships between SAM trends and (left) trends in uspd, #pos and width # and (right) between SAM trends and climatological uspd, pos and width. # set up subplots. f, gs2 = plt.subplots(3,2, sharey=True) f.set_size_inches((8,8), forward=True ) gs2[0,0].set_ylim([-0.5,2]) # compute climatological uspd, pos and width in the CMIP5 models. modsam = year_lim( df_c5_ens_sam , tys , tys ).mean() modpos = year_lim( df_c5_ens_locmax , tys , tys ).mean() moduspd = year_lim( df_c5_ens_maxspd , tys , tys ).mean() modwidth = year_lim( df_c5_ens_width , tys , tys ).mean() # compute climatological uspd, pos and width in the 20CR ensemble tcrsam = year_lim( df_20cr_ens_sam , tys , tys ).mean() tcrpos = year_lim( df_20cr_ens_locmax , tys , tys ).mean() tcruspd = year_lim( df_20cr_ens_maxspd , tys , tys ).mean() tcrwidth = year_lim( df_20cr_ens_width , tys , tys ).mean() lvars =['uspd','pos', 'width' ] s=3 # choose a season to look at. 3 = djf. 
lab1 = ['a)', 'c)', 'e)'] lab2 = ['b)', 'd)', 'f)'] # loop over lvars and plot the scatter of var trend vs SAM trend (left) and # var_climatology vs SAM trend (right) for i,var in enumerate(lvars): plt.sca( gs2[i,0] ) relp(eval('mod_' + var + '_trends' + '[:,s]' ) , mod_sam_trends[:,s] , corr=True) relp(eval('tcr_' + var + '_trends' + '[:,s]' ) , tcr_sam_trends[:,s] , corr=False, color='g', label='20CR', alpha=1, line=False) gs2[i,0].set_xlabel(yaxlab[i+1].replace('\n','')) gs2[i,0].xaxis.set_major_locator( mpl.ticker.MaxNLocator(4, prune='both')) plt.sca( gs2[i,1] ) relp( eval('mod' + var ) , mod_sam_trends[:,s], corr=True ) relp( eval('tcr' + var ) , tcr_sam_trends[:,s], corr=False, color='g',label='20CR', alpha=1, line=False ) gs2[i,1].set_xlabel(yaxlab1[i+1].replace('\n','')) gs2[i,0].xaxis.set_major_locator( mpl.ticker.MaxNLocator(4, prune='both')) gs2[i,1].xaxis.set_major_locator( mpl.ticker.MaxNLocator(6)) gs2[1,1].xaxis.set_major_locator( mpl.ticker.MaxNLocator(5)) plt.subplots_adjust(hspace=0.5, right=0.7, wspace=0.1) gs2[0,0].set_ylabel('SAM trend (hPa dec$^{-1}$)') gs2[1,0].set_ylabel('SAM trend (hPa dec$^{-1}$)') gs2[2,0].set_ylabel('SAM trend (hPa dec$^{-1}$)') gs2[0,0].set_title('Trends') gs2[0,1].set_title('Climatology') gs2[0,1].legend(ncol=1, prop={'size':12},numpoints=1, bbox_to_anchor=(1.475, 1.05),handlelength=0.01, handletextpad=1, borderpad=1, frameon=False ) xl = [(-0.025, 0.2), (5,9.5), (-0.6, 0.05), (-54, -41.5), (-0.2, 0.375), (29, 38)] for i, ax in enumerate(gs2.flatten()): ax.set_ylim([-0.5, 2]) ax.set_xlim(xl[i]) for i,var in enumerate(lvars): xr = abs(gs2[i,0].axis()[1] - gs2[i,0].axis()[0])*0.05 + gs2[i,0].axis()[0] gs2[i,0].text(xr, 1.75, lab1[i]) xr = abs(gs2[i,1].axis()[1] - gs2[i,1].axis()[0])*0.05 + gs2[i,1].axis()[0] gs2[i,1].text(xr, 1.75, lab2[i]) plt.savefig('../plots/sam_vs_jet_scatter.pdf',format='pdf',dpi=300, bbox_inches='tight') if __name__ == '__main__': plt.ion() plot_seas_trends_1951_2011(datapath='../data_retrieval/data/')
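# A short note on the DJF convention implemented in get_seasons() above: the
# frame is shift(12)-ed so that each DJF mean pairs December of the previous
# year with January/February of the labelled year, e.g. (dates illustrative):
#
#   DJF 1952 = mean(Dec 1951, Jan 1952, Feb 1952)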
gpl-2.0
8,451,271,451,699,709,000
42.190583
88
0.533589
false
marbu/pylatest
tests/xsphinx/conftest.py
1
8986
# -*- coding: utf-8 -*- """ Sphinx conftest.py module for pytest, copied without modifications (with exception of this docstring and copying notice) from ``tests/conftest.py`` file of Sphinx project, commit 139e09d12023a255b206c1158487d215217be920. Done as a workaround for: https://github.com/sphinx-doc/sphinx/issues/3458 """ # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # This file incorporates work covered by the following copyright and # permission notice: # # Copyright (c) 2007-2017 by the Sphinx team (see AUTHORS file). # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import print_function import sys import subprocess from collections import namedtuple import pytest from _pytest.mark import MarkDecorator, MarkInfo from six import StringIO, string_types import util @pytest.fixture def app_params(request, test_params, shared_result): """ parameters that is specified by 'pytest.mark.sphinx' for sphinx.application.Sphinx initialization """ # ##### process pytest.mark.sphinx markers = request.node.get_marker("sphinx") pargs = {} kwargs = {} # HACK: this makes parametrized markers work if isinstance(markers, MarkDecorator): markers = [markers] if markers is not None: # to avoid stacking positional args for info in reversed(list(markers)): for i, a in enumerate(info.args): pargs[i] = a kwargs.update(info.kwargs) args = [pargs[i] for i in sorted(pargs.keys())] # ##### process pytest.mark.test_params if test_params['shared_result']: if 'srcdir' in kwargs: raise pytest.Exception('You can not spcify shared_result and ' 'srcdir in same time.') kwargs['srcdir'] = test_params['shared_result'] restore = shared_result.restore(test_params['shared_result']) kwargs.update(restore) # ##### prepare Application params if 'srcdir' in kwargs: srcdir = util.tempdir / kwargs['srcdir'] else: srcdir = util.tempdir / kwargs.get('testroot', 'root') kwargs['srcdir'] = srcdir if kwargs.get('testroot') is None: testroot_path = util.rootdir / 'root' else: testroot_path = util.rootdir / 'roots' / ('test-' + kwargs['testroot']) if not srcdir.exists(): testroot_path.copytree(srcdir) return namedtuple('app_params', 'args,kwargs')(args, kwargs) @pytest.fixture def test_params(request): """ test parameters that is specified by 'pytest.mark.test_params' :param Union[str] shared_result: If the value is provided, app._status and app._warning objects will be shared in the parametrized test functions and/or test functions that have same 'shared_result' value. **NOTE**: You can not specify shared_result and srcdir in same time. """ env = request.node.get_marker('test_params') kwargs = env.kwargs if env else {} result = { 'shared_result': None, } result.update(kwargs) if (result['shared_result'] and not isinstance(result['shared_result'], string_types)): raise pytest.Exception('You can only provide a string type of value ' 'for "shared_result" ') return result class SphinxTestAppWrapperForSkipBuilding(object): """ This class is a wrapper for SphinxTestApp to speed up the test by skipping `app.build` process if it is already built and there is even one output file. """ def __init__(self, app_): self.app = app_ def __getattr__(self, name): return getattr(self.app, name) def build(self, *args, **kw): if not self.app.outdir.listdir(): # if listdir is empty, do build. 
self.app.build(*args, **kw) # otherwise, we can use built cache @pytest.fixture(scope='function') def app(test_params, app_params, make_app, shared_result): """ provides sphinx.application.Sphinx object """ args, kwargs = app_params app_ = make_app(*args, **kwargs) yield app_ print('# testroot:', kwargs.get('testroot', 'root')) print('# builder:', app_.builder.name) print('# srcdir:', app_.srcdir) print('# outdir:', app_.outdir) print('# status:', '\n' + app_._status.getvalue()) print('# warning:', '\n' + app_._warning.getvalue()) if test_params['shared_result']: shared_result.store(test_params['shared_result'], app_) @pytest.fixture(scope='function') def status(app): """ compat for testing with previous @with_app decorator """ return app._status @pytest.fixture(scope='function') def warning(app): """ compat for testing with previous @with_app decorator """ return app._warning @pytest.fixture() def make_app(test_params): """ provides make_app function to initialize SphinxTestApp instance. if you want to initialize 'app' in your test function. please use this instead of using SphinxTestApp class directory. """ apps = [] syspath = sys.path[:] def make(*args, **kwargs): status, warning = StringIO(), StringIO() kwargs.setdefault('status', status) kwargs.setdefault('warning', warning) app_ = util.SphinxTestApp(*args, **kwargs) apps.append(app_) if test_params['shared_result']: app_ = SphinxTestAppWrapperForSkipBuilding(app_) return app_ yield make sys.path[:] = syspath for app_ in apps: app_.cleanup() class SharedResult(object): cache = {} def store(self, key, app_): if key in self.cache: return data = { 'status': app_._status.getvalue(), 'warning': app_._warning.getvalue(), } self.cache[key] = data def restore(self, key): if key not in self.cache: return {} data = self.cache[key] return { 'status': StringIO(data['status']), 'warning': StringIO(data['warning']), } @pytest.fixture def shared_result(): return SharedResult() @pytest.fixture(scope='module', autouse=True) def _shared_result_cache(): SharedResult.cache.clear() @pytest.fixture def if_graphviz_found(app): """ The test will be skipped when using 'if_graphviz_found' fixture and graphviz dot command is not found. """ graphviz_dot = getattr(app.config, 'graphviz_dot', '') try: if graphviz_dot: dot = subprocess.Popen([graphviz_dot, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # show version dot.communicate() return except OSError: # No such file or directory pass pytest.skip('graphviz "dot" is not available') @pytest.fixture def tempdir(tmpdir): """ temporary directory that wrapped with `path` class. this fixture is for compat with old test implementation. """ return util.path(tmpdir)
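# Hypothetical example of a test consuming the fixtures defined above; the
# builder name and testroot are assumptions, not taken from this repository:
#
#   @pytest.mark.sphinx('html', testroot='minimal')
#   def test_build_html(app, status, warning):
#       app.build()
#       assert 'build succeeded' in status.getvalue()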
gpl-3.0
4,330,400,934,318,240,300
30.41958
80
0.651013
false
wwchun123/sumatrapdf
scripts/update_auto_update_ver.py
19
4749
#!/usr/bin/env python import SquareTree import sys import urllib2 from util import load_config import s3 def getch_unix(): import sys, tty, termios fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch def getch_win(): import msvcrt return msvcrt.getch() def discover_getch(): try: import msvcrt return getch_win except ImportError: return getch_unix getch = discover_getch() def report_invalid_ver(ver): print("'%s' is not a valid program version" % ver) sys.exit(1) def is_num(s): try: return str(int(s)) == s except: return False def validate_ver(ver): parts = ver.split(".") for p in parts: if not is_num(p): report_invalid_ver(ver) def get_update_versions(url): try: data = urllib2.urlopen(url).read() root = SquareTree.Parse(data) node = root.GetChild("SumatraPDF") return (node.GetValue("Stable"), node.GetValue("Latest")) except: return (None, None) def get_latest_version(url): try: s = urllib2.urlopen(url).read() return s.strip() except: return None def v2fhelper(v, suff, version, weight): parts = v.split(suff) if 2 != len(parts): return v version[4] = weight version[5] = parts[1] return parts[0] # Convert a Mozilla-style version string into a floating-point number # 1.2.3.4, 1.2a5, 2.3.4b1pre, 3.0rc2, etc def version2float(v): version = [ 0, 0, 0, 0, # 4-part numerical revision 4, # Alpha, beta, RC or (default) final 0, # Alpha, beta, or RC version revision 1 # Pre or (default) final ] parts = v.split("pre") if 2 == len(parts): version[6] = 0 v = parts[0] v = v2fhelper(v, "a", version, 1) v = v2fhelper(v, "b", version, 2) v = v2fhelper(v, "rc", version, 3) parts = v.split(".")[:4] for (p, i) in zip(parts, range(len(parts))): version[i] = p ver = float(version[0]) ver += float(version[1]) / 100. ver += float(version[2]) / 10000. ver += float(version[3]) / 1000000. ver += float(version[4]) / 100000000. ver += float(version[5]) / 10000000000. ver += float(version[6]) / 1000000000000. return ver # Return True if ver1 > ver2 using semantics of comparing version # numbers def ProgramVersionGreater(ver1, ver2): v1f = version2float(ver1) v2f = version2float(ver2) return v1f > v2f def verify_version_not_lower(myver, curr1, curr2): if curr1 != None and ProgramVersionGreater(curr1, myver): print("version you gave is less than sumpdf-latest.txt (%s < %s)" % (myver, curr1)) sys.exit(1) if curr2 != None and ProgramVersionGreater(curr2, myver): print("version you gave is less than sumpdf-latest.txt (%s < %s)" % (myver, curr2)) sys.exit(1) def main(): url_update = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-update.txt" url_latest = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-latest.txt" conf = load_config() assert conf.aws_access != "" and conf.aws_secret != "" s3.set_secrets(conf.aws_access, conf.aws_secret) s3.set_bucket("kjkpub") v1 = get_latest_version(url_latest) (v2, ver) = get_update_versions(url_update) validate_ver(ver) assert not v2 or v1 == v2, "sumpdf-update.txt and sumpdf-latest.txt don't agree on Stable version, run build.py -release first" verify_version_not_lower(ver, v1, v2) sys.stdout.write("Going to update auto-update version to %s. Are you sure? 
[y/N] " % ver) sys.stdout.flush() ch = getch() print() if ch not in ['y', 'Y']: print("Didn't update because you didn't press 'y'") sys.exit(1) # remove the Stable version from sumpdf-update.txt s = "[SumatraPDF]\nLatest %s\n" % ver s3.upload_data_public(s, "sumatrapdf/sumpdf-update.txt") # keep updating the legacy file for now s = "%s\n" % ver s3.upload_data_public(s, "sumatrapdf/sumpdf-latest.txt") v1 = get_latest_version(url_latest) (v2, v3) = get_update_versions(url_update) if v1 != ver or v2 != None or v3 != ver: print("Upload failed because v1 or v3 != ver ('%s' or '%s' != '%s'" % (v1, v3, ver)) sys.exit(1) print("Successfully update auto-update version to '%s'" % ver) if __name__ == "__main__": main()
gpl-3.0
-931,593,761,548,807,300
26.437126
131
0.58686
false
vorburger/mcedit2
src/mcedit2/rendering/scenegraph/depth_test.py
1
1496
""" depth_test """ from __future__ import absolute_import, division, print_function, unicode_literals import logging from OpenGL import GL from mcedit2.rendering.scenegraph.rendernode import RenderstateRenderNode from mcedit2.rendering.scenegraph.scenenode import Node log = logging.getLogger(__name__) class DepthMaskRenderNode(RenderstateRenderNode): def enter(self): GL.glPushAttrib(GL.GL_DEPTH_BUFFER_BIT) GL.glDepthMask(self.sceneNode.mask) def exit(self): GL.glPopAttrib() class DepthMaskNode(Node): RenderNodeClass = DepthMaskRenderNode mask = False class DepthFuncRenderNode(RenderstateRenderNode): def enter(self): GL.glPushAttrib(GL.GL_DEPTH_BUFFER_BIT) GL.glDepthFunc(self.sceneNode.func) def exit(self): GL.glPopAttrib() class DepthFuncNode(Node): RenderNodeClass = DepthFuncRenderNode def __init__(self, func=GL.GL_LESS): super(DepthFuncNode, self).__init__() self.func = func class DepthOffsetRenderNode(RenderstateRenderNode): def enter(self): GL.glPushAttrib(GL.GL_POLYGON_BIT) GL.glPolygonOffset(self.sceneNode.depthOffset, self.sceneNode.depthOffset) GL.glEnable(GL.GL_POLYGON_OFFSET_FILL) def exit(self): GL.glPopAttrib() class DepthOffsetNode(Node): RenderNodeClass = DepthOffsetRenderNode def __init__(self, depthOffset): super(DepthOffsetNode, self).__init__() self.depthOffset = depthOffset
bsd-3-clause
-6,395,307,032,541,451,000
24.355932
82
0.705882
false
lakshayg/tensorflow
tensorflow/contrib/distributions/python/ops/inverse_gamma.py
14
10451
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The InverseGamma distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import distribution from tensorflow.python.ops.distributions import util as distribution_util __all__ = [ "InverseGamma", "InverseGammaWithSoftplusConcentrationRate", ] class InverseGamma(distribution.Distribution): """InverseGamma distribution. The `InverseGamma` distribution is defined over positive real numbers using parameters `concentration` (aka "alpha") and `rate` (aka "beta"). #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z Z = Gamma(alpha) beta**-alpha ``` where: * `concentration = alpha`, * `rate = beta`, * `Z` is the normalizing constant, and, * `Gamma` is the [gamma function]( https://en.wikipedia.org/wiki/Gamma_function). The cumulative distribution function (cdf) is, ```none cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha) ``` where `GammaInc` is the [upper incomplete Gamma function]( https://en.wikipedia.org/wiki/Incomplete_gamma_function). The parameters can be intuited via their relationship to mean and stddev (exact for `concentration > 2`), ```none concentration = alpha = (mean / stddev)**2 + 2 rate = beta = mean * ((mean / stddev)**2 + 1) ``` Distribution parameters are automatically broadcast in all functions; see examples for details. WARNING: This distribution may draw 0-valued samples for small concentration values. See note in `tf.random_gamma` docstring. #### Examples ```python tfd = tf.contrib.distributions dist = tfd.InverseGamma(concentration=3.0, rate=2.0) dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0]) ``` """ def __init__(self, concentration, rate, validate_args=False, allow_nan_stats=True, name="InverseGamma"): """Construct InverseGamma with `concentration` and `rate` parameters. The parameters `concentration` and `rate` must be shaped in a way that supports broadcasting (e.g. `concentration + rate` is a valid operation). Args: concentration: Floating point tensor, the concentration params of the distribution(s). Must contain only positive values. rate: Floating point tensor, the inverse scale params of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`.
When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `concentration` and `rate` are different dtypes. """ parameters = locals() with ops.name_scope(name, values=[concentration, rate]): with ops.control_dependencies([ check_ops.assert_positive(concentration), check_ops.assert_positive(rate), ] if validate_args else []): self._concentration = array_ops.identity( concentration, name="concentration") self._rate = array_ops.identity(rate, name="rate") check_ops.assert_same_float_dtype( [self._concentration, self._rate]) super(InverseGamma, self).__init__( dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.NOT_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration, self._rate], name=name) @staticmethod def _param_shapes(sample_shape): return dict( zip(("concentration", "rate"), ([ops.convert_to_tensor( sample_shape, dtype=dtypes.int32)] * 2))) @property def concentration(self): """Concentration parameter.""" return self._concentration @property def rate(self): """Rate parameter.""" return self._rate def _batch_shape_tensor(self): return array_ops.broadcast_dynamic_shape( array_ops.shape(self.concentration), array_ops.shape(self.rate)) def _batch_shape(self): return array_ops.broadcast_static_shape( self.concentration.get_shape(), self.rate.get_shape()) def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() @distribution_util.AppendDocstring( """Note: See `tf.random_gamma` docstring for sampling details and caveats.""") def _sample_n(self, n, seed=None): return 1. / random_ops.random_gamma( shape=[n], alpha=self.concentration, beta=self.rate, dtype=self.dtype, seed=seed) def _log_prob(self, x): return self._log_unnormalized_prob(x) - self._log_normalization() def _prob(self, x): return math_ops.exp(self._log_prob(x)) def _log_cdf(self, x): return math_ops.log(self._cdf(x)) def _cdf(self, x): x = self._maybe_assert_valid_sample(x) # Note that igammac returns the upper regularized incomplete gamma # function Q(a, x), which is what we want for the CDF. return math_ops.igammac(self.concentration, self.rate / x) def _log_unnormalized_prob(self, x): x = self._maybe_assert_valid_sample(x) return -(1. + self.concentration) * math_ops.log(x) - self.rate / x def _log_normalization(self): return (math_ops.lgamma(self.concentration) - self.concentration * math_ops.log(self.rate)) def _entropy(self): return (self.concentration + math_ops.log(self.rate) + math_ops.lgamma(self.concentration) - ((1. + self.concentration) * math_ops.digamma(self.concentration))) @distribution_util.AppendDocstring( """The mean of an inverse gamma distribution is `rate / (concentration - 1)`, when `concentration > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`""") def _mean(self): mean = self.rate / (self.concentration - 1.) 
if self.allow_nan_stats: nan = array_ops.fill( self.batch_shape_tensor(), np.array(np.nan, dtype=self.dtype.as_numpy_dtype()), name="nan") return array_ops.where(self.concentration > 1., mean, nan) else: return control_flow_ops.with_dependencies([ check_ops.assert_less( array_ops.ones([], self.dtype), self.concentration, message="mean undefined when any concentration <= 1"), ], mean) @distribution_util.AppendDocstring( """Variance for inverse gamma is defined only for `concentration > 2`. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`.""") def _variance(self): var = (math_ops.square(self.rate) / math_ops.square(self.concentration - 1.) / (self.concentration - 2.)) if self.allow_nan_stats: nan = array_ops.fill( self.batch_shape_tensor(), np.array(np.nan, dtype=self.dtype.as_numpy_dtype()), name="nan") return array_ops.where(self.concentration > 2., var, nan) else: return control_flow_ops.with_dependencies([ check_ops.assert_less( constant_op.constant(2., dtype=self.dtype), self.concentration, message="variance undefined when any concentration <= 2"), ], var) @distribution_util.AppendDocstring( """The mode of an inverse gamma distribution is `rate / (concentration + 1)`.""") def _mode(self): return self.rate / (1. + self.concentration) def _maybe_assert_valid_sample(self, x): check_ops.assert_same_float_dtype( tensors=[x], dtype=self.dtype) if not self.validate_args: return x return control_flow_ops.with_dependencies([ check_ops.assert_positive(x), ], x) class InverseGammaWithSoftplusConcentrationRate(InverseGamma): """`InverseGamma` with softplus of `concentration` and `rate`.""" def __init__(self, concentration, rate, validate_args=False, allow_nan_stats=True, name="InverseGammaWithSoftplusConcentrationRate"): parameters = locals() with ops.name_scope(name, values=[concentration, rate]): super(InverseGammaWithSoftplusConcentrationRate, self).__init__( concentration=nn.softplus(concentration, name="softplus_concentration"), rate=nn.softplus(rate, name="softplus_rate"), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name) self._parameters = parameters
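# A minimal sketch of the moment-matching parameterization noted in the class
# docstring (imports/session setup omitted; numbers illustrative):
#
#   mean, stddev = 2.0, 1.0
#   alpha = (mean / stddev)**2 + 2           # concentration
#   beta = mean * ((mean / stddev)**2 + 1)   # rate
#   dist = InverseGamma(concentration=alpha, rate=beta)
#   # dist.mean() -> 2.0, dist.stddev() -> 1.0 (defined since alpha > 2)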
apache-2.0
3,149,243,629,071,600,000
34.07047
80
0.654961
false
roidayan/rtslib
rtslib/root.py
2
4998
''' Implements the RTSRoot class. This file is part of LIO(tm). Copyright (c) 2011-2014 by Datera, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import re import os import glob from node import CFSNode from target import Target, FabricModule from tcm import FileIOBackstore, IBlockBackstore from tcm import PSCSIBackstore, RDMCPBackstore from utils import RTSLibError, RTSLibBrokenLink class RTSRoot(CFSNode): ''' This is an interface to the root of the configFS object tree. It allows one to start browsing Target and Backstore objects, and provides helper methods to return arbitrary objects from the configFS tree. >>> import rtslib.root as root >>> rtsroot = root.RTSRoot() >>> rtsroot.path '/sys/kernel/config/target' >>> rtsroot.exists True >>> rtsroot.targets # doctest: +ELLIPSIS [...] >>> rtsroot.backstores # doctest: +ELLIPSIS [...] >>> rtsroot.tpgs # doctest: +ELLIPSIS [...] >>> rtsroot.storage_objects # doctest: +ELLIPSIS [...] >>> rtsroot.network_portals # doctest: +ELLIPSIS [...] ''' # The core target/tcm kernel module target_core_mod = 'target_core_mod' # RTSRoot private stuff def __init__(self): ''' Instantiate an RTSRoot object. Basically checks for configfs setup and base kernel modules (tcm) ''' super(RTSRoot, self).__init__() def _list_targets(self): self._check_self() for fabric_module in self.fabric_modules: for target in fabric_module.targets: yield target def _list_backstores(self): self._check_self() if os.path.isdir("%s/core" % self.path): backstore_dirs = glob.glob("%s/core/*_*" % self.path) for backstore_dir in [os.path.basename(path) for path in backstore_dirs]: regex = re.search("([a-z]+[_]*[a-z]+)(_)([0-9]+)", backstore_dir) if regex: if regex.group(1) == "fileio": yield FileIOBackstore(int(regex.group(3)), 'lookup') elif regex.group(1) == "pscsi": yield PSCSIBackstore(int(regex.group(3)), 'lookup') elif regex.group(1) == "iblock": yield IBlockBackstore(int(regex.group(3)), 'lookup') elif regex.group(1) == "rd_mcp": yield RDMCPBackstore(int(regex.group(3)), 'lookup') def _list_storage_objects(self): self._check_self() for bs in self.backstores: for so in bs.storage_objects: yield so def _list_tpgs(self): self._check_self() for t in self.targets: for tpg in t.tpgs: yield tpg def _list_node_acls(self): self._check_self() for t in self.tpgs: for node_acl in t.node_acls: yield node_acl def _list_network_portals(self): self._check_self() for t in self.tpgs: for p in t.network_portals: yield p def _list_luns(self): self._check_self() for t in self.tpgs: for lun in t.luns: yield lun def _list_fabric_modules(self): self._check_self() for mod in FabricModule.all(): yield mod def __str__(self): return "rtslib" # RTSRoot public stuff backstores = property(_list_backstores, doc="Get the list of Backstore objects.") targets = property(_list_targets, doc="Get the list of Target objects.") tpgs = property(_list_tpgs, doc="Get the list of all the existing TPG objects.") node_acls = property(_list_node_acls, doc="Get the list of all the existing NodeACL objects.") network_portals =
property(_list_network_portals, doc="Get the list of all the existing Network Portal objects.") storage_objects = property(_list_storage_objects, doc="Get the list of all the existing Storage objects.") luns = property(_list_luns, doc="Get the list of all existing LUN objects.") fabric_modules = property(_list_fabric_modules, doc="Get the list of all FabricModule objects.") def _test(): '''Run the doctests.''' import doctest doctest.testmod() if __name__ == "__main__": _test()
apache-2.0
-9,142,912,052,505,027,000
31.454545
78
0.590636
false
IDSIA/sacred
sacred/config/config_files.py
1
1710
#!/usr/bin/env python # coding=utf-8 import os import pickle import json import sacred.optional as opt from sacred.serializer import flatten, restore __all__ = ("load_config_file", "save_config_file") class Handler: def __init__(self, load, dump, mode): self.load = load self.dump = dump self.mode = mode HANDLER_BY_EXT = { ".json": Handler( lambda fp: restore(json.load(fp)), lambda obj, fp: json.dump(flatten(obj), fp, sort_keys=True, indent=2), "", ), ".pickle": Handler(pickle.load, pickle.dump, "b"), } yaml_extensions = (".yaml", ".yml") if opt.has_yaml: def load_yaml(filename): return opt.yaml.load(filename, Loader=opt.yaml.FullLoader) yaml_handler = Handler(load_yaml, opt.yaml.dump, "") for extension in yaml_extensions: HANDLER_BY_EXT[extension] = yaml_handler def get_handler(filename): _, extension = os.path.splitext(filename) if extension in yaml_extensions and not opt.has_yaml: raise KeyError( 'Configuration file "{}" cannot be loaded as ' "you do not have PyYAML installed.".format(filename) ) try: return HANDLER_BY_EXT[extension] except KeyError: raise ValueError( 'Configuration file "{}" has invalid or unsupported extension ' '"{}".'.format(filename, extension) ) def load_config_file(filename): handler = get_handler(filename) with open(filename, "r" + handler.mode) as f: return handler.load(f) def save_config_file(config, filename): handler = get_handler(filename) with open(filename, "w" + handler.mode) as f: handler.dump(config, f)
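# Sketch of plugging another format into the registry above; TOML is purely
# illustrative and not a format sacred itself supports:
#
#   import toml  # hypothetical optional dependency
#   HANDLER_BY_EXT[".toml"] = Handler(toml.load, toml.dump, "")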
mit
-2,056,052,895,043,427,600
24.147059
78
0.624561
false
allenta/varnish-bans-manager
varnish_bans_manager/core/tasks/bans.py
1
4654
# -*- coding: utf-8 -*- ''' :copyright: (c) 2012 by Allenta Consulting, see AUTHORS.txt for more details. :license: GPL, see LICENSE.txt for more details. ''' from __future__ import absolute_import from django.conf import settings from django.utils import timezone from templated_email import send_templated_mail from varnish_bans_manager.core.tasks.base import MonitoredTask, SingleInstanceTask from varnish_bans_manager.core.models import BanSubmission, BanSubmissionItem, Setting class NotifySubmissions(SingleInstanceTask): ''' Send a notification to the administrator with a report of all bans submited lately. ''' ignore_result = True soft_time_limit = 600 # 10 minutes. def irun(self): # Recover current state to go on where we ended last time, # or start from the beginning. if Setting.notify_bans and settings.VBM_NOTIFICATIONS_EMAIL: ban_submissions = BanSubmission.objects.filter( launched_at__isnull=False) if Setting.notify_bans_task_status is not None: ban_submissions = ban_submissions.filter( pk__gt=Setting.notify_bans_task_status) # Prepare data. submissions_log = [{ 'id': ban_submission.id, 'launched_at': ban_submission.launched_at, 'user': ban_submission.user.human_name, 'ban_type': ban_submission.human_ban_type_name, 'expression': ban_submission.expression, 'target_type': ban_submission.target.human_class_name, 'target': ban_submission.target.human_name, 'items': ban_submission.items.all(), } for ban_submission in ban_submissions.iterator()] if len(submissions_log) > 0: # Send e-mail. send_templated_mail( template_name='varnish-bans-manager/core/bans/submissions', from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[settings.VBM_NOTIFICATIONS_EMAIL], bcc=settings.DEFAULT_BCC_EMAILS, context={ 'base_url': settings.VBM_BASE_URL, 'submissions_log': submissions_log, }, ) # Store last seen id to keep track of our position. Setting.notify_bans_task_status = submissions_log[-1]['id'] class Submit(MonitoredTask): ''' Perform a ban submission. ''' def irun(self, ban_submission): ban_submission.launched_at = timezone.now() ban_submission.save() num_items = len(ban_submission.target.items) for index, node in enumerate(ban_submission.target.items): # Build ban submission item with the result of the # current operation. ban_submission_item = BanSubmissionItem(node=node) try: node.ban(ban_submission.expression) ban_submission_item.success = True except Exception as e: ban_submission_item.success = False ban_submission_item.message = unicode(e) # Save ban submission item and update progress. ban_submission.items.add(ban_submission_item) self.set_progress(index + 1, num_items) return ban_submission.id class Status(MonitoredTask): ''' Fetches & merges lists of bans. ''' def irun(self, cache): # Init result. result = { 'cache': cache, 'bans': { 'shared': [], 'differences': [], }, 'errors': [], } # Fetch expressions. bans = [] num_items = len(cache.items) for index, node in enumerate(cache.items): try: bans.append((node, set(node.ban_list()))) except Exception as e: result['errors'].append((node.human_name, unicode(e))) self.set_progress(index + 1, num_items) # Merge expressions. if bans: shared = set.intersection(*[ expressions for (node, expressions) in bans]) for (node, expressions) in bans: difference = expressions.difference(shared) if difference: result['bans']['differences'].append( (node.human_name, sorted(list(difference)))) result['bans']['shared'] = sorted(list(shared)) # Done! return result
gpl-3.0
-1,156,311,358,076,189,200
37.139344
86
0.562218
false
sorenk/ansible
contrib/inventory/rudder.py
46
10663
#!/usr/bin/env python # Copyright (c) 2015, Normation SAS # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### ''' Rudder external inventory script ================================= Generates inventory that Ansible can understand by making API requests to a Rudder server. This script is compatible with Rudder 2.10 or later. The output JSON includes all your Rudder groups, containing the hostnames of their nodes. Groups and nodes have a variable called rudder_group_id and rudder_node_id respectively, holding the Rudder internal id of the item so that it can be identified uniquely. Host variables also include your node properties, which are key => value properties set by the API and specific to each node. This script assumes there is a rudder.ini file alongside it. To specify a different path to rudder.ini, define the RUDDER_INI_PATH environment variable: export RUDDER_INI_PATH=/path/to/my_rudder.ini You have to configure your Rudder server information, either in rudder.ini or by overriding it with environment variables: export RUDDER_API_VERSION='latest' export RUDDER_API_TOKEN='my_token' export RUDDER_API_URI='https://rudder.local/rudder/api' ''' import sys import os import re import argparse import six import httplib2 as http from time import time from ansible.module_utils.six.moves import configparser from ansible.module_utils.six.moves.urllib.parse import urlparse try: import json except ImportError: import simplejson as json class RudderInventory(object): def __init__(self): ''' Main execution path ''' # Empty inventory by default self.inventory = {} # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Create connection self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation) # Cache if self.args.refresh_cache: self.update_cache() elif not self.is_cache_valid(): self.update_cache() else: self.load_cache() data_to_print = {} if self.args.host: data_to_print = self.get_host_info(self.args.host) elif self.args.list: data_to_print = self.get_list_info() print(self.json_format_dict(data_to_print, True)) def read_settings(self): ''' Reads the settings from the rudder.ini file ''' if six.PY2: config = configparser.SafeConfigParser() else: config = configparser.ConfigParser() rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini') rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path))) config.read(rudder_ini_path) self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token')) self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version')) self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) self.disable_ssl_validation
= config.getboolean('rudder', 'disable_ssl_certificate_validation') self.group_name = config.get('rudder', 'group_name') self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') self.cache_path = config.get('rudder', 'cache_path') self.cache_max_age = config.getint('rudder', 'cache_max_age') def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') self.args = parser.parse_args() def is_cache_valid(self): ''' Determines if the cache file has expired or is still valid ''' if os.path.isfile(self.cache_path): mod_time = os.path.getmtime(self.cache_path) current_time = time() if (mod_time + self.cache_max_age) > current_time: return True return False def load_cache(self): ''' Reads the cache from the cache file and sets self.inventory ''' cache = open(self.cache_path, 'r') json_cache = cache.read() cache.close() try: self.inventory = json.loads(json_cache) except ValueError: self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') def write_cache(self): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(self.inventory, True) cache = open(self.cache_path, 'w') cache.write(json_data) cache.close() def get_nodes(self): ''' Gets the nodes list from Rudder ''' path = '/nodes?select=nodeAndPolicyServer' result = self.api_call(path) nodes = {} for node in result['data']['nodes']: nodes[node['id']] = {} nodes[node['id']]['hostname'] = node['hostname'] if 'properties' in node: nodes[node['id']]['properties'] = node['properties'] else: nodes[node['id']]['properties'] = [] return nodes def get_groups(self): ''' Gets the groups list from Rudder ''' path = '/groups' result = self.api_call(path) groups = {} for group in result['data']['groups']: groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} return groups def update_cache(self): ''' Fetches the inventory information from Rudder and creates the inventory ''' nodes = self.get_nodes() groups = self.get_groups() inventory = {} for group in groups: # Check for name collision if self.fail_if_name_collision: if groups[group]['name'] in inventory: self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') # Add group to inventory inventory[groups[group]['name']] = {} inventory[groups[group]['name']]['hosts'] = [] inventory[groups[group]['name']]['vars'] = {} inventory[groups[group]['name']]['vars']['rudder_group_id'] = group for node in groups[group]['hosts']: # Add node to group inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) properties = {} for node in nodes: # Check for name collision if self.fail_if_name_collision: if nodes[node]['hostname'] in properties: self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') # Add node properties to inventory properties[nodes[node]['hostname']] = {} properties[nodes[node]['hostname']]['rudder_node_id'] = node for node_property in nodes[node]['properties']:
properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] inventory['_meta'] = {} inventory['_meta']['hostvars'] = properties self.inventory = inventory if self.cache_max_age > 0: self.write_cache() def get_list_info(self): ''' Gets inventory information from local cache ''' return self.inventory def get_host_info(self, hostname): ''' Gets information about a specific host from local cache ''' if hostname in self.inventory['_meta']['hostvars']: return self.inventory['_meta']['hostvars'][hostname] else: return {} def api_call(self, path): ''' Performs an API request ''' headers = { 'X-API-Token': self.token, 'X-API-Version': self.version, 'Content-Type': 'application/json;charset=utf-8' } target = urlparse(self.uri + path) method = 'GET' body = '' try: response, content = self.conn.request(target.geturl(), method, body, headers) except Exception: self.fail_with_error('Error connecting to Rudder server') try: data = json.loads(content) except ValueError: self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') return data def fail_with_error(self, err_msg, err_operation=None): ''' Logs an error to std err for ansible-playbook to consume and exit ''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible variable names ''' return re.sub(r'[^A-Za-z0-9\_]', '_', word) # Run the script RudderInventory()
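# --- Illustrative usage sketch (not part of the original script). ---
# A minimal rudder.ini covering the options read by read_settings() above;
# every value below is a made-up placeholder:
#
#     [rudder]
#     token = my_token
#     version = latest
#     uri = https://rudder.local/rudder/api
#     disable_ssl_certificate_validation = false
#     group_name = displayName
#     fail_if_name_collision = true
#     cache_path = /tmp/ansible-rudder.cache
#     cache_max_age = 300
#
# With that in place, `rudder.py --list` prints JSON shaped as built by
# update_cache() (group and host names invented for illustration):
#
#     {
#         "all_managed_nodes": {
#             "hosts": ["node1.example.com"],
#             "vars": {"rudder_group_id": "hasPolicyServer-root"}
#         },
#         "_meta": {
#             "hostvars": {"node1.example.com": {"rudder_node_id": "..."}}
#         }
#     }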
gpl-3.0
663,702,047,180,933,600
34.781879
131
0.610522
false
randynobx/ansible
lib/ansible/modules/windows/win_file.py
20
2875
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: win_file version_added: "1.9.2" short_description: Creates, touches or removes files or directories. description: - Creates (empty) files, updates file modification stamps of existing files, and can create or remove directories. - Unlike M(file), does not modify ownership, permissions or manipulate links. - For non-Windows targets, use the M(file) module instead. notes: - For non-Windows targets, use the M(file) module instead. - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) requirements: [ ] author: "Jon Hawkesworth (@jhawkesworth)" options: path: description: - 'path to the file being managed. Aliases: I(dest), I(name)' required: true aliases: ['dest', 'name'] state: description: - If C(directory), all immediate subdirectories will be created if they do not exist. If C(file), the file will NOT be created if it does not exist, see the M(copy) or M(template) module if you want that behavior. If C(absent), directories will be recursively deleted, and files will be removed. If C(touch), an empty file will be created if the C(path) does not exist, while an existing file or directory will receive updated file access and modification times (similar to the way C(touch) works from the command line). choices: [ file, directory, touch, absent ] ''' EXAMPLES = r''' - name: Create a file win_file: path: C:\Temp\foo.conf state: file - name: Touch a file (creates if not present, updates modification time if present) win_file: path: C:\Temp\foo.conf state: touch - name: Remove a file, if present win_file: path: C:\Temp\foo.conf state: absent - name: Create directory structure win_file: path: C:\Temp\folder\subfolder state: directory - name: Remove directory structure win_file: path: C:\Temp state: absent '''
gpl-3.0
3,467,949,795,511,355,400
32.430233
87
0.684174
false
CiuffysHub/MITMf
mitmflib-0.18.4/build/lib.linux-i686-2.7/mitmflib/argh/decorators.py
2
5296
# coding: utf-8 # # Copyright © 2010—2014 Andrey Mikhaylenko and contributors # # This file is part of Argh. # # Argh is free software under terms of the GNU Lesser # General Public License version 3 (LGPLv3) as published by the Free # Software Foundation. See the file README.rst for copying conditions. # """ Command decorators ~~~~~~~~~~~~~~~~~~ """ from mitmflib.argh.constants import (ATTR_ALIASES, ATTR_ARGS, ATTR_NAME, ATTR_WRAPPED_EXCEPTIONS, ATTR_WRAPPED_EXCEPTIONS_PROCESSOR, ATTR_EXPECTS_NAMESPACE_OBJECT) __all__ = ['aliases', 'named', 'arg', 'wrap_errors', 'expects_obj'] def named(new_name): """ Sets given string as command name instead of the function name. The string is used verbatim without further processing. Usage:: @named('load') def do_load_some_stuff_and_keep_the_original_function_name(args): ... The resulting command will be available only as ``load``. To add aliases without renaming the command, check :func:`aliases`. .. versionadded:: 0.19 """ def wrapper(func): setattr(func, ATTR_NAME, new_name) return func return wrapper def aliases(*names): """ Defines alternative command name(s) for given function (along with its original name). Usage:: @aliases('co', 'check') def checkout(args): ... The resulting command will be available as ``checkout``, ``check`` and ``co``. .. note:: This decorator only works with a recent version of argparse (see `Python issue 9324`_ and `Python rev 4c0426`_). Such version ships with **Python 3.2+** and may be available in other environments as a separate package. Argh does not issue warnings and simply ignores aliases if they are not supported. See :attr:`~argh.assembling.SUPPORTS_ALIASES`. .. _Python issue 9324: http://bugs.python.org/issue9324 .. _Python rev 4c0426: http://hg.python.org/cpython/rev/4c0426261148/ .. versionadded:: 0.19 """ def wrapper(func): setattr(func, ATTR_ALIASES, names) return func return wrapper def arg(*args, **kwargs): """ Declares an argument for given function. Does not register the function anywhere, nor does it modify the function in any way. The signature is exactly the same as that of :meth:`argparse.ArgumentParser.add_argument`, only some keywords are not required if they can be easily guessed. Usage:: @arg('path') @arg('--format', choices=['yaml','json'], default='json') @arg('--dry-run', default=False) @arg('-v', '--verbosity', choices=range(0,3), default=1) def load(args): loaders = {'json': json.load, 'yaml': yaml.load} loader = loaders[args.format] data = loader(args.path) if not args.dry_run: if 1 < verbosity: print('saving to the database') put_to_database(data) Note that: * you didn't have to specify ``action="store_true"`` for ``--dry-run``; * you didn't have to specify ``type=int`` for ``--verbosity``. """ def wrapper(func): declared_args = getattr(func, ATTR_ARGS, []) # The innermost decorator is called first but appears last in the code. # We need to preserve the expected order of positional arguments, so # the outermost decorator inserts its value before the innermost's: declared_args.insert(0, dict(option_strings=args, **kwargs)) setattr(func, ATTR_ARGS, declared_args) return func return wrapper def wrap_errors(errors=None, processor=None, *args): """ Decorator. Wraps given exceptions into :class:`~argh.exceptions.CommandError`. Usage:: @wrap_errors([AssertionError]) def foo(x=None, y=None): assert x or y, 'x or y must be specified' If the assertion fails, its message will be correctly printed and the stack hidden. This helps to avoid boilerplate code. 
:param errors: A list of exception classes to catch. :param processor: A callable that expects the exception object and returns a string. For example, this renders all wrapped errors in red colour:: from termcolor import colored def failure(err): return colored(str(err), 'red') @wrap_errors(processor=failure) def my_command(...): ... """ def wrapper(func): if errors: setattr(func, ATTR_WRAPPED_EXCEPTIONS, errors) if processor: setattr(func, ATTR_WRAPPED_EXCEPTIONS_PROCESSOR, processor) return func return wrapper def expects_obj(func): """ Marks given function as expecting a namespace object. Usage:: @arg('bar') @arg('--quux', default=123) @expects_obj def foo(args): yield args.bar, args.quux This is equivalent to:: def foo(bar, quux=123): yield bar, quux In most cases you don't need this decorator. """ setattr(func, ATTR_EXPECTS_NAMESPACE_OBJECT, True) return func
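# --- Illustrative usage sketch (not part of the original module). ---
# A hypothetical command wired up with the decorators above; running this
# file directly (with mitmflib importable) prints the attributes they
# attach to the function.
if __name__ == '__main__':
    @named('load')
    @aliases('ld')
    @arg('--format', choices=['yaml', 'json'], default='json')
    def do_load(args):
        pass

    # @named stored the public command name...
    print(getattr(do_load, ATTR_NAME))     # 'load'
    # ...@aliases the alternative names...
    print(getattr(do_load, ATTR_ALIASES))  # ('ld',)
    # ...and @arg prepended its argparse keywords to the declared args.
    print(getattr(do_load, ATTR_ARGS))     # [{'option_strings': ('--format',), ...}]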
gpl-3.0
-5,380,725,655,337,717,000
29.245714
82
0.612507
false
kartikm/yokadi
yokadi/ycli/textlistrenderer.py
1
9034
# -*- coding: UTF-8 -*- """ Text rendering of t_list output @author: Aurélien Gâteau <mail@agateau.com> @author: Sébastien Renard <Sebastien.Renard@digitalfox.org> @license: GPL v3 or later """ from datetime import datetime, timedelta from sqlalchemy.sql import func import yokadi.ycli.colors as C from yokadi.core import ydateutils from yokadi.core import db from yokadi.core.db import Task from yokadi.ycli import tui def colorizer(value, reverse=False): """Return a color according to value. @param value: value used to determine color. Low (0) value means not urgent/visible, high (100) value means important @param reverse: If false low value means important and vice versa @return: a color code or None for no color""" if reverse: value = 100 - value if value > 75: return C.RED elif value > 50: return C.PURPLE elif value > 25: return C.ORANGE else: return None class Column(object): __slots__ = ["title", "width", "formater"] def __init__(self, title, width, formater): """ formater is a callable which accepts a task and returns a tuple of the form (string, color) color may be None if no color should be applied """ self.title = title self.width = width self.formater = formater def createHeader(self): return self.title.ljust(self.width) def createCell(self, task): value, color = self.formater(task) if color: cell = color + value.ljust(self.width) + C.RESET else: cell = value.ljust(self.width) return cell def idFormater(task): return str(task.id), None class TitleFormater(object): TITLE_WITH_KEYWORDS_TEMPLATE = "%s (%s)" def __init__(self, width, cryptoMgr): self.cryptoMgr = cryptoMgr self.width = width def __call__(self, task): keywords = task.getUserKeywordsNameAsString() hasDescription = task.description is not None and task.description != "" title = self.cryptoMgr.decrypt(task.title) # Compute title, titleWidth and colorWidth maxWidth = self.width if hasDescription: maxWidth -= 1 if keywords and len(task.title) < maxWidth: title = self.TITLE_WITH_KEYWORDS_TEMPLATE % (title, C.BOLD + keywords) colorWidth = len(C.BOLD) else: colorWidth = 0 # Adjust title to fit in self.width titleWidth = len(title) - colorWidth if titleWidth > maxWidth: title = title[:maxWidth - 1 + colorWidth] + C.RESET + ">" else: title = title.ljust(maxWidth + colorWidth) + C.RESET if hasDescription: title = title + "*" return title, None def urgencyFormater(task): return str(task.urgency), colorizer(task.urgency) def statusFormater(task): if task.status == "started": color = C.BOLD else: color = None return task.status[0].upper(), color class AgeFormater(object): def __init__(self, today, asDate=False): self.today = today self.asDate = asDate def __call__(self, task): delta = self.today - task.creationDate.replace(microsecond=0) if self.asDate: return task.creationDate.strftime("%x %H:%M"), None else: return ydateutils.formatTimeDelta(delta), colorizer(delta.days) class DueDateFormater(object): def __init__(self, today, shortFormat): self.today = today self.shortFormat = shortFormat def __call__(self, task): if not task.dueDate: return "", None delta = task.dueDate - self.today if delta.days != 0: value = task.dueDate.strftime("%x %H:%M") else: value = task.dueDate.strftime("%H:%M") if self.shortFormat: value = ydateutils.formatTimeDelta(delta) else: value += " (%s)" % ydateutils.formatTimeDelta(delta) color = colorizer(delta.days * 33, reverse=True) return value, color class TextListRenderer(object): def __init__(self, out, termWidth=None, cryptoMgr=None, renderAsNotes=False, splitOnDate=False): """ @param out: output target 
@param termWidth: terminal width (int) @param cryptoMgr: crypto manager used to decrypt encrypted task titles @param renderAsNotes: whether to display tasks as notes (with dates) instead of tasks (with age) (bool) @param splitOnDate: whether to insert date splitter headers between tasks (bool)''' self.out = out self.termWidth = termWidth or tui.getTermWidth() self.taskLists = [] self.maxTitleWidth = len("Title") self.today = datetime.today().replace(microsecond=0) self.firstHeader = True self.cryptoMgr = cryptoMgr self.splitOnDate = splitOnDate if self.termWidth < 100: dueColumnWidth = 8 shortDateFormat = True else: dueColumnWidth = 26 shortDateFormat = False if renderAsNotes: self.splitOnDate = True creationDateColumnWidth = 16 creationDateTitle = "Creation date" else: creationDateColumnWidth = 8 creationDateTitle = "Age" # All fields set to None must be defined in end() self.columns = [ Column("ID" , None , idFormater), Column("Title" , None , None), Column("U" , 3 , urgencyFormater), Column("S" , 1 , statusFormater), Column(creationDateTitle, creationDateColumnWidth , AgeFormater(self.today, renderAsNotes)), Column("Due date" , dueColumnWidth , DueDateFormater(self.today, shortDateFormat)), ] self.idColumn = self.columns[0] self.titleColumn = self.columns[1] def addTaskList(self, sectionName, taskList): """Store tasks for this section @param sectionName: name of the task grouping section @type sectionName: unicode @param taskList: list of tasks to display @type taskList: list of db.Task instances """ self.taskLists.append((sectionName, taskList)) # Find max title width for task in taskList: title = self.cryptoMgr.decrypt(task.title) keywords = task.getUserKeywordsNameAsString() if keywords: title = TitleFormater.TITLE_WITH_KEYWORDS_TEMPLATE % (title, keywords) titleWidth = len(title) if task.description: titleWidth += 1 self.maxTitleWidth = max(self.maxTitleWidth, titleWidth) def end(self): today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) # Adjust idColumn maxId = db.getSession().query(func.max(Task.id)).one()[0] self.idColumn.width = max(2, len(str(maxId))) # Adjust titleColumn self.titleColumn.width = self.maxTitleWidth totalWidth = sum([x.width for x in self.columns]) + len(self.columns) if totalWidth >= self.termWidth: self.titleColumn.width -= (totalWidth - self.termWidth) + len(self.columns) self.titleColumn.formater = TitleFormater(self.titleColumn.width, self.cryptoMgr) # Print table for sectionName, taskList in self.taskLists: dateSplitters = [(1, "day"), (7, "week"), (30, "month"), (30 * 4, "quarter"), (365, "year")] splitterRange, splitterName = dateSplitters.pop() splitterText = None self._renderTaskListHeader(sectionName) for task in taskList: while self.splitOnDate and task.creationDate > today - timedelta(splitterRange): splitterText = "Last %s" % splitterName if len(dateSplitters) > 0: splitterRange, splitterName = dateSplitters.pop() else: self.splitOnDate = False if splitterText: print(C.GREEN + splitterText.center(totalWidth) + C.RESET, file=self.out) splitterText = None self._renderTaskListRow(task) def _renderTaskListHeader(self, sectionName): """ @param sectionName: name used for list header @type sectionName: unicode""" cells = [x.createHeader() for x in self.columns] line = "|".join(cells) width = len(line) if self.firstHeader: self.firstHeader = False else: print(file=self.out) print(C.CYAN + sectionName.center(width) + C.RESET, file=self.out) print(C.BOLD + line + C.RESET, file=self.out) print("-" * width, file=self.out) def _renderTaskListRow(self, task): cells = [column.createCell(task) for column in self.columns]
print("|".join(cells), file=self.out) # vi: ts=4 sw=4 et
gpl-3.0
-3,334,397,768,262,249,500
33.338403
121
0.592958
false
redmoo-info/proddict
ru/clothing_accessories.py
1
13968
# -*- coding: utf-8 -*- """'Clothing, accessories' part of product categories dictionary. Must hold subcategories of the 'Clothing, accessories' category as a Python dictionary. """ kids_sizes = { ('86 cm', '86 см'): {}, ('92 cm', '92 см'): {}, ('98 cm', '98 см'): {}, ('104 cm', '104 см'): {}, ('110 cm', '110 см'): {}, ('116 cm', '116 см'): {}, ('122 cm', '122 см'): {}, ('128 cm', '128 см'): {}, ('134 cm', '134 см'): {}, ('140 cm', '140 см'): {}, ('146 cm', '146 см'): {}, ('152 cm', '152 см'): {}, ('164 plus cm', '164 плюс см'): {}, } men_sizes = { ('multiple sizes', 'несколько размеров'): {}, ('S', 'S'): {}, ('M', 'M'): {}, ('L', 'L'): {}, ('XL', 'XL'): {}, ('XXL', 'XXL'): {}, ('XXXL plus', 'XXXL плюс'): {}, } adult_shoe_size = { ('200 mm', '200 мм'): {}, ('210 mm', '210 мм'): {}, ('220 mm', '220 мм'): {}, ('230 mm', '230 мм'): {}, ('240 mm', '240 мм'): {}, ('250 mm', '250 мм'): {}, ('260 mm', '260 мм'): {}, ('270 mm', '270 мм'): {}, ('280 mm', '280 мм'): {}, ('290 mm', '290 мм'): {}, ('300 mm', '300 мм'): {}, ('310 mm', '310 мм'): {}, ('320 plus mm', '320 плюс мм'): {}, } kids_shoe_size = { ('80 mm', '80 мм'): {}, ('90 mm', '90 мм'): {}, ('100 mm', '100 мм'): {}, ('110 mm', '110 мм'): {}, ('120 mm', '120 мм'): {}, ('130 mm', '130 мм'): {}, ('140 mm', '140 мм'): {}, ('150 mm', '150 мм'): {}, ('160 mm', '160 мм'): {}, ('170 mm', '170 мм'): {}, ('180 mm', '180 мм'): {}, ('190 mm', '190 мм'): {}, ('200 plus mm', '200 плюс мм'): {}, } women_sizes = { ('multiple sizes', 'несколько размеров'): {}, ('XS', 'XS'): {}, ('S', 'S'): {}, ('M', 'M'): {}, ('L', 'L'): {}, ('XL plus', 'XL плюс'): {}, } clothing_accessories = {('clothing, accessories', 'одежда, аксессуары'): { ('boys', 'мальчики'): { ('accessories', 'аксессуары'): { ('bags, backpacks', 'сумки, рюкзаки'): {}, ('belts', 'ремни'): { ('black', 'черные'): {}, ('brown', 'коричневые'): {}, ('white', 'белые'): {}, ('other', 'другое'): {}, }, ('belt buckles', 'пряжки ремней'): {}, ('gloves, mittens', 'перчатки, варежки'): {}, ('handkerchiefs', 'платки носовые'): {}, ('hats', 'головные уборы'): {}, ('scarves', 'шарфы'): {}, ('sunglasses', 'очки темные'): {}, ('suspenders', 'подтяжки'): {}, ('ties', 'галстуки'): {}, ('wallets', 'бумажники'): {}, ('other', 'другое'): {}, }, ('coats', 'пальто'): kids_sizes, ('costumes', 'маскарад, образы'): kids_sizes, ('footwear', 'обувь'): kids_shoe_size, ('jerseys, hoodies', 'толстовки'): kids_sizes, ('jackets', 'куртки'): kids_sizes, ('jeans', 'джинсы'): kids_sizes, ('shirts', 'рубашки'): kids_sizes, ('shorts', 'шорты'): kids_sizes, ('sleepwear, robes', 'пижамы, халаты'): kids_sizes, ('sportswear', 'спортивная одежда'): { ('top', 'верх'): kids_sizes, ('bottom', 'низ'): kids_sizes, ('other', 'другое'): kids_sizes, }, ('socks', 'носки'): kids_shoe_size, ('suits', 'костюмы'): kids_sizes, ('sweaters', 'свитеры'): kids_sizes, ('swimwear', 'купальные костюмы'): kids_sizes, ('t-shirts', 'футболки'): kids_sizes, ('trousers', 'брюки'): kids_sizes, ('underwear', 'нижнее белье'): kids_sizes, ('uniforms', 'форма'): kids_sizes, ('winter clothes', 'зимняя одежда'): kids_sizes, ('bulk lots', 'оптом'): kids_sizes, ('other', 'другое'): kids_sizes, }, ('men', 'мужчины'): { ('accessories', 'аксессуары'): { ('bags', 'сумки'): {}, ('belts', 'ремни'): { ('black', 'черные'): {}, ('brown', 'коричневые'): {}, ('white', 'белые'): {}, ('other', 'другое'): {}, }, ('belt buckles', 'пряжки ремней'): {}, ('cufflinks', 'запонки'): {}, ('gloves, mittens', 'перчатки, варежки'): {}, ('handkerchiefs', 'платки носовые'): {}, ('hats',
'головные уборы'): {}, ('scarves', 'шарфы'): {}, ('sunglasses', 'очки темные'): {}, ('suspenders', 'подтяжки'): {}, ('ties', 'галстуки'): {}, ('wallets', 'бумажники'): {}, ('other', 'другое'): {}, }, ('costumes', 'маскарад, образы'): men_sizes, ('footwear', 'обувь'): adult_shoe_size, ('jerseys, hoodies', 'толстовки'): men_sizes, ('sportswear', 'спортивная одежда'): { ('top', 'верх'): men_sizes, ('bottom', 'низ'): men_sizes, ('other', 'другое'): men_sizes, }, ('winter clothes', 'зимняя одежда'): men_sizes, ('casual shirts', 'повседневные рубашки'): men_sizes, ('dress shirts', 'рубашки'): men_sizes, ('t-shirts', 'футболки'): men_sizes, ('coats', 'пальто'): men_sizes, ('jackets', 'куртки'): men_sizes, ('jeans', 'джинсы'): men_sizes, ('trousers', 'брюки'): men_sizes, ('shorts', 'шорты'): men_sizes, ('sleepwear, robes', 'пижамы, халаты'): men_sizes, ('socks', 'носки'): adult_shoe_size, ('suits', 'костюмы'): men_sizes, ('sweaters', 'свитеры'): men_sizes, ('swimwear', 'купальные костюмы'): men_sizes, ('underwear', 'нижнее белье'): men_sizes, ('bulk lots', 'оптом'): men_sizes, ('other', 'другое'): men_sizes, }, ('girls', 'девочки'): { ('accessories', 'аксессуары'): { ('bags', 'сумки'): {}, ('backpacks', 'рюкзаки'): {}, ('belts', 'ремни'): {}, ('gloves, mittens', 'перчатки, варежки'): {}, ('hair accessories', 'аксессуары для волос'): {}, ('handbags', 'женские сумки'): {}, ('handkerchiefs', 'платки носовые'): {}, ('hats', 'головные уборы'): {}, ('purses, wallets', 'кошельки'): {}, ('scarves', 'шарфы'): {}, ('sunglasses', 'очки темные'): {}, ('other', 'другое'): {}, }, ('coats', 'пальто'): kids_sizes, ('costumes', 'маскарад, образы'): kids_sizes, ('dresses', 'платья'): { ('casual', 'повседневные'): kids_sizes, ('evening', 'вечерние'): kids_sizes, ('other', 'другое'): kids_sizes, }, ('footwear', 'обувь'): kids_shoe_size, ('hosiery, socks', 'чулочно-носочные'): { ('leg warmers', 'гетры'): {}, ('tights', 'колготки'): { ('size 1', 'размер 1'): {}, ('size 2', 'размер 2'): {}, ('size 3', 'размер 3'): {}, ('size 4', 'размер 4'): {}, }, ('socks', 'носки'): kids_shoe_size, ('stockings', 'гольфы'): kids_sizes, ('other', 'другое'): kids_sizes, }, ('jerseys, hoodies', 'толстовки'): kids_sizes, ('jackets', 'куртки'): kids_sizes, ('jeans', 'джинсы'): kids_sizes, ('shirts', 'рубашки'): kids_sizes, ('shorts', 'шорты'): kids_sizes, ('skirts', 'юбки'): kids_sizes, ('sleepwear, robes', 'пижамы, халаты'): kids_sizes, ('sportswear', 'спортивная одежда'): { ('top', 'верх'): kids_sizes, ('bottom', 'низ'): kids_sizes, ('other', 'другое'): kids_sizes, }, ('suits', 'костюмы'): kids_sizes, ('sweaters', 'свитеры'): kids_sizes, ('swimwear', 'купальные костюмы'): kids_sizes, ('t-shirts', 'футболки'): kids_sizes, ('tops, blouses', 'топы, блузы'): kids_sizes, ('trousers', 'брюки'): kids_sizes, ('underwear', 'нижнее белье'): kids_sizes, ('uniforms', 'форма'): kids_sizes, ('winter clothes', 'зимняя одежда'): kids_sizes, ('bulk lots', 'оптом'): kids_sizes, ('other', 'другое'): kids_sizes, }, ('women', 'женщины'): { ('accessories', 'аксессуары'): { ('bags', 'сумки'): {}, ('backpacks', 'рюкзаки'): {}, ('belts', 'ремни'): {}, ('gloves, mittens', 'перчатки, варежки'): {}, ('hair accessories', 'аксессуары для волос'): {}, ('handbags', 'женские сумки'): {}, ('handkerchiefs', 'платки носовые'): {}, ('hats', 'головные уборы'): {}, ('purses, wallets', 'кошельки'): {}, ('scarves', 'шарфы'): {}, ('sunglasses', 'очки темные'): {}, ('wigs, extensions', 'парики'): {}, ('other', 'другое'): {}, }, ('coats', 'пальто'): women_sizes, ('costumes',
'маскарад, образы'): women_sizes, ('dresses', 'платья'): { ('casual', 'повседневные'): women_sizes, ('evening', 'вечерние'): women_sizes, ('other', 'другое'): women_sizes, }, ('footwear', 'обувь'): adult_shoe_size, ('hosiery, socks', 'чулочно-носочные'): { ('leg warmers', 'гетры'): {}, ('pantyhose, tights', 'колготки, чулки'): { ('size 1', 'размер 1'): {}, ('size 2', 'размер 2'): {}, ('size 3', 'размер 3'): {}, ('size 4', 'размер 4'): {}, }, ('socks', 'носки'): adult_shoe_size, ('stockings, thigh-highs', 'чулки'): women_sizes, ('other', 'другое'): women_sizes, }, ('jerseys, hoodies', 'толстовки'): women_sizes, ('jackets', 'куртки'): women_sizes, ('jeans', 'джинсы'): women_sizes, ('lingerie, underwear', 'нижнее белье'): { ('bras, bra sets', 'бюстгалтеры, комплекты'): { ('multiple sizes', 'несколько размеров'): {}, ('one size items', 'один размер'): {}, }, ('panties', 'трусы'): women_sizes, ('adult', 'для взрослых'): women_sizes, ('other', 'другое'): women_sizes, }, ('maternity', 'для беременных'): { ('dresses', 'платья'): women_sizes, ('bras, bra sets', 'бюстгалтеры, комплекты'): { ('multiple sizes', 'несколько размеров'): {}, ('one size items', 'один размер'): {}, }, ('jeans', 'джинсы'): women_sizes, ('trousers', 'брюки'): women_sizes, ('sleepwear, underwear', 'пижамы, белье'): women_sizes, ('shorts', 'шорты'): women_sizes, ('skirts', 'юбки'): women_sizes, ('sweaters', 'свитеры'): women_sizes, ('swimwear', 'купальные костюмы'): women_sizes, ('t-shirts', 'футболки'): women_sizes, ('tops, blouses', 'топы, блузы'): women_sizes, ('bulk lots', 'оптом'): women_sizes, ('other', 'другое'): women_sizes, }, ('shirts', 'рубашки'): women_sizes, ('shorts', 'шорты'): women_sizes, ('skirts', 'юбки'): women_sizes, ('sleepwear, robes', 'пижамы, халаты'): women_sizes, ('sportswear', 'спортивная одежда'): { ('top', 'верх'): women_sizes, ('bottom', 'низ'): women_sizes, ('other', 'другое'): women_sizes, }, ('suits', 'костюмы'): women_sizes, ('sweaters', 'свитеры'): women_sizes, ('swimwear', 'купальные костюмы'): women_sizes, ('t-shirts', 'футболки'): women_sizes, ('tops, blouses', 'топы, блузы'): women_sizes, ('trousers', 'брюки'): women_sizes, ('winter clothes', 'зимняя одежда'): women_sizes, ('bulk lots', 'оптом'): women_sizes, ('other', 'другое'): women_sizes, }, }}
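# --- Illustrative sketch (not part of the original module). ---
# Every key above is an (english, russian) tuple and every value is a
# nested dict (leaves are empty dicts), so the whole tree can be walked
# generically; this guarded demo prints each category path in English.
if __name__ == '__main__':
    def walk(node, path=()):
        for key, children in sorted(node.items()):
            print(' / '.join(path + (key[0],)))
            walk(children, path + (key[0],))

    walk(clothing_accessories)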
mit
1,447,159,099,828,185,600
37.935275
74
0.446596
false
openstack/neutron-lib
neutron_lib/exceptions/qos.py
1
4177
# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib._i18n import _ from neutron_lib import exceptions as e class QosPolicyNotFound(e.NotFound): message = _("QoS policy %(policy_id)s could not be found.") class QosRuleNotFound(e.NotFound): message = _("QoS rule %(rule_id)s for policy %(policy_id)s " "could not be found.") class QoSPolicyDefaultAlreadyExists(e.Conflict): message = _("A default QoS policy exists for project %(project_id)s.") class PortQosBindingNotFound(e.NotFound): message = _("QoS binding for port %(port_id)s and policy %(policy_id)s " "could not be found.") class PortQosBindingError(e.NeutronException): message = _("QoS binding for port %(port_id)s and policy %(policy_id)s " "could not be created: %(db_error)s.") class NetworkQosBindingNotFound(e.NotFound): message = _("QoS binding for network %(net_id)s and policy %(policy_id)s " "could not be found.") class FloatingIPQosBindingNotFound(e.NotFound): message = _("QoS binding for floating IP %(fip_id)s and policy " "%(policy_id)s could not be found.") class QosPolicyInUse(e.InUse): message = _("QoS Policy %(policy_id)s is used by " "%(object_type)s %(object_id)s.") class FloatingIPQosBindingError(e.NeutronException): message = _("QoS binding for floating IP %(fip_id)s and policy " "%(policy_id)s could not be created: %(db_error)s.") class NetworkQosBindingError(e.NeutronException): message = _("QoS binding for network %(net_id)s and policy %(policy_id)s " "could not be created: %(db_error)s.") class QosRuleNotSupported(e.Conflict): message = _("Rule %(rule_type)s is not supported by port %(port_id)s") class QosRuleNotSupportedByNetwork(e.Conflict): message = _("Rule %(rule_type)s is not supported " "by network %(network_id)s") class QoSRuleParameterConflict(e.Conflict): message = _("Unable to add the rule with value %(rule_value)s to the " "policy %(policy_id)s as the existing rule of type " "%(existing_rule)s restricts the bandwidth to " "%(existing_value)s.") class QoSRulesConflict(e.Conflict): message = _("Rule %(new_rule_type)s conflicts with " "rule %(rule_id)s which already exists in " "QoS Policy %(policy_id)s.") class PolicyRemoveAuthorizationError(e.NotAuthorized): message = _("Failed to remove provided policy %(policy_id)s " "because you are not authorized.") class TcLibQdiscTypeError(e.NeutronException): message = _("TC Qdisc type %(qdisc_type)s is not supported; supported " "types: %(supported_qdisc_types)s.") class TcLibQdiscNeededArguments(e.NeutronException): message = _("TC Qdisc type %(qdisc_type)s needs following arguments: " "%(needed_arguments)s.") class RouterQosBindingNotFound(e.NotFound): message = _("QoS binding for router %(router_id)s gateway and policy " "%(policy_id)s could not be found.") class RouterQosBindingError(e.NeutronException): message = _("QoS binding for router %(router_id)s gateway and policy " "%(policy_id)s could not be created: %(db_error)s.") class QosPlacementAllocationConflict(e.Conflict): message = _("Allocation for consumer 
%(consumer)s is not possible on " "resource provider %(rp)s, the requested amount of bandwidth " "would exceed the capacity available.")
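# --- Illustrative note (not part of the original module). ---
# Assuming the usual NeutronException behaviour of interpolating keyword
# arguments into `message`, the classes above are raised with the fields
# named in their templates (the id below is a made-up placeholder):
#
#     raise QosPolicyNotFound(policy_id='1234-abcd')
#     # str(exc) -> "QoS policy 1234-abcd could not be found."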
apache-2.0
3,458,554,079,653,858,300
34.398305
78
0.65717
false
suyashphadtare/sajil-final-erp
erpnext/accounts/report/balance_sheet/balance_sheet.py
39
1666
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.utils import flt from erpnext.accounts.report.financial_statements import (get_period_list, get_columns, get_data) def execute(filters=None): period_list = get_period_list(filters.fiscal_year, filters.periodicity, from_beginning=True) asset = get_data(filters.company, "Asset", "Debit", period_list) liability = get_data(filters.company, "Liability", "Credit", period_list) equity = get_data(filters.company, "Equity", "Credit", period_list) provisional_profit_loss = get_provisional_profit_loss(asset, liability, equity, period_list) data = [] data.extend(asset or []) data.extend(liability or []) data.extend(equity or []) if provisional_profit_loss: data.append(provisional_profit_loss) columns = get_columns(period_list) return columns, data def get_provisional_profit_loss(asset, liability, equity, period_list): if asset and (liability or equity): provisional_profit_loss = { "account_name": _("Provisional Profit / Loss (Credit)"), "account": None, "warn_if_negative": True } has_value = False for period in period_list: effective_liability = 0.0 if liability: effective_liability += flt(liability[-2][period.key]) if equity: effective_liability += flt(equity[-2][period.key]) provisional_profit_loss[period.key] = flt(asset[-2][period.key]) - effective_liability if provisional_profit_loss[period.key]: has_value = True if has_value: return provisional_profit_loss
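# --- Illustrative note (not part of the original report). ---
# get_provisional_profit_loss() reads the totals row (index -2) of each
# side. With hypothetical period totals of asset = 1000.0,
# liability = 600.0 and equity = 300.0, the provisional figure for that
# period would be 1000.0 - (600.0 + 300.0) = 100.0; "warn_if_negative"
# flags the row whenever that difference dips below zero.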
agpl-3.0
3,698,225,509,537,346,600
31.038462
97
0.726891
false
PRIMEDesigner15/PRIMEDesigner15
dependencies/Lib/test/unittests/test_hashlib.py
23
16135
# Test hashlib module # # $Id$ # # Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org) # Licensed to PSF under a Contributor Agreement. # import array import hashlib import itertools import os import sys try: import threading except ImportError: threading = None import unittest import warnings from test import support from test.support import _4G, bigmemtest # Were we compiled --with-pydebug or with #define Py_DEBUG? COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount') def hexstr(s): assert isinstance(s, bytes), repr(s) h = "0123456789abcdef" r = '' for i in s: r += h[(i >> 4) & 0xF] + h[i & 0xF] return r class HashLibTestCase(unittest.TestCase): supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1', 'sha224', 'SHA224', 'sha256', 'SHA256', 'sha384', 'SHA384', 'sha512', 'SHA512' ) # Issue #14693: fallback modules are always compiled under POSIX _warn_on_extension_import = os.name == 'posix' or COMPILED_WITH_PYDEBUG def _conditional_import_module(self, module_name): """Import a module and return a reference to it or None on failure.""" try: exec('import '+module_name) except ImportError as error: if self._warn_on_extension_import: warnings.warn('Did a C extension fail to compile? %s' % error) return locals().get(module_name) def __init__(self, *args, **kwargs): algorithms = set() for algorithm in self.supported_hash_names: algorithms.add(algorithm.lower()) self.constructors_to_test = {} for algorithm in algorithms: self.constructors_to_test[algorithm] = set() # For each algorithm, test the direct constructor and the use # of hashlib.new given the algorithm name. for algorithm, constructors in self.constructors_to_test.items(): constructors.add(getattr(hashlib, algorithm)) def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm): if data is None: return hashlib.new(_alg) return hashlib.new(_alg, data) constructors.add(_test_algorithm_via_hashlib_new) _hashlib = self._conditional_import_module('_hashlib') if _hashlib: # These two algorithms should always be present when this module # is compiled. If not, something was compiled wrong. assert hasattr(_hashlib, 'openssl_md5') assert hasattr(_hashlib, 'openssl_sha1') for algorithm, constructors in self.constructors_to_test.items(): constructor = getattr(_hashlib, 'openssl_'+algorithm, None) if constructor: constructors.add(constructor) _md5 = self._conditional_import_module('_md5') if _md5: self.constructors_to_test['md5'].add(_md5.md5) _sha1 = self._conditional_import_module('_sha1') if _sha1: self.constructors_to_test['sha1'].add(_sha1.sha1) _sha256 = self._conditional_import_module('_sha256') if _sha256: self.constructors_to_test['sha224'].add(_sha256.sha224) self.constructors_to_test['sha256'].add(_sha256.sha256) _sha512 = self._conditional_import_module('_sha512') if _sha512: self.constructors_to_test['sha384'].add(_sha512.sha384) self.constructors_to_test['sha512'].add(_sha512.sha512) super(HashLibTestCase, self).__init__(*args, **kwargs) @property def hash_constructors(self): constructors = self.constructors_to_test.values() return itertools.chain.from_iterable(constructors) def test_hash_array(self): a = array.array("b", range(10)) for cons in self.hash_constructors: c = cons(a) c.hexdigest() def test_algorithms_guaranteed(self): self.assertEqual(hashlib.algorithms_guaranteed, set(_algo for _algo in self.supported_hash_names if _algo.islower())) def test_algorithms_available(self): self.assertTrue(set(hashlib.algorithms_guaranteed). 
issubset(hashlib.algorithms_available)) def test_unknown_hash(self): self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam') self.assertRaises(TypeError, hashlib.new, 1) def test_get_builtin_constructor(self): get_builtin_constructor = hashlib.__dict__[ '__get_builtin_constructor'] self.assertRaises(ValueError, get_builtin_constructor, 'test') try: import _md5 except ImportError: pass # This forces an ImportError for "import _md5" statements sys.modules['_md5'] = None try: self.assertRaises(ValueError, get_builtin_constructor, 'md5') finally: if '_md5' in locals(): sys.modules['_md5'] = _md5 else: del sys.modules['_md5'] self.assertRaises(TypeError, get_builtin_constructor, 3) def test_hexdigest(self): for cons in self.hash_constructors: h = cons() assert isinstance(h.digest(), bytes), repr(cons) self.assertEqual(hexstr(h.digest()), h.hexdigest()) def test_large_update(self): aas = b'a' * 128 bees = b'b' * 127 cees = b'c' * 126 dees = b'd' * 2048 # HASHLIB_GIL_MINSIZE for cons in self.hash_constructors: m1 = cons() m1.update(aas) m1.update(bees) m1.update(cees) m1.update(dees) m2 = cons() m2.update(aas + bees + cees + dees) self.assertEqual(m1.digest(), m2.digest()) m3 = cons(aas + bees + cees + dees) self.assertEqual(m1.digest(), m3.digest()) # verify copy() doesn't touch original m4 = cons(aas + bees + cees) m4_digest = m4.digest() m4_copy = m4.copy() m4_copy.update(dees) self.assertEqual(m1.digest(), m4_copy.digest()) self.assertEqual(m4.digest(), m4_digest) def check(self, name, data, hexdigest): hexdigest = hexdigest.lower() constructors = self.constructors_to_test[name] # 2 is for hashlib.name(...) and hashlib.new(name, ...) self.assertGreaterEqual(len(constructors), 2) for hash_object_constructor in constructors: m = hash_object_constructor(data) computed = m.hexdigest() self.assertEqual( computed, hexdigest, "Hash algorithm %s constructed using %s returned hexdigest" " %r for %d byte input data that should have hashed to %r." % (name, hash_object_constructor, computed, len(data), hexdigest)) computed = m.digest() digest = bytes.fromhex(hexdigest) self.assertEqual(computed, digest) self.assertEqual(len(digest), m.digest_size) def check_no_unicode(self, algorithm_name): # Unicode objects are not allowed as input.
constructors = self.constructors_to_test[algorithm_name] for hash_object_constructor in constructors: self.assertRaises(TypeError, hash_object_constructor, 'spam') def test_no_unicode(self): self.check_no_unicode('md5') self.check_no_unicode('sha1') self.check_no_unicode('sha224') self.check_no_unicode('sha256') self.check_no_unicode('sha384') self.check_no_unicode('sha512') def check_blocksize_name(self, name, block_size=0, digest_size=0): constructors = self.constructors_to_test[name] for hash_object_constructor in constructors: m = hash_object_constructor() self.assertEqual(m.block_size, block_size) self.assertEqual(m.digest_size, digest_size) self.assertEqual(len(m.digest()), digest_size) self.assertEqual(m.name.lower(), name.lower()) self.assertIn(name.split("_")[0], repr(m).lower()) def test_blocksize_name(self): self.check_blocksize_name('md5', 64, 16) self.check_blocksize_name('sha1', 64, 20) self.check_blocksize_name('sha224', 64, 28) self.check_blocksize_name('sha256', 64, 32) self.check_blocksize_name('sha384', 128, 48) self.check_blocksize_name('sha512', 128, 64) def test_case_md5_0(self): self.check('md5', b'', 'd41d8cd98f00b204e9800998ecf8427e') def test_case_md5_1(self): self.check('md5', b'abc', '900150983cd24fb0d6963f7d28e17f72') def test_case_md5_2(self): self.check('md5', b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', 'd174ab98d277d9f5a5611c2c9f419d9f') @bigmemtest(size=_4G + 5, memuse=1) def test_case_md5_huge(self, size): if size == _4G + 5: try: self.check('md5', b'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d') except OverflowError: pass # 32-bit arch @bigmemtest(size=_4G - 1, memuse=1) def test_case_md5_uintmax(self, size): if size == _4G - 1: try: self.check('md5', b'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3') except OverflowError: pass # 32-bit arch # use the three examples from Federal Information Processing Standards # Publication 180-1, Secure Hash Standard, 1995 April 17 # http://www.itl.nist.gov/div897/pubs/fip180-1.htm def test_case_sha1_0(self): self.check('sha1', b"", "da39a3ee5e6b4b0d3255bfef95601890afd80709") def test_case_sha1_1(self): self.check('sha1', b"abc", "a9993e364706816aba3e25717850c26c9cd0d89d") def test_case_sha1_2(self): self.check('sha1', b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", "84983e441c3bd26ebaae4aa1f95129e5e54670f1") def test_case_sha1_3(self): self.check('sha1', b"a" * 1000000, "34aa973cd4c4daa4f61eeb2bdbad27316534016f") # use the examples from Federal Information Processing Standards # Publication 180-2, Secure Hash Standard, 2002 August 1 # http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf def test_case_sha224_0(self): self.check('sha224', b"", "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f") def test_case_sha224_1(self): self.check('sha224', b"abc", "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7") def test_case_sha224_2(self): self.check('sha224', b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", "75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525") def test_case_sha224_3(self): self.check('sha224', b"a" * 1000000, "20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67") def test_case_sha256_0(self): self.check('sha256', b"", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") def test_case_sha256_1(self): self.check('sha256', b"abc", "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad") def test_case_sha256_2(self): self.check('sha256', b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 
"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1") def test_case_sha256_3(self): self.check('sha256', b"a" * 1000000, "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0") def test_case_sha384_0(self): self.check('sha384', b"", "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+ "274edebfe76f65fbd51ad2f14898b95b") def test_case_sha384_1(self): self.check('sha384', b"abc", "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+ "8086072ba1e7cc2358baeca134c825a7") def test_case_sha384_2(self): self.check('sha384', b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+ b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+ "fcc7c71a557e2db966c3e9fa91746039") def test_case_sha384_3(self): self.check('sha384', b"a" * 1000000, "9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+ "07b8b3dc38ecc4ebae97ddd87f3d8985") def test_case_sha512_0(self): self.check('sha512', b"", "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+ "47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e") def test_case_sha512_1(self): self.check('sha512', b"abc", "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+ "2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f") def test_case_sha512_2(self): self.check('sha512', b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+ b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+ "501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909") def test_case_sha512_3(self): self.check('sha512', b"a" * 1000000, "e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+ "de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b") def test_gil(self): # Check things work fine with an input larger than the size required # for multithreaded operation (which is hardwired to 2048). gil_minsize = 2048 for cons in self.hash_constructors: m = cons() m.update(b'1') m.update(b'#' * gil_minsize) m.update(b'1') m = cons(b'x' * gil_minsize) m.update(b'1') m = hashlib.md5() m.update(b'1') m.update(b'#' * gil_minsize) m.update(b'1') self.assertEqual(m.hexdigest(), 'cb1e1a2cbc80be75e19935d621fb9b21') m = hashlib.md5(b'x' * gil_minsize) self.assertEqual(m.hexdigest(), 'cfb767f225d58469c5de3632a8803958') @unittest.skipUnless(threading, 'Threading required for this test.') @support.reap_threads def test_threaded_hashing(self): # Updating the same hash object from several threads at once # using data chunk sizes containing the same byte sequences. # # If the internal locks are working to prevent multiple # updates on the same object from running at once, the resulting # hash will be the same as doing it single threaded upfront. 
hasher = hashlib.sha1() num_threads = 5 smallest_data = b'swineflu' data = smallest_data*200000 expected_hash = hashlib.sha1(data*num_threads).hexdigest() def hash_in_chunks(chunk_size, event): index = 0 while index < len(data): hasher.update(data[index:index+chunk_size]) index += chunk_size event.set() events = [] for threadnum in range(num_threads): chunk_size = len(data) // (10**threadnum) assert chunk_size > 0 assert chunk_size % len(smallest_data) == 0 event = threading.Event() events.append(event) threading.Thread(target=hash_in_chunks, args=(chunk_size, event)).start() for event in events: event.wait() self.assertEqual(expected_hash, hasher.hexdigest()) def test_main(): support.run_unittest(HashLibTestCase) if __name__ == "__main__": test_main()
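# --- Illustrative note (not part of the test suite). ---
# The equivalence exercised via constructors_to_test: the named constructor
# and hashlib.new() agree byte for byte. The digest below is the known
# vector from test_case_md5_1 above:
#
#     >>> import hashlib
#     >>> hashlib.new('md5', b'abc').hexdigest()
#     '900150983cd24fb0d6963f7d28e17f72'
#     >>> hashlib.md5(b'abc').hexdigest()
#     '900150983cd24fb0d6963f7d28e17f72'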
bsd-3-clause
-4,712,899,883,000,602,000
36.964706
85
0.617973
false
carlgao/lenga
images/lenny64-peon/usr/share/python-support/mercurial-common/hgext/purge.py
1
5682
# Copyright (C) 2006 - Marco Barisione <marco@barisione.org> # # This is a small extension for Mercurial (http://www.selenic.com/mercurial) # that removes files not known to mercurial # # This program was inspired by the "cvspurge" script contained in CVS utilities # (http://www.red-bean.com/cvsutils/). # # To enable the "purge" extension put these lines in your ~/.hgrc: # [extensions] # hgext.purge = # # For help on the usage of "hg purge" use: # hg help purge # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. from mercurial import util, commands from mercurial.i18n import _ import os def dopurge(ui, repo, dirs=None, act=True, ignored=False, abort_on_err=False, eol='\n', force=False, include=None, exclude=None): def error(msg): if abort_on_err: raise util.Abort(msg) else: ui.warn(_('warning: %s\n') % msg) def remove(remove_func, name): if act: try: remove_func(os.path.join(repo.root, name)) except OSError: error(_('%s cannot be removed') % name) else: ui.write('%s%s' % (name, eol)) if not force: _check_fs(ui, repo) directories = [] files = [] missing = [] roots, match, anypats = util.cmdmatcher(repo.root, repo.getcwd(), dirs, include, exclude) for src, f, st in repo.dirstate.statwalk(files=roots, match=match, ignored=ignored, directories=True): if src == 'd': directories.append(f) elif src == 'm': missing.append(f) elif src == 'f' and f not in repo.dirstate: files.append(f) directories.sort() for f in files: if f not in repo.dirstate: ui.note(_('Removing file %s\n') % f) remove(os.remove, f) for f in directories[::-1]: if match(f) and not os.listdir(repo.wjoin(f)): ui.note(_('Removing directory %s\n') % f) remove(os.rmdir, f) def _check_fs(ui, repo): """Abort if there is the chance of having problems with name-mangling fs In a name mangling filesystem (e.g. a case insensitive one) dirstate.walk() can yield filenames different from the ones stored in the dirstate. This already confuses the status and add commands, but with purge this may cause data loss. To prevent this, this function will abort if there are uncommitted changes. """ # We can't use (files, match) to do a partial walk here - we wouldn't # notice a modified README file if the user ran "hg purge readme" modified, added, removed, deleted = repo.status()[:4] if modified or added or removed or deleted: if not util.checkfolding(repo.path) and not ui.quiet: ui.warn(_("Purging on name mangling filesystems is not " "fully supported.\n")) raise util.Abort(_("outstanding uncommitted changes")) def purge(ui, repo, *dirs, **opts): '''removes files not tracked by mercurial Delete files not known to mercurial. This is useful to test local and uncommitted changes in the otherwise clean source tree. This means that purge will delete: - Unknown files: files marked with "?"
by "hg status" - Ignored files: files usually ignored by Mercurial because they match a pattern in a ".hgignore" file - Empty directories: in fact Mercurial ignores directories unless they contain files under source control management But it will leave untouched: - Unmodified tracked files - Modified tracked files - New files added to the repository (with "hg add") If directories are given on the command line, only files in these directories are considered. Be careful with purge: you could irreversibly delete some files you forgot to add to the repository. If you only want to print the list of files that this program would delete, use the --print option. ''' act = not opts['print'] ignored = bool(opts['all']) abort_on_err = bool(opts['abort_on_err']) eol = opts['print0'] and '\0' or '\n' if eol == '\0': # --print0 implies --print act = False force = bool(opts['force']) include = opts['include'] exclude = opts['exclude'] dopurge(ui, repo, dirs, act, ignored, abort_on_err, eol, force, include, exclude) cmdtable = { 'purge|clean': (purge, [('a', 'abort-on-err', None, _('abort if an error occurs')), ('', 'all', None, _('purge ignored files too')), ('f', 'force', None, _('purge even when there are uncommitted changes')), ('p', 'print', None, _('print the file names instead of deleting them')), ('0', 'print0', None, _('end filenames with NUL, for use with xargs' ' (implies -p)')), ] + commands.walkopts, _('hg purge [OPTION]... [DIR]...')) }
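# --- Illustrative usage sketch (not part of the original extension). ---
# The options registered in cmdtable above map to invocations such as:
#
#     hg purge --print        # only list what would be deleted
#     hg purge --all          # also delete ignored files
#     hg purge --print0       # NUL-separated output for xargs -0 (implies -p)
#     hg purge somedir        # restrict the purge to the given directory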
mit
3,707,048,103,799,443,500
36.381579
83
0.622492
false
stephane-martin/salt-debian-packaging
salt-2016.3.2/tests/eventlisten.py
2
3652
# -*- coding: utf-8 -*- ''' Use this script to dump the event data out to the terminal. It needs to know what the sock_dir is. This script is a generic tool to test event output. ''' # Import Python libs from __future__ import absolute_import, print_function import optparse import pprint import time import os # Import Salt libs import salt.utils.event # Import 3rd-party libs import salt.ext.six as six def parse(): ''' Parse the script command line inputs ''' parser = optparse.OptionParser() parser.add_option( '-s', '--sock-dir', dest='sock_dir', default='/var/run/salt', help=('Statically define the directory holding the salt unix ' 'sockets for communication') ) parser.add_option( '-n', '--node', dest='node', default='master', help=('State if this listener will attach to a master or a ' 'minion daemon, pass "master" or "minion"') ) parser.add_option( '-f', '--func_count', default='', help=('Return a count of the number of minions which have ' 'replied to a job with a given func.') ) parser.add_option( '-i', '--id', default='', help=('If connecting to a live master or minion, pass in the id') ) parser.add_option( '-t', '--transport', default='zeromq', help=('Transport to use. (Default: \'zeromq\')') ) options, args = parser.parse_args() opts = {} for k, v in six.iteritems(options.__dict__): if v is not None: opts[k] = v opts['sock_dir'] = os.path.join(opts['sock_dir'], opts['node']) if 'minion' in options.node: if args: opts['id'] = args[0] return opts if options.id: opts['id'] = options.id else: opts['id'] = options.node return opts def check_access_and_print_warning(sock_dir): ''' Check if this user is able to access the socket directory and print a warning if not ''' if (os.access(sock_dir, os.R_OK) and os.access(sock_dir, os.W_OK) and os.access(sock_dir, os.X_OK)): return else: print('WARNING: Events will not be reported' ' (not able to access {0})'.format(sock_dir)) def listen(opts): ''' Attach to the pub socket and grab messages ''' event = salt.utils.event.get_event( opts['node'], sock_dir=opts['sock_dir'], transport=opts['transport'], opts=opts, listen=True ) check_access_and_print_warning(opts['sock_dir']) print(event.puburi) jid_counter = 0 found_minions = [] while True: ret = event.get_event(full=True) if ret is None: continue if opts['func_count']: data = ret.get('data', False) if data: if 'id' in six.iterkeys(data) and data.get('id', False) not in found_minions: if data['fun'] == opts['func_count']: jid_counter += 1 found_minions.append(data['id']) print('Reply received from [{0}]. Total replies now: [{1}].'.format(ret['data']['id'], jid_counter)) continue else: print('Event fired at {0}'.format(time.asctime())) print('*' * 25) print('Tag: {0}'.format(ret['tag'])) print('Data:') pprint.pprint(ret['data']) if __name__ == '__main__': opts = parse() listen(opts)
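# --- Illustrative usage sketch (not part of the original script). ---
# The options defined in parse() map to invocations such as (paths and ids
# below are placeholders):
#
#     python eventlisten.py --sock-dir /var/run/salt --node master
#     python eventlisten.py -n minion -i myminion
#     python eventlisten.py -f test.ping   # count minions replying to test.ping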
apache-2.0
-4,836,599,035,430,854,000
25.085714
124
0.531216
false
EmreAtes/spack
var/spack/repos/builtin/packages/py-cpuinfo/package.py
5
1584
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class PyCpuinfo(PythonPackage): """Get CPU info with pure Python 2 & 3""" homepage = "https://github.com/workhorsy/py-cpuinfo" url = "https://pypi.io/packages/source/p/py-cpuinfo/py-cpuinfo-0.2.3.tar.gz" version('0.2.3', '780ff46a0e122af09cb2c40b2706c6dc') depends_on('py-setuptools', type='build')
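# --- Illustrative note (not part of the original package). ---
# A recipe like this is driven through the Spack CLI, e.g.:
#
#     spack install py-cpuinfo@0.2.3
#     spack find py-cpuinfo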
lgpl-2.1
-5,139,975,135,684,637,000
43
85
0.672348
false
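For orientation, a short sketch of what the packaged library does once installed. The get_cpu_info call comes from py-cpuinfo's own documented API rather than from the Spack recipe above, so treat the exact dictionary keys as assumptions:

# Assumes the package is available, e.g. after `spack install py-cpuinfo`
# and activating its environment.
import cpuinfo

info = cpuinfo.get_cpu_info()        # pure-Python probe, returns a dict
print(info.get('brand', 'unknown'))  # CPU model string, if detected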
nitzmahone/ansible
lib/ansible/modules/network/cloudengine/ce_aaa_server.py
7
70237
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ce_aaa_server version_added: "2.4" short_description: Manages AAA server global configuration on HUAWEI CloudEngine switches. description: - Manages AAA server global configuration on HUAWEI CloudEngine switches. author: - wangdezhuang (@QijunPan) options: state: description: - Specify desired state of the resource. default: present choices: ['present', 'absent'] authen_scheme_name: description: - Name of an authentication scheme. The value is a string of 1 to 32 characters. first_authen_mode: description: - Preferred authentication mode. choices: ['invalid', 'local', 'hwtacacs', 'radius', 'none'] author_scheme_name: description: - Name of an authorization scheme. The value is a string of 1 to 32 characters. first_author_mode: description: - Preferred authorization mode. choices: ['invalid', 'local', 'hwtacacs', 'if-authenticated', 'none'] acct_scheme_name: description: - Accounting scheme name. The value is a string of 1 to 32 characters. accounting_mode: description: - Accounting Mode. choices: ['invalid', 'hwtacacs', 'radius', 'none'] domain_name: description: - Name of a domain. The value is a string of 1 to 64 characters. radius_server_group: description: - RADIUS server group's name. The value is a string of 1 to 32 case-insensitive characters. hwtacas_template: description: - Name of a HWTACACS template. The value is a string of 1 to 32 case-insensitive characters. local_user_group: description: - Name of the user group where the user belongs. The user inherits all the rights of the user group. The value is a string of 1 to 32 characters. 
''' EXAMPLES = ''' - name: AAA server test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: "Radius authentication Server Basic settings" ce_aaa_server: state: present authen_scheme_name: test1 first_authen_mode: radius radius_server_group: test2 provider: "{{ cli }}" - name: "Undo radius authentication Server Basic settings" ce_aaa_server: state: absent authen_scheme_name: test1 first_authen_mode: radius radius_server_group: test2 provider: "{{ cli }}" - name: "Hwtacacs accounting Server Basic settings" ce_aaa_server: state: present acct_scheme_name: test1 accounting_mode: hwtacacs hwtacas_template: test2 provider: "{{ cli }}" - name: "Undo hwtacacs accounting Server Basic settings" ce_aaa_server: state: absent acct_scheme_name: test1 accounting_mode: hwtacacs hwtacas_template: test2 provider: "{{ cli }}" ''' RETURN = ''' changed: description: check to see if a change was made on the device returned: always type: boolean sample: true proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"accounting_mode": "hwtacacs", "acct_scheme_name": "test1", "hwtacas_template": "test2", "state": "present"} existing: description: k/v pairs of existing aaa server returned: always type: dict sample: {"accounting scheme": [["hwtacacs"], ["default"]], "hwtacacs template": ["huawei"]} end_state: description: k/v pairs of aaa params after module execution returned: always type: dict sample: {"accounting scheme": [["hwtacacs", "test1"]], "hwtacacs template": ["huawei", "test2"]} updates: description: command sent to the device returned: always type: list sample: ["accounting-scheme test1", "accounting-mode hwtacacs", "hwtacacs server template test2", "hwtacacs enable"] ''' import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec SUCCESS = """success""" FAILED = """failed""" INVALID_SCHEME_CHAR = [' ', '/', '\\', ':', '*', '?', '"', '|', '<', '>'] INVALID_DOMAIN_CHAR = [' ', '*', '?', '"', '\''] INVALID_GROUP_CHAR = ['/', '\\', ':', '*', '?', '"', '|', '<', '>'] # get authentication scheme CE_GET_AUTHENTICATION_SCHEME = """ <filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authenticationSchemes> <authenticationScheme> <authenSchemeName></authenSchemeName> <firstAuthenMode></firstAuthenMode> <secondAuthenMode></secondAuthenMode> </authenticationScheme> </authenticationSchemes> </aaa> </filter> """ # merge authentication scheme CE_MERGE_AUTHENTICATION_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authenticationSchemes> <authenticationScheme operation="merge"> <authenSchemeName>%s</authenSchemeName> <firstAuthenMode>%s</firstAuthenMode> <secondAuthenMode>invalid</secondAuthenMode> </authenticationScheme> </authenticationSchemes> </aaa> </config> """ # create authentication scheme CE_CREATE_AUTHENTICATION_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authenticationSchemes> <authenticationScheme operation="create"> <authenSchemeName>%s</authenSchemeName> <firstAuthenMode>%s</firstAuthenMode> <secondAuthenMode>invalid</secondAuthenMode> </authenticationScheme> </authenticationSchemes> </aaa> </config> 
""" # delete authentication scheme CE_DELETE_AUTHENTICATION_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authenticationSchemes> <authenticationScheme operation="delete"> <authenSchemeName>%s</authenSchemeName> <firstAuthenMode>%s</firstAuthenMode> <secondAuthenMode>invalid</secondAuthenMode> </authenticationScheme> </authenticationSchemes> </aaa> </config> """ # get authorization scheme CE_GET_AUTHORIZATION_SCHEME = """ <filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authorizationSchemes> <authorizationScheme> <authorSchemeName></authorSchemeName> <firstAuthorMode></firstAuthorMode> <secondAuthorMode></secondAuthorMode> </authorizationScheme> </authorizationSchemes> </aaa> </filter> """ # merge authorization scheme CE_MERGE_AUTHORIZATION_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authorizationSchemes> <authorizationScheme operation="merge"> <authorSchemeName>%s</authorSchemeName> <firstAuthorMode>%s</firstAuthorMode> <secondAuthorMode>invalid</secondAuthorMode> </authorizationScheme> </authorizationSchemes> </aaa> </config> """ # create authorization scheme CE_CREATE_AUTHORIZATION_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authorizationSchemes> <authorizationScheme operation="create"> <authorSchemeName>%s</authorSchemeName> <firstAuthorMode>%s</firstAuthorMode> <secondAuthorMode>invalid</secondAuthorMode> </authorizationScheme> </authorizationSchemes> </aaa> </config> """ # delete authorization scheme CE_DELETE_AUTHORIZATION_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <authorizationSchemes> <authorizationScheme operation="delete"> <authorSchemeName>%s</authorSchemeName> <firstAuthorMode>%s</firstAuthorMode> <secondAuthorMode>invalid</secondAuthorMode> </authorizationScheme> </authorizationSchemes> </aaa> </config> """ # get accounting scheme CE_GET_ACCOUNTING_SCHEME = """ <filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <accountingSchemes> <accountingScheme> <acctSchemeName></acctSchemeName> <accountingMode></accountingMode> </accountingScheme> </accountingSchemes> </aaa> </filter> """ # merge accounting scheme CE_MERGE_ACCOUNTING_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <accountingSchemes> <accountingScheme operation="merge"> <acctSchemeName>%s</acctSchemeName> <accountingMode>%s</accountingMode> </accountingScheme> </accountingSchemes> </aaa> </config> """ # create accounting scheme CE_CREATE_ACCOUNTING_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <accountingSchemes> <accountingScheme operation="create"> <acctSchemeName>%s</acctSchemeName> <accountingMode>%s</accountingMode> </accountingScheme> </accountingSchemes> </aaa> </config> """ # delete accounting scheme CE_DELETE_ACCOUNTING_SCHEME = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <accountingSchemes> <accountingScheme operation="delete"> <acctSchemeName>%s</acctSchemeName> <accountingMode>%s</accountingMode> </accountingScheme> </accountingSchemes> </aaa> </config> """ # get authentication domain CE_GET_AUTHENTICATION_DOMAIN = """ 
<filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain> <domainName></domainName> <authenSchemeName></authenSchemeName> </domain> </domains> </aaa> </filter> """ # merge authentication domain CE_MERGE_AUTHENTICATION_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="merge"> <domainName>%s</domainName> <authenSchemeName>%s</authenSchemeName> </domain> </domains> </aaa> </config> """ # create authentication domain CE_CREATE_AUTHENTICATION_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="create"> <domainName>%s</domainName> <authenSchemeName>%s</authenSchemeName> </domain> </domains> </aaa> </config> """ # delete authentication domain CE_DELETE_AUTHENTICATION_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="delete"> <domainName>%s</domainName> <authenSchemeName>%s</authenSchemeName> </domain> </domains> </aaa> </config> """ # get authorization domain CE_GET_AUTHORIZATION_DOMAIN = """ <filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain> <domainName></domainName> <authorSchemeName></authorSchemeName> </domain> </domains> </aaa> </filter> """ # merge authorization domain CE_MERGE_AUTHORIZATION_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="merge"> <domainName>%s</domainName> <authorSchemeName>%s</authorSchemeName> </domain> </domains> </aaa> </config> """ # create authorization domain CE_CREATE_AUTHORIZATION_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="create"> <domainName>%s</domainName> <authorSchemeName>%s</authorSchemeName> </domain> </domains> </aaa> </config> """ # delete authorization domain CE_DELETE_AUTHORIZATION_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="delete"> <domainName>%s</domainName> <authorSchemeName>%s</authorSchemeName> </domain> </domains> </aaa> </config> """ # get accounting domain CE_GET_ACCOUNTING_DOMAIN = """ <filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain> <domainName></domainName> <acctSchemeName></acctSchemeName> </domain> </domains> </aaa> </filter> """ # merge accounting domain CE_MERGE_ACCOUNTING_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="merge"> <domainName>%s</domainName> <acctSchemeName>%s</acctSchemeName> </domain> </domains> </aaa> </config> """ # create accounting domain CE_CREATE_ACCOUNTING_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="create"> <domainName>%s</domainName> <acctSchemeName>%s</acctSchemeName> </domain> </domains> </aaa> </config> """ # delete accounting domain CE_DELETE_ACCOUNTING_DOMAIN = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <domains> <domain operation="delete"> 
<domainName>%s</domainName> <acctSchemeName>%s</acctSchemeName> </domain> </domains> </aaa> </config> """ # get radius template CE_GET_RADIUS_TEMPLATE = """ <filter type="subtree"> <radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <rdsTemplates> <rdsTemplate> <groupName></groupName> <retransmissionCount></retransmissionCount> <retransmissionInterval></retransmissionInterval> </rdsTemplate> </rdsTemplates> </radius> </filter> """ # merge radius template CE_MERGE_RADIUS_TEMPLATE = """ <config> <radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <rdsTemplates> <rdsTemplate operation="merge"> <groupName>%s</groupName> <retransmissionCount>3</retransmissionCount> <retransmissionInterval>5</retransmissionInterval> </rdsTemplate> </rdsTemplates> </radius> </config> """ # create radius template CE_CREATE_RADIUS_TEMPLATE = """ <config> <radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <rdsTemplates> <rdsTemplate operation="create"> <groupName>%s</groupName> <retransmissionCount>3</retransmissionCount> <retransmissionInterval>5</retransmissionInterval> </rdsTemplate> </rdsTemplates> </radius> </config> """ # delete radius template CE_DELETE_RADIUS_TEMPLATE = """ <config> <radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <rdsTemplates> <rdsTemplate operation="delete"> <groupName>%s</groupName> <retransmissionCount>3</retransmissionCount> <retransmissionInterval>5</retransmissionInterval> </rdsTemplate> </rdsTemplates> </radius> </config> """ # get hwtacacs template CE_GET_HWTACACS_TEMPLATE = """ <filter type="subtree"> <hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <hwTacTempCfgs> <hwTacTempCfg> <templateName></templateName> <isDomainInclude></isDomainInclude> <responseTimeout></responseTimeout> </hwTacTempCfg> </hwTacTempCfgs> </hwtacacs> </filter> """ # merge hwtacacs template CE_MERGE_HWTACACS_TEMPLATE = """ <config> <hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <hwTacTempCfgs> <hwTacTempCfg operation="merge"> <templateName>%s</templateName> <isDomainInclude>true</isDomainInclude> <responseTimeout>5</responseTimeout> </hwTacTempCfg> </hwTacTempCfgs> </hwtacacs> </config> """ # create hwtacacs template CE_CREATE_HWTACACS_TEMPLATE = """ <config> <hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <hwTacTempCfgs> <hwTacTempCfg operation="create"> <templateName>%s</templateName> <isDomainInclude>true</isDomainInclude> <responseTimeout>5</responseTimeout> </hwTacTempCfg> </hwTacTempCfgs> </hwtacacs> </config> """ # delete hwtacacs template CE_DELETE_HWTACACS_TEMPLATE = """ <config> <hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <hwTacTempCfgs> <hwTacTempCfg operation="delete"> <templateName>%s</templateName> </hwTacTempCfg> </hwTacTempCfgs> </hwtacacs> </config> """ # get radius client CE_GET_RADIUS_CLIENT = """ <filter type="subtree"> <radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <rdsClient> <isEnable></isEnable> <coaEnable></coaEnable> <authClientIdentifier></authClientIdentifier> </rdsClient> </radius> </filter> """ # merge radius client CE_MERGE_RADIUS_CLIENT = """ <config> <radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <rdsClient 
operation="merge"> <isEnable>%s</isEnable> </rdsClient> </radius> </config> """ # get hwtacacs global config CE_GET_HWTACACS_GLOBAL_CFG = """ <filter type="subtree"> <hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <hwTacGlobalCfg> <isEnable></isEnable> <totalTemplateNo></totalTemplateNo> <totalSrvNo></totalSrvNo> </hwTacGlobalCfg> </hwtacacs> </filter> """ # merge hwtacacs global config CE_MERGE_HWTACACS_GLOBAL_CFG = """ <config> <hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <hwTacGlobalCfg operation="merge"> <isEnable>%s</isEnable> </hwTacGlobalCfg> </hwtacacs> </config> """ # get local user group CE_GET_LOCAL_USER_GROUP = """ <filter type="subtree"> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <userGroups> <userGroup> <userGroupName></userGroupName> </userGroup> </userGroups> </aaa> </filter> """ # merge local user group CE_MERGE_LOCAL_USER_GROUP = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <userGroups> <userGroup operation="merge"> <userGroupName>%s</userGroupName> </userGroup> </userGroups> </aaa> </config> """ # delete local user group CE_DELETE_LOCAL_USER_GROUP = """ <config> <aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <userGroups> <userGroup operation="delete"> <userGroupName>%s</userGroupName> </userGroup> </userGroups> </aaa> </config> """ class AaaServer(object): """ Manages aaa configuration """ def netconf_get_config(self, **kwargs): """ Get configure by netconf """ module = kwargs["module"] conf_str = kwargs["conf_str"] xml_str = get_nc_config(module, conf_str) return xml_str def netconf_set_config(self, **kwargs): """ Set configure by netconf """ module = kwargs["module"] conf_str = kwargs["conf_str"] recv_xml = set_nc_config(module, conf_str) return recv_xml def get_authentication_scheme(self, **kwargs): """ Get scheme of authentication """ module = kwargs["module"] conf_str = CE_GET_AUTHENTICATION_SCHEME xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<firstAuthenMode>(.*)</firstAuthenMode>.*\s*' r'<secondAuthenMode>(.*)</secondAuthenMode>.*\s*' r'<authenSchemeName>(.*)</authenSchemeName>.*', xml_str) if re_find: return re_find else: return result def get_authentication_domain(self, **kwargs): """ Get domain of authentication """ module = kwargs["module"] conf_str = CE_GET_AUTHENTICATION_DOMAIN xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<domainName>(.*)</domainName>.*\s*' r'<authenSchemeName>(.*)</authenSchemeName>.*', xml_str) if re_find: return re_find else: return result def merge_authentication_scheme(self, **kwargs): """ Merge scheme of authentication """ authen_scheme_name = kwargs["authen_scheme_name"] first_authen_mode = kwargs["first_authen_mode"] module = kwargs["module"] conf_str = CE_MERGE_AUTHENTICATION_SCHEME % ( authen_scheme_name, first_authen_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge authentication scheme failed.') cmds = [] cmd = "authentication-scheme %s" % authen_scheme_name cmds.append(cmd) cmd = "authentication-mode %s" % first_authen_mode cmds.append(cmd) return cmds def merge_authentication_domain(self, 
**kwargs): """ Merge domain of authentication """ domain_name = kwargs["domain_name"] authen_scheme_name = kwargs["authen_scheme_name"] module = kwargs["module"] conf_str = CE_MERGE_AUTHENTICATION_DOMAIN % ( domain_name, authen_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge authentication domain failed.') cmds = [] cmd = "domain %s" % domain_name cmds.append(cmd) cmd = "authentication-scheme %s" % authen_scheme_name cmds.append(cmd) return cmds def create_authentication_scheme(self, **kwargs): """ Create scheme of authentication """ authen_scheme_name = kwargs["authen_scheme_name"] first_authen_mode = kwargs["first_authen_mode"] module = kwargs["module"] conf_str = CE_CREATE_AUTHENTICATION_SCHEME % ( authen_scheme_name, first_authen_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create authentication scheme failed.') cmds = [] cmd = "authentication-scheme %s" % authen_scheme_name cmds.append(cmd) cmd = "authentication-mode %s" % first_authen_mode cmds.append(cmd) return cmds def create_authentication_domain(self, **kwargs): """ Create domain of authentication """ domain_name = kwargs["domain_name"] authen_scheme_name = kwargs["authen_scheme_name"] module = kwargs["module"] conf_str = CE_CREATE_AUTHENTICATION_DOMAIN % ( domain_name, authen_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create authentication domain failed.') cmds = [] cmd = "domain %s" % domain_name cmds.append(cmd) cmd = "authentication-scheme %s" % authen_scheme_name cmds.append(cmd) return cmds def delete_authentication_scheme(self, **kwargs): """ Delete scheme of authentication """ authen_scheme_name = kwargs["authen_scheme_name"] first_authen_mode = kwargs["first_authen_mode"] module = kwargs["module"] if authen_scheme_name == "default": return SUCCESS conf_str = CE_DELETE_AUTHENTICATION_SCHEME % ( authen_scheme_name, first_authen_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete authentication scheme failed.') cmds = [] cmd = "undo authentication-scheme %s" % authen_scheme_name cmds.append(cmd) cmd = "authentication-mode none" cmds.append(cmd) return cmds def delete_authentication_domain(self, **kwargs): """ Delete domain of authentication """ domain_name = kwargs["domain_name"] authen_scheme_name = kwargs["authen_scheme_name"] module = kwargs["module"] if domain_name == "default": return SUCCESS conf_str = CE_DELETE_AUTHENTICATION_DOMAIN % ( domain_name, authen_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete authentication domain failed.') cmds = [] cmd = "undo authentication-scheme" cmds.append(cmd) cmd = "undo domain %s" % domain_name cmds.append(cmd) return cmds def get_authorization_scheme(self, **kwargs): """ Get scheme of authorization """ module = kwargs["module"] conf_str = CE_GET_AUTHORIZATION_SCHEME xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<firstAuthorMode>(.*)</firstAuthorMode>.*\s*' r'<secondAuthorMode>(.*)</secondAuthorMode>.*\s*' r'<authorSchemeName>(.*)</authorSchemeName>.*', xml_str) if re_find: return re_find else: return result def get_authorization_domain(self, **kwargs): """ Get 
domain of authorization """ module = kwargs["module"] conf_str = CE_GET_AUTHORIZATION_DOMAIN xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<domainName>(.*)</domainName>.*\s*' r'<authorSchemeName>(.*)</authorSchemeName>.*', xml_str) if re_find: return re_find else: return result def merge_authorization_scheme(self, **kwargs): """ Merge scheme of authorization """ author_scheme_name = kwargs["author_scheme_name"] first_author_mode = kwargs["first_author_mode"] module = kwargs["module"] conf_str = CE_MERGE_AUTHORIZATION_SCHEME % ( author_scheme_name, first_author_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge authorization scheme failed.') cmds = [] cmd = "authorization-scheme %s" % author_scheme_name cmds.append(cmd) cmd = "authorization-mode %s" % first_author_mode cmds.append(cmd) return cmds def merge_authorization_domain(self, **kwargs): """ Merge domain of authorization """ domain_name = kwargs["domain_name"] author_scheme_name = kwargs["author_scheme_name"] module = kwargs["module"] conf_str = CE_MERGE_AUTHORIZATION_DOMAIN % ( domain_name, author_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge authorization domain failed.') cmds = [] cmd = "domain %s" % domain_name cmds.append(cmd) cmd = "authorization-scheme %s" % author_scheme_name cmds.append(cmd) return cmds def create_authorization_scheme(self, **kwargs): """ Create scheme of authorization """ author_scheme_name = kwargs["author_scheme_name"] first_author_mode = kwargs["first_author_mode"] module = kwargs["module"] conf_str = CE_CREATE_AUTHORIZATION_SCHEME % ( author_scheme_name, first_author_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create authorization scheme failed.') cmds = [] cmd = "authorization-scheme %s" % author_scheme_name cmds.append(cmd) cmd = "authorization-mode %s" % first_author_mode cmds.append(cmd) return cmds def create_authorization_domain(self, **kwargs): """ Create domain of authorization """ domain_name = kwargs["domain_name"] author_scheme_name = kwargs["author_scheme_name"] module = kwargs["module"] conf_str = CE_CREATE_AUTHORIZATION_DOMAIN % ( domain_name, author_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create authorization domain failed.') cmds = [] cmd = "domain %s" % domain_name cmds.append(cmd) cmd = "authorization-scheme %s" % author_scheme_name cmds.append(cmd) return cmds def delete_authorization_scheme(self, **kwargs): """ Delete scheme of authorization """ author_scheme_name = kwargs["author_scheme_name"] first_author_mode = kwargs["first_author_mode"] module = kwargs["module"] if author_scheme_name == "default": return SUCCESS conf_str = CE_DELETE_AUTHORIZATION_SCHEME % ( author_scheme_name, first_author_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete authorization scheme failed.') cmds = [] cmd = "undo authorization-scheme %s" % author_scheme_name cmds.append(cmd) cmd = "authorization-mode none" cmds.append(cmd) return cmds def delete_authorization_domain(self, **kwargs): """ Delete domain of authorization """ domain_name = kwargs["domain_name"] author_scheme_name = 
kwargs["author_scheme_name"] module = kwargs["module"] if domain_name == "default": return SUCCESS conf_str = CE_DELETE_AUTHORIZATION_DOMAIN % ( domain_name, author_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete authorization domian failed.') cmds = [] cmd = "undo authorization-scheme" cmds.append(cmd) cmd = "undo domain %s" % domain_name cmds.append(cmd) return cmds def get_accounting_scheme(self, **kwargs): """ Get scheme of accounting """ module = kwargs["module"] conf_str = CE_GET_ACCOUNTING_SCHEME xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<accountingMode>(.*)</accountingMode>.*\s*' r'<acctSchemeName>(.*)</acctSchemeName>.*', xml_str) if re_find: return re_find else: return result def get_accounting_domain(self, **kwargs): """ Get domain of accounting """ module = kwargs["module"] conf_str = CE_GET_ACCOUNTING_DOMAIN xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<domainName>(.*)</domainName>.*\s*' r'<acctSchemeName>(.*)</acctSchemeName>.*', xml_str) if re_find: return re_find else: return result def merge_accounting_scheme(self, **kwargs): """ Merge scheme of accounting """ acct_scheme_name = kwargs["acct_scheme_name"] accounting_mode = kwargs["accounting_mode"] module = kwargs["module"] conf_str = CE_MERGE_ACCOUNTING_SCHEME % ( acct_scheme_name, accounting_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge accounting scheme failed.') cmds = [] cmd = "accounting-scheme %s" % acct_scheme_name cmds.append(cmd) cmd = "accounting-mode %s" % accounting_mode cmds.append(cmd) return cmds def merge_accounting_domain(self, **kwargs): """ Merge domain of accounting """ domain_name = kwargs["domain_name"] acct_scheme_name = kwargs["acct_scheme_name"] module = kwargs["module"] conf_str = CE_MERGE_ACCOUNTING_DOMAIN % (domain_name, acct_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge accounting domain failed.') cmds = [] cmd = "domain %s" % domain_name cmds.append(cmd) cmd = "accounting-scheme %s" % acct_scheme_name cmds.append(cmd) return cmds def create_accounting_scheme(self, **kwargs): """ Create scheme of accounting """ acct_scheme_name = kwargs["acct_scheme_name"] accounting_mode = kwargs["accounting_mode"] module = kwargs["module"] conf_str = CE_CREATE_ACCOUNTING_SCHEME % ( acct_scheme_name, accounting_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create accounting scheme failed.') cmds = [] cmd = "accounting-scheme %s" % acct_scheme_name cmds.append(cmd) cmd = "accounting-mode %s" % accounting_mode cmds.append(cmd) return cmds def create_accounting_domain(self, **kwargs): """ Create domain of accounting """ domain_name = kwargs["domain_name"] acct_scheme_name = kwargs["acct_scheme_name"] module = kwargs["module"] conf_str = CE_CREATE_ACCOUNTING_DOMAIN % ( domain_name, acct_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create accounting domain failed.') cmds = [] cmd = "domain %s" % domain_name cmds.append(cmd) cmd = "accounting-scheme %s" % acct_scheme_name 
cmds.append(cmd) return cmds def delete_accounting_scheme(self, **kwargs): """ Delete scheme of accounting """ acct_scheme_name = kwargs["acct_scheme_name"] accounting_mode = kwargs["accounting_mode"] module = kwargs["module"] if acct_scheme_name == "default": return SUCCESS conf_str = CE_DELETE_ACCOUNTING_SCHEME % ( acct_scheme_name, accounting_mode) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete accounting scheme failed.') cmds = [] cmd = "undo accounting-scheme %s" % acct_scheme_name cmds.append(cmd) cmd = "accounting-mode none" cmds.append(cmd) return cmds def delete_accounting_domain(self, **kwargs): """ Delete domain of accounting """ domain_name = kwargs["domain_name"] acct_scheme_name = kwargs["acct_scheme_name"] module = kwargs["module"] if domain_name == "default": return SUCCESS conf_str = CE_DELETE_ACCOUNTING_DOMAIN % ( domain_name, acct_scheme_name) xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete accounting domain failed.') cmds = [] cmd = "undo domain %s" % domain_name cmds.append(cmd) cmd = "undo accounting-scheme" cmds.append(cmd) return cmds def get_radius_template(self, **kwargs): """ Get radius template """ module = kwargs["module"] conf_str = CE_GET_RADIUS_TEMPLATE xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<groupName>(.*)</groupName>.*', xml_str) if re_find: return re_find else: return result def merge_radius_template(self, **kwargs): """ Merge radius template """ radius_server_group = kwargs["radius_server_group"] module = kwargs["module"] conf_str = CE_MERGE_RADIUS_TEMPLATE % radius_server_group xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge radius template failed.') cmds = [] cmd = "radius server group %s" % radius_server_group cmds.append(cmd) return cmds def create_radius_template(self, **kwargs): """ Create radius template """ radius_server_group = kwargs["radius_server_group"] module = kwargs["module"] conf_str = CE_CREATE_RADIUS_TEMPLATE % radius_server_group xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create radius template failed.') cmds = [] cmd = "radius server group %s" % radius_server_group cmds.append(cmd) return cmds def delete_radius_template(self, **kwargs): """ Delete radius template """ radius_server_group = kwargs["radius_server_group"] module = kwargs["module"] conf_str = CE_DELETE_RADIUS_TEMPLATE % radius_server_group xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete radius template failed.') cmds = [] cmd = "undo radius server group %s" % radius_server_group cmds.append(cmd) return cmds def get_radius_client(self, **kwargs): """ Get radius client """ module = kwargs["module"] conf_str = CE_GET_RADIUS_CLIENT xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<isEnable>(.*)</isEnable>.*', xml_str) if re_find: return re_find else: return result def merge_radius_client(self, **kwargs): """ Merge radius client """ enable = kwargs["isEnable"] module = kwargs["module"] conf_str = CE_MERGE_RADIUS_CLIENT % enable xml = self.netconf_set_config(module=module, conf_str=conf_str) if 
"<ok/>" not in xml: module.fail_json(msg='Error: Merge radius client failed.') cmds = [] if enable == "true": cmd = "radius enable" else: cmd = "undo radius enable" cmds.append(cmd) return cmds def get_hwtacacs_template(self, **kwargs): """ Get hwtacacs template """ module = kwargs["module"] conf_str = CE_GET_HWTACACS_TEMPLATE xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<templateName>(.*)</templateName>.*', xml_str) if re_find: return re_find else: return result def merge_hwtacacs_template(self, **kwargs): """ Merge hwtacacs template """ hwtacas_template = kwargs["hwtacas_template"] module = kwargs["module"] conf_str = CE_MERGE_HWTACACS_TEMPLATE % hwtacas_template xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge hwtacacs template failed.') cmds = [] cmd = "hwtacacs server template %s" % hwtacas_template cmds.append(cmd) return cmds def create_hwtacacs_template(self, **kwargs): """ Create hwtacacs template """ hwtacas_template = kwargs["hwtacas_template"] module = kwargs["module"] conf_str = CE_CREATE_HWTACACS_TEMPLATE % hwtacas_template xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Create hwtacacs template failed.') cmds = [] cmd = "hwtacacs server template %s" % hwtacas_template cmds.append(cmd) return cmds def delete_hwtacacs_template(self, **kwargs): """ Delete hwtacacs template """ hwtacas_template = kwargs["hwtacas_template"] module = kwargs["module"] conf_str = CE_DELETE_HWTACACS_TEMPLATE % hwtacas_template xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete hwtacacs template failed.') cmds = [] cmd = "undo hwtacacs server template %s" % hwtacas_template cmds.append(cmd) return cmds def get_hwtacacs_global_cfg(self, **kwargs): """ Get hwtacacs global configure """ module = kwargs["module"] conf_str = CE_GET_HWTACACS_GLOBAL_CFG xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<isEnable>(.*)</isEnable>.*', xml_str) if re_find: return re_find else: return result def merge_hwtacacs_global_cfg(self, **kwargs): """ Merge hwtacacs global configure """ enable = kwargs["isEnable"] module = kwargs["module"] conf_str = CE_MERGE_HWTACACS_GLOBAL_CFG % enable xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge hwtacacs global config failed.') cmds = [] if enable == "true": cmd = "hwtacacs enable" else: cmd = "undo hwtacacs enable" cmds.append(cmd) return cmds def get_local_user_group(self, **kwargs): """ Get local user group """ module = kwargs["module"] conf_str = CE_GET_LOCAL_USER_GROUP xml_str = self.netconf_get_config(module=module, conf_str=conf_str) result = list() if "<data/>" in xml_str: return result else: re_find = re.findall( r'.*<userGroupName>(.*)</userGroupName>.*', xml_str) if re_find: return re_find else: return result def merge_local_user_group(self, **kwargs): """ Merge local user group """ local_user_group = kwargs["local_user_group"] module = kwargs["module"] conf_str = CE_MERGE_LOCAL_USER_GROUP % local_user_group xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Merge local user group failed.') cmds = [] cmd = 
"user-group %s" % local_user_group cmds.append(cmd) return cmds def delete_local_user_group(self, **kwargs): """ Delete local user group """ local_user_group = kwargs["local_user_group"] module = kwargs["module"] conf_str = CE_DELETE_LOCAL_USER_GROUP % local_user_group xml = self.netconf_set_config(module=module, conf_str=conf_str) if "<ok/>" not in xml: module.fail_json(msg='Error: Delete local user group failed.') cmds = [] cmd = "undo user-group %s" % local_user_group cmds.append(cmd) return cmds def check_name(**kwargs): """ Check invalid name """ module = kwargs["module"] name = kwargs["name"] invalid_char = kwargs["invalid_char"] for item in invalid_char: if item in name: module.fail_json( msg='Error: invalid char %s is in the name %s.' % (item, name)) def check_module_argument(**kwargs): """ Check module argument """ module = kwargs["module"] authen_scheme_name = module.params['authen_scheme_name'] author_scheme_name = module.params['author_scheme_name'] acct_scheme_name = module.params['acct_scheme_name'] domain_name = module.params['domain_name'] radius_server_group = module.params['radius_server_group'] hwtacas_template = module.params['hwtacas_template'] local_user_group = module.params['local_user_group'] if authen_scheme_name: if len(authen_scheme_name) > 32: module.fail_json( msg='Error: authen_scheme_name %s ' 'is large than 32.' % authen_scheme_name) check_name(module=module, name=authen_scheme_name, invalid_char=INVALID_SCHEME_CHAR) if author_scheme_name: if len(author_scheme_name) > 32: module.fail_json( msg='Error: author_scheme_name %s ' 'is large than 32.' % author_scheme_name) check_name(module=module, name=author_scheme_name, invalid_char=INVALID_SCHEME_CHAR) if acct_scheme_name: if len(acct_scheme_name) > 32: module.fail_json( msg='Error: acct_scheme_name %s ' 'is large than 32.' % acct_scheme_name) check_name(module=module, name=acct_scheme_name, invalid_char=INVALID_SCHEME_CHAR) if domain_name: if len(domain_name) > 64: module.fail_json( msg='Error: domain_name %s ' 'is large than 64.' % domain_name) check_name(module=module, name=domain_name, invalid_char=INVALID_DOMAIN_CHAR) if domain_name == "-" or domain_name == "--": module.fail_json(msg='domain_name %s ' 'is invalid.' % domain_name) if radius_server_group and len(radius_server_group) > 32: module.fail_json(msg='Error: radius_server_group %s ' 'is large than 32.' % radius_server_group) if hwtacas_template and len(hwtacas_template) > 32: module.fail_json( msg='Error: hwtacas_template %s ' 'is large than 32.' % hwtacas_template) if local_user_group: if len(local_user_group) > 32: module.fail_json( msg='Error: local_user_group %s ' 'is large than 32.' 
% local_user_group) check_name(module=module, name=local_user_group, invalid_char=INVALID_GROUP_CHAR) def main(): """ Module main """ argument_spec = dict( state=dict(choices=['present', 'absent'], default='present'), authen_scheme_name=dict(type='str'), first_authen_mode=dict(choices=['invalid', 'local', 'hwtacacs', 'radius', 'none']), author_scheme_name=dict(type='str'), first_author_mode=dict(choices=['invalid', 'local', 'hwtacacs', 'if-authenticated', 'none']), acct_scheme_name=dict(type='str'), accounting_mode=dict(choices=['invalid', 'hwtacacs', 'radius', 'none']), domain_name=dict(type='str'), radius_server_group=dict(type='str'), hwtacas_template=dict(type='str'), local_user_group=dict(type='str') ) argument_spec.update(ce_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) check_module_argument(module=module) changed = False proposed = dict() existing = dict() end_state = dict() updates = [] state = module.params['state'] authen_scheme_name = module.params['authen_scheme_name'] first_authen_mode = module.params['first_authen_mode'] author_scheme_name = module.params['author_scheme_name'] first_author_mode = module.params['first_author_mode'] acct_scheme_name = module.params['acct_scheme_name'] accounting_mode = module.params['accounting_mode'] domain_name = module.params['domain_name'] radius_server_group = module.params['radius_server_group'] hwtacas_template = module.params['hwtacas_template'] local_user_group = module.params['local_user_group'] ce_aaa_server = AaaServer() if not ce_aaa_server: module.fail_json(msg='Error: init module failed.') # get proposed proposed["state"] = state if authen_scheme_name: proposed["authen_scheme_name"] = authen_scheme_name if first_authen_mode: proposed["first_authen_mode"] = first_authen_mode if author_scheme_name: proposed["author_scheme_name"] = author_scheme_name if first_author_mode: proposed["first_author_mode"] = first_author_mode if acct_scheme_name: proposed["acct_scheme_name"] = acct_scheme_name if accounting_mode: proposed["accounting_mode"] = accounting_mode if domain_name: proposed["domain_name"] = domain_name if radius_server_group: proposed["radius_server_group"] = radius_server_group if hwtacas_template: proposed["hwtacas_template"] = hwtacas_template if local_user_group: proposed["local_user_group"] = local_user_group # authentication if authen_scheme_name: scheme_exist = ce_aaa_server.get_authentication_scheme(module=module) scheme_new = (first_authen_mode.lower(), "invalid", authen_scheme_name.lower()) existing["authentication scheme"] = scheme_exist if state == "present": # present authentication scheme if len(scheme_exist) == 0: cmd = ce_aaa_server.create_authentication_scheme( module=module, authen_scheme_name=authen_scheme_name, first_authen_mode=first_authen_mode) updates.append(cmd) changed = True elif scheme_new not in scheme_exist: cmd = ce_aaa_server.merge_authentication_scheme( module=module, authen_scheme_name=authen_scheme_name, first_authen_mode=first_authen_mode) updates.append(cmd) changed = True # present authentication domain if domain_name: domain_exist = ce_aaa_server.get_authentication_domain( module=module) domain_new = (domain_name.lower(), authen_scheme_name.lower()) if len(domain_exist) == 0: cmd = ce_aaa_server.create_authentication_domain( module=module, domain_name=domain_name, authen_scheme_name=authen_scheme_name) updates.append(cmd) changed = True elif domain_new not in domain_exist: cmd = ce_aaa_server.merge_authentication_domain( module=module, 
domain_name=domain_name, authen_scheme_name=authen_scheme_name) updates.append(cmd) changed = True else: # absent authentication scheme if len(scheme_exist) == 0: pass elif scheme_new not in scheme_exist: pass else: cmd = ce_aaa_server.delete_authentication_scheme( module=module, authen_scheme_name=authen_scheme_name, first_authen_mode=first_authen_mode) updates.append(cmd) changed = True # absent authentication domain if domain_name: domain_exist = ce_aaa_server.get_authentication_domain( module=module) domain_new = (domain_name.lower(), authen_scheme_name.lower()) if len(domain_exist) == 0: pass elif domain_new not in domain_exist: pass else: cmd = ce_aaa_server.delete_authentication_domain( module=module, domain_name=domain_name, authen_scheme_name=authen_scheme_name) updates.append(cmd) changed = True scheme_end = ce_aaa_server.get_authentication_scheme(module=module) end_state["authentication scheme"] = scheme_end # authorization if author_scheme_name: scheme_exist = ce_aaa_server.get_authorization_scheme(module=module) scheme_new = (first_author_mode.lower(), "invalid", author_scheme_name.lower()) existing["authorization scheme"] = scheme_exist if state == "present": # present authorization scheme if len(scheme_exist) == 0: cmd = ce_aaa_server.create_authorization_scheme( module=module, author_scheme_name=author_scheme_name, first_author_mode=first_author_mode) updates.append(cmd) changed = True elif scheme_new not in scheme_exist: cmd = ce_aaa_server.merge_authorization_scheme( module=module, author_scheme_name=author_scheme_name, first_author_mode=first_author_mode) updates.append(cmd) changed = True # present authorization domain if domain_name: domain_exist = ce_aaa_server.get_authorization_domain( module=module) domain_new = (domain_name.lower(), author_scheme_name.lower()) if len(domain_exist) == 0: cmd = ce_aaa_server.create_authorization_domain( module=module, domain_name=domain_name, author_scheme_name=author_scheme_name) updates.append(cmd) changed = True elif domain_new not in domain_exist: cmd = ce_aaa_server.merge_authorization_domain( module=module, domain_name=domain_name, author_scheme_name=author_scheme_name) updates.append(cmd) changed = True else: # absent authorization scheme if len(scheme_exist) == 0: pass elif scheme_new not in scheme_exist: pass else: cmd = ce_aaa_server.delete_authorization_scheme( module=module, author_scheme_name=author_scheme_name, first_author_mode=first_author_mode) updates.append(cmd) changed = True # absent authorization domain if domain_name: domain_exist = ce_aaa_server.get_authorization_domain( module=module) domain_new = (domain_name.lower(), author_scheme_name.lower()) if len(domain_exist) == 0: pass elif domain_new not in domain_exist: pass else: cmd = ce_aaa_server.delete_authorization_domain( module=module, domain_name=domain_name, author_scheme_name=author_scheme_name) updates.append(cmd) changed = True scheme_end = ce_aaa_server.get_authorization_scheme(module=module) end_state["authorization scheme"] = scheme_end # accounting if acct_scheme_name: scheme_exist = ce_aaa_server.get_accounting_scheme(module=module) scheme_new = (accounting_mode.lower(), acct_scheme_name.lower()) existing["accounting scheme"] = scheme_exist if state == "present": # present accounting scheme if len(scheme_exist) == 0: cmd = ce_aaa_server.create_accounting_scheme( module=module, acct_scheme_name=acct_scheme_name, accounting_mode=accounting_mode) updates.append(cmd) changed = True elif scheme_new not in scheme_exist: cmd = 
                ce_aaa_server.merge_accounting_scheme(
                    module=module,
                    acct_scheme_name=acct_scheme_name,
                    accounting_mode=accounting_mode)
                updates.append(cmd)
                changed = True

            # present accounting domain
            if domain_name:
                domain_exist = ce_aaa_server.get_accounting_domain(
                    module=module)
                domain_new = (domain_name.lower(), acct_scheme_name.lower())

                if len(domain_exist) == 0:
                    cmd = ce_aaa_server.create_accounting_domain(
                        module=module,
                        domain_name=domain_name,
                        acct_scheme_name=acct_scheme_name)
                    updates.append(cmd)
                    changed = True
                elif domain_new not in domain_exist:
                    cmd = ce_aaa_server.merge_accounting_domain(
                        module=module,
                        domain_name=domain_name,
                        acct_scheme_name=acct_scheme_name)
                    updates.append(cmd)
                    changed = True

        else:
            # absent accounting scheme
            if len(scheme_exist) == 0:
                pass
            elif scheme_new not in scheme_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_accounting_scheme(
                    module=module,
                    acct_scheme_name=acct_scheme_name,
                    accounting_mode=accounting_mode)
                updates.append(cmd)
                changed = True

            # absent accounting domain
            if domain_name:
                domain_exist = ce_aaa_server.get_accounting_domain(
                    module=module)
                domain_new = (domain_name.lower(), acct_scheme_name.lower())

                if len(domain_exist) == 0:
                    pass
                elif domain_new not in domain_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_accounting_domain(
                        module=module,
                        domain_name=domain_name,
                        acct_scheme_name=acct_scheme_name)
                    updates.append(cmd)
                    changed = True

        scheme_end = ce_aaa_server.get_accounting_scheme(module=module)
        end_state["accounting scheme"] = scheme_end

    # radius group name
    if (authen_scheme_name and first_authen_mode.lower() == "radius") \
            or (acct_scheme_name and accounting_mode.lower() == "radius"):

        if not radius_server_group:
            module.fail_json(msg='please input radius_server_group when using radius.')

        rds_template_exist = ce_aaa_server.get_radius_template(module=module)
        rds_template_new = (radius_server_group)

        rds_enable_exist = ce_aaa_server.get_radius_client(module=module)

        existing["radius template"] = rds_template_exist
        existing["radius enable"] = rds_enable_exist

        if state == "present":
            # present radius group name
            if len(rds_template_exist) == 0:
                cmd = ce_aaa_server.create_radius_template(
                    module=module, radius_server_group=radius_server_group)
                updates.append(cmd)
                changed = True
            elif rds_template_new not in rds_template_exist:
                cmd = ce_aaa_server.merge_radius_template(
                    module=module, radius_server_group=radius_server_group)
                updates.append(cmd)
                changed = True

            rds_enable_new = ("true")

            if rds_enable_new not in rds_enable_exist:
                cmd = ce_aaa_server.merge_radius_client(
                    module=module, isEnable="true")
                updates.append(cmd)
                changed = True
        else:
            # absent radius group name
            if len(rds_template_exist) == 0:
                pass
            elif rds_template_new not in rds_template_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_radius_template(
                    module=module, radius_server_group=radius_server_group)
                updates.append(cmd)
                changed = True

            rds_enable_new = ("false")

            if rds_enable_new not in rds_enable_exist:
                cmd = ce_aaa_server.merge_radius_client(
                    module=module, isEnable="false")
                updates.append(cmd)
                changed = True

    else:
        pass

    rds_template_end = ce_aaa_server.get_radius_template(module=module)
    end_state["radius template"] = rds_template_end

    rds_enable_end = ce_aaa_server.get_radius_client(module=module)
    end_state["radius enable"] = rds_enable_end

    tmp_scheme = author_scheme_name

    # hwtacacs template
    if (authen_scheme_name and first_authen_mode.lower() == "hwtacacs") \
            or (tmp_scheme and first_author_mode.lower() == "hwtacacs") \
            or (acct_scheme_name and accounting_mode.lower() == "hwtacacs"):

        if not hwtacas_template:
            module.fail_json(
                msg='please input hwtacas_template when using hwtacacs.')

        hwtacacs_exist = ce_aaa_server.get_hwtacacs_template(module=module)
        hwtacacs_new = (hwtacas_template)

        hwtacacs_enable_exist = ce_aaa_server.get_hwtacacs_global_cfg(
            module=module)

        existing["hwtacacs template"] = hwtacacs_exist
        existing["hwtacacs enable"] = hwtacacs_enable_exist

        if state == "present":
            # present hwtacacs template
            if len(hwtacacs_exist) == 0:
                cmd = ce_aaa_server.create_hwtacacs_template(
                    module=module, hwtacas_template=hwtacas_template)
                updates.append(cmd)
                changed = True
            elif hwtacacs_new not in hwtacacs_exist:
                cmd = ce_aaa_server.merge_hwtacacs_template(
                    module=module, hwtacas_template=hwtacas_template)
                updates.append(cmd)
                changed = True

            hwtacacs_enable_new = ("true")

            if hwtacacs_enable_new not in hwtacacs_enable_exist:
                cmd = ce_aaa_server.merge_hwtacacs_global_cfg(
                    module=module, isEnable="true")
                updates.append(cmd)
                changed = True
        else:
            # absent hwtacacs template
            if len(hwtacacs_exist) == 0:
                pass
            elif hwtacacs_new not in hwtacacs_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_hwtacacs_template(
                    module=module, hwtacas_template=hwtacas_template)
                updates.append(cmd)
                changed = True

            hwtacacs_enable_new = ("false")

            if hwtacacs_enable_new not in hwtacacs_enable_exist:
                cmd = ce_aaa_server.merge_hwtacacs_global_cfg(
                    module=module, isEnable="false")
                updates.append(cmd)
                changed = True

    else:
        pass

    hwtacacs_end = ce_aaa_server.get_hwtacacs_template(module=module)
    end_state["hwtacacs template"] = hwtacacs_end

    hwtacacs_enable_end = ce_aaa_server.get_hwtacacs_global_cfg(
        module=module)
    end_state["hwtacacs enable"] = hwtacacs_enable_end

    # local user group
    if local_user_group:

        user_group_exist = ce_aaa_server.get_local_user_group(module=module)
        user_group_new = (local_user_group)

        existing["local user group"] = user_group_exist

        if state == "present":
            # present local user group
            if len(user_group_exist) == 0:
                cmd = ce_aaa_server.merge_local_user_group(
                    module=module, local_user_group=local_user_group)
                updates.append(cmd)
                changed = True
            elif user_group_new not in user_group_exist:
                cmd = ce_aaa_server.merge_local_user_group(
                    module=module, local_user_group=local_user_group)
                updates.append(cmd)
                changed = True
        else:
            # absent local user group
            if len(user_group_exist) == 0:
                pass
            elif user_group_new not in user_group_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_local_user_group(
                    module=module, local_user_group=local_user_group)
                updates.append(cmd)
                changed = True

    user_group_end = ce_aaa_server.get_local_user_group(module=module)
    end_state["local user group"] = user_group_end

    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)


if __name__ == '__main__':
    main()
gpl-3.0
6,941,948,130,217,425,000
31.367281
112
0.568034
false
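The main() routine in the record above repeats one idempotency pattern per AAA object: read the current device state, compare it with the requested state, and only then issue a create, merge, or delete. A distilled sketch of that flow; get_existing, create_obj, and merge_obj are hypothetical stand-ins for the module's get_*/create_*/merge_* methods, not real names from the file:

def ensure_present(module, desired_tuple, **desired):
    existing = get_existing(module=module)  # NETCONF read, like get_accounting_scheme
    if len(existing) == 0:
        return create_obj(module=module, **desired), True   # absent: create
    if desired_tuple not in existing:
        return merge_obj(module=module, **desired), True    # differs: merge
    return [], False                                        # already converged: no change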
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/EXT/provoking_vertex.py
9
2070
'''OpenGL extension EXT.provoking_vertex

This module customises the behaviour of the
OpenGL.raw.GL.EXT.provoking_vertex to provide a more
Python-friendly API

Overview (from the spec)

	This extension provides an alternative provoking vertex convention
	for rendering lines, triangles, and (optionally depending on the
	implementation) quads.

	The provoking vertex of a primitive is the vertex that determines the
	constant primary and secondary colors when flat shading is enabled.

	In OpenGL, the provoking vertex for triangle, quad, line, and
	(trivially) point primitives is the last vertex used to assemble the
	primitive. The polygon primitive is an exception in OpenGL where the
	first vertex of a polygon primitive determines the color of the
	polygon, even if actually broken into triangles and/or quads.

	See section 2.14.7 (Flatshading) of the OpenGL 2.1 specification,
	particularly Table 2.12 for more details.

	Alternatively the provoking vertex could be the first vertex of the
	primitive. Other APIs with flat-shading functionality such as Reality
	Lab and Direct3D have adopted the "first vertex of the primitive"
	convention to determine the provoking vertex. However, these APIs
	lack quads so do not have a defined provoking vertex convention for
	quads.

	The motivation for this extension is to allow applications developed
	for APIs with a "first vertex of the primitive" provoking vertex to
	be easily converted to OpenGL.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/provoking_vertex.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.provoking_vertex import *
from OpenGL.raw.GL.EXT.provoking_vertex import _EXTENSION_NAME

def glInitProvokingVertexEXT():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

### END AUTOGENERATED SECTION
gpl-3.0
8,362,022,892,857,040,000
38.826923
71
0.795652
false
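A minimal usage sketch for the extension wrapper above. glProvokingVertexEXT and the convention constant are re-exported from the raw module by the star import, and a current GL context is assumed to exist before either call:

from OpenGL.GL.EXT.provoking_vertex import (
    glInitProvokingVertexEXT, glProvokingVertexEXT, GL_FIRST_VERTEX_CONVENTION_EXT)

if glInitProvokingVertexEXT():  # driver advertises EXT_provoking_vertex?
    # Flat shading now takes its color from the first vertex of each
    # primitive, the Direct3D-style convention the overview describes.
    glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT)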
mfazekas/safaridriver
py_test.py
3
2257
#!/usr/bin/env python
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A script for running the unit test and example tests for the python
binding."""

import optparse
import os
import shutil
import subprocess
import sys


def run_script(script_name, *args):
    command = [sys.executable, script_name.replace("/", os.path.sep)]
    command.extend(args)
    return subprocess.Popen(command)


if __name__ == "__main__":
    usage = 'usage: %prog [options] arg'
    parser = optparse.OptionParser(usage)
    parser.add_option('-d', '--driver', dest='driver', action='store',
                      default='firefox', type='choice',
                      choices=['chrome', 'firefox', 'remote'],
                      help='Which driver to test.')
    (options, args) = parser.parse_args()

    driver_tests_dict = {
        'chrome': ['api_examples'],
        'firefox': ['api_examples', 'cookie_tests', 'firefox_launcher_tests'],
        'remote': ['api_examples'],
    }

    base_dir = os.path.abspath(os.path.dirname(__file__))
    print 'base_dir:', base_dir
    os.environ["WEBDRIVER"] = base_dir
    os.environ["PYTHONPATH"] = os.pathsep.join([os.environ.get("PYTHONPATH", ""),
                                                os.path.join(base_dir, "../../../", "firefox", "lib-src"),
                                                os.path.join(base_dir, '..')])

    try:
        for test in driver_tests_dict[options.driver]:
            process = run_script(os.path.join(base_dir, "%s_tests/%s.py" % (options.driver, test)))
            assert process.wait() == 0, "Test %s failed" % test
    finally:
        try:
            os.kill(process.pid, 9)
        except:
            pass
apache-2.0
684,375,463,691,647,200
35.403226
103
0.613646
false
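The runner above is driven from the command line; a hedged sketch of invoking it, where the script path is an assumption about the checkout layout:

# Hypothetical invocation of the runner above; the path is illustrative only.
import subprocess
import sys

exit_code = subprocess.call([sys.executable, "py_test.py", "--driver", "firefox"])
# The runner asserts on each child's exit status, so a failing suite
# surfaces here as a non-zero exit code.
assert exit_code == 0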
thaim/ansible
lib/ansible/modules/network/fortios/fortios_router_multicast6.py
14
12199
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_router_multicast6 short_description: Configure IPv6 multicast in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify router feature and multicast6 category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 router_multicast6: description: - Configure IPv6 multicast. default: null type: dict suboptions: interface: description: - Protocol Independent Multicast (PIM) interfaces. type: list suboptions: hello_holdtime: description: - Time before old neighbour information expires (1 - 65535 sec). type: int hello_interval: description: - Interval between sending PIM hello messages (1 - 65535 sec).. type: int name: description: - Interface name. Source system.interface.name. required: true type: str multicast_pmtu: description: - Enable/disable PMTU for IPv6 multicast. type: str choices: - enable - disable multicast_routing: description: - Enable/disable IPv6 multicast routing. type: str choices: - enable - disable pim_sm_global: description: - PIM sparse-mode global settings. type: dict suboptions: register_rate_limit: description: - Limit of packets/sec per source registered through this RP (0 means unlimited). type: int rp_address: description: - Statically configured RP addresses. type: list suboptions: id: description: - ID of the entry. required: true type: int ip6_address: description: - RP router IPv6 address. type: str ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure IPv6 multicast. 
fortios_router_multicast6: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" router_multicast6: interface: - hello_holdtime: "4" hello_interval: "5" name: "default_name_6 (source system.interface.name)" multicast_pmtu: "enable" multicast_routing: "enable" pim_sm_global: register_rate_limit: "10" rp_address: - id: "12" ip6_address: "<your_own_value>" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_router_multicast6_data(json): option_list = ['interface', 'multicast_pmtu', 'multicast_routing', 'pim_sm_global'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def router_multicast6(data, fos): vdom = data['vdom'] router_multicast6_data = data['router_multicast6'] filtered_data = underscore_to_hyphen(filter_router_multicast6_data(router_multicast6_data)) return fos.set('router', 'multicast6', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_router(data, fos): if data['router_multicast6']: resp = router_multicast6(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": 
"bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "router_multicast6": { "required": False, "type": "dict", "default": None, "options": { "interface": {"required": False, "type": "list", "options": { "hello_holdtime": {"required": False, "type": "int"}, "hello_interval": {"required": False, "type": "int"}, "name": {"required": True, "type": "str"} }}, "multicast_pmtu": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "multicast_routing": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "pim_sm_global": {"required": False, "type": "dict", "options": { "register_rate_limit": {"required": False, "type": "int"}, "rp_address": {"required": False, "type": "list", "options": { "id": {"required": True, "type": "int"}, "ip6_address": {"required": False, "type": "str"} }} }} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_router(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_router(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
mit
7,574,012,992,704,808,000
31.793011
109
0.539307
false
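Before posting to the device, the module above rewrites Python-style option names into the hyphenated keys the FortiOS API expects via underscore_to_hyphen. A self-contained sketch of that transformation (slightly tightened: this variant also rebuilds lists, whereas the verbatim helper only rewrites dict elements in place):

def underscore_to_hyphen(data):
    # Recursively replace '_' with '-' in every dict key.
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    elif isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data

payload = {'multicast_routing': 'enable',
           'pim_sm_global': {'register_rate_limit': 10}}
print(underscore_to_hyphen(payload))
# {'multicast-routing': 'enable', 'pim-sm-global': {'register-rate-limit': 10}}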
edespino/gpdb
src/test/tinc/tincrepo/mpp/gpdb/tests/catalog/udf_exception_handling/test_udf_exception_handling.py
9
11911
""" Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import glob import sys import tinctest from tinctest.lib import local_path, Gpdiff from mpp.models import MPPTestCase from tinctest.models.scenario import ScenarioTestCase from mpp.lib.PSQL import PSQL from mpp.gpdb.tests.catalog.udf_exception_handling.udf import UDFExceptionHandling class UDFTestCase(ScenarioTestCase, MPPTestCase): ''' Testing exception handling in subtransactions ''' def __init__(self, methodName): super(UDFTestCase,self).__init__(methodName) self.udf_obj=UDFExceptionHandling() @classmethod def setUpClass(cls): super(UDFTestCase, cls).setUpClass() ''' Create UDF to exercise subtransactions upfront. They will be used in the tests . ''' base_dir = os.path.dirname(sys.modules[cls.__module__].__file__) udf_name = ['test_excep','test_protocol_allseg','setup_sql_exceptionhandling'] for uname in udf_name: udfsql='' udfsql= os.path.join(os.path.dirname(sys.modules[cls.__module__].__file__), "sql")+'/'+uname+'.sql' udfans= os.path.join(os.path.dirname(sys.modules[cls.__module__].__file__), "expected")+'/'+uname+'.ans' udfout= os.path.join(os.path.dirname(sys.modules[cls.__module__].__file__), "sql")+'/'+uname+'.out' tinctest.logger.info( '\n Creating UDF : %s' % udfsql ) res=PSQL.run_sql_file(sql_file = udfsql,out_file=udfout) init_file=os.path.join( base_dir, "sql",'init_file') result = Gpdiff.are_files_equal(udfout, udfans, match_sub =[init_file]) assert result, 'Gpdiff are not equal' def test_UDF_exception(self): ''' @param debug_dtm_action_segment @param debug_dtm_action_target @param debug_dtm_action_protocol or debug_dtm_action_sql_command_tag @param debug_dtm_action @param debug_dtm_action_nestinglevel @description: This tests the Exception Handling of GPDB PL/PgSQL UDF It exercises ; 1. PROTOCOL or SQL type of dtm_action_target 2. Various levels of sub-transactions 3. dtm_action_protocol(PROTOCOL): subtransaction_begin, subtransaction_rollback or subtransaction_release or debug_dtm_action_sql_command_tag(SQL) : 'MPPEXEC UPDATE' 4. dtm_action: fail_begin_command, fail_end_command or panic_begin_comand debug_dtm_action: Using this can specify what action to be triggered/simulated and at what point like error / panic / delay and at start or end command after receiving by the segment. debug_dtm_action_segment: Using this can specify segment number to trigger the specified dtm_action. debug_dtm_action_target: Allows to set target for specified dtm_action should it be DTM protocol command or SQL command from master to segment. debug_dtm_action_protocol: Allows to specify sub-type of DTM protocol for which to perform specified dtm_action (like prepare, abort_no_prepared, commit_prepared, abort_prepared, subtransaction_begin, subtransaction_release, subtransaction_rollback, etc... 
debug_dtm_action_sql_command_tag: If debug_dtm_action_target is sql then this parameter can be used to set the type of sql that should trigger the exeception. Ex: 'MPPEXEC UPDATE' debug_dtm_action_nestinglevel: This allows to optional specify at which specific depth level in transaction to take the specified dtm_action. This apples only to target with protocol and not SQL. STEPS: @data_provider data_types_provider ''' tinctest.logger.info("\n ===============================================") tinctest.logger.info("\n Starting New Test: %s%s " % (self.test_data[0][0], self.test_data[0][1] )) debug_dtm_action_segment = self.test_data[1][0] debug_dtm_action_target = self.test_data[1][1] debug_dtm_action_protocol='' debug_dtm_action_sql_command_tag='' if debug_dtm_action_target == 'protocol': debug_dtm_action_protocol = self.test_data[1][2] elif debug_dtm_action_target == 'sql': debug_dtm_action_sql_command_tag = self.test_data[1][2] debug_dtm_action = self.test_data[1][3] debug_dtm_action_nestinglevel = self.test_data[1][4] tinctest.logger.info( '\ndebug_dtm_action_segment: %s' % debug_dtm_action_segment) tinctest.logger.info( 'debug_dtm_action_target: %s' % debug_dtm_action_target) tinctest.logger.info( 'debug_dtm_action_protocol: %s ' % debug_dtm_action_protocol) tinctest.logger.info( 'debug_dtm_action_sql_command_tag: %s ' % debug_dtm_action_sql_command_tag) tinctest.logger.info( 'debug_dtm_action: %s ' % debug_dtm_action) tinctest.logger.info( 'debug_dtm_action_nestinglevel: %s ' % debug_dtm_action_nestinglevel) tinctest.logger.info("\n ===============================================") self.udf_obj.reset_protocol_conf() if debug_dtm_action_target == 'protocol': self.udf_obj.set_protocol_conf(debug_dtm_action_segment, debug_dtm_action_target, debug_dtm_action_protocol,debug_dtm_action,debug_dtm_action_nestinglevel) self.udf_obj.run_test(debug_dtm_action_segment, debug_dtm_action_target, debug_dtm_action_protocol,debug_dtm_action,debug_dtm_action_nestinglevel) if debug_dtm_action_target == 'sql': self.udf_obj.set_sql_conf(debug_dtm_action_segment, debug_dtm_action_target, debug_dtm_action_sql_command_tag,debug_dtm_action,debug_dtm_action_nestinglevel) self.udf_obj.run_test(debug_dtm_action_segment, debug_dtm_action_target, debug_dtm_action_sql_command_tag,debug_dtm_action,debug_dtm_action_nestinglevel) @tinctest.dataProvider('data_types_provider') def test_data_provider(): data = { '01_protocol_seg0_subtxnbegin_failbegcmd_nstlvl0': ['0','protocol','subtransaction_begin','fail_begin_command','0'], '02_protocol_seg0_subtxnbegin_failbegcmd_nstlvl3': ['0','protocol','subtransaction_begin','fail_begin_command','3'], '03_protocol_seg0_subtxnbegin_failbegcmd_nstlvl4': ['0','protocol','subtransaction_begin','fail_begin_command','4'], '04_protocol_seg0_subtxnrollbk_failbegcmd_nstlvl0': ['0','protocol','subtransaction_rollback','fail_begin_command','0'], '05_protocol_seg0_subtxnrollbk_failbegcmd_nstlvl3': ['0','protocol','subtransaction_rollback','fail_begin_command','3'], '06_protocol_seg0_subtxnrollbk_failbegcmd_nstlvl4': ['0','protocol','subtransaction_rollback','fail_begin_command','4'], '07_protocol_seg0_subtxnrelse_failbegcmd_nstlvl0': ['0','protocol','subtransaction_release','fail_begin_command','0'], '08_protocol_seg0_subtxnrelse_failbegcmd_nstlvl4': ['0','protocol','subtransaction_release','fail_begin_command','4'], '09_protocol_seg0_subtxnrelse_failbegcmd_nstlvl5': ['0','protocol','subtransaction_release','fail_begin_command','5'], '10_protocol_seg0_subtxnbegin_failendcmd_nstlvl0': 
['0','protocol','subtransaction_begin','fail_end_command','0'], '11_protocol_seg0_subtxnbegin_failendcmd_nstlvl3': ['0','protocol','subtransaction_begin','fail_end_command','3'], '12_protocol_seg0_subtxnbegin_failendcmd_nstlvl4': ['0','protocol','subtransaction_begin','fail_end_command','4'], '13_protocol_seg0_subtxnrollbk_failendcmd_nstlvl0': ['0','protocol','subtransaction_rollback','fail_end_command','0'], '14_protocol_seg0_subtxnrollbk_failendcmd_nstlvl3': ['0','protocol','subtransaction_rollback','fail_end_command','3'], '15_protocol_seg0_subtxnrollbk_failendcmd_nstlvl4': ['0','protocol','subtransaction_rollback','fail_end_command','4'], '16_protocol_seg0_subtxnrelse_failendcmd_nstlvl0': ['0','protocol','subtransaction_release','fail_end_command','0'], '17_protocol_seg0_subtxnrelse_failendcmd_nstlvl4': ['0','protocol','subtransaction_release','fail_end_command','4'], '18_protocol_seg0_subtxnrelse_failendcmd_nstlvl5': ['0','protocol','subtransaction_release','fail_end_command','5'], '19_protocol_seg0_subtxnbegin_panicbegcmd_nstlvl0': ['0','protocol','subtransaction_begin','panic_begin_command','0'], '20_protocol_seg0_subtxnbegin_panicbegcmd_nstlvl3': ['0','protocol','subtransaction_begin','panic_begin_command','3'], '21_protocol_seg0_subtxnbegin_panicbegcmd_nstlvl4': ['0','protocol','subtransaction_begin','panic_begin_command','4'], '22_protocol_seg0_subtxnrollbk_panicbegcmd_nstlvl0': ['0','protocol','subtransaction_rollback','panic_begin_command','0'], '23_protocol_seg0_subtxnrollbk_panicbegcmd_nstlvl3': ['0','protocol','subtransaction_rollback','panic_begin_command','3'], '24_protocol_seg0_subtxnrollbk_panicbegcmd_nstlvl4': ['0','protocol','subtransaction_rollback','panic_begin_command','4'], '25_protocol_seg0_subtxnrelse_panicbegcmd_nstlvl0': ['0','protocol','subtransaction_release','panic_begin_command','0'], '26_protocol_seg0_subtxnrelse_panicbegcmd_nstlvl4': ['0','protocol','subtransaction_release','panic_begin_command','4'], '27_protocol_seg0_subtxnrelse_panicbegcmd_nstlvl5': ['0','protocol','subtransaction_release','panic_begin_command','5'], '28_protocol_seg1_subtxnbegin_panicbegcmd_nstlvl0': ['1','protocol','subtransaction_begin','panic_begin_command','0'], '29_protocol_seg1_subtxnbegin_panicbegcmd_nstlvl3': ['1','protocol','subtransaction_begin','panic_begin_command','3'], '30_protocol_seg1_subtxnbegin_panicbegcmd_nstlvl4': ['1','protocol','subtransaction_begin','panic_begin_command','4'], '31_protocol_seg1_subtxnrollbk_panicbegcmd_nstlvl0': ['1','protocol','subtransaction_rollback','panic_begin_command','0'], '32_protocol_seg1_subtxnrollbk_panicbegcmd_nstlvl3': ['1','protocol','subtransaction_rollback','panic_begin_command','3'], '33_protocol_seg1_subtxnrollbk_panicbegcmd_nstlvl4': ['1','protocol','subtransaction_rollback','panic_begin_command','4'], '34_protocol_seg1_subtxnrelse_panicbegcmd_nstlvl0': ['1','protocol','subtransaction_release','panic_begin_command','0'], '35_protocol_seg1_subtxnrelse_panicbegcmd_nstlvl4': ['1','protocol','subtransaction_release','panic_begin_command','4'], '36_protocol_seg1_subtxnrelse_panicbegcmd_nstlvl5': ['1','protocol','subtransaction_release','panic_begin_command','5'], '37_sql_seg0_subtxnbegin_failbegcmd_nstlvl0': ['0','sql','"\'MPPEXEC UPDATE\'"','fail_begin_command','0'], '38_sql_seg0_subtxnbegin_failbegcmd_nstlvl0': ['0','sql','"\'MPPEXEC UPDATE\'"','fail_end_command','0'] } return data
apache-2.0
1,709,474,834,917,103,000
67.062857
169
0.653849
false
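Each value in the provider above is a five-element list that test_UDF_exception consumes positionally. A small sketch of that mapping; the variable names are local illustrations, not harness API:

# One provider entry, unpacked the way the test reads self.test_data[1].
entry = ['0', 'protocol', 'subtransaction_begin', 'fail_begin_command', '0']
segment, target, protocol_or_tag, action, nesting_level = entry

assert target in ('protocol', 'sql')
# For target == 'protocol', protocol_or_tag names the DTM sub-protocol;
# for target == 'sql' it carries a command tag such as "'MPPEXEC UPDATE'".
print(segment, target, protocol_or_tag, action, nesting_level)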
django-pci/django-axes
axes/backends.py
2
2807
from django.contrib.auth.backends import ModelBackend

from axes.exceptions import (
    AxesBackendPermissionDenied,
    AxesBackendRequestParameterRequired,
)
from axes.handlers.proxy import AxesProxyHandler
from axes.helpers import get_credentials, get_lockout_message, toggleable


class AxesBackend(ModelBackend):
    """
    Authentication backend class that forbids login attempts for locked out users.

    Use this class as the first item of ``AUTHENTICATION_BACKENDS`` to
    prevent locked out users from being logged in by the Django authentication flow.

    .. note:: This backend does not log your user in. It monitors login attempts.
              Authentication is handled by the following backends that are
              configured in ``AUTHENTICATION_BACKENDS``.
    """

    @toggleable
    def authenticate(
        self, request, username: str = None, password: str = None, **kwargs: dict
    ):
        """
        Checks the user's lockout status and raises an exception if the user
        is not allowed to log in.

        This method interrupts the login flow and inserts an error message
        directly into the ``response_context`` mapping that is supplied as a
        keyword argument.

        :keyword response_context: kwarg that will have its ``error`` key
                                   updated with context.
        :raises AxesBackendRequestParameterRequired: if the request parameter is not passed.
        :raises AxesBackendPermissionDenied: if the user is already locked out.
        """

        if request is None:
            raise AxesBackendRequestParameterRequired(
                "AxesBackend requires a request as an argument to authenticate"
            )

        credentials = get_credentials(username=username, password=password, **kwargs)

        if AxesProxyHandler.is_allowed(request, credentials):
            return

        # Locked out: don't try to authenticate, just update response_context and return.
        # It's a bit weird to pass a context and expect a response value, but it's
        # nice to get a "why" back.
        error_msg = get_lockout_message()
        response_context = kwargs.get("response_context", {})
        response_context["error"] = error_msg

        # Raise an error that stops the authentication flow at django.contrib.auth.authenticate.
        # This error stops bubbling up at the authenticate call, which catches backend
        # PermissionDenied errors. After this error is caught by authenticate, it emits a
        # signal indicating the user login failed, which is processed by
        # axes.signals.log_user_login_failed, which logs and flags the failed request.
        # The axes.middleware.AxesMiddleware further processes the flagged request into
        # a readable response.
        raise AxesBackendPermissionDenied(
            "AxesBackend detected that the given user is locked out"
        )
mit
3,371,056,431,732,445,000
44.274194
117
0.710367
false
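Per its docstring, the backend above must sit first in Django's authentication chain so it can veto locked-out requests before any backend actually authenticates them. A settings fragment to that effect; the second entry is the stock Django backend and is an assumption about the rest of the project:

# settings.py fragment (sketch): AxesBackend first, real authentication after.
AUTHENTICATION_BACKENDS = [
    "axes.backends.AxesBackend",                  # lockout check, may raise
    "django.contrib.auth.backends.ModelBackend",  # normal credential check
]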
StefanRijnhart/account-financial-reporting
__unported__/account_journal_report_xls/__openerp__.py
6
1737
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Financial Journal reports', 'version': '0.2', 'license': 'AGPL-3', 'author': 'Noviat', 'category': 'Accounting & Finance', 'description': """ Journal Reports =============== This module adds journal reports by period and by fiscal year with - entries printed per move - option to group entries with same general account & VAT case - vat info per entry - vat summary These reports are available in PDF and XLS format. This module depends upon the 'report_xls' module, cf. https://launchpad.net/openerp-reporting-engines """, 'depends': [ 'account_voucher', 'report_xls', ], 'demo': [], 'data': [ 'wizard/print_journal_wizard.xml', ], }
agpl-3.0
-3,413,281,182,335,184,400
31.166667
78
0.605066
false
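Manifests like the record above are plain dict expressions evaluated at module discovery, with 'depends' forcing report_xls to load first. A minimal hypothetical manifest of the same shape; every value here is illustrative:

# Hypothetical __openerp__.py sketch mirroring the record's structure.
{
    'name': 'My Journal Report Add-on',
    'version': '0.1',
    'license': 'AGPL-3',
    'depends': ['account_voucher', 'report_xls'],
    'data': ['wizard/print_journal_wizard.xml'],
}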
aleksandra-tarkowska/django
django/db/models/fields/__init__.py
2
81786
# -*- coding: utf-8 -*- from __future__ import unicode_literals import collections import copy import datetime import decimal import math import warnings from base64 import b64decode, b64encode from itertools import tee from django.apps import apps from django.db import connection from django.db.models.lookups import default_lookups, RegisterLookupMixin from django.db.models.query_utils import QueryWrapper from django.conf import settings from django import forms from django.core import exceptions, validators, checks from django.utils.datastructures import DictWrapper from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.deprecation import RemovedInDjango19Warning from django.utils.functional import cached_property, curry, total_ordering, Promise from django.utils.text import capfirst from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import (smart_text, force_text, force_bytes, python_2_unicode_compatible) from django.utils.ipv6 import clean_ipv6_address from django.utils import six from django.utils.itercompat import is_iterable # Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals # makes these strings unicode __all__ = [str(x) for x in ( 'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField', 'DateTimeField', 'DecimalField', 'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField', )] class Empty(object): pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0] class FieldDoesNotExist(Exception): pass # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new @total_ordering @python_2_unicode_compatible class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. 
creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { 'invalid_choice': _('Value %(value)r is not a valid choice.'), 'null': _('This field cannot be null.'), 'blank': _('This field cannot be blank.'), 'unique': _('%(model_name)s with this %(field_label)s ' 'already exists.'), 'unique_for_date': _("%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s."), } class_lookups = default_lookups.copy() # Generic field type description, usually overridden by subclasses def _description(self): return _('Field of type: %(field_type)s') % { 'field_type': self.__class__.__name__ } description = property(_description) def __init__(self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text='', db_column=None, db_tablespace=None, auto_created=False, validators=[], error_messages=None): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.rel = rel self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year self._choices = choices or [] self.help_text = help_text self.db_column = db_column self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE self.auto_created = auto_created # Set db_index to True if the field has a relationship and doesn't # explicitly set db_index. self.db_index = db_index # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = validators # Store for deconstruction later messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self._error_messages = error_messages # Store for deconstruction later self.error_messages = messages def __str__(self): """ Return "app_label.model_label.field_name". """ model = self.model app = model._meta.app_label return '%s.%s.%s' % (app, model._meta.object_name, self.name) def __repr__(self): """ Displays the module, class and name of the field. """ path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path def check(self, **kwargs): errors = [] errors.extend(self._check_field_name()) errors.extend(self._check_choices()) errors.extend(self._check_db_index()) errors.extend(self._check_null_allowed_for_primary_keys()) errors.extend(self._check_backend_specific_checks(**kwargs)) return errors def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". 
""" if self.name.endswith('_'): return [ checks.Error( 'Field names must not end with an underscore.', hint=None, obj=self, id='fields.E001', ) ] elif '__' in self.name: return [ checks.Error( 'Field names must not contain "__".', hint=None, obj=self, id='fields.E002', ) ] elif self.name == 'pk': return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", hint=None, obj=self, id='fields.E003', ) ] else: return [] def _check_choices(self): if self.choices: if (isinstance(self.choices, six.string_types) or not is_iterable(self.choices)): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", hint=None, obj=self, id='fields.E004', ) ] elif any(isinstance(choice, six.string_types) or not is_iterable(choice) or len(choice) != 2 for choice in self.choices): return [ checks.Error( ("'choices' must be an iterable containing " "(actual value, human readable name) tuples."), hint=None, obj=self, id='fields.E005', ) ] else: return [] else: return [] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", hint=None, obj=self, id='fields.E006', ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if (self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( 'Primary keys must not have null=True.', hint=('Set null=False on the field, or ' 'remove primary_key=True argument.'), obj=self, id='fields.E007', ) ] else: return [] def _check_backend_specific_checks(self, **kwargs): return connection.validation.check_field(self, **kwargs) def deconstruct(self): """ Returns enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class has been run * The import path of the field, including the class: django.db.models.IntegerField This should be the most portable version, so less specific may be better. * A list of positional arguments * A dict of keyword arguments Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": [], "help_text": '', "db_column": None, "db_tablespace": settings.DEFAULT_INDEX_TABLESPACE, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "choices": "_choices", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", } equals_comparison = set(["choices", "validators", "db_tablespace"]) for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") if path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") if path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") if path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return ( force_text(self.name, strings_only=True), path, [], keywords, ) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. if isinstance(other, Field): return self.creation_counter < other.creation_counter return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. obj = copy.copy(self) if self.rel: obj.rel = copy.copy(self.rel) if hasattr(self.rel, 'field') and self.rel.field is self: obj.rel.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, we use the app registry to load the model and then the field back. """ if not hasattr(self, 'model'): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. 
The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. return _empty, (self.__class__,), self.__dict__ if self.model._deferred: # Deferred model will not be found from the app registry. This # could be fixed by reconstructing the deferred model on unpickle. raise RuntimeError("Fields of deferred models can't be reduced") return _load_field, (self.model._meta.app_label, self.model._meta.object_name, self.name) def to_python(self, value): """ Converts the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Returns the converted value. Subclasses should override this. """ return value @cached_property def validators(self): # Some validators can't be created at field initialization time. # This method provides a way to delay their creation until required. return self.default_validators + self._validators def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validates value and throws ValidationError. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self._choices and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null'], code='null') if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages['blank'], code='blank') def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python and validate are propagated. The correct value is returned if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type(self, connection): """ Returns the database column data type for this field, for the provided connection. """ # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. 
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") try: return connection.creation.data_types[self.get_internal_type()] % data except KeyError: return None def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. """ data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") type_string = self.db_type(connection) try: check_string = connection.creation.data_type_check_constraints[self.get_internal_type()] % data except KeyError: check_string = None return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.creation.data_types_suffix.get(self.get_internal_type()) @property def unique(self): return self._unique or self.primary_key def set_attributes_from_name(self, name): if not self.name: self.name = name self.attname, self.column = self.get_attname_column() if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name, virtual_only=False): self.set_attributes_from_name(name) self.model = cls if virtual_only: cls._meta.add_virtual_field(self) else: cls._meta.add_field(self) if self.choices: setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self)) def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_cache_name(self): return '_%s_cache' % self.name def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """ Returns field's value just before saving. """ return getattr(model_instance, self.attname) def get_prep_value(self, value): """ Perform preliminary non-db specific value checks and conversions. """ if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """Returns field's value prepared for interacting with the database backend. Used by the default implementations of ``get_db_prep_save``and `get_db_prep_lookup``` """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """ Returns field's value prepared for saving into a database. """ return self.get_db_prep_value(value, connection=connection, prepared=False) def get_prep_lookup(self, lookup_type, value): """ Perform preliminary non-db specific lookup checks and conversions """ if hasattr(value, 'prepare'): return value.prepare() if hasattr(value, '_prepare'): return value._prepare() if lookup_type in { 'iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search', 'regex', 'iregex', }: return value elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'): return self.get_prep_value(value) elif lookup_type in ('range', 'in'): return [self.get_prep_value(v) for v in value] elif lookup_type == 'year': try: return int(value) except ValueError: raise ValueError("The __year lookup type requires an integer " "argument") return self.get_prep_value(value) def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False): """ Returns field's value prepared for database lookup. 
""" if not prepared: value = self.get_prep_lookup(lookup_type, value) prepared = True if hasattr(value, 'get_compiler'): value = value.get_compiler(connection=connection) if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'): # If the value has a relabeled_clone method it means the # value will be handled later on. if hasattr(value, 'relabeled_clone'): return value if hasattr(value, 'as_sql'): sql, params = value.as_sql() else: sql, params = value._as_sql(connection=connection) return QueryWrapper(('(%s)' % sql), params) if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second', 'search', 'regex', 'iregex'): return [value] elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'): return [self.get_db_prep_value(value, connection=connection, prepared=prepared)] elif lookup_type in ('range', 'in'): return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value] elif lookup_type in ('contains', 'icontains'): return ["%%%s%%" % connection.ops.prep_for_like_query(value)] elif lookup_type == 'iexact': return [connection.ops.prep_for_iexact_query(value)] elif lookup_type in ('startswith', 'istartswith'): return ["%s%%" % connection.ops.prep_for_like_query(value)] elif lookup_type in ('endswith', 'iendswith'): return ["%%%s" % connection.ops.prep_for_like_query(value)] elif lookup_type == 'isnull': return [] elif lookup_type == 'year': if isinstance(self, DateTimeField): return connection.ops.year_lookup_bounds_for_datetime_field(value) elif isinstance(self, DateField): return connection.ops.year_lookup_bounds_for_date_field(value) else: return [value] # this isn't supposed to happen else: return [value] def has_default(self): """ Returns a boolean of whether this field has a default value. """ return self.default is not NOT_PROVIDED def get_default(self): """ Returns the default value for this field. """ if self.has_default(): if callable(self.default): return self.default() return force_text(self.default, strings_only=True) if (not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls)): return None return "" def get_validator_unique_lookup_type(self): return '%s__exact' % self.name def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH): """Returns choices with a default blank choices included, for use as SelectField choices for this field.""" blank_defined = False for choice, __ in self.choices: if choice in ('', None): blank_defined = True break first_choice = (blank_choice if include_blank and not blank_defined else []) if self.choices: return first_choice + list(self.choices) rel_model = self.rel.to if hasattr(self.rel, 'get_related_field'): lst = [(getattr(x, self.rel.get_related_field().attname), smart_text(x)) for x in rel_model._default_manager.complex_filter( self.get_limit_choices_to())] else: lst = [(x._get_pk_val(), smart_text(x)) for x in rel_model._default_manager.complex_filter( self.get_limit_choices_to())] return first_choice + lst def get_choices_default(self): return self.get_choices() def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH): """ Returns flattened choices with a default blank choice included. """ first_choice = blank_choice if include_blank else [] return first_choice + list(self.flatchoices) def _get_val_from_obj(self, obj): if obj is not None: return getattr(obj, self.attname) else: return self.get_default() def value_to_string(self, obj): """ Returns a string value of this field from the passed obj. 
This is used by the serialization framework. """ return smart_text(self._get_val_from_obj(obj)) def bind(self, fieldmapping, original, bound_field_class): return bound_field_class(self, fieldmapping, original) def _get_choices(self): if isinstance(self._choices, collections.Iterator): choices, self._choices = tee(self._choices) return choices else: return self._choices choices = property(_get_choices) def _get_flatchoices(self): """Flattened version of choices tuple.""" flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """ Returns a django.forms.Field instance for this database Field. """ defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text} if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices: # Fields with choices get special treatment. include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) defaults['choices'] = self.get_choices(include_blank=include_blank) defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial'): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """ Returns the value of this field in the given model instance. 
""" return getattr(obj, self.attname) class AutoField(Field): description = _("Integer") empty_strings_allowed = False default_error_messages = { 'invalid': _("'%(value)s' value must be an integer."), } def __init__(self, *args, **kwargs): kwargs['blank'] = True super(AutoField, self).__init__(*args, **kwargs) def check(self, **kwargs): errors = super(AutoField, self).check(**kwargs) errors.extend(self._check_primary_key()) return errors def _check_primary_key(self): if not self.primary_key: return [ checks.Error( 'AutoFields must set primary_key=True.', hint=None, obj=self, id='fields.E100', ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super(AutoField, self).deconstruct() del kwargs['blank'] kwargs['primary_key'] = True return name, path, args, kwargs def get_internal_type(self): return "AutoField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def get_prep_value(self, value): value = super(AutoField, self).get_prep_value(value) if value is None: return None return int(value) def contribute_to_class(self, cls, name): assert not cls._meta.has_auto_field, \ "A model can't have more than one AutoField." super(AutoField, self).contribute_to_class(cls, name) cls._meta.has_auto_field = True cls._meta.auto_field = self def formfield(self, **kwargs): return None class BooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _("'%(value)s' value must be either True or False."), } description = _("Boolean (Either True or False)") def __init__(self, *args, **kwargs): kwargs['blank'] = True super(BooleanField, self).__init__(*args, **kwargs) def check(self, **kwargs): errors = super(BooleanField, self).check(**kwargs) errors.extend(self._check_null(**kwargs)) return errors def _check_null(self, **kwargs): if getattr(self, 'null', False): return [ checks.Error( 'BooleanFields do not accept null values.', hint='Use a NullBooleanField instead.', obj=self, id='fields.E110', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super(BooleanField, self).deconstruct() del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "BooleanField" def to_python(self, value): if value in (True, False): # if value is 1 or 0 than it's equal to True or False, but we want # to return a true bool for semantic reasons. return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_prep_lookup(self, lookup_type, value): # Special-case handling for filters coming from a Web request (e.g. the # admin interface). Only works for scalar values (not lists). If you're # passing in a list, you might as well make things the right type when # constructing the list. 
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        value = super(BooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = (self.null or
                             not (self.has_default() or 'initial' in kwargs))
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)


class CharField(Field):
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        self.validators.append(validators.MaxLengthValidator(self.max_length))

    def check(self, **kwargs):
        errors = super(CharField, self).check(**kwargs)
        errors.extend(self._check_max_length_attribute(**kwargs))
        return errors

    def _check_max_length_attribute(self, **kwargs):
        try:
            max_length = int(self.max_length)
            if max_length <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "CharFields must define a 'max_length' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E120',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_length' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E121',
                )
            ]
        else:
            return []

    def get_internal_type(self):
        return "CharField"

    def to_python(self, value):
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value)

    def get_prep_value(self, value):
        value = super(CharField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)


# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")

    def formfield(self, **kwargs):
        defaults = {
            'error_messages': {
                'invalid': _('Enter only digits separated by commas.'),
            }
        }
        defaults.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**defaults)


class DateTimeCheckMixin(object):

    def check(self, **kwargs):
        errors = super(DateTimeCheckMixin, self).check(**kwargs)
        errors.extend(self._check_mutually_exclusive_options())
        errors.extend(self._check_fix_default_value())
        return errors

    def _check_mutually_exclusive_options(self):
        # auto_now, auto_now_add, and default are mutually exclusive
        # options. The use of more than one of these options together
        # will trigger an Error
        mutually_exclusive_options = [self.auto_now_add, self.auto_now,
                                      self.has_default()]
        enabled_options = [option not in (None, False)
                           for option in mutually_exclusive_options].count(True)
        if enabled_options > 1:
            return [
                checks.Error(
                    "The options auto_now, auto_now_add, and default "
                    "are mutually exclusive. Only one of these options "
                    "may be present.",
                    hint=None,
                    obj=self,
                    id='fields.E160',
                )
            ]
        else:
            return []

    def _check_fix_default_value(self):
        return []


class DateField(DateTimeCheckMixin, Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid date format. It must be "
                     "in YYYY-MM-DD format."),
        'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
                          "but it is an invalid date."),
    }
    description = _("Date (without time)")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(DateField, self).__init__(verbose_name, name, **kwargs)

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.

        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            if not timezone.is_naive(value):
                value = timezone.make_naive(value, timezone.utc)
            value = value.date()
        elif isinstance(value, datetime.date):
            # Nothing to do, as dates don't have tz information
            pass
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        offset = datetime.timedelta(days=1)
        lower = (now - offset).date()
        upper = (now + offset).date()
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def deconstruct(self):
        name, path, args, kwargs = super(DateField, self).deconstruct()
        if self.auto_now:
            kwargs['auto_now'] = True
        if self.auto_now_add:
            kwargs['auto_now_add'] = True
        if self.auto_now or self.auto_now_add:
            del kwargs['editable']
            del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DateField"

    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            if settings.USE_TZ and timezone.is_aware(value):
                # Convert aware datetimes to the default time zone
                # before casting them to dates (#17742).
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_naive(value, default_timezone)
            return value.date()
        if isinstance(value, datetime.date):
            return value
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)

    def contribute_to_class(self, cls, name):
        super(DateField, self).contribute_to_class(cls, name)
        if not self.null:
            setattr(cls, 'get_next_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=False))

    def get_prep_lookup(self, lookup_type, value):
        # For dates lookups, convert the value to an int
        # so the database backend always sees a consistent type.
        if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
            return int(value)
        return super(DateField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        value = super(DateField, self).get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_date(value)

    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)


class DateTimeField(DateField):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
        'invalid_date': _("'%(value)s' value has the correct format "
                          "(YYYY-MM-DD) but it is an invalid date."),
        'invalid_datetime': _("'%(value)s' value has the correct format "
                              "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
                              "but it is an invalid date/time."),
    }
    description = _("Date (with time)")

    # __init__ is inherited from DateField

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.

        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.date):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            lower = datetime.datetime(lower.year, lower.month, lower.day)
            upper = now + second_offset
            upper = datetime.datetime(upper.year, upper.month, upper.day)
            value = datetime.datetime(value.year, value.month, value.day)
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def get_internal_type(self):
        return "DateTimeField"

    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            value = datetime.datetime(value.year, value.month, value.day)
            if settings.USE_TZ:
                # For backwards compatibility, interpret naive datetimes in
                # local time. This won't work during DST change, but we can't
                # do much about it, so we let the exceptions percolate up the
                # call stack.
                warnings.warn("DateTimeField %s.%s received a naive datetime "
                              "(%s) while time zone support is active." %
                              (self.model.__name__, self.name, value),
                              RuntimeWarning)
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_aware(value, default_timezone)
            return value
        try:
            parsed = parse_datetime(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_datetime'],
                code='invalid_datetime',
                params={'value': value},
            )
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return datetime.datetime(parsed.year, parsed.month, parsed.day)
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        if self.auto_now or (self.auto_now_add and add):
            value = timezone.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)

    # contribute_to_class is inherited from DateField, it registers
    # get_next_by_FOO and get_prev_by_FOO

    # get_prep_lookup is inherited from DateField

    def get_prep_value(self, value):
        value = super(DateTimeField, self).get_prep_value(value)
        value = self.to_python(value)
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            # For backwards compatibility, interpret naive datetimes in local
            # time. This won't work during DST change, but we can't do much
            # about it, so we let the exceptions percolate up the call stack.
            warnings.warn("DateTimeField %s.%s received a naive datetime (%s)"
                          " while time zone support is active." %
                          (self.model.__name__, self.name, value),
                          RuntimeWarning)
            default_timezone = timezone.get_default_timezone()
            value = timezone.make_aware(value, default_timezone)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts datetimes into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)

    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)


class DecimalField(Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a decimal number."),
    }
    description = _("Decimal number")

    def __init__(self, verbose_name=None, name=None, max_digits=None,
                 decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(DecimalField, self).check(**kwargs)
        digits_errors = self._check_decimal_places()
        digits_errors.extend(self._check_max_digits())
        if not digits_errors:
            errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
        else:
            errors.extend(digits_errors)
        return errors

    def _check_decimal_places(self):
        try:
            decimal_places = int(self.decimal_places)
            if decimal_places < 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'decimal_places' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E130',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'decimal_places' must be a non-negative integer.",
                    hint=None,
                    obj=self,
                    id='fields.E131',
                )
            ]
        else:
            return []

    def _check_max_digits(self):
        try:
            max_digits = int(self.max_digits)
            if max_digits <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'max_digits' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E132',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_digits' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E133',
                )
            ]
        else:
            return []

    def _check_decimal_places_and_max_digits(self, **kwargs):
        if int(self.decimal_places) > int(self.max_digits):
            return [
                checks.Error(
                    "'max_digits' must be greater or equal to 'decimal_places'.",
                    hint=None,
                    obj=self,
                    id='fields.E134',
                )
            ]
        return []

    def deconstruct(self):
        name, path, args, kwargs = super(DecimalField, self).deconstruct()
        if self.max_digits is not None:
            kwargs['max_digits'] = self.max_digits
        if self.decimal_places is not None:
            kwargs['decimal_places'] = self.decimal_places
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DecimalField"

    def to_python(self, value):
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def _format(self, value):
        if isinstance(value, six.string_types) or value is None:
            return value
        else:
            return self.format_number(value)

    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.utils.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import utils
        return utils.format_number(value, self.max_digits, self.decimal_places)

    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                                                  self.max_digits,
                                                  self.decimal_places)

    def get_prep_value(self, value):
        value = super(DecimalField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)


class EmailField(CharField):
    default_validators = [validators.validate_email]
    description = _("Email address")

    def __init__(self, *args, **kwargs):
        # max_length should be overridden to 254 characters to be fully
        # compliant with RFCs 3696 and 5321
        kwargs['max_length'] = kwargs.get('max_length', 75)
        super(EmailField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(EmailField, self).deconstruct()
        # We do not exclude max_length if it matches default as we want to change
        # the default in future.
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, this will cause email validation to be performed
        # twice.
        defaults = {
            'form_class': forms.EmailField,
        }
        defaults.update(kwargs)
        return super(EmailField, self).formfield(**defaults)


class FilePathField(Field):
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None,
                 recursive=False, allow_files=True, allow_folders=False, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FilePathField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(FilePathField, self).check(**kwargs)
        errors.extend(self._check_allowing_files_or_folders(**kwargs))
        return errors

    def _check_allowing_files_or_folders(self, **kwargs):
        if not self.allow_files and not self.allow_folders:
            return [
                checks.Error(
                    "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                    hint=None,
                    obj=self,
                    id='fields.E140',
                )
            ]
        return []

    def deconstruct(self):
        name, path, args, kwargs = super(FilePathField, self).deconstruct()
        if self.path != '':
            kwargs['path'] = self.path
        if self.match is not None:
            kwargs['match'] = self.match
        if self.recursive is not False:
            kwargs['recursive'] = self.recursive
        if self.allow_files is not True:
            kwargs['allow_files'] = self.allow_files
        if self.allow_folders is not False:
            kwargs['allow_folders'] = self.allow_folders
        if kwargs.get("max_length", None) == 100:
            del kwargs["max_length"]
        return name, path, args, kwargs

    def get_prep_value(self, value):
        value = super(FilePathField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)

    def formfield(self, **kwargs):
        defaults = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
            'allow_files': self.allow_files,
            'allow_folders': self.allow_folders,
        }
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)

    def get_internal_type(self):
        return "FilePathField"


class FloatField(Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a float."),
    }
    description = _("Floating point number")

    def get_prep_value(self, value):
        value = super(FloatField, self).get_prep_value(value)
        if value is None:
            return None
        return float(value)

    def get_internal_type(self):
        return "FloatField"

    def to_python(self, value):
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FloatField}
        defaults.update(kwargs)
        return super(FloatField, self).formfield(**defaults)


class IntegerField(Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    description = _("Integer")

    @cached_property
    def validators(self):
        # These validators can't be added at field initialization time since
        # they're based on values retrieved from `connection`.
        range_validators = []
        internal_type = self.get_internal_type()
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        if min_value is not None:
            range_validators.append(validators.MinValueValidator(min_value))
        if max_value is not None:
            range_validators.append(validators.MaxValueValidator(max_value))
        return super(IntegerField, self).validators + range_validators

    def get_prep_value(self, value):
        value = super(IntegerField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)

    def get_prep_lookup(self, lookup_type, value):
        if ((lookup_type == 'gte' or lookup_type == 'lt')
                and isinstance(value, float)):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)

    def get_internal_type(self):
        return "IntegerField"

    def to_python(self, value):
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)


class BigIntegerField(IntegerField):
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    MAX_BIGINT = 9223372036854775807

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
                    'max_value': BigIntegerField.MAX_BIGINT}
        defaults.update(kwargs)
        return super(BigIntegerField, self).formfield(**defaults)


class IPAddressField(Field):
    empty_strings_allowed = False
    description = _("IPv4 address")

    def __init__(self, *args, **kwargs):
        warnings.warn("IPAddressField has been deprecated. "
                      "Use GenericIPAddressField instead.",
                      RemovedInDjango19Warning)
        kwargs['max_length'] = 15
        super(IPAddressField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(IPAddressField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs

    def get_prep_value(self, value):
        value = super(IPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)

    def get_internal_type(self):
        return "IPAddressField"

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IPAddressField}
        defaults.update(kwargs)
        return super(IPAddressField, self).formfield(**defaults)


class GenericIPAddressField(Field):
    empty_strings_allowed = True
    description = _("IP address")
    default_error_messages = {}

    def __init__(self, verbose_name=None, name=None, protocol='both',
                 unpack_ipv4=False, *args, **kwargs):
        self.unpack_ipv4 = unpack_ipv4
        self.protocol = protocol
        self.default_validators, invalid_error_message = \
            validators.ip_address_validators(protocol, unpack_ipv4)
        self.default_error_messages['invalid'] = invalid_error_message
        kwargs['max_length'] = 39
        super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
                                                    **kwargs)

    def check(self, **kwargs):
        errors = super(GenericIPAddressField, self).check(**kwargs)
        errors.extend(self._check_blank_and_null_values(**kwargs))
        return errors

    def _check_blank_and_null_values(self, **kwargs):
        if not getattr(self, 'null', False) and getattr(self, 'blank', False):
            return [
                checks.Error(
                    ('GenericIPAddressFields cannot have blank=True if null=False, '
                     'as blank values are stored as nulls.'),
                    hint=None,
                    obj=self,
                    id='fields.E150',
                )
            ]
        return []

    def deconstruct(self):
        name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
        if self.unpack_ipv4 is not False:
            kwargs['unpack_ipv4'] = self.unpack_ipv4
        if self.protocol != "both":
            kwargs['protocol'] = self.protocol
        if kwargs.get("max_length", None) == 39:
            del kwargs['max_length']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "GenericIPAddressField"

    def to_python(self, value):
        if value and ':' in value:
            return clean_ipv6_address(value,
                self.unpack_ipv4, self.error_messages['invalid'])
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
        return value or None

    def get_prep_value(self, value):
        value = super(GenericIPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        if value and ':' in value:
            try:
                return clean_ipv6_address(value, self.unpack_ipv4)
            except exceptions.ValidationError:
                pass
        return six.text_type(value)

    def formfield(self, **kwargs):
        defaults = {
            'protocol': self.protocol,
            'form_class': forms.GenericIPAddressField,
        }
        defaults.update(kwargs)
        return super(GenericIPAddressField, self).formfield(**defaults)


class NullBooleanField(Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        kwargs['null'] = True
        kwargs['blank'] = True
        super(NullBooleanField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
        del kwargs['null']
        del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        if value is None:
            return None
        if value in (True, False):
            return bool(value)
        if value in ('None',):
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        value = super(NullBooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text}
        defaults.update(kwargs)
        return super(NullBooleanField, self).formfield(**defaults)


class PositiveIntegerField(IntegerField):
    description = _("Positive integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        defaults = {'min_value': 0}
        defaults.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**defaults)


class PositiveSmallIntegerField(IntegerField):
    description = _("Positive small integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        defaults = {'min_value': 0}
        defaults.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**defaults)


class SlugField(CharField):
    default_validators = [validators.validate_slug]
    description = _("Slug (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 50)
        # Set db_index=True unless it's been set manually.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        super(SlugField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(SlugField, self).deconstruct()
        if kwargs.get("max_length", None) == 50:
            del kwargs['max_length']
        if self.db_index is False:
            kwargs['db_index'] = False
        else:
            del kwargs['db_index']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.SlugField}
        defaults.update(kwargs)
        return super(SlugField, self).formfield(**defaults)


class SmallIntegerField(IntegerField):
    description = _("Small integer")

    def get_internal_type(self):
        return "SmallIntegerField"


class TextField(Field):
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def get_prep_value(self, value):
        value = super(TextField, self).get_prep_value(value)
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
        defaults.update(kwargs)
        return super(TextField, self).formfield(**defaults)


class TimeField(DateTimeCheckMixin, Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "HH:MM[:ss[.uuuuuu]] format."),
        'invalid_time': _("'%(value)s' value has the correct format "
                          "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
    }
    description = _("Time")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(TimeField, self).__init__(verbose_name, name, **kwargs)

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        time or datetime value is probably wrong; it's only being evaluated on
        server start-up.

        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.time):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            value = datetime.datetime.combine(now.date(), value)
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc).time()
        else:
            # No explicit time / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def deconstruct(self):
        name, path, args, kwargs = super(TimeField, self).deconstruct()
        if self.auto_now is not False:
            kwargs["auto_now"] = self.auto_now
        if self.auto_now_add is not False:
            kwargs["auto_now_add"] = self.auto_now_add
        return name, path, args, kwargs

    def get_internal_type(self):
        return "TimeField"

    def to_python(self, value):
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        try:
            parsed = parse_time(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_time'],
                code='invalid_time',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)

    def get_prep_value(self, value):
        value = super(TimeField, self).get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)

    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)


class URLField(CharField):
    default_validators = [validators.URLValidator()]
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 200)
        super(URLField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(URLField, self).deconstruct()
        if kwargs.get("max_length", None) == 200:
            del kwargs['max_length']
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, this will cause URL validation to be performed
        # twice.
        defaults = {
            'form_class': forms.URLField,
        }
        defaults.update(kwargs)
        return super(URLField, self).formfield(**defaults)


class BinaryField(Field):
    description = _("Raw binary data")
    empty_values = [None, b'']

    def __init__(self, *args, **kwargs):
        kwargs['editable'] = False
        super(BinaryField, self).__init__(*args, **kwargs)
        if self.max_length is not None:
            self.validators.append(validators.MaxLengthValidator(self.max_length))

    def get_internal_type(self):
        return "BinaryField"

    def get_default(self):
        if self.has_default() and not callable(self.default):
            return self.default
        default = super(BinaryField, self).get_default()
        if default == '':
            return b''
        return default

    def get_db_prep_value(self, value, connection, prepared=False):
        value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
        if value is not None:
            return connection.Database.Binary(value)
        return value

    def value_to_string(self, obj):
        """Binary data is serialized as base64"""
        return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')

    def to_python(self, value):
        # If it's a string, it should be base64-encoded data
        if isinstance(value, six.text_type):
            return six.memoryview(b64decode(force_bytes(value)))
        return value
bsd-3-clause
5,217,430,932,472,338,000
36.12483
107
0.569755
false
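A side note on the Field contract the record above illustrates: each concrete field implements to_python() to coerce raw input into the field's Python type, get_prep_value() to convert back to the form the query layer expects, and get_internal_type() to name an existing backend column type. The sketch below is a minimal custom field built on that contract; it assumes a Django version in the same 1.7/1.8 era as the file above, and HexIntegerField with its hex storage format is a hypothetical illustration, not part of that file.

# Minimal sketch of a custom field (assumption: Django ~1.7/1.8, as above).
# HexIntegerField is hypothetical; it only mirrors the to_python /
# get_prep_value / get_internal_type pattern the source file demonstrates.
from django.core import exceptions
from django.db import models


class HexIntegerField(models.Field):
    description = "Integer stored as hexadecimal text"

    def get_internal_type(self):
        # Reuse an existing backend column type, as the built-in fields do;
        # TextField needs no max_length.
        return "TextField"

    def to_python(self, value):
        # Coerce stored/form input such as "0x1f" (or None) to a Python int.
        if value is None or isinstance(value, int):
            return value
        try:
            return int(value, 16)
        except (TypeError, ValueError):
            raise exceptions.ValidationError("'%s' is not a hex integer." % value)

    def get_prep_value(self, value):
        # Convert back to the text form the column stores.
        value = super(HexIntegerField, self).get_prep_value(value)
        return None if value is None else hex(value)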
hulu/uwsgi
tests/websockets_chat.py
21
2748
#!./uwsgi --http-socket :9090 --gevent 100 --module tests.websockets_chat --gevent-monkey-patch
import uwsgi
import time
import gevent.select
import redis


def application(env, sr):
    ws_scheme = 'ws'
    if 'HTTPS' in env or env['wsgi.url_scheme'] == 'https':
        ws_scheme = 'wss'

    if env['PATH_INFO'] == '/':
        sr('200 OK', [('Content-Type', 'text/html')])
        return """
    <html>
      <head>
          <script language="Javascript">
            var s = new WebSocket("%s://%s/foobar/");
            s.onopen = function() {
              alert("connected !!!");
              s.send("ciao");
            };
            s.onmessage = function(e) {
              var bb = document.getElementById('blackboard')
              var html = bb.innerHTML;
              bb.innerHTML = html + '<br/>' + e.data;
            };
            s.onerror = function(e) {
              alert(e);
            }
            s.onclose = function(e) {
              alert("connection closed");
            }
            function invia() {
              var value = document.getElementById('testo').value;
              s.send(value);
            }
          </script>
       </head>
    <body>
        <h1>WebSocket</h1>
        <input type="text" id="testo"/>
        <input type="button" value="invia" onClick="invia();"/>
        <div id="blackboard" style="width:640px;height:480px;background-color:black;color:white;border: solid 2px red;overflow:auto">
        </div>
    </body>
    </html>
        """ % (ws_scheme, env['HTTP_HOST'])
    elif env['PATH_INFO'] == '/favicon.ico':
        return ""
    elif env['PATH_INFO'] == '/foobar/':
        uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))
        print "websockets..."
        r = redis.StrictRedis(host='localhost', port=6379, db=0)
        channel = r.pubsub()
        channel.subscribe('foobar')

        websocket_fd = uwsgi.connection_fd()
        redis_fd = channel.connection._sock.fileno()

        while True:
            # wait max 4 seconds to allow ping to be sent
            ready = gevent.select.select([websocket_fd, redis_fd], [], [], 4.0)
            # send ping on timeout
            if not ready[0]:
                uwsgi.websocket_recv_nb()
            for fd in ready[0]:
                if fd == websocket_fd:
                    msg = uwsgi.websocket_recv_nb()
                    if msg:
                        r.publish('foobar', msg)
                elif fd == redis_fd:
                    msg = channel.parse_response()
                    # only interested in user messages
                    if msg[0] == 'message':
                        uwsgi.websocket_send("[%s] %s" % (time.time(), msg))
gpl-2.0
3,579,560,876,276,562,400
32.925926
133
0.491266
false
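The handler above multiplexes two file descriptors in a single gevent green thread, the websocket connection and the redis pubsub socket, with the 4-second select timeout doubling as the ping interval. A quick way to exercise the endpoint by hand is a tiny console client; this sketch assumes the third-party websocket-client package and a server started with the shebang line in the file, both of which are assumptions rather than part of the record.

# Hedged sketch of a console client for the /foobar/ chat endpoint above.
# Assumes `pip install websocket-client`, the uwsgi server from the record
# running on port 9090, and a local redis on the default port.
from websocket import create_connection

ws = create_connection("ws://127.0.0.1:9090/foobar/")
ws.send("hello from the console")
# The server loop relays redis 'foobar' messages back with a timestamp prefix.
print(ws.recv())
ws.close()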
rahuldan/sympy
sympy/core/tests/test_expr.py
2
57428
from __future__ import division

from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
    sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
    WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
    Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
    simplify, together, collect, factorial, apart, combsimp, factor, refine,
    cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
    exp_polar, expand, diff, O, Heaviside, Si, Max)
from sympy.core.function import AppliedUndef
from sympy.core.compatibility import range
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.series.formal import FormalPowerSeries
from sympy.utilities.pytest import raises, XFAIL

from sympy.abc import a, b, c, n, t, u, x, y, z


class DummyNumber(object):
    """
    Minimal implementation of a number that works with SymPy.

    If one has a Number class (e.g. Sage Integer, or some other custom class)
    that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, resp. its subclasses I5 and F1_1.

    Basically, one just needs to implement either __int__() or __float__() and
    then one needs to make sure that the class works with Python integers and
    with itself.
    """

    def __radd__(self, a):
        if isinstance(a, (int, float)):
            return a + self.number
        return NotImplemented

    def __truediv__(a, b):
        return a.__div__(b)

    def __rtruediv__(a, b):
        return a.__rdiv__(b)

    def __add__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number + a
        return NotImplemented

    def __rsub__(self, a):
        if isinstance(a, (int, float)):
            return a - self.number
        return NotImplemented

    def __sub__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number - a
        return NotImplemented

    def __rmul__(self, a):
        if isinstance(a, (int, float)):
            return a * self.number
        return NotImplemented

    def __mul__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number * a
        return NotImplemented

    def __rdiv__(self, a):
        if isinstance(a, (int, float)):
            return a / self.number
        return NotImplemented

    def __div__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number / a
        return NotImplemented

    def __rpow__(self, a):
        if isinstance(a, (int, float)):
            return a ** self.number
        return NotImplemented

    def __pow__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number ** a
        return NotImplemented

    def __pos__(self):
        return self.number

    def __neg__(self):
        return - self.number


class I5(DummyNumber):
    number = 5

    def __int__(self):
        return self.number


class F1_1(DummyNumber):
    number = 1.1

    def __float__(self):
        return self.number

i5 = I5()
f1_1 = F1_1()

# basic sympy objects
basic_objs = [
    Rational(2),
    Float("1.3"),
    x,
    y,
    pow(x, y)*y,
]

# all supported objects
all_objs = basic_objs + [
    5,
    5.5,
    i5,
    f1_1
]


def dotest(s):
    for x in all_objs:
        for y in all_objs:
            s(x, y)
    return True


def test_basic():
    def j(a, b):
        x = a
        x = +a
        x = -a
        x = a + b
        x = a - b
        x = a*b
        x = a/b
        x = a**b
    assert dotest(j)


def test_ibasic():
    def s(a, b):
        x = a
        x += b
        x = a
        x -= b
        x = a
        x *= b
        x = a
        x /= b
    assert dotest(s)


def test_relational():
    from sympy import Lt
    assert (pi < 3) is S.false
    assert (pi <= 3) is S.false
    assert (pi > 3) is S.true
    assert (pi >= 3) is S.true
    assert (-pi < 3) is S.true
    assert (-pi <= 3) is S.true
    assert (-pi > 3) is S.false
    assert (-pi >= 3) is S.false
    r = Symbol('r', real=True)
    assert (r - 2 < r - 3) is S.false
    assert Lt(x + I, x + I + 2).func == Lt  # issue 8288


def test_relational_assumptions():
    from sympy import Lt, Gt, Le, Ge
    m1 = Symbol("m1", nonnegative=False)
    m2 = Symbol("m2", positive=False)
    m3 = Symbol("m3", nonpositive=False)
    m4 = Symbol("m4", negative=False)
    assert (m1 < 0) == Lt(m1, 0)
    assert (m2 <= 0) == Le(m2, 0)
    assert (m3 > 0) == Gt(m3, 0)
    assert (m4 >= 0) == Ge(m4, 0)
    m1 = Symbol("m1", nonnegative=False, real=True)
    m2 = Symbol("m2", positive=False, real=True)
    m3 = Symbol("m3", nonpositive=False, real=True)
    m4 = Symbol("m4", negative=False, real=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    m1 = Symbol("m1", negative=True)
    m2 = Symbol("m2", nonpositive=True)
    m3 = Symbol("m3", positive=True)
    m4 = Symbol("m4", nonnegative=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    m1 = Symbol("m1", negative=False, real=True)
    m2 = Symbol("m2", nonpositive=False, real=True)
    m3 = Symbol("m3", positive=False, real=True)
    m4 = Symbol("m4", nonnegative=False, real=True)
    assert (m1 < 0) is S.false
    assert (m2 <= 0) is S.false
    assert (m3 > 0) is S.false
    assert (m4 >= 0) is S.false


def test_relational_noncommutative():
    from sympy import Lt, Gt, Le, Ge
    A, B = symbols('A,B', commutative=False)
    assert (A < B) == Lt(A, B)
    assert (A <= B) == Le(A, B)
    assert (A > B) == Gt(A, B)
    assert (A >= B) == Ge(A, B)


def test_basic_nostr():
    for obj in basic_objs:
        raises(TypeError, lambda: obj + '1')
        raises(TypeError, lambda: obj - '1')
        if obj == 2:
            assert obj * '1' == '11'
        else:
            raises(TypeError, lambda: obj * '1')
        raises(TypeError, lambda: obj / '1')
        raises(TypeError, lambda: obj ** '1')


def test_series_expansion_for_uniform_order():
    assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
    assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
    assert (1/x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)


def test_leadterm():
    assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
    assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
    assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
    assert (x**2 + 1/x).leadterm(x)[1] == -1
    assert (1 + x**2).leadterm(x)[1] == 0
    assert (x + 1).leadterm(x)[1] == 0
    assert (x + x**2).leadterm(x)[1] == 1
    assert (x**2).leadterm(x)[1] == 2


def test_as_leading_term():
    assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
    assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
    assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
    assert (x**2 + 1/x).as_leading_term(x) == 1/x
    assert (1 + x**2).as_leading_term(x) == 1
    assert (x + 1).as_leading_term(x) == 1
    assert (x + x**2).as_leading_term(x) == x
    assert (x**2).as_leading_term(x) == x**2
    assert (x + oo).as_leading_term(x) == oo


def test_leadterm2():
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
        (sin(1 + sin(1)), 0)


def test_leadterm3():
    assert (y + z + x).leadterm(x) == (y + z, 0)


def test_as_leading_term2():
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
        sin(1 + sin(1))


def test_as_leading_term3():
    assert (2 + pi + x).as_leading_term(x) == 2 + pi
    assert (2*x + pi*x + x**2).as_leading_term(x) == (2 + pi)*x


def test_as_leading_term4():
    # see issue 6843
    n = Symbol('n', integer=True, positive=True)
    r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
        n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
        1 + 1/(n*x + x) + 1/(n + 1) - 1/x
    assert r.as_leading_term(x).cancel() == n/2


def test_as_leading_term_stub():
    class foo(Function):
        pass
    assert foo(1/x).as_leading_term(x) == foo(1/x)
    assert foo(1).as_leading_term(x) == foo(1)
    raises(NotImplementedError, lambda: foo(x).as_leading_term(x))


def test_atoms():
    assert x.atoms() == {x}
    assert (1 + x).atoms() == {x, S(1)}
    assert (1 + 2*cos(x)).atoms(Symbol) == {x}
    assert (1 + 2*cos(x)).atoms(Symbol, Number) == {S(1), S(2), x}
    assert (2*(x**(y**x))).atoms() == {S(2), x, y}
    assert Rational(1, 2).atoms() == {S.Half}
    assert Rational(1, 2).atoms(Symbol) == set([])
    assert sin(oo).atoms(oo) == set()
    assert Poly(0, x).atoms() == {S.Zero}
    assert Poly(1, x).atoms() == {S.One}
    assert Poly(x, x).atoms() == {x}
    assert Poly(x, x, y).atoms() == {x}
    assert Poly(x + y, x, y).atoms() == {x, y}
    assert Poly(x + y, x, y, z).atoms() == {x, y}
    assert Poly(x + y*t, x, y, z).atoms() == {t, x, y}
    assert (I*pi).atoms(NumberSymbol) == {pi}
    assert (I*pi).atoms(NumberSymbol, I) == \
        (I*pi).atoms(I, NumberSymbol) == {pi, I}
    assert exp(exp(x)).atoms(exp) == {exp(exp(x)), exp(x)}
    assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
        {1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z}
    # issue 6132
    f = Function('f')
    e = (f(x) + sin(x) + 2)
    assert e.atoms(AppliedUndef) == \
        {f(x)}
    assert e.atoms(AppliedUndef, Function) == \
        {f(x), sin(x)}
    assert e.atoms(Function) == \
        {f(x), sin(x)}
    assert e.atoms(AppliedUndef, Number) == \
        {f(x), S(2)}
    assert e.atoms(Function, Number) == \
        {S(2), sin(x), f(x)}


def test_is_polynomial():
    k = Symbol('k', nonnegative=True, integer=True)
    assert Rational(2).is_polynomial(x, y, z) is True
    assert (S.Pi).is_polynomial(x, y, z) is True
    assert x.is_polynomial(x) is True
    assert x.is_polynomial(y) is True
    assert (x**2).is_polynomial(x) is True
    assert (x**2).is_polynomial(y) is True
    assert (x**(-2)).is_polynomial(x) is False
    assert (x**(-2)).is_polynomial(y) is True
    assert (2**x).is_polynomial(x) is False
    assert (2**x).is_polynomial(y) is True
    assert (x**k).is_polynomial(x) is False
    assert (x**k).is_polynomial(k) is False
    assert (x**x).is_polynomial(x) is False
    assert (k**k).is_polynomial(k) is False
    assert (k**x).is_polynomial(k) is False
    assert (x**(-k)).is_polynomial(x) is False
    assert ((2*x)**k).is_polynomial(x) is False
    assert (x**2 + 3*x - 8).is_polynomial(x) is True
    assert (x**2 + 3*x - 8).is_polynomial(y) is True
    assert (x**2 + 3*x - 8).is_polynomial() is True
    assert sqrt(x).is_polynomial(x) is False
    assert (sqrt(x)**3).is_polynomial(x) is False
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False


def test_is_rational_function():
    assert Integer(1).is_rational_function() is True
    assert Integer(1).is_rational_function(x) is True
    assert Rational(17, 54).is_rational_function() is True
    assert Rational(17, 54).is_rational_function(x) is True
    assert (12/x).is_rational_function() is True
    assert (12/x).is_rational_function(x) is True
    assert (x/y).is_rational_function() is True
    assert (x/y).is_rational_function(x) is True
    assert (x/y).is_rational_function(x, y) is True
    assert (x**2 + 1/x/y).is_rational_function() is True
    assert (x**2 + 1/x/y).is_rational_function(x) is True
    assert (x**2 + 1/x/y).is_rational_function(x, y) is True
    assert (sin(y)/x).is_rational_function() is False
    assert (sin(y)/x).is_rational_function(y) is False
    assert (sin(y)/x).is_rational_function(x) is True
    assert (sin(y)/x).is_rational_function(x, y) is False
    assert (S.NaN).is_rational_function() is False
    assert (S.Infinity).is_rational_function() is False
    assert (-S.Infinity).is_rational_function() is False
    assert (S.ComplexInfinity).is_rational_function() is False


def test_is_algebraic_expr():
    assert sqrt(3).is_algebraic_expr(x) is True
    assert sqrt(3).is_algebraic_expr() is True
    eq = ((1 + x**2)/(1 - y**2))**(S(1)/3)
    assert eq.is_algebraic_expr(x) is True
    assert eq.is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False


def test_SAGE1():
    # see https://github.com/sympy/sympy/issues/3346
    class MyInt:
        def _sympy_(self):
            return Integer(5)
    m = MyInt()
    e = Rational(2)*m
    assert e == 10
    raises(TypeError, lambda: Rational(2)*MyInt)


def test_SAGE2():
    class MyInt(object):
        def __int__(self):
            return 5
    assert sympify(MyInt()) == 5
    e = Rational(2)*MyInt()
    assert e == 10
    raises(TypeError, lambda: Rational(2)*MyInt)


def test_SAGE3():
    class MySymbol:
        def __rmul__(self, other):
            return ('mys', other, self)
    o = MySymbol()
    e = x*o
    assert e == ('mys', x, o)


def test_len():
    e = x*y
    assert len(e.args) == 2
    e = x + y + z
    assert len(e.args) == 3


def test_doit():
    a = Integral(x**2, x)
    assert isinstance(a.doit(), Integral) is False
    assert isinstance(a.doit(integrals=True), Integral) is False
    assert isinstance(a.doit(integrals=False), Integral) is True
    assert (2*Integral(x, x)).doit() == x**2


def test_attribute_error():
    raises(AttributeError, lambda: x.cos())
    raises(AttributeError, lambda: x.sin())
    raises(AttributeError, lambda: x.exp())


def test_args():
    assert (x*y).args in ((x, y), (y, x))
    assert (x + y).args in ((x, y), (y, x))
    assert (x*y + 1).args in ((x*y, 1), (1, x*y))
    assert sin(x*y).args == (x*y,)
    assert sin(x*y).args[0] == x*y
    assert (x**y).args == (x, y)
    assert (x**y).args[0] == x
    assert (x**y).args[1] == y


def test_noncommutative_expand_issue_3757():
    A, B, C = symbols('A,B,C', commutative=False)
    assert A*B - B*A != 0
    assert (A*(A + B)*B).expand() == A**2*B + A*B**2
    assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B


def test_as_numer_denom():
    a, b, c = symbols('a, b, c')
    assert nan.as_numer_denom() == (nan, 1)
    assert oo.as_numer_denom() == (oo, 1)
    assert (-oo).as_numer_denom() == (-oo, 1)
    assert zoo.as_numer_denom() == (zoo, 1)
    assert (-zoo).as_numer_denom() == (zoo, 1)
    assert x.as_numer_denom() == (x, 1)
    assert (1/x).as_numer_denom() == (1, x)
    assert (x/y).as_numer_denom() == (x, y)
    assert (x/2).as_numer_denom() == (x, 2)
    assert (x*y/z).as_numer_denom() == (x*y, z)
    assert (x/(y*z)).as_numer_denom() == (x, y*z)
    assert Rational(1, 2).as_numer_denom() == (1, 2)
    assert (1/y**2).as_numer_denom() == (1, y**2)
    assert (x/y**2).as_numer_denom() == (x, y**2)
    assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
    assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
    assert (x**-2).as_numer_denom() == (1, x**2)
    assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
        (6*a + 3*b + 2*c, 6*x)
    assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
        (2*c*x + y*(6*a + 3*b), 6*x*y)
    assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
        (2*a + b + 4.0*c, 2*x)
    # this should take no more than a few seconds
    assert int(log(Add(*[Dummy()/i/x for i in range(1, 705)]
                       ).as_numer_denom()[1]/x).n(4)) == 705
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).as_numer_denom() == \
            (x + i, 3)
    assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
        (4*x + 3*y + S.Infinity, 12)
    assert (oo*x + zoo*y).as_numer_denom() == \
        (zoo*y + oo*x, 1)
    A, B, C = symbols('A,B,C', commutative=False)
    assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
    assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
    assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
    assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
    assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
    assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)


def test_as_independent():
    assert S.Zero.as_independent(x, as_Add=True) == (0, 0)
    assert S.Zero.as_independent(x, as_Add=False) == (0, 0)
    assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
    assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
    assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
    assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
    assert (sin(x)).as_independent(x) == (1, sin(x))
    assert (sin(x)).as_independent(y) == (sin(x), 1)
    assert (2*sin(x)).as_independent(x) == (2, sin(x))
    assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
    # issue 4903 = 1766b
    n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
    assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
    assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
    assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
    assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
    assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
    assert (3*x).as_independent(x, as_Add=False) == (3, x)
    assert (3 + x).as_independent(x, as_Add=True) == (3, x)
    assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)
    # issue 5479
    assert (3*x).as_independent(Symbol) == (3, x)
    # issue 5648
    assert (n1*x*y).as_independent(x) == (n1*y, x)
    assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
    assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
    assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
        == (1, DiracDelta(x - n1)*DiracDelta(x - y))
    assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
    assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
        (DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))
    # issue 5784
    assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
        (Integral(x, (x, 1, 2)), x)
    eq = Add(x, -x, 2, -3, evaluate=False)
    assert eq.as_independent(x) == (-1, Add(x, -x, evaluate=False))
    eq = Mul(x, 1/x, 2, -3, evaluate=False)
    eq.as_independent(x) == (-6, Mul(x, 1/x, evaluate=False))


@XFAIL
def test_call_2():
    # TODO UndefinedFunction does not subclass Expr
    f = Function('f')
    assert (2*f)(x) == 2*f(x)


def test_replace():
    f = log(sin(x)) + tan(sin(x**2))
    assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    a = Wild('a')
    b = Wild('b')
    assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    # test exact
    assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, b - a) == 2/x
    assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2/x
    g = 2*sin(x**3)
    assert g.replace(
        lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
    assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
    assert sin(x).replace(cos, sin) == sin(x)
    cond, func = lambda x: x.is_Mul, lambda x: 2*x
    assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
    assert (x*(1 + x*y)).replace(cond, func, map=True) == \
        (2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
        (sin(x), {sin(x): sin(x)/y})
    # if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
        simultaneous=False) == sin(x)/y
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e) == O(1, x)
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
        simultaneous=False) == x**2/2 + O(x**3)
    assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
        x*(x*y + 5) + 2
    e = (x*y + 1)*(2*x*y + 1) + 1
    assert e.replace(cond, func, map=True) == (
        2*((2*x*y + 1)*(4*x*y + 1)) + 1,
        {2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
            2*((2*x*y + 1)*(4*x*y + 1))})
    assert x.replace(x, y) == y
    assert (x + 1).replace(1, 2) == x + 2
    # https://groups.google.com/forum/#!topic/sympy/8wCgeC95tz0
    n1, n2, n3 = symbols('n1:4', commutative=False)
    f = Function('f')
    assert (n1*f(n2)).replace(f, lambda x: x) == n1*n2
    assert (n3*f(n2)).replace(f, lambda x: x) == n3*n2


def test_find():
    expr = (x + y + 2 + sin(3*x))
    assert expr.find(lambda u: u.is_Integer) == {S(2), S(3)}
    assert expr.find(lambda u: u.is_Symbol) == {x, y}
    assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
    assert expr.find(Integer) == {S(2), S(3)}
    assert expr.find(Symbol) == {x, y}
    assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(Symbol, group=True) == {x: 2, y: 1}
    a = Wild('a')
    expr = sin(sin(x)) + sin(x) + cos(x) + x
    assert expr.find(lambda u: type(u) is sin) == {sin(x), sin(sin(x))}
    assert expr.find(
        lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin(a)) == {sin(x), sin(sin(x))}
    assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin) == {sin(x), sin(sin(x))}
    assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}


def test_count():
    expr = (x + y + 2 + sin(3*x))
    assert expr.count(lambda u: u.is_Integer) == 2
    assert expr.count(lambda u: u.is_Symbol) == 3
    assert expr.count(Integer) == 2
    assert expr.count(Symbol) == 3
    assert expr.count(2) == 1
    a = Wild('a')
    assert expr.count(sin) == 1
    assert expr.count(sin(a)) == 1
    assert expr.count(lambda u: type(u) is sin) == 1


def test_has_basics():
    f = Function('f')
    g = Function('g')
    p = Wild('p')
    assert sin(x).has(x)
    assert sin(x).has(sin)
    assert not sin(x).has(y)
    assert not sin(x).has(cos)
    assert f(x).has(x)
    assert f(x).has(f)
    assert not f(x).has(y)
    assert not f(x).has(g)
    assert f(x).diff(x).has(x)
    assert f(x).diff(x).has(f)
    assert f(x).diff(x).has(Derivative)
    assert not f(x).diff(x).has(y)
    assert not f(x).diff(x).has(g)
    assert not f(x).diff(x).has(sin)
    assert (x**2).has(Symbol)
    assert not (x**2).has(Wild)
    assert (2*p).has(Wild)
    assert not x.has()


def test_has_multiple():
    f = x**2*y + sin(2**t + log(z))
    assert f.has(x)
    assert f.has(y)
    assert f.has(z)
    assert f.has(t)
    assert not f.has(u)
    assert f.has(x, y, z, t)
    assert f.has(x, y, z, t, u)
    i = Integer(4400)
    assert not i.has(x)
    assert (i*x**i).has(x)
    assert not (i*y**i).has(x)
    assert (i*y**i).has(x, y)
    assert not (i*y**i).has(x, z)


def test_has_piecewise():
    f = (x*y + 3/y)**(3 + 2)
    g = Function('g')
    h = Function('h')
    p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
    assert p.has(x)
    assert p.has(y)
    assert not p.has(z)
    assert p.has(1)
    assert p.has(3)
    assert not p.has(4)
    assert p.has(f)
    assert p.has(g)
    assert not p.has(h)


def test_has_iterative():
    A, B, C = symbols('A,B,C', commutative=False)
    f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
    assert f.has(x)
    assert f.has(x*y)
    assert f.has(x*sin(x))
    assert not f.has(x*sin(y))
    assert f.has(x*A)
    assert f.has(x*A*B)
    assert not f.has(x*A*C)
    assert f.has(x*A*B*C)
    assert not f.has(x*A*C*B)
    assert f.has(x*sin(x)*A*B*C)
    assert not f.has(x*sin(x)*A*C*B)
    assert not f.has(x*sin(y)*A*B*C)
    assert f.has(x*gamma(x))
    assert not f.has(x + sin(x))
    assert (x & y & z).has(x & z)


def test_has_integrals():
    f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
    assert f.has(x + y)
    assert f.has(x + z)
    assert f.has(y + z)
    assert f.has(x*y)
    assert f.has(x*z)
    assert f.has(y*z)
    assert not f.has(2*x + y)
    assert not f.has(2*x*y)


def test_has_tuple():
    f = Function('f')
    g = Function('g')
    h = Function('h')
    assert Tuple(x, y).has(x)
    assert not Tuple(x, y).has(z)
    assert Tuple(f(x), g(x)).has(x)
    assert not Tuple(f(x), g(x)).has(y)
    assert Tuple(f(x), g(x)).has(f)
    assert Tuple(f(x), g(x)).has(f(x))
    assert not Tuple(f, g).has(x)
    assert Tuple(f, g).has(f)
    assert not Tuple(f, g).has(h)
    assert Tuple(True).has(True) is True  # .has(1) will also be True


def test_has_units():
    from sympy.physics.units import m, s
    assert (x*m/s).has(x)
    assert (x*m/s).has(y, z) is False


def test_has_polys():
    poly = Poly(x**2 + x*y*sin(z), x, y, t)
    assert poly.has(x)
    assert poly.has(x, y, z)
    assert poly.has(x, y, z, t)


def test_has_physics():
    assert FockState((x, y)).has(x)


def test_as_poly_as_expr():
    f = x**2 + 2*x*y
    assert f.as_poly().as_expr() == f
    assert f.as_poly(x, y).as_expr() == f
    assert (f + sin(x)).as_poly(x, y) is None
    p = Poly(f, x, y)
    assert p.as_poly() == p


def test_nonzero():
    assert bool(S.Zero) is False
    assert bool(S.One) is True
    assert bool(x) is True
    assert bool(x + y) is True
    assert bool(x - x) is False
    assert bool(x*y) is True
    assert bool(x*1) is True
    assert bool(x*0) is False


def test_is_number():
    assert Float(3.14).is_number is True
    assert Integer(737).is_number is True
    assert Rational(3, 2).is_number is True
    assert Rational(8).is_number is True
    assert x.is_number is False
    assert (2*x).is_number is False
    assert (x + y).is_number is False
    assert log(2).is_number is True
    assert log(x).is_number is False
    assert (2 + log(2)).is_number is True
    assert (8 + log(2)).is_number is True
    assert (2 + log(x)).is_number is False
    assert (8 + log(2) + x).is_number is False
    assert (1 + x**2/x - x).is_number is True
    assert Tuple(Integer(1)).is_number is False
    assert Add(2, x).is_number is False
    assert Mul(3, 4).is_number is True
    assert Pow(log(2), 2).is_number is True
    assert oo.is_number is True
    g = WildFunction('g')
    assert g.is_number is False
    assert (2*g).is_number is False
    assert (x**2).subs(x, 3).is_number is True
    # test extensibility of .is_number
    # on subinstances of Basic
    class A(Basic):
        pass
    a = A()
    assert a.is_number is False


def test_as_coeff_add():
    assert S(2).as_coeff_add() == (2, ())
    assert S(3.0).as_coeff_add() == (0, (S(3.0),))
    assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
    assert x.as_coeff_add() == (0, (x,))
    assert (x - 1).as_coeff_add() == (-1, (x,))
    assert (x + 1).as_coeff_add() == (1, (x,))
    assert (x + 2).as_coeff_add() == (2, (x,))
    assert (x + y).as_coeff_add(y) == (x, (y,))
    assert (3*x).as_coeff_add(y) == (3*x, ())
    # don't do expansion
    e = (x + y)**2
    assert e.as_coeff_add(y) == (0, (e,))


def test_as_coeff_mul():
    assert S(2).as_coeff_mul() == (2, ())
    assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
    assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
    assert S(-3.0).as_coeff_mul(rational=False) == (-S(3.0), ())
    assert x.as_coeff_mul() == (1, (x,))
    assert (-x).as_coeff_mul() == (-1, (x,))
    assert (2*x).as_coeff_mul() == (2, (x,))
    assert (x*y).as_coeff_mul(y) == (x, (y,))
    assert (3 + x).as_coeff_mul() == (1, (3 + x,))
    assert (3 + x).as_coeff_mul(y) == (3 + x, ())
    # don't do expansion
    e = exp(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    e = 2**(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    assert (1.1*x).as_coeff_mul(rational=False) == (1.1, (x,))
    assert (1.1*x).as_coeff_mul() == (1, (1.1, x))
    assert (-oo*x).as_coeff_mul(rational=True) == (-1, (oo, x))


def test_as_coeff_exponent():
    assert (3*x**4).as_coeff_exponent(x) == (3, 4)
    assert (2*x**3).as_coeff_exponent(x) == (2, 3)
    assert (4*x**2).as_coeff_exponent(x) == (4, 2)
    assert (6*x**1).as_coeff_exponent(x) == (6, 1)
    assert (3*x**0).as_coeff_exponent(x) == (3, 0)
    assert (2*x**0).as_coeff_exponent(x) == (2, 0)
    assert (1*x**0).as_coeff_exponent(x) == (1, 0)
    assert (0*x**0).as_coeff_exponent(x) == (0, 0)
    assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
    assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
    assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
    assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
        (log(2)/(2 + pi), 0)
    # issue 4784
    D = Derivative
    f = Function('f')
    fx = D(f(x), x)
    assert fx.as_coeff_exponent(f(x)) == (fx, 0)


def test_extractions():
    assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
    assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
    assert (2*x).extract_multiplicatively(2) == x
    assert (2*x).extract_multiplicatively(3) is None
    assert (2*x).extract_multiplicatively(-1) is None
    assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6
    assert (sqrt(x)).extract_multiplicatively(x) is None
    assert (sqrt(x)).extract_multiplicatively(1/x) is None
    assert x.extract_multiplicatively(-x) is None
    assert ((x*y)**3).extract_additively(1) is None
    assert (x + 1).extract_additively(x) == 1
    assert (x + 1).extract_additively(2*x) is None
    assert (x + 1).extract_additively(-x) is None
    assert (-x + 1).extract_additively(2*x) is None
    assert (2*x + 3).extract_additively(x) == x + 3
    assert (2*x + 3).extract_additively(2) == 2*x + 1
    assert (2*x + 3).extract_additively(3) == 2*x
    assert (2*x + 3).extract_additively(-2) is None
    assert (2*x + 3).extract_additively(3*x) is None
    assert (2*x + 3).extract_additively(2*x) == 3
    assert x.extract_additively(0) == x
    assert S(2).extract_additively(x) is None
    assert S(2.).extract_additively(2) == S.Zero
    assert S(2*x + 3).extract_additively(x + 1) == x + 2
    assert S(2*x + 3).extract_additively(y + 1) is None
    assert S(2*x - 3).extract_additively(x + 1) is None
    assert S(2*x - 3).extract_additively(y + z) is None
    assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
        4*a*x + 3*x + y
    assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
        4*a*x + 3*x + y
    assert (y*(x + 1)).extract_additively(x + 1) is None
    assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
        y*(x + 1) + 3
    assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
        x*(x + y) + 3
    assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
        x + y + (x + 1)*(x + y) + 3
    assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
        (x + 2*y)*(y + 1) + 3

    n = Symbol("n", integer=True)
    assert (Integer(-3)).could_extract_minus_sign() is True
    assert (-n*x + x).could_extract_minus_sign() != \
        (n*x - x).could_extract_minus_sign()
    assert (x - y).could_extract_minus_sign() != \
        (-x + y).could_extract_minus_sign()
    assert (1 - x - y).could_extract_minus_sign() is True
    assert (1 - x + y).could_extract_minus_sign() is False
    assert ((-x - x*y)/y).could_extract_minus_sign() is True
    assert (-(x + x*y)/y).could_extract_minus_sign() is True
    assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
    assert ((x + x*y)/y).could_extract_minus_sign() is False
    assert (x*(-x - x**3)).could_extract_minus_sign() is True
    assert ((-x - y)/(x + y)).could_extract_minus_sign() is True
    # The results of each of these will vary on different machines, e.g.
    # the first one might be False and the other (then) is true or vice versa,
    # so both are included.
    assert ((-x - y)/(x - y)).could_extract_minus_sign() is False or \
        ((-x - y)/(y - x)).could_extract_minus_sign() is False
    assert (x - y).could_extract_minus_sign() is False
    assert (-x + y).could_extract_minus_sign() is True


def test_nan_extractions():
    for r in (1, 0, I, nan):
        assert nan.extract_additively(r) is None
        assert nan.extract_multiplicatively(r) is None


def test_coeff():
    assert (x + 1).coeff(x + 1) == 1
    assert (3*x).coeff(0) == 0
    assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
    assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
    assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (3 + 2*x + 4*x**2).coeff(-1) == 0
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y
    assert (-x/8 + x*y).coeff(-x) == S(1)/8
    assert (4*x).coeff(2*x) == 0
    assert (2*x).coeff(2*x) == 1
    assert (-oo*x).coeff(x*oo) == -1
    assert (10*x).coeff(x, 0) == 0
    assert (10*x).coeff(10*x, 0) == 0
    n1, n2 = symbols('n1 n2', commutative=False)
    assert (n1*n2).coeff(n1) == 1
    assert (n1*n2).coeff(n2) == n1
    assert (n1*n2 + x*n1).coeff(n1) == 1  # 1*n1*(n2+x)
    assert (n2*n1 + x*n1).coeff(n1) == n2 + x
    assert (n2*n1 + x*n1**2).coeff(n1) == n2
    assert (n1**x).coeff(n1) == 0
    assert (n1*n2 + n2*n1).coeff(n1) == 0
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2
    f = Function('f')
    assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr.coeff(x + y) == 0
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
    assert (x + y + 3*z).coeff(1) == x + y
    assert (-x + 2*y).coeff(-1) == x
    assert (x - 2*y).coeff(-1) == 2*y
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (-x - 2*y).coeff(2) == -y
    assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (z*(x + y)**2).coeff((x + y)**2) == z
    assert (z*(x + y)**2).coeff(x + y) == 0
    assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
    assert (x + 2*y + 3).coeff(1) == x
    assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
    assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
    assert x.coeff(0, 0) == 0
    assert x.coeff(x, 0) == 0
    n, m, o, l = symbols('n m o l', commutative=False)
    assert n.coeff(n) == 1
    assert y.coeff(n) == 0
    assert (3*n).coeff(n) == 3
    assert (2 + n).coeff(x*m) == 0
    assert (2*x*n*m).coeff(x) == 2*n*m
    assert (2 + n).coeff(x*m*n + y) == 0
    assert (2*x*n*m).coeff(3*n) == 0
    assert (n*m + m*n*m).coeff(n) == 1 + m
    assert (n*m + m*n*m).coeff(n, right=True) == m  # = (1 + m)*n*m
    assert (n*m + m*n).coeff(n) == 0
    assert (n*m + o*m*n).coeff(m*n) == o
    assert (n*m + o*m*n).coeff(m*n, right=1) == 1
    assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n  # = n*m*(n + 1)


def test_coeff2():
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    g = g.expand()
    assert g.coeff((psi(r).diff(r))) == 2/r


def test_coeff2_0():
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    g = g.expand()
    assert g.coeff(psi(r).diff(r, 2)) == 1


def test_coeff_expand():
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2


def test_integrate():
    assert x.integrate(x) == x**2/2
    assert x.integrate((x, 0, 1)) == S(1)/2


def test_as_base_exp():
    assert x.as_base_exp() == (x, S.One)
    assert (x*y*z).as_base_exp() == (x*y*z, S.One)
    assert (x + y + z).as_base_exp() == (x + y + z, S.One)
    assert ((x + y)**z).as_base_exp() == (x + y, z)


def test_issue_4963():
    assert hasattr(Mul(x, y), "is_commutative")
    assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
    assert hasattr(Pow(x, y), "is_commutative")
    assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
    expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
    assert hasattr(expr, "is_commutative")


def test_action_verbs():
    assert nsimplify((1/(exp(3*pi*x/5) + 1))) == \
        (1/(exp(3*pi*x/5) + 1)).nsimplify()
    assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
    assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
    assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
    assert powsimp(x**y*x**z*y**z, combine='all') == \
        (x**y*x**z*y**z).powsimp(combine='all')
    assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
    assert together(1/x + 1/y) == (1/x + 1/y).together()
    assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
        (a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
    assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
    assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
    assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
    assert refine(sqrt(x**2)) == sqrt(x**2).refine()
    assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()


def test_as_powers_dict():
    assert x.as_powers_dict() == {x: 1}
    assert (x**y*z).as_powers_dict() == {x: y, z: 1}
    assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
    assert (x*y).as_powers_dict()[z] == 0
    assert (x + y).as_powers_dict()[z] == 0


def test_as_coefficients_dict():
    check = [S(1), x, y, x*y, 1]
    assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
        [3, 5, 1, 0, 3]
    assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3, 0]
    assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1


def test_args_cnc():
    A = symbols('A', commutative=False)

    assert (x + A).args_cnc() == \
        [[], [x + A]]
    assert (x + a).args_cnc() == \
        [[a + x], []]
    assert (x*a).args_cnc() == \
        [[a, x], []]
    assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
        [{x, y}, [A, 1 + A]]
    assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
[{x}, []] assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \ [{x, x**2}, []] raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True)) assert Mul(x, y, x, evaluate=False).args_cnc() == \ [[x, y, x], []] # always split -1 from leading number assert (-1.*x).args_cnc() == [[-1, 1.0, x], []] def test_new_rawargs(): n = Symbol('n', commutative=False) a = x + n assert a.is_commutative is False assert a._new_rawargs(x).is_commutative assert a._new_rawargs(x, y).is_commutative assert a._new_rawargs(x, n).is_commutative is False assert a._new_rawargs(x, y, n).is_commutative is False m = x*n assert m.is_commutative is False assert m._new_rawargs(x).is_commutative assert m._new_rawargs(n).is_commutative is False assert m._new_rawargs(x, y).is_commutative assert m._new_rawargs(x, n).is_commutative is False assert m._new_rawargs(x, y, n).is_commutative is False assert m._new_rawargs(x, n, reeval=False).is_commutative is False assert m._new_rawargs(S.One) is S.One def test_issue_5226(): assert Add(evaluate=False) == 0 assert Mul(evaluate=False) == 1 assert Mul(x + y, evaluate=False).is_Add def test_free_symbols(): # free_symbols should return the free symbols of an object assert S(1).free_symbols == set() assert (x).free_symbols == {x} assert Integral(x, (x, 1, y)).free_symbols == {y} assert (-Integral(x, (x, 1, y))).free_symbols == {y} assert meter.free_symbols == set() assert (meter**x).free_symbols == {x} def test_issue_5300(): x = Symbol('x', commutative=False) assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3 def test_floordiv(): from sympy.functions.elementary.integers import floor assert x // y == floor(x / y) def test_as_coeff_Mul(): assert S(0).as_coeff_Mul() == (S.One, S.Zero) assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1)) assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1)) assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1)) assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x) assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x) assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x) assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y) assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y) assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y) assert (x).as_coeff_Mul() == (S.One, x) assert (x*y).as_coeff_Mul() == (S.One, x*y) assert (-oo*x).as_coeff_Mul(rational=True) == (-1, oo*x) def test_as_coeff_Add(): assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0)) assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0)) assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0)) assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x) assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x) assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x) assert (Float(5.0) + x).as_coeff_Add(rational=True) == (0, Float(5.0) + x) assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y) assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y) assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y) assert (x).as_coeff_Add() == (S.Zero, x) assert (x*y).as_coeff_Add() == (S.Zero, x*y) def test_expr_sorting(): f, g = symbols('f,g', cls=Function) exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2] assert sorted(exprs, key=default_sort_key) == exprs exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n, sin(x**2), cos(x), cos(x**2), tan(x)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [x + 1, x**2 + x + 
1, x**3 + x**2 + x + 1] assert sorted(exprs, key=default_sort_key) == exprs exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1] assert sorted(exprs, key=default_sort_key) == exprs exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [[3], [1, 2]] assert sorted(exprs, key=default_sort_key) == exprs exprs = [[1, 2], [2, 3]] assert sorted(exprs, key=default_sort_key) == exprs exprs = [[1, 2], [1, 2, 3]] assert sorted(exprs, key=default_sort_key) == exprs exprs = [{x: -y}, {x: y}] assert sorted(exprs, key=default_sort_key) == exprs exprs = [{1}, {1, 2}] assert sorted(exprs, key=default_sort_key) == exprs a, b = exprs = [Dummy('x'), Dummy('x')] assert sorted([b, a], key=default_sort_key) == exprs def test_as_ordered_factors(): f, g = symbols('f,g', cls=Function) assert x.as_ordered_factors() == [x] assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \ == [Integer(2), x, x**n, sin(x), cos(x)] args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)] expr = Mul(*args) assert expr.as_ordered_factors() == args A, B = symbols('A,B', commutative=False) assert (A*B).as_ordered_factors() == [A, B] assert (B*A).as_ordered_factors() == [B, A] def test_as_ordered_terms(): f, g = symbols('f,g', cls=Function) assert x.as_ordered_terms() == [x] assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \ == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1] args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)] expr = Add(*args) assert expr.as_ordered_terms() == args assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1] assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I] assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I] assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I] assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I] assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I] assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I] assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I] assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I] f = x**2*y**2 + x*y**4 + y + 2 assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2] assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2] assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2] assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4] def test_sort_key_atomic_expr(): from sympy.physics.units import m, s assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s] def test_issue_4199(): # first subs and limit gives NaN a = x/y assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN # second subs and limit gives NaN assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN # difference gives S.NaN a = x - y assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN raises(ValueError, lambda: x._eval_interval(x, None, None)) a = -y*Heaviside(x - y) assert a._eval_interval(x, -oo, oo) == -y assert a._eval_interval(x, oo, -oo) == y def test_eval_interval_zoo(): # Test that limit is used when zoo is returned assert Si(1/x)._eval_interval(x, 0, 1) == -pi/2 + Si(1) def test_primitive(): assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2) assert (6*x + 2).primitive() == (2, 3*x + 1) assert (x/2 + 3).primitive() == (S(1)/2, x + 6) eq = (6*x + 2)*(x/2 + 3) assert 
eq.primitive()[0] == 1 eq = (2 + 2*x)**2 assert eq.primitive()[0] == 1 assert (4.0*x).primitive() == (1, 4.0*x) assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y) assert (-2*x).primitive() == (2, -x) assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \ (S(1)/14, 7.0*x + 21*y + 10*z) for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]: assert (i + x/3).primitive() == \ (S(1)/3, i + x) assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \ (S(1)/21, 14*x + 12*y + oo) assert S.Zero.primitive() == (S.One, S.Zero) def test_issue_5843(): a = 1 + x assert (2*a).extract_multiplicatively(a) == 2 assert (4*a).extract_multiplicatively(2*a) == 2 assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a def test_is_constant(): from sympy.solvers.solvers import checksol Sum(x, (x, 1, 10)).is_constant() is True Sum(x, (x, 1, n)).is_constant() is False Sum(x, (x, 1, n)).is_constant(y) is True Sum(x, (x, 1, n)).is_constant(n) is False Sum(x, (x, 1, n)).is_constant(x) is True eq = a*cos(x)**2 + a*sin(x)**2 - a eq.is_constant() is True assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0 assert x.is_constant() is False assert x.is_constant(y) is True assert checksol(x, x, Sum(x, (x, 1, n))) is False assert checksol(x, x, Sum(x, (x, 1, n))) is False f = Function('f') assert checksol(x, x, f(x)) is False p = symbols('p', positive=True) assert Pow(x, S(0), evaluate=False).is_constant() is True # == 1 assert Pow(S(0), x, evaluate=False).is_constant() is False # == 0 or 1 assert (2**x).is_constant() is False assert Pow(S(2), S(3), evaluate=False).is_constant() is True z1, z2 = symbols('z1 z2', zero=True) assert (z1 + 2*z2).is_constant() is True assert meter.is_constant() is True assert (3*meter).is_constant() is True assert (x*meter).is_constant() is False def test_equals(): assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0) assert (x**2 - 1).equals((x + 1)*(x - 1)) assert (cos(x)**2 + sin(x)**2).equals(1) assert (a*cos(x)**2 + a*sin(x)**2).equals(a) r = sqrt(2) assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0) assert factorial(x + 1).equals((x + 1)*factorial(x)) assert sqrt(3).equals(2*sqrt(3)) is False assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False assert (sqrt(5) + sqrt(3)).equals(0) is False assert (sqrt(5) + pi).equals(0) is False assert meter.equals(0) is False assert (3*meter**2).equals(0) is False eq = -(-1)**(S(3)/4)*6**(S(1)/4) + (-6)**(S(1)/4)*I if eq != 0: # if canonicalization makes this zero, skip the test assert eq.equals(0) assert sqrt(x).equals(0) is False # from integrate(x*sqrt(1 + 2*x), x); # diff is zero only when assumptions allow i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \ 2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x) ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15 diff = i - ans assert diff.equals(0) is False assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120 # there are regions for x for which the expression is True, for # example, when x < -1/2 or x > 0 the expression is zero p = Symbol('p', positive=True) assert diff.subs(x, p).equals(0) is True assert diff.subs(x, -1).equals(0) is True # prove via minimal_polynomial or self-consistency eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3)) assert eq.equals(0) q = 3**Rational(1, 3) + 3 p = expand(q**3)**Rational(1, 3) assert (p - q).equals(0) # issue 6829 # eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S(1)/3 # z = eq.subs(x, solve(eq, x)[0]) q = symbols('q') z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - 
S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4) + q/4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4)**2 - S(1)/3) assert z.equals(0) def test_random(): from sympy import posify, lucas assert posify(x)[0]._random() is not None assert lucas(n)._random(2, -2, 0, -1, 1) is None # issue 8662 assert Piecewise((Max(x, y), z))._random() is None def test_round(): from sympy.abc import x assert Float('0.1249999').round(2) == 0.12 d20 = 12345678901234567890 ans = S(d20).round(2) assert ans.is_Float and ans == d20 ans = S(d20).round(-2) assert ans.is_Float and ans == 12345678901234567900 assert S('1/7').round(4) == 0.1429 assert S('.[12345]').round(4) == 0.1235 assert S('.1349').round(2) == 0.13 n = S(12345) ans = n.round() assert ans.is_Float assert ans == n ans = n.round(1) assert ans.is_Float assert ans == n ans = n.round(4) assert ans.is_Float assert ans == n assert n.round(-1) == 12350 r = n.round(-4) assert r == 10000 # in fact, it should equal many values since __eq__ # compares at equal precision assert all(r == i for i in range(9984, 10049)) assert n.round(-5) == 0 assert (pi + sqrt(2)).round(2) == 4.56 assert (10*(pi + sqrt(2))).round(-1) == 50 raises(TypeError, lambda: round(x + 2, 2)) assert S(2.3).round(1) == 2.3 e = S(12.345).round(2) assert e == round(12.345, 2) assert type(e) is Float assert (Float(.3, 3) + 2*pi).round() == 7 assert (Float(.3, 3) + 2*pi*100).round() == 629 assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283 assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928 assert (pi + 2*E*I).round() == 3 + 5*I assert S.Zero.round() == 0 a = (Add(1, Float('1.' + '9'*27, ''), evaluate=0)) assert a.round(10) == Float('3.0000000000', '') assert a.round(25) == Float('3.0000000000000000000000000', '') assert a.round(26) == Float('3.00000000000000000000000000', '') assert a.round(27) == Float('2.999999999999999999999999999', '') assert a.round(30) == Float('2.999999999999999999999999999', '') raises(TypeError, lambda: x.round()) f = Function('f') raises(TypeError, lambda: f(1).round()) # exact magnitude of 10 assert str(S(1).round()) == '1.' assert str(S(100).round()) == '100.' 
# applied to real and imaginary portions assert (2*pi + E*I).round() == 6 + 3*I assert (2*pi + I/10).round() == 6 assert (pi/10 + 2*I).round() == 2*I # the lhs re and im parts are Float with dps of 2 # and those on the right have dps of 15 so they won't compare # equal unless we use string or compare components (which will # then coerce the floats to the same precision) or re-create # the floats assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I' assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72) assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3) # issue 6914 assert (I**(I + 3)).round(3) == Float('-0.208', '')*I # issue 8720 assert S(-123.6).round() == -124. assert S(-1.5).round() == -2. assert S(-100.5).round() == -101. assert S(-1.5 - 10.5*I).round() == -2.0 - 11.0*I # issue 7961 assert str(S(0.006).round(2)) == '0.01' assert str(S(0.00106).round(4)) == '0.0011' # issue 8147 assert S.NaN.round() == S.NaN assert S.Infinity.round() == S.Infinity assert S.NegativeInfinity.round() == S.NegativeInfinity assert S.ComplexInfinity.round() == S.ComplexInfinity def test_round_exception_nostr(): # Don't use the string form of the expression in the round exception, as # it's too slow s = Symbol('bad') try: s.round() except TypeError as e: assert 'bad' not in str(e) else: # Did not raise raise AssertionError("Did not raise") def test_extract_branch_factor(): assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1) def test_identity_removal(): assert Add.make_args(x + 0) == (x,) assert Mul.make_args(x*1) == (x,) def test_float_0(): assert Float(0.0) + 1 == Float(1.0) @XFAIL def test_float_0_fail(): assert Float(0.0)*x == Float(0.0) assert (x + Float(0.0)).is_Add def test_issue_6325(): ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/( (a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2) e = sqrt((a + b*t)**2 + (c + z*t)**2) assert diff(e, t, 2) == ans e.diff(t, 2) == ans assert diff(e, t, 2, simplify=False) != ans def test_issue_7426(): f1 = a % c f2 = x % z assert f1.equals(f2) == False def test_issue_1112(): x = Symbol('x', positive=False) assert (x > 0) is S.false def test_issue_10161(): x = symbols('x', real=True) assert x*abs(x)*abs(x) == x**3 def test_issue_10755(): x = symbols('x') raises(TypeError, lambda: int(log(x))) raises(TypeError, lambda: log(x).round(2))
bsd-3-clause
-4,765,008,874,888,499,000
32.233796
92
0.540573
false
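The coeff tests in the file above include a subtle case for noncommutative symbols, where the side on which the coefficient is collected matters. A minimal sketch of that behavior, assuming a standard SymPy installation (the expression is taken straight from the tests):

from sympy import symbols

n1, n2 = symbols('n1 n2', commutative=False)
expr = 2*(n1 + n2)*n2

# For noncommutative factors, coeff() can look on either side of the match:
# the left coefficient of (n1 + n2) is 2, the right coefficient is n2.
assert expr.coeff(n1 + n2, right=0) == 2
assert expr.coeff(n1 + n2, right=1) == n2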
hugomarin/artBO
montana/helpers/fckeditor/_samples/py/sampleposteddata.py
14
1896
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your
choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html

 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html

 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

This page lists the data posted by a form.
"""

import cgi
import os

# Tell the browser to render html
print "Content-Type: text/html"
print ""

try:
    # Create a cgi object
    form = cgi.FieldStorage()
except Exception, e:
    print e

# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
    <head>
        <title>FCKeditor - Samples - Posted Data</title>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
        <meta name="robots" content="noindex, nofollow">
        <link href="../sample.css" rel="stylesheet" type="text/css" />
    </head>
    <body>
"""

# This is the real work
print """
        <h1>FCKeditor - Samples - Posted Data</h1>
        This page lists all data posted by the form.
        <hr>
        <table border="1" cellspacing="0" id="outputSample">
            <colgroup><col width="80"><col></colgroup>
            <thead>
                <tr>
                    <th>Field Name</th>
                    <th>Value</th>
                </tr>
            </thead>
"""
for key in form.keys():
    try:
        value = form[key].value
        print """
            <tr>
                <th>%s</th>
                <td><pre>%s</pre></td>
            </tr>
        """ % (key, value)
    except Exception, e:
        print e
print "</table>"

# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"

# Document footer
print """
    </body>
</html>
"""
mit
4,190,585,792,407,657,000
20.545455
71
0.648207
false
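The CGI sample above simply echoes back whatever a form posts to it. A hedged sketch of exercising it from Python 2, assuming the script is served by a CGI-enabled web server at the hypothetical URL below:

import urllib

# hypothetical host and path; adjust to wherever the sample is deployed
params = urllib.urlencode({'FCKeditor1': '<p>Hello</p>'})
print urllib.urlopen(
    'http://localhost/fckeditor/_samples/py/sampleposteddata.py', params).read()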
Ecogenomics/CheckM
scripts/runGenomeSigDist.py
3
3522
#!/usr/bin/env python

###############################################################################
#
# runGenomeSigDist.py - calculate distribution of tetranucleotide
#                       distances for all reference genomes
#
###############################################################################
#                                                                             #
#    This program is free software: you can redistribute it and/or modify    #
#    it under the terms of the GNU General Public License as published by    #
#    the Free Software Foundation, either version 3 of the License, or       #
#    (at your option) any later version.                                     #
#                                                                             #
#    This program is distributed in the hope that it will be useful,         #
#    but WITHOUT ANY WARRANTY; without even the implied warranty of          #
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the            #
#    GNU General Public License for more details.                            #
#                                                                             #
#    You should have received a copy of the GNU General Public License       #
#    along with this program. If not, see <http://www.gnu.org/licenses/>.    #
#                                                                             #
###############################################################################

import os
import argparse


class RunGenomeSigDist(object):
    def __init__(self):
        pass

    def run(self, metadataFile, genomeDir, numWindows, numThreads):
        # read metadata file
        print 'Determining finished prokaryotic reference genomes.'
        genomeIds = []
        bHeader = True
        for line in open(metadataFile):
            lineSplit = [x.strip() for x in line.split('\t')]

            if bHeader:
                bHeader = False
                continue

            domain = lineSplit[1]
            status = lineSplit[2]

            if status == 'Finished' and (domain == 'Bacteria' or domain == 'Archaea'):
                genomeId = lineSplit[0]
                if os.path.exists(os.path.join(genomeDir, genomeId, genomeId + '.fna')):
                    genomeIds.append(genomeId)

        print 'Finished genomes to process: ' + str(len(genomeIds))

        # calculate difference in genomic signatures for each genome
        fout = open('cmdList.txt', 'w')
        for genomeId in genomeIds:
            genomeFile = os.path.join(genomeDir, genomeId, genomeId + '.fna')
            fout.write('./GenomeSigDist/genome-sig-dist -n ' + str(numWindows) +
                       ' -g ' + genomeFile + ' -o ./deltaTD/' + genomeId + '.txt' + '\n')
        fout.close()

        os.system('cat cmdList.txt | parallel --max-procs ' + str(numThreads))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Calculate GC distribution over reference genomes.')
    parser.add_argument('metadata_file', help='IMG metadata file.')
    parser.add_argument('genome_dir', help='IMG genome directory.')
    parser.add_argument('--num_windows', help='number of windows to sample', type=int, default=10000)
    parser.add_argument('-t', '--threads', help='number of threads', type=int, default=16)

    args = parser.parse_args()

    runGenomeSigDist = RunGenomeSigDist()
    runGenomeSigDist.run(args.metadata_file, args.genome_dir, args.num_windows, args.threads)
gpl-3.0
3,675,980,111,422,748,000
47.246575
147
0.515616
false
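The class above can also be driven directly instead of via its argparse entry point. A sketch, assuming the script is importable and the hypothetical paths below exist (it shells out to GNU parallel and expects ./GenomeSigDist/genome-sig-dist plus a ./deltaTD/ output directory):

from runGenomeSigDist import RunGenomeSigDist

# hypothetical inputs; the metadata file is a tab-separated IMG table whose
# second and third columns are the domain and sequencing status
RunGenomeSigDist().run('img_metadata.tsv', '/data/img_genomes',
                       numWindows=10000, numThreads=16)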
lavizhao/Tyrion
dump_category.py
1
1852
#coding: utf-8

'''
Querying the trans table by category is too slow, so re-dump it to a cache.
'''

from multiprocessing import Process, Queue
from util.db import mydb
from data import one_tran, get_conf
import time
import cPickle as pickle

# task queue
tqueue = Queue(100000)
# result queue
gqueue = Queue(100000)

cf = get_conf()


def get_mydb():
    host = cf["host"]
    user = cf["user"]
    passwd = cf["passwd"]
    if passwd == "null":
        passwd = ""
    mdb = mydb(host=host, user=user, passwd=passwd, port=3306, cache_open=False)
    return mdb


def handle(num):
    print "handle start"
    count = 0
    mdb = get_mydb()
    # re-check the queue size on every iteration so the worker stops
    # once the task queue has been drained
    while tqueue.qsize() > 0:
        category = tqueue.get()
        sql_str = 'select beh,dt from trans where category=\"%s\"' % (category)
        res = mdb.select_sql(sql_str, "tmall")
        gqueue.put((category, res))
        count += 1
        if count % 10 == 0:
            print "worker", num, count


# main program
def main():
    mdb = get_mydb()
    res = mdb.select_sql("select distinct category from trans;", "tmall")
    res = [i[0] for i in res]
    for i in res:
        tqueue.put(i)

    print "starting worker processes"
    thread_num = 3
    for i in range(thread_num):
        p = Process(target=handle, args=(str(i),))
        p.start()

    result = {}
    current = 0
    sleep_count = 200
    count = 0
    while current <= sleep_count:
        if gqueue.qsize() > 0:
            count += 1
            res = gqueue.get()
            result[res[0]] = res[1]
            current = 0
            if count % 100 == 0:
                print "category count", len(result)
        else:
            current += 1
            time.sleep(0.2)

    print "saving"
    t = open(cf["category_cache"], "wb")
    pickle.dump(result, t, True)


if __name__ == '__main__':
    main()
mit
-7,742,521,274,288,671,000
19.790698
77
0.535794
false
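The script above pickles a category-to-rows mapping into cf["category_cache"]. A sketch of reading that cache back, assuming the same get_conf helper is available:

import cPickle as pickle
from data import get_conf

cf = get_conf()
with open(cf["category_cache"], "rb") as f:
    result = pickle.load(f)
# result maps each category to its (beh, dt) rows from the trans table
print "categories cached:", len(result)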
siemens/django-dingos-authoring
dingos_authoring/views.py
1
26278
# Copyright (c) Siemens AG, 2014
#
# This file is part of MANTIS. MANTIS is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2
# of the License, or(at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import sys, re, traceback, json, collections, logging, libxml2, importlib, pkgutil, hashlib

from uuid import uuid4
from base64 import b64encode
from operator import itemgetter

from . import tasks

from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import timezone

from braces.views import LoginRequiredMixin, SuperuserRequiredMixin

from dingos import DINGOS_INTERNAL_IOBJECT_FAMILY_NAME, DINGOS_TEMPLATE_FAMILY
from dingos.core.utilities import lookup_in_re_list
from dingos.importer import Generic_XML_Import
from dingos.models import InfoObject, InfoObject2Fact
from dingos.view_classes import BasicListView, BasicTemplateView, BasicJSONView, BasicXMLView, BasicFilterView, BasicListActionView

from dingos_authoring.view_classes import guiJSONimport

from forms import XMLImportForm, SwitchAuthoringGroupForm, GUIJSONImportForm
import forms as observables

from . import DINGOS_AUTHORING_IMPORTER_REGISTRY, DINGOS_AUTHORING_CELERY_BUG_WORKAROUND

from .filter import ImportFilter, AuthoringObjectFilter
from .models import GroupNamespaceMap, AuthoredData, Identifier, UserAuthoringInfo
from .view_classes import AuthoringMethodMixin

logger = logging.getLogger(__name__)

AUTHORING_IMPORTER_REGISTRY = []

for (matcher, module, class_name) in DINGOS_AUTHORING_IMPORTER_REGISTRY:
    my_module = importlib.import_module(module)
    AUTHORING_IMPORTER_REGISTRY.append((matcher, getattr(my_module, class_name)))

#if DINGOS_AUTHORING_CELERY_BUG_WORKAROUND:
#    # This is an ugly hack which breaks the independence of the django-dingos-authoring
#    # app from the top-level configuration.
#    # The hack may be required in instances where the celery tasks defined in Django
#    # are not instantiated correctly: we have a system on which the configuration of
#    # celery as seen when starting the worker is perfectly ok, yet within Django,
#    # the tasks are not assigned the correct backend.
#    from mantis.celery import app as celery_app
#    print celery_app
#
#    #tasks = celery_app.tasks#
#
#    #add = celery_app.tasks['dingos_authoring.tasks.add']
#    #scheduled_import = celery_app.tasks['dingos_authoring.tasks.scheduled_import']
#else:
#    pass
#    #from .tasks import add,scheduled_import
#    #fake_tasks = {'dingos_authoring.tasks.add':add,
#    #              'dingos_authoring.tasks.scheduled_import':scheduled_import
#    #             }
#
#    #celery_app = FakeCeleryApp(tasks=fake_tasks)


class AuthoredDataHistoryView(AuthoringMethodMixin, BasicListView):
    """
    Overview of history of an Authoring object.
    """
    template_name = 'dingos_authoring/%s/AuthoredDataHistory.html' % DINGOS_TEMPLATE_FAMILY

    counting_paginator = True

    @property
    def title(self):
        latest_auth_obj = AuthoredData.objects.get(group=self.namespace_info['authoring_group'],
                                                   identifier__name=self.kwargs['id'],
                                                   latest=True)
        return "History of '%s' " % latest_auth_obj.name

    @property
    def queryset(self):
        namespace_info = self.namespace_info
        if not namespace_info:
            messages.error(self.request, "You are not member of an authoring group.")
            return AuthoredData.objects.exclude(id__contains='')
        if isinstance(namespace_info, list):
            messages.error(self.request, "You are member of several authoring groups but have not selected a"
                                         " default group.")
            return AuthoredData.objects.exclude(id__contains='')

        return AuthoredData.objects.filter(group=self.namespace_info['authoring_group'],
                                           identifier__name=self.kwargs['id']).order_by('-timestamp'). \
            prefetch_related('identifier', 'group', 'user', 'author_view'). \
            prefetch_related('top_level_iobject',
                             'top_level_iobject__identifier',
                             'top_level_iobject__identifier__namespace')

    def get_context_data(self, **kwargs):
        context = super(AuthoredDataHistoryView, self).get_context_data(**kwargs)
        context['highlight_pk'] = self.request.GET.get('highlight', None)
        return context


class index(AuthoringMethodMixin, BasicFilterView):
    """
    Overview of saved drafts.
    """
    counting_paginator = True

    @property
    def title(self):
        if self.namespace_info:
            if isinstance(self.namespace_info, list):
                return "No drafts or imports to be shown (no active authoring group selected)"
            else:
                return "Drafts and Imports of Authoring Group %s" % self.namespace_info['authoring_group']
        else:
            return "No drafts or imports to be shown (user not member of an authoring group)"

    template_name = 'dingos_authoring/%s/AuthoredObjectList.html' % DINGOS_TEMPLATE_FAMILY

    filterset_class = AuthoringObjectFilter

    @property
    def queryset(self):
        namespace_info = self.namespace_info
        if not namespace_info:
            messages.error(self.request, 'You are not member of an Authoring Group.')
            return AuthoredData.objects.exclude(pk__gt=-1)
        elif isinstance(namespace_info, list):
            messages.error(self.request, 'You are member of several authoring groups but you have not selected an'
                                         ' active authoring group. Please do so in the "Switch Authoring Group" dialogue'
                                         ' available from the user menu (top right-hand corner).')
            return AuthoredData.objects.exclude(pk__gt=-1)

        return AuthoredData.objects.filter(Q(kind=AuthoredData.AUTHORING_JSON,
                                             group=namespace_info['authoring_group'],
                                             latest=True)
                                           & (Q(status=AuthoredData.AUTOSAVE)
                                              | Q(status=AuthoredData.UPDATE)
                                              | Q(status=AuthoredData.DRAFT)
                                              | Q(status=AuthoredData.IMPORTED))). \
            prefetch_related('identifier', 'group', 'user', 'author_view'). \
            prefetch_related('top_level_iobject',
                             'top_level_iobject__identifier',
                             'top_level_iobject__identifier__namespace')

    list_actions = [('Take from owner', 'url.dingos_authoring.index.action.take', 0)]


class ImportsView(BasicFilterView):
    """
    Overview of saved drafts.
    """
    title = "Imports"

    template_name = 'dingos_authoring/%s/ImportList.html' % DINGOS_TEMPLATE_FAMILY

    filterset_class = ImportFilter

    title = 'Imports'

    @property
    def queryset(self):
        queryset = AuthoredData.objects.filter(
            user=self.request.user,
            status=AuthoredData.IMPORTED)
        return queryset


class GetDraftJSON(AuthoringMethodMixin, BasicJSONView):
    """
    View serving latest draft of given name, or respond
    with the list of available templates
    """

    @property
    def returned_obj(self):
        res = {
            'status': False,
            'msg': 'An error occurred loading the requested template',
            'data': None
        }

        if not self.namespace_info:
            return res

        authoring_group = self.namespace_info['authoring_group']

        if 'list' in self.request.GET:
            json_obj_l = AuthoredData.objects.filter(
                kind=AuthoredData.AUTHORING_JSON,
                user=self.request.user,
                group=authoring_group,
                status=AuthoredData.DRAFT,
                latest=True,
                author_view__name=self.author_view
            ).prefetch_related('identifier', 'group', 'user'). \
                prefetch_related('top_level_iobject',
                                 'top_level_iobject__identifier',
                                 'top_level_iobject__identifier__namespace')

            res['status'] = True
            res['msg'] = ''
            res['data'] = []
            for el in json_obj_l:
                nd = {
                    'id': el.identifier.name,
                    'name': el.name,
                    'date': el.timestamp.strftime("%Y-%m-%d %H:%M")
                }
                res['data'].append(nd)
        else:
            name = self.request.GET.get('name', False)
            try:
                json_obj = AuthoredData.objects.get(Q(kind=AuthoredData.AUTHORING_JSON,
                                                      group=authoring_group,
                                                      identifier__name=name,
                                                      latest=True,
                                                      )
                                                    & (Q(status=AuthoredData.DRAFT)
                                                       | Q(status=AuthoredData.UPDATE)
                                                       | Q(status=AuthoredData.IMPORTED))
                                                    & (Q(user__isnull=True) | Q(user=self.request.user))
                                                    )
            except ObjectDoesNotExist:
                res['msg'] = 'Could not access object %s of group %s' % (name, authoring_group)
                res['status'] = False
                return res
            except MultipleObjectsReturned:
                res['msg'] = """Something is wrong in the database: there are several "latest" objects
                                of group %s with identifier %s""" % (authoring_group, name)
                return res

            if not json_obj.user:
                # The user needs to take the object in order to edit it -- this is
                # done automatically here.
                if json_obj.status == AuthoredData.IMPORTED:
                    status = AuthoredData.UPDATE
                else:
                    status = json_obj.status

                json_obj = AuthoredData.object_copy(json_obj, user=self.request.user, status=status)

            res['data'] = {}
            res['data']['id'] = json_obj.identifier.name
            res['data']['name'] = json_obj.name
            res['data']['date'] = json_obj.timestamp.strftime("%Y-%m-%d %H:%M")
            res['data']['jsn'] = json_obj.content

            res['status'] = True
            res['msg'] = 'Loaded \'' + json_obj.name + '\''

        return res


class GUI_JSON_ImportTest(AuthoringMethodMixin, SuperuserRequiredMixin, BasicTemplateView):
    """
    View for testing the GUI JSON import and the usage
    of the guiJSONimport function.
    """
    # This import here locally, because it adds a dependency on
    # django-mantis-authoring. Import
    from mantis_authoring.CampaignIndicators import ProcessingView

    transformer = ProcessingView.transformer
    author_view = ProcessingView.author_view
    importer_class = ProcessingView.importer_class

    template_name = 'dingos_authoring/%s/XMLImport.html' % DINGOS_TEMPLATE_FAMILY
    title = 'Import GUI JSON'

    def get_context_data(self, **kwargs):
        context = super(GUI_JSON_ImportTest, self).get_context_data(**kwargs)
        context['form'] = self.form
        return context

    def get(self, request, *args, **kwargs):
        self.form = GUIJSONImportForm({'name': 'Import of GUI-JSON via GUI'})
        return super(BasicTemplateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.form = GUIJSONImportForm(request.POST.dict())

        if self.form.is_valid():
            data = self.form.cleaned_data
            jsn = data['json']

            try:
                namespace_info = self.get_authoring_namespaces(self.request.user)
            except StandardError, e:
                messages.error(self.request, e.message)
                return super(GUI_JSON_ImportTest, self).get(request, *args, **kwargs)

            res = guiJSONimport(self.transformer,
                                self.author_view,
                                self.importer_class,
                                jsn,
                                namespace_info,
                                authored_data_name=data["name"],
                                user=request.user,
                                action='import',
                                request=self.request
                                )

            if res['status']:
                messages.success(self.request, res['msg'])
            else:
                messages.error(self.request, res['msg'])

            self.form = GUIJSONImportForm()

        return super(GUI_JSON_ImportTest, self).get(request, *args, **kwargs)


class XMLImportView(AuthoringMethodMixin, SuperuserRequiredMixin, BasicTemplateView):
    """
    View for importing XML.
    """
    template_name = 'dingos_authoring/%s/XMLImport.html' % DINGOS_TEMPLATE_FAMILY
    title = 'Import XML'

    def get_context_data(self, **kwargs):
        context = super(XMLImportView, self).get_context_data(**kwargs)
        context['form'] = self.form
        return context

    def get(self, request, *args, **kwargs):
        self.form = XMLImportForm({'name': 'Import of XML via GUI'})
        return super(BasicTemplateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.form = XMLImportForm(request.POST.dict())

        if self.form.is_valid():
            data = self.form.cleaned_data

            doc = libxml2.parseDoc(data['xml'])
            root = doc.getRootElement()

            ns_mapping = {}
            try:
                ns_def = root.nsDefs()
                while ns_def:
                    ns_mapping[ns_def.name] = ns_def.content
                    ns_def = ns_def.next
            except:
                pass

            try:
                ns_slug = root.ns().name
            except:
                ns_slug = None

            if ns_slug:
                namespace = ns_mapping.get(ns_slug, None)
            else:
                namespace = ''

            try:
                namespace_info = self.get_authoring_namespaces(self.request.user)
            except StandardError, e:
                messages.error(self.request, e.message)
                return super(XMLImportView, self).get(request, *args, **kwargs)

            importer_class = None

            importer_class = lookup_in_re_list(AUTHORING_IMPORTER_REGISTRY, namespace)

            if not importer_class:
                messages.error(self.request, "Do not know how to import XML with namespace '%s'" % (namespace))
            else:
                importer = importer_class(allowed_identifier_ns_uris=namespace_info['allowed_ns_uris'] + [namespace_info['default_ns_uri']],
                                          default_identifier_ns_uri=namespace_info['default_ns_uri'],
                                          substitute_unallowed_namespaces=True)

                if False:
                    # Celery switched off
                    result = importer.xml_import(xml_content=data['xml'],
                                                 track_created_objects=True)
                    messages.success(self.request,
                                     "Imported objects: %s" % ", ".join(map(lambda x: "%s:%s" % (x['identifier_namespace_uri'],
                                                                                                 x['identifier_uid']),
                                                                            list(result))))
                else:
                    identifier = Identifier.objects.create(name="%s" % uuid4())
                    authored_data = AuthoredData.objects_create(identifier=identifier,
                                                                name=data.get('name', "Import of XML via GUI"),
                                                                status=AuthoredData.IMPORTED,
                                                                kind=AuthoredData.XML,
                                                                data=data['xml'],
                                                                user=self.request.user,
                                                                group=namespace_info['authoring_group'],
                                                                timestamp=timezone.now(),
                                                                latest=True)

                    result = tasks.scheduled_import.delay(importer=importer,
                                                          xml=data['xml'],
                                                          xml_import_obj=authored_data)

                    authored_data.processing_id = result.id
                    authored_data.save()

                    messages.info(self.request, 'Import started.')

            self.form = XMLImportForm()

        return super(XMLImportView, self).get(request, *args, **kwargs)


class TakeReportView(AuthoringMethodMixin, BasicListActionView):

    # The base query limits down the objects to the objects that
    # the user is actually allowed to act upon. This is to prevent
    # the user from fiddling with the data submitted by his browser
    # and inserting identifiers of objects that were not offered
    # by the view.

    template_name = 'dingos_authoring/%s/actions/TakeAuthoringDataObject.html' % DINGOS_TEMPLATE_FAMILY

    title = 'Take report(s) from owner'

    description = "When taking a report from another user, you become its owner and thus the only person" \
                  " allowed to edit the report. Make sure that you only take a report that is not currently" \
                  " edited by its current owner!"

    @property
    def action_model_query(self):
        base_query = AuthoredData.objects.filter(Q(kind=AuthoredData.AUTHORING_JSON,
                                                   group=self.namespace_info['authoring_group'],
                                                   latest=True)
                                                 & (Q(status=AuthoredData.DRAFT)
                                                    | Q(status=AuthoredData.UPDATE)
                                                    | Q(status=AuthoredData.IMPORTED)))
        return base_query

    def _take_authoring_data_obj(self, form_data, authoring_data_obj):
        if authoring_data_obj.user == self.request.user:
            return (None, "'%s' is already owned by you." % authoring_data_obj.name)
        elif authoring_data_obj.status in [AuthoredData.DRAFT, AuthoredData.UPDATE]:
            old_user = authoring_data_obj.user
            obj = AuthoredData.object_copy(authoring_data_obj, user=self.request.user)
            return (True, "'%s' is now owned by you instead of %s" % (obj.name, old_user))
        elif authoring_data_obj.status == AuthoredData.IMPORTED:
            obj = AuthoredData.object_copy(authoring_data_obj,
                                           user=self.request.user,
                                           status=AuthoredData.UPDATE)
            return (True, "'%s' has been put into DRAFT mode and is now owned by you." % obj.name)
        else:
            return (False, "Do not know how to treat '%s'" % authoring_data_obj.name)

    @property
    def action_list(self):
        return [{'action_predicate': lambda x, y: True,
                 'action_function': lambda x, y: self._take_authoring_data_obj(x, y)}]


class GetAuthoringNamespace(AuthoringMethodMixin, BasicJSONView):
    """
    View serving the namespace of the currently logged in user
    """

    @property
    def returned_obj(self):
        res = {
            'status': False,
            'msg': 'An error occurred fetching your namespace information',
            'data': None
        }

        if self.request.user:
            try:
                ns = self.get_authoring_namespaces(self.request.user, fail_silently=False)
                del ns['authoring_group']
                res['status'] = True
                res['msg'] = ''
                res['data'] = ns
            except StandardError as e:
                res['msg'] = str(e)
            finally:
                pass

        return res


class CeleryTest(SuperuserRequiredMixin, BasicTemplateView):
    """
    View for editing the saved searches of a user.
    """
    template_name = 'dingos_authoring/%s/tests/CeleryTest.html' % DINGOS_TEMPLATE_FAMILY

    title = 'Test of Celery'

    def get_context_data(self, **kwargs):
        context = super(CeleryTest, self).get_context_data(**kwargs)

        #print celery_app.tasks

        #result = celery_app.tasks['dingos_authoring.tasks.add'].delay(2,2)

        if DINGOS_AUTHORING_CELERY_BUG_WORKAROUND:
            # This is an ugly hack which breaks the independence of the django-dingos-authoring
            # app from the top-level configuration.
            # The hack may be required in instances where the celery tasks defined in Django
            # are not instantiated correctly: we have a system on which the configuration of
            # celery as seen when starting the worker is perfectly ok, yet within Django,
            # the tasks are not assigned the correct backend.
            from mantis.celery import app as celery_app

        result = tasks.add.delay(2, 2)

        status0 = result.status
        value1 = result.get(timeout=1)
        status1 = result.status

        context['status0'] = result.status
        context['value'] = result.get(timeout=1)
        context['status1'] = result.status

        return context


class SwitchAuthoringGroupView(AuthoringMethodMixin, BasicTemplateView):
    """
    View for editing the saved searches of a user.
    """
    template_name = 'dingos_authoring/%s/actions/SwitchAuthoringGroup.html' % DINGOS_TEMPLATE_FAMILY

    title = 'Switch Authoring Group'

    form = None

    def build_form(self, *args):
        ns_info = self.get_authoring_namespaces(self.request.user, return_available_groups=True)

        if not ns_info:
            # User has no authoring groups
            return None
        else:
            group_choices = []
            for (i, j) in ns_info['all_authoring_groups']:
                group_choices.append((i, i))

            if len(args) == 0:
                # No values have been set; we set the initial value here
                if 'authoring_group' in ns_info:
                    default = ns_info['authoring_group']
                else:
                    default = ''
                args = [{'group': default}]

            self.form = SwitchAuthoringGroupForm(*args,
                                                 group_choices=group_choices,
                                                 initial={'group': ''})

    def get(self, request, *args, **kwargs):
        self.build_form()
        return super(SwitchAuthoringGroupView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.build_form(request.POST.dict())

        if self.form and self.form.is_valid():
            selected_group = self.form.cleaned_data['group']
            if selected_group:
                namespace_map = GroupNamespaceMap.objects.get(group__name=selected_group)
                try:
                    user_authoring_info = UserAuthoringInfo.objects.get(user=request.user)
                    user_authoring_info.default_authoring_namespace_info = namespace_map
                    user_authoring_info.save()
                except ObjectDoesNotExist:
                    UserAuthoringInfo.objects.create(user=request.user,
                                                     default_authoring_namespace_info=namespace_map)
                messages.success(request, "Authoring group switched.")
            else:
                messages.info(request, "Leaving settings unchanged.")

        return super(SwitchAuthoringGroupView, self).get(request, *args, **kwargs)


class ImportedXMLView(BasicXMLView):

    @property
    def returned_xml(self):
        iobject_id = self.kwargs.get('pk', None)
        iobject = InfoObject.objects.get(pk=iobject_id)
        authored_objects = iobject.yielded_by.all().filter(kind=AuthoredData.XML).order_by('-timestamp')
        if authored_objects:
            authored_object = authored_objects[0]
            return authored_object.content


class Ping(AuthoringMethodMixin, BasicJSONView):
    """
    Empty view with the only purpose of keeping the
    user session in the authoring GUIs active
    """

    @property
    def returned_obj(self):
        res = {
            'status': True,
            'msg': 'Ok',
            'data': None
        }

        # This triggers an update of the users session which results in pushing
        # a cookie with new expiry
        if self.request.user:
            self.request.session.modified = True
        else:
            res['status'] = False

        return res
gpl-2.0
-1,879,485,312,123,893,000
37.362044
178
0.564997
false
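The views above resolve an importer class by matching the XML root namespace against DINGOS_AUTHORING_IMPORTER_REGISTRY via lookup_in_re_list, so each entry pairs a namespace regex with a dotted module path and a class name. A sketch of what such a settings entry could look like (the regex, module, and class below are illustrative, not taken from a real deployment):

# settings.py (hypothetical): each tuple is (namespace regex, module, class name);
# the views import the module and look up the class at startup
DINGOS_AUTHORING_IMPORTER_REGISTRY = [
    (r'^http://stix\.mitre\.org.*', 'mantis_stix_importer.importer', 'STIX_Import'),
]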
matthiaskrgr/cppcheck
tools/daca2-getpackages.py
1
3915
#!/usr/bin/env python
#
# 1. Create a folder daca2 in your HOME folder
# 2. Put cppcheck-head in daca2. It should be built with all optimisations.
# 3. Optional: Put a file called "suppressions.txt" in the daca2 folder.
# 4. Optional: tweak FTPSERVER and FTPPATH in this script below.
# 5. Run the daca2 script:  python daca2.py FOLDER

import argparse
import logging
import subprocess
import sys
import shutil
import glob
import os
import datetime
import time

DEBIAN = ('ftp://ftp.se.debian.org/debian/',
          'ftp://ftp.debian.org/debian/')


def wget(filepath):
    filename = filepath
    if '/' in filepath:
        filename = filename[filename.rfind('/') + 1:]
    for d in DEBIAN:
        subprocess.call(
            ['nice', 'wget', '--tries=10', '--timeout=300', '-O', filename, d + filepath])
        if os.path.isfile(filename):
            return True
        print('Sleep for 10 seconds..')
        time.sleep(10)
    return False


def getpackages():
    if not wget('ls-lR.gz'):
        return []
    subprocess.call(['nice', 'gunzip', 'ls-lR.gz'])
    f = open('ls-lR', 'rt')
    lines = f.readlines()
    f.close()
    subprocess.call(['rm', 'ls-lR'])

    # Example content in ls-lR:
    #./pool/main/0/0xffff:
    #total 1452
    #-rw-r--r-- 2 dak debadmin  6524 Dec 25  2016 0xffff_0.7-2.debian.tar.xz
    #-rw-r--r-- 2 dak debadmin  1791 Dec 25  2016 0xffff_0.7-2.dsc
    #-rw-r--r-- 2 dak debadmin 57168 Dec 25  2016 0xffff_0.7-2_amd64.deb
    #-rw-r--r-- 2 dak debadmin 48578 Dec 26  2016 0xffff_0.7-2_arm64.deb
    #-rw-r--r-- 2 dak debadmin 56730 Dec 26  2016 0xffff_0.7-2_armel.deb
    #-rw-r--r-- 2 dak debadmin 57296 Dec 26  2016 0xffff_0.7-2_armhf.deb
    #-rw-r--r-- 2 dak debadmin 60254 Dec 26  2016 0xffff_0.7-2_i386.deb
    #-rw-r--r-- 2 dak debadmin 53130 Dec 26  2016 0xffff_0.7-2_mips.deb
    #-rw-r--r-- 2 dak debadmin 52542 Dec 26  2016 0xffff_0.7-2_mips64el.deb
    #-rw-r--r-- 2 dak debadmin 53712 Dec 26  2016 0xffff_0.7-2_mipsel.deb
    #-rw-r--r-- 2 dak debadmin 51908 Dec 26  2016 0xffff_0.7-2_ppc64el.deb
    #-rw-r--r-- 2 dak debadmin 53548 Dec 26  2016 0xffff_0.7-2_s390x.deb
    #-rw-r--r-- 2 dak debadmin 65248 Dec 25  2016 0xffff_0.7.orig.tar.gz
    #-rw-r--r-- 2 dak debadmin  6884 Jul 19 19:08 0xffff_0.8-1.debian.tar.xz
    #-rw-r--r-- 2 dak debadmin  1807 Jul 19 19:08 0xffff_0.8-1.dsc
    #-rw-r--r-- 2 dak debadmin 58908 Jul 19 19:08 0xffff_0.8-1_amd64.deb
    #-rw-r--r-- 2 dak debadmin 51340 Jul 19 19:58 0xffff_0.8-1_arm64.deb
    #-rw-r--r-- 2 dak debadmin 57612 Jul 19 20:13 0xffff_0.8-1_armel.deb
    #-rw-r--r-- 2 dak debadmin 58584 Jul 19 19:58 0xffff_0.8-1_armhf.deb
    #-rw-r--r-- 2 dak debadmin 57544 Jul 19 20:23 0xffff_0.8-1_hurd-i386.deb
    #-rw-r--r-- 2 dak debadmin 62048 Jul 19 23:54 0xffff_0.8-1_i386.deb
    #-rw-r--r-- 2 dak debadmin 55080 Jul 23 19:07 0xffff_0.8-1_kfreebsd-amd64.deb
    #-rw-r--r-- 2 dak debadmin 58392 Jul 23 19:07 0xffff_0.8-1_kfreebsd-i386.deb
    #-rw-r--r-- 2 dak debadmin 54144 Jul 19 22:28 0xffff_0.8-1_mips.deb
    #-rw-r--r-- 2 dak debadmin 53648 Jul 20 00:56 0xffff_0.8-1_mips64el.deb
    #-rw-r--r-- 2 dak debadmin 54740 Jul 19 22:58 0xffff_0.8-1_mipsel.deb
    #-rw-r--r-- 2 dak debadmin 57424 Jul 19 19:58 0xffff_0.8-1_ppc64el.deb
    #-rw-r--r-- 2 dak debadmin 53764 Jul 19 22:28 0xffff_0.8-1_s390x.deb
    #-rw-r--r-- 2 dak debadmin 64504 Jul 19 19:08 0xffff_0.8.orig.tar.gz
    #
    path = None
    archives = []
    filename = None
    for line in lines:
        line = line.strip()
        if len(line) < 4:
            if filename:
                archives.append(DEBIAN[0] + path + '/' + filename)
            path = None
            filename = None
        elif line.startswith('./pool/main/'):
            path = line[2:-1]
        elif path and line.endswith('.orig.tar.gz'):
            filename = line[1 + line.rfind(' '):]

    return archives


for p in getpackages():
    print(p)
gpl-3.0
-7,311,189,876,327,974,000
38.545455
90
0.618135
false
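getpackages() above returns one .orig.tar.gz URL per Debian source package. Note the print loop at the bottom of the script runs on import, so reusing the function in another script means copying it out or guarding that loop. A sketch of filtering its output (the module name is hypothetical):

from daca2_getpackages import getpackages  # hypothetical module name

# keep only packages from the 'g' pool section of the Debian archive
for url in getpackages():
    if '/pool/main/g/' in url:
        print(url)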
golismero/golismero
thirdparty_libs/nltk/parse/pchart.py
12
18640
# Natural Language Toolkit: Probabilistic Chart Parsers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
#         Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Classes and interfaces for associating probabilities with tree
structures that represent the internal organization of a text.  The
probabilistic parser module defines ``BottomUpProbabilisticChartParser``.

``BottomUpProbabilisticChartParser`` is an abstract class that implements
a bottom-up chart parser for ``PCFG`` grammars.  It maintains a queue of
edges, and adds them to the chart one at a time.  The ordering of this
queue is based on the probabilities associated with the edges, allowing
the parser to expand more likely edges before less likely ones.  Each
subclass implements a different queue ordering, producing different
search strategies.  Currently the following subclasses are defined:

  - ``InsideChartParser`` searches edges in decreasing order of
    their trees' inside probabilities.
  - ``RandomChartParser`` searches edges in random order.
  - ``LongestChartParser`` searches edges in decreasing order of their
    location's length.

The ``BottomUpProbabilisticChartParser`` constructor has an optional
argument beam_size.  If non-zero, this controls the size of the beam
(aka the edge queue).  This option is most useful with InsideChartParser.
"""

##//////////////////////////////////////////////////////
##  Bottom-Up PCFG Chart Parser
##//////////////////////////////////////////////////////

# [XX] This might not be implemented quite right -- it would be better
# to associate probabilities with child pointer lists.

from nltk.tree import Tree, ProbabilisticTree
from nltk.grammar import Nonterminal, WeightedGrammar
from nltk.parse.api import ParserI
from nltk.parse.chart import Chart, LeafEdge, TreeEdge, AbstractChartRule


# Probabilistic edges
class ProbabilisticLeafEdge(LeafEdge):
    def prob(self):
        return 1.0


class ProbabilisticTreeEdge(TreeEdge):
    def __init__(self, prob, *args, **kwargs):
        self._prob = prob
        TreeEdge.__init__(self, *args, **kwargs)

    def prob(self):
        return self._prob

    def __cmp__(self, other):
        if self._prob != other.prob():
            return -1
        return TreeEdge.__cmp__(self, other)

    @staticmethod
    def from_production(production, index, p):
        return ProbabilisticTreeEdge(p, (index, index), production.lhs(),
                                     production.rhs(), 0)


# Rules using probabilistic edges
class ProbabilisticBottomUpInitRule(AbstractChartRule):
    NUM_EDGES = 0

    def apply_iter(self, chart, grammar):
        for index in range(chart.num_leaves()):
            new_edge = ProbabilisticLeafEdge(chart.leaf(index), index)
            if chart.insert(new_edge, ()):
                yield new_edge


class ProbabilisticBottomUpPredictRule(AbstractChartRule):
    NUM_EDGES = 1

    def apply_iter(self, chart, grammar, edge):
        if edge.is_incomplete():
            return
        for prod in grammar.productions():
            if edge.lhs() == prod.rhs()[0]:
                new_edge = ProbabilisticTreeEdge.from_production(prod, edge.start(), prod.prob())
                if chart.insert(new_edge, ()):
                    yield new_edge


class ProbabilisticFundamentalRule(AbstractChartRule):
    NUM_EDGES = 2

    def apply_iter(self, chart, grammar, left_edge, right_edge):
        # Make sure the rule is applicable.
        if not (left_edge.end() == right_edge.start() and
                left_edge.next() == right_edge.lhs() and
                left_edge.is_incomplete() and right_edge.is_complete()):
            return

        # Construct the new edge.
        p = left_edge.prob() * right_edge.prob()
        new_edge = ProbabilisticTreeEdge(p,
                                         span=(left_edge.start(), right_edge.end()),
                                         lhs=left_edge.lhs(), rhs=left_edge.rhs(),
                                         dot=left_edge.dot() + 1)

        # Add it to the chart, with appropriate child pointers.
        changed_chart = False
        for cpl1 in chart.child_pointer_lists(left_edge):
            if chart.insert(new_edge, cpl1 + (right_edge,)):
                changed_chart = True

        # If we changed the chart, then generate the edge.
        if changed_chart:
            yield new_edge


class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule):
    NUM_EDGES = 1

    _fundamental_rule = ProbabilisticFundamentalRule()

    def apply_iter(self, chart, grammar, edge1):
        fr = self._fundamental_rule
        if edge1.is_incomplete():
            # edge1 = left_edge; edge2 = right_edge
            for edge2 in chart.select(start=edge1.end(), is_complete=True,
                                      lhs=edge1.next()):
                for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
                    yield new_edge
        else:
            # edge2 = left_edge; edge1 = right_edge
            for edge2 in chart.select(end=edge1.start(), is_complete=False,
                                      next=edge1.lhs()):
                for new_edge in fr.apply_iter(chart, grammar, edge2, edge1):
                    yield new_edge

    def __str__(self):
        return 'Fundamental Rule'


class BottomUpProbabilisticChartParser(ParserI):
    """
    An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to
    record partial results.  ``BottomUpProbabilisticChartParser`` maintains
    a queue of edges that can be added to the chart.  This queue is
    initialized with edges for each token in the text that is being
    parsed.  ``BottomUpProbabilisticChartParser`` inserts these edges into
    the chart one at a time, starting with the most likely edges, and
    proceeding to less likely edges.  For each edge that is added to
    the chart, it may become possible to insert additional edges into
    the chart; these are added to the queue.  This process continues
    until enough complete parses have been generated, or until the
    queue is empty.

    The sorting order for the queue is not specified by
    ``BottomUpProbabilisticChartParser``.  Different sorting orders will
    result in different search strategies.  The sorting order for the
    queue is defined by the method ``sort_queue``; subclasses are required
    to provide a definition for this method.

    :type _grammar: PCFG
    :ivar _grammar: The grammar used to parse sentences.
    :type _trace: int
    :ivar _trace: The level of tracing output that should be generated
        when parsing a text.
    """
    def __init__(self, grammar, beam_size=0, trace=0):
        """
        Create a new ``BottomUpProbabilisticChartParser``, that uses
        ``grammar`` to parse texts.

        :type grammar: PCFG
        :param grammar: The grammar used to parse texts.
        :type beam_size: int
        :param beam_size: The maximum length for the parser's edge queue.
        :type trace: int
        :param trace: The level of tracing that should be used when
            parsing a text.  ``0`` will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        if not isinstance(grammar, WeightedGrammar):
            raise ValueError("The grammar must be probabilistic WeightedGrammar")
        self._grammar = grammar
        self.beam_size = beam_size
        self._trace = trace

    def grammar(self):
        return self._grammar

    def trace(self, trace=2):
        """
        Set the level of tracing output that should be generated when
        parsing a text.

        :type trace: int
        :param trace: The trace level.  A trace level of ``0`` will
            generate no tracing output; and higher trace levels will
            produce more verbose tracing output.
        :rtype: None
        """
        self._trace = trace

    # TODO: change this to conform more with the standard ChartParser
    def nbest_parse(self, tokens, n=None):
        self._grammar.check_coverage(tokens)
        chart = Chart(list(tokens))
        grammar = self._grammar

        # Chart parser rules.
        bu_init = ProbabilisticBottomUpInitRule()
        bu = ProbabilisticBottomUpPredictRule()
        fr = SingleEdgeProbabilisticFundamentalRule()

        # Our queue!
        queue = []

        # Initialize the chart.
        for edge in bu_init.apply_iter(chart, grammar):
            if self._trace > 1:
                print '  %-50s [%s]' % (chart.pp_edge(edge, width=2),
                                        edge.prob())
            queue.append(edge)

        while len(queue) > 0:
            # Re-sort the queue.
            self.sort_queue(queue, chart)

            # Prune the queue to the correct size if a beam was defined
            if self.beam_size:
                self._prune(queue, chart)

            # Get the best edge.
            edge = queue.pop()
            if self._trace > 0:
                print '  %-50s [%s]' % (chart.pp_edge(edge, width=2),
                                        edge.prob())

            # Apply BU & FR to it.
            queue.extend(bu.apply(chart, grammar, edge))
            queue.extend(fr.apply(chart, grammar, edge))

        # Get a list of complete parses.
        parses = chart.parses(grammar.start(), ProbabilisticTree)

        # Assign probabilities to the trees.
        prod_probs = {}
        for prod in grammar.productions():
            prod_probs[prod.lhs(), prod.rhs()] = prod.prob()
        for parse in parses:
            self._setprob(parse, prod_probs)

        # Sort by probability
        parses.sort(lambda a, b: cmp(b.prob(), a.prob()))

        return parses[:n]

    def _setprob(self, tree, prod_probs):
        if tree.prob() is not None:
            return

        # Get the prob of the CFG production.
        lhs = Nonterminal(tree.node)
        rhs = []
        for child in tree:
            if isinstance(child, Tree):
                rhs.append(Nonterminal(child.node))
            else:
                rhs.append(child)
        prob = prod_probs[lhs, tuple(rhs)]

        # Get the probs of children.
        for child in tree:
            if isinstance(child, Tree):
                self._setprob(child, prod_probs)
                prob *= child.prob()

        tree.set_prob(prob)

    def sort_queue(self, queue, chart):
        """
        Sort the given queue of ``Edge`` objects, placing the edge that should
        be tried first at the beginning of the queue.  This method
        will be called after each ``Edge`` is added to the queue.

        :param queue: The queue of ``Edge`` objects to sort.  Each edge in
            this queue is an edge that could be added to the chart by
            the fundamental rule; but that has not yet been added.
        :type queue: list(Edge)
        :param chart: The chart being used to parse the text.  This
            chart can be used to provide extra information for sorting
            the queue.
        :type chart: Chart
        :rtype: None
        """
        raise NotImplementedError()

    def _prune(self, queue, chart):
        """ Discard items in the queue if the queue is longer than the beam."""
        if len(queue) > self.beam_size:
            split = len(queue) - self.beam_size
            if self._trace > 2:
                for edge in queue[:split]:
                    print '  %-50s [DISCARDED]' % chart.pp_edge(edge, 2)
            del queue[:split]


class InsideChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that tries edges in descending
    order of the inside probabilities of their trees.  The "inside
    probability" of a tree is simply the probability of the entire tree,
    ignoring its context.  In particular, the inside probability of a tree
    generated by production *p* with children *c[1], c[2], ..., c[n]* is
    *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside probability of a
    token is 1 if it is present in the text, and 0 if it is absent.

    This sorting order results in a type of lowest-cost-first search
    strategy.
    """
    # Inherit constructor.
    def sort_queue(self, queue, chart):
        """
        Sort the given queue of edges, in descending order of the
        inside probabilities of the edges' trees.

        :param queue: The queue of ``Edge`` objects to sort.  Each edge in
            this queue is an edge that could be added to the chart by
            the fundamental rule; but that has not yet been added.
        :type queue: list(Edge)
        :param chart: The chart being used to parse the text.  This
            chart can be used to provide extra information for sorting
            the queue.
        :type chart: Chart
        :rtype: None
        """
        queue.sort(lambda e1, e2: cmp(e1.prob(), e2.prob()))


# Eventually, this will become some sort of inside-outside parser:
# class InsideOutsideParser(BottomUpProbabilisticChartParser):
#     def __init__(self, grammar, trace=0):
#         # Inherit docs.
#         BottomUpProbabilisticChartParser.__init__(self, grammar, trace)
#
#         # Find the best path from S to each nonterminal
#         bestp = {}
#         for production in grammar.productions(): bestp[production.lhs()]=0
#         bestp[grammar.start()] = 1.0
#
#         for i in range(len(grammar.productions())):
#             for production in grammar.productions():
#                 lhs = production.lhs()
#                 for elt in production.rhs():
#                     bestp[elt] = max(bestp[lhs]*production.prob(),
#                                      bestp.get(elt,0))
#
#         self._bestp = bestp
#         for (k,v) in self._bestp.items(): print k,v
#
#     def _cmp(self, e1, e2):
#         return cmp(e1.structure()[PROB]*self._bestp[e1.lhs()],
#                    e2.structure()[PROB]*self._bestp[e2.lhs()])
#
#     def sort_queue(self, queue, chart):
#         queue.sort(self._cmp)

import random


class RandomChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that tries edges in random order.
    This sorting order results in a random search strategy.
    """
    # Inherit constructor
    def sort_queue(self, queue, chart):
        i = random.randint(0, len(queue) - 1)
        (queue[-1], queue[i]) = (queue[i], queue[-1])


class UnsortedChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order.
    """
    # Inherit constructor
    def sort_queue(self, queue, chart):
        return


class LongestChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that tries longer edges before
    shorter ones.  This sorting order results in a type of best-first
    search strategy.
    """
    # Inherit constructor
    def sort_queue(self, queue, chart):
        queue.sort(lambda e1, e2: cmp(e1.length(), e2.length()))


##//////////////////////////////////////////////////////
##  Test Code
##//////////////////////////////////////////////////////

def demo(choice=None, draw_parses=None, print_parses=None):
    """
    A demonstration of the probabilistic parsers.  The user is
    prompted to select which demo to run, and how many parses should
    be found; and then each parser is run on the same demo, and a
    summary of the results are displayed.
    """
    import sys, time
    from nltk import tokenize, toy_pcfg1, toy_pcfg2
    from nltk.parse import pchart

    # Define two demos.  Each demo has a sentence and a grammar.
    demos = [('I saw John with my telescope', toy_pcfg1),
             ('the boy saw Jack with Bob under the table with a telescope',
              toy_pcfg2)]

    if choice is None:
        # Ask the user which demo they want to use.
        print
        for i in range(len(demos)):
            print '%3s: %s' % (i + 1, demos[i][0])
            print '     %r' % demos[i][1]
            print
        print 'Which demo (%d-%d)? ' % (1, len(demos)),
        choice = int(sys.stdin.readline().strip()) - 1
    try:
        sent, grammar = demos[choice]
    except:
        print 'Bad sentence number'
        return

    # Tokenize the sentence.
    tokens = sent.split()

    # Define a list of parsers.  We'll use all parsers.
parsers = [ pchart.InsideChartParser(grammar), pchart.RandomChartParser(grammar), pchart.UnsortedChartParser(grammar), pchart.LongestChartParser(grammar), pchart.InsideChartParser(grammar, beam_size = len(tokens)+1) # was BeamParser ] # Run the parsers on the tokenized sentence. times = [] average_p = [] num_parses = [] all_parses = {} for parser in parsers: print '\ns: %s\nparser: %s\ngrammar: %s' % (sent,parser,grammar) parser.trace(3) t = time.time() parses = parser.nbest_parse(tokens) times.append(time.time()-t) if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses) else: p = 0 average_p.append(p) num_parses.append(len(parses)) for p in parses: all_parses[p.freeze()] = 1 # Print some summary statistics print print ' Parser Beam | Time (secs) # Parses Average P(parse)' print '------------------------+------------------------------------------' for i in range(len(parsers)): print '%18s %4d |%11.4f%11d%19.14f' % (parsers[i].__class__.__name__, parsers[i].beam_size, times[i],num_parses[i],average_p[i]) parses = all_parses.keys() if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses) else: p = 0 print '------------------------+------------------------------------------' print '%18s |%11s%11d%19.14f' % ('(All Parses)', 'n/a', len(parses), p) if draw_parses is None: # Ask the user if we should draw the parses. print print 'Draw parses (y/n)? ', draw_parses = sys.stdin.readline().strip().lower().startswith('y') if draw_parses: from nltk.draw.tree import draw_trees print ' please wait...' draw_trees(*parses) if print_parses is None: # Ask the user if we should print the parses. print print 'Print parses (y/n)? ', print_parses = sys.stdin.readline().strip().lower().startswith('y') if print_parses: for parse in parses: print parse if __name__ == '__main__': demo()
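# ---------------------------------------------------------------------
# Editor's usage sketch (not part of the original NLTK module): a
# minimal way to drive InsideChartParser outside of demo(), assuming
# NLTK 2.x where ``toy_pcfg1`` is importable from the package top
# level (demo() above relies on the same import).
def _inside_parser_example():
    from nltk import toy_pcfg1
    # Same sentence as the first demo entry, so toy_pcfg1 covers it.
    tokens = 'I saw John with my telescope'.split()
    parser = InsideChartParser(toy_pcfg1, beam_size=20)
    # nbest_parse() returns ProbabilisticTree objects, best-first.
    for parse in parser.nbest_parse(tokens, n=3):
        print parse.prob(), parse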
gpl-2.0
4,272,996,025,214,943,000
37.512397
97
0.600536
false
encbladexp/ansible
lib/ansible/module_utils/facts/hardware/linux.py
11
34498
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import collections import errno import glob import json import os import re import sys import time from multiprocessing import cpu_count from multiprocessing.pool import ThreadPool from ansible.module_utils._text import to_text from ansible.module_utils.six import iteritems from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.formatters import bytes_to_human from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size # import this as a module to ensure we get the same module instance from ansible.module_utils.facts import timeout def get_partition_uuid(partname): try: uuids = os.listdir("/dev/disk/by-uuid") except OSError: return for uuid in uuids: dev = os.path.realpath("/dev/disk/by-uuid/" + uuid) if dev == ("/dev/" + partname): return uuid return None class LinuxHardware(Hardware): """ Linux-specific subclass of Hardware. Defines memory and CPU facts: - memfree_mb - memtotal_mb - swapfree_mb - swaptotal_mb - processor (a list) - processor_cores - processor_count In addition, it also defines number of DMI facts and device facts. 
""" platform = 'Linux' # Originally only had these four as toplevelfacts ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree')) # Now we have all of these in a dict structure MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached')) # regex used against findmnt output to detect bind mounts BIND_MOUNT_RE = re.compile(r'.*\]') # regex used against mtab content to find entries that are bind mounts MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"') # regex used for replacing octal escape sequences OCTAL_ESCAPE_RE = re.compile(r'\\[0-9]{3}') def populate(self, collected_facts=None): hardware_facts = {} self.module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_NUMERIC': 'C'} cpu_facts = self.get_cpu_facts(collected_facts=collected_facts) memory_facts = self.get_memory_facts() dmi_facts = self.get_dmi_facts() device_facts = self.get_device_facts() uptime_facts = self.get_uptime_facts() lvm_facts = self.get_lvm_facts() mount_facts = {} try: mount_facts = self.get_mount_facts() except timeout.TimeoutError: self.module.warn("No mount facts were gathered due to timeout.") hardware_facts.update(cpu_facts) hardware_facts.update(memory_facts) hardware_facts.update(dmi_facts) hardware_facts.update(device_facts) hardware_facts.update(uptime_facts) hardware_facts.update(lvm_facts) hardware_facts.update(mount_facts) return hardware_facts def get_memory_facts(self): memory_facts = {} if not os.access("/proc/meminfo", os.R_OK): return memory_facts memstats = {} for line in get_file_lines("/proc/meminfo"): data = line.split(":", 1) key = data[0] if key in self.ORIGINAL_MEMORY_FACTS: val = data[1].strip().split(' ')[0] memory_facts["%s_mb" % key.lower()] = int(val) // 1024 if key in self.MEMORY_FACTS: val = data[1].strip().split(' ')[0] memstats[key.lower()] = int(val) // 1024 if None not in (memstats.get('memtotal'), memstats.get('memfree')): memstats['real:used'] = memstats['memtotal'] - memstats['memfree'] if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')): memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers'] if None not in (memstats.get('memtotal'), memstats.get('nocache:free')): memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free'] if None not in (memstats.get('swaptotal'), memstats.get('swapfree')): memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree'] memory_facts['memory_mb'] = { 'real': { 'total': memstats.get('memtotal'), 'used': memstats.get('real:used'), 'free': memstats.get('memfree'), }, 'nocache': { 'free': memstats.get('nocache:free'), 'used': memstats.get('nocache:used'), }, 'swap': { 'total': memstats.get('swaptotal'), 'free': memstats.get('swapfree'), 'used': memstats.get('swap:used'), 'cached': memstats.get('swapcached'), }, } return memory_facts def get_cpu_facts(self, collected_facts=None): cpu_facts = {} collected_facts = collected_facts or {} i = 0 vendor_id_occurrence = 0 model_name_occurrence = 0 processor_occurence = 0 physid = 0 coreid = 0 sockets = {} cores = {} xen = False xen_paravirt = False try: if os.path.exists('/proc/xen'): xen = True else: for line in get_file_lines('/sys/hypervisor/type'): if line.strip() == 'xen': xen = True # Only interested in the first line break except IOError: pass if not os.access("/proc/cpuinfo", os.R_OK): return cpu_facts cpu_facts['processor'] = [] for line in get_file_lines('/proc/cpuinfo'): data = line.split(":", 1) key = data[0].strip() try: val = data[1].strip() except 
IndexError: val = "" if xen: if key == 'flags': # Check for vme cpu flag, Xen paravirt does not expose this. # Need to detect Xen paravirt because it exposes cpuinfo # differently than Xen HVM or KVM and causes reporting of # only a single cpu core. if 'vme' not in val: xen_paravirt = True # model name is for Intel arch, Processor (mind the uppercase P) # works for some ARM devices, like the Sheevaplug. # 'ncpus active' is SPARC attribute if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']: if 'processor' not in cpu_facts: cpu_facts['processor'] = [] cpu_facts['processor'].append(val) if key == 'vendor_id': vendor_id_occurrence += 1 if key == 'model name': model_name_occurrence += 1 if key == 'processor': processor_occurence += 1 i += 1 elif key == 'physical id': physid = val if physid not in sockets: sockets[physid] = 1 elif key == 'core id': coreid = val if coreid not in sockets: cores[coreid] = 1 elif key == 'cpu cores': sockets[physid] = int(val) elif key == 'siblings': cores[coreid] = int(val) elif key == '# processors': cpu_facts['processor_cores'] = int(val) elif key == 'ncpus active': i = int(val) # Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le) if vendor_id_occurrence > 0: if vendor_id_occurrence == model_name_occurrence: i = vendor_id_occurrence # The fields for ARM CPUs do not always include 'vendor_id' or 'model name', # and sometimes includes both 'processor' and 'Processor'. # The fields for Power CPUs include 'processor' and 'cpu'. # Always use 'processor' count for ARM and Power systems if collected_facts.get('ansible_architecture', '').startswith(('armv', 'aarch', 'ppc')): i = processor_occurence # FIXME if collected_facts.get('ansible_architecture') != 's390x': if xen_paravirt: cpu_facts['processor_count'] = i cpu_facts['processor_cores'] = i cpu_facts['processor_threads_per_core'] = 1 cpu_facts['processor_vcpus'] = i else: if sockets: cpu_facts['processor_count'] = len(sockets) else: cpu_facts['processor_count'] = i socket_values = list(sockets.values()) if socket_values and socket_values[0]: cpu_facts['processor_cores'] = socket_values[0] else: cpu_facts['processor_cores'] = 1 core_values = list(cores.values()) if core_values: cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores'] else: cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores'] cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] * cpu_facts['processor_count'] * cpu_facts['processor_cores']) # if the number of processors available to the module's # thread cannot be determined, the processor count # reported by /proc will be the default: cpu_facts['processor_nproc'] = processor_occurence try: cpu_facts['processor_nproc'] = len( os.sched_getaffinity(0) ) except AttributeError: # In Python < 3.3, os.sched_getaffinity() is not available try: cmd = get_bin_path('nproc') except ValueError: pass else: rc, out, _err = self.module.run_command(cmd) if rc == 0: cpu_facts['processor_nproc'] = int(out) return cpu_facts def get_dmi_facts(self): ''' learn dmi facts from system Try /sys first for dmi related facts. 
If that is not available, fall back to dmidecode executable ''' dmi_facts = {} if os.path.exists('/sys/devices/virtual/dmi/id/product_name'): # Use kernel DMI info, if available # DMI SPEC -- https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.2.0.pdf FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop", "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower", "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station", "All In One", "Sub Notebook", "Space-saving", "Lunch Box", "Main Server Chassis", "Expansion Chassis", "Sub Chassis", "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis", "Rack Mount Chassis", "Sealed-case PC", "Multi-system", "CompactPCI", "AdvancedTCA", "Blade", "Blade Enclosure", "Tablet", "Convertible", "Detachable", "IoT Gateway", "Embedded PC", "Mini PC", "Stick PC"] DMI_DICT = { 'bios_date': '/sys/devices/virtual/dmi/id/bios_date', 'bios_vendor': '/sys/devices/virtual/dmi/id/bios_vendor', 'bios_version': '/sys/devices/virtual/dmi/id/bios_version', 'board_asset_tag': '/sys/devices/virtual/dmi/id/board_asset_tag', 'board_name': '/sys/devices/virtual/dmi/id/board_name', 'board_serial': '/sys/devices/virtual/dmi/id/board_serial', 'board_vendor': '/sys/devices/virtual/dmi/id/board_vendor', 'board_version': '/sys/devices/virtual/dmi/id/board_version', 'chassis_asset_tag': '/sys/devices/virtual/dmi/id/chassis_asset_tag', 'chassis_serial': '/sys/devices/virtual/dmi/id/chassis_serial', 'chassis_vendor': '/sys/devices/virtual/dmi/id/chassis_vendor', 'chassis_version': '/sys/devices/virtual/dmi/id/chassis_version', 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type', 'product_name': '/sys/devices/virtual/dmi/id/product_name', 'product_serial': '/sys/devices/virtual/dmi/id/product_serial', 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid', 'product_version': '/sys/devices/virtual/dmi/id/product_version', 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor', } for (key, path) in DMI_DICT.items(): data = get_file_content(path) if data is not None: if key == 'form_factor': try: dmi_facts['form_factor'] = FORM_FACTOR[int(data)] except IndexError: dmi_facts['form_factor'] = 'unknown (%s)' % data else: dmi_facts[key] = data else: dmi_facts[key] = 'NA' else: # Fall back to using dmidecode, if available dmi_bin = self.module.get_bin_path('dmidecode') DMI_DICT = { 'bios_date': 'bios-release-date', 'bios_vendor': 'bios-vendor', 'bios_version': 'bios-version', 'board_asset_tag': 'baseboard-asset-tag', 'board_name': 'baseboard-product-name', 'board_serial': 'baseboard-serial-number', 'board_vendor': 'baseboard-manufacturer', 'board_version': 'baseboard-version', 'chassis_asset_tag': 'chassis-asset-tag', 'chassis_serial': 'chassis-serial-number', 'chassis_vendor': 'chassis-manufacturer', 'chassis_version': 'chassis-version', 'form_factor': 'chassis-type', 'product_name': 'system-product-name', 'product_serial': 'system-serial-number', 'product_uuid': 'system-uuid', 'product_version': 'system-version', 'system_vendor': 'system-manufacturer', } for (k, v) in DMI_DICT.items(): if dmi_bin is not None: (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) if rc == 0: # Strip out commented lines (specific dmidecode output) thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')]) try: json.dumps(thisvalue) except UnicodeDecodeError: thisvalue = "NA" dmi_facts[k] = thisvalue else: dmi_facts[k] = 'NA' else: dmi_facts[k] = 'NA' return dmi_facts def _run_lsblk(self, lsblk_path): # call lsblk and collect 
all uuids # --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts # this uses the linux major device number # for details see https://www.kernel.org/doc/Documentation/devices.txt args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2'] cmd = [lsblk_path] + args rc, out, err = self.module.run_command(cmd) return rc, out, err def _lsblk_uuid(self): uuids = {} lsblk_path = self.module.get_bin_path("lsblk") if not lsblk_path: return uuids rc, out, err = self._run_lsblk(lsblk_path) if rc != 0: return uuids # each line will be in format: # <devicename><some whitespace><uuid> # /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 for lsblk_line in out.splitlines(): if not lsblk_line: continue line = lsblk_line.strip() fields = line.rsplit(None, 1) if len(fields) < 2: continue device_name, uuid = fields[0].strip(), fields[1].strip() if device_name in uuids: continue uuids[device_name] = uuid return uuids def _udevadm_uuid(self, device): # fallback for versions of lsblk <= 2.23 that don't have --paths, see _run_lsblk() above uuid = 'N/A' udevadm_path = self.module.get_bin_path('udevadm') if not udevadm_path: return uuid cmd = [udevadm_path, 'info', '--query', 'property', '--name', device] rc, out, err = self.module.run_command(cmd) if rc != 0: return uuid # a snippet of the output of the udevadm command below will be: # ... # ID_FS_TYPE=ext4 # ID_FS_USAGE=filesystem # ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179 # ... m = re.search('ID_FS_UUID=(.*)\n', out) if m: uuid = m.group(1) return uuid def _run_findmnt(self, findmnt_path): args = ['--list', '--noheadings', '--notruncate'] cmd = [findmnt_path] + args rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace') return rc, out, err def _find_bind_mounts(self): bind_mounts = set() findmnt_path = self.module.get_bin_path("findmnt") if not findmnt_path: return bind_mounts rc, out, err = self._run_findmnt(findmnt_path) if rc != 0: return bind_mounts # find bind mounts, in case /etc/mtab is a symlink to /proc/mounts for line in out.splitlines(): fields = line.split() # fields[0] is the TARGET, fields[1] is the SOURCE if len(fields) < 2: continue # bind mounts will have a [/directory_name] in the SOURCE column if self.BIND_MOUNT_RE.match(fields[1]): bind_mounts.add(fields[0]) return bind_mounts def _mtab_entries(self): mtab_file = '/etc/mtab' if not os.path.exists(mtab_file): mtab_file = '/proc/mounts' mtab = get_file_content(mtab_file, '') mtab_entries = [] for line in mtab.splitlines(): fields = line.split() if len(fields) < 4: continue mtab_entries.append(fields) return mtab_entries @staticmethod def _replace_octal_escapes_helper(match): # Convert to integer using base8 and then convert to character return chr(int(match.group()[1:], 8)) def _replace_octal_escapes(self, value): return self.OCTAL_ESCAPE_RE.sub(self._replace_octal_escapes_helper, value) def get_mount_info(self, mount, device, uuids): mount_size = get_mount_size(mount) # _udevadm_uuid is a fallback for versions of lsblk <= 2.23 that don't have --paths # see _run_lsblk() above # https://github.com/ansible/ansible/issues/36077 uuid = uuids.get(device, self._udevadm_uuid(device)) return mount_size, uuid def get_mount_facts(self): mounts = [] # gather system lists bind_mounts = self._find_bind_mounts() uuids = self._lsblk_uuid() mtab_entries = self._mtab_entries() # start threads to query each mount results = {} pool = ThreadPool(processes=min(len(mtab_entries), cpu_count())) maxtime = 
globals().get('GATHER_TIMEOUT') or timeout.DEFAULT_GATHER_TIMEOUT for fields in mtab_entries: # Transform octal escape sequences fields = [self._replace_octal_escapes(field) for field in fields] device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3] if not device.startswith(('/', '\\')) and ':/' not in device or fstype == 'none': continue mount_info = {'mount': mount, 'device': device, 'fstype': fstype, 'options': options} if mount in bind_mounts: # only add if not already there, we might have a plain /etc/mtab if not self.MTAB_BIND_MOUNT_RE.match(options): mount_info['options'] += ",bind" results[mount] = {'info': mount_info, 'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)), 'timelimit': time.time() + maxtime} pool.close() # done with new workers, start gc # wait for workers and get results while results: for mount in list(results): done = False res = results[mount]['extra'] try: if res.ready(): done = True if res.successful(): mount_size, uuid = res.get() if mount_size: results[mount]['info'].update(mount_size) results[mount]['info']['uuid'] = uuid or 'N/A' else: # failed, try to find out why, if 'res.successful' we know there are no exceptions results[mount]['info']['note'] = 'Could not get extra information: %s.' % (to_text(res.get())) elif time.time() > results[mount]['timelimit']: done = True self.module.warn("Timeout exceeded when getting mount info for %s" % mount) results[mount]['info']['note'] = 'Could not get extra information due to timeout' except Exception as e: import traceback done = True results[mount]['info'] = 'N/A' self.module.warn("Error prevented getting extra info for mount %s: [%s] %s." % (mount, type(e), to_text(e))) self.module.debug(traceback.format_exc()) if done: # move results outside and make loop only handle pending mounts.append(results[mount]['info']) del results[mount] # avoid cpu churn, sleep between retrying for loop with remaining mounts time.sleep(0.1) return {'mounts': mounts} def get_device_links(self, link_dir): if not os.path.exists(link_dir): return {} try: retval = collections.defaultdict(set) for entry in os.listdir(link_dir): try: target = os.path.basename(os.readlink(os.path.join(link_dir, entry))) retval[target].add(entry) except OSError: continue return dict((k, list(sorted(v))) for (k, v) in iteritems(retval)) except OSError: return {} def get_all_device_owners(self): try: retval = collections.defaultdict(set) for path in glob.glob('/sys/block/*/slaves/*'): elements = path.split('/') device = elements[3] target = elements[5] retval[target].add(device) return dict((k, list(sorted(v))) for (k, v) in iteritems(retval)) except OSError: return {} def get_all_device_links(self): return { 'ids': self.get_device_links('/dev/disk/by-id'), 'uuids': self.get_device_links('/dev/disk/by-uuid'), 'labels': self.get_device_links('/dev/disk/by-label'), 'masters': self.get_all_device_owners(), } def get_holders(self, block_dev_dict, sysdir): block_dev_dict['holders'] = [] if os.path.isdir(sysdir + "/holders"): for folder in os.listdir(sysdir + "/holders"): if not folder.startswith("dm-"): continue name = get_file_content(sysdir + "/holders/" + folder + "/dm/name") if name: block_dev_dict['holders'].append(name) else: block_dev_dict['holders'].append(folder) def get_device_facts(self): device_facts = {} device_facts['devices'] = {} lspci = self.module.get_bin_path('lspci') if lspci: rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace') else: pcidata = None try: block_devs = 
os.listdir("/sys/block") except OSError: return device_facts devs_wwn = {} try: devs_by_id = os.listdir("/dev/disk/by-id") except OSError: pass else: for link_name in devs_by_id: if link_name.startswith("wwn-"): try: wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name)) except OSError: continue devs_wwn[os.path.basename(wwn_link)] = link_name[4:] links = self.get_all_device_links() device_facts['device_links'] = links for block in block_devs: virtual = 1 sysfs_no_links = 0 try: path = os.readlink(os.path.join("/sys/block/", block)) except OSError: e = sys.exc_info()[1] if e.errno == errno.EINVAL: path = block sysfs_no_links = 1 else: continue sysdir = os.path.join("/sys/block", path) if sysfs_no_links == 1: for folder in os.listdir(sysdir): if "device" in folder: virtual = 0 break d = {} d['virtual'] = virtual d['links'] = {} for (link_type, link_values) in iteritems(links): d['links'][link_type] = link_values.get(block, []) diskname = os.path.basename(sysdir) for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']: d[key] = get_file_content(sysdir + "/device/" + key) sg_inq = self.module.get_bin_path('sg_inq') # we can get NVMe device's serial number from /sys/block/<name>/device/serial serial_path = "/sys/block/%s/device/serial" % (block) if sg_inq: device = "/dev/%s" % (block) rc, drivedata, err = self.module.run_command([sg_inq, device]) if rc == 0: serial = re.search(r"Unit serial number:\s+(\w+)", drivedata) if serial: d['serial'] = serial.group(1) else: serial = get_file_content(serial_path) if serial: d['serial'] = serial for key, test in [('removable', '/removable'), ('support_discard', '/queue/discard_granularity'), ]: d[key] = get_file_content(sysdir + test) if diskname in devs_wwn: d['wwn'] = devs_wwn[diskname] d['partitions'] = {} for folder in os.listdir(sysdir): m = re.search("(" + diskname + r"[p]?\d+)", folder) if m: part = {} partname = m.group(1) part_sysdir = sysdir + "/" + partname part['links'] = {} for (link_type, link_values) in iteritems(links): part['links'][link_type] = link_values.get(partname, []) part['start'] = get_file_content(part_sysdir + "/start", 0) part['sectors'] = get_file_content(part_sysdir + "/size", 0) part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size") if not part['sectorsize']: part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512) part['size'] = bytes_to_human((float(part['sectors']) * 512.0)) part['uuid'] = get_partition_uuid(partname) self.get_holders(part, part_sysdir) d['partitions'][partname] = part d['rotational'] = get_file_content(sysdir + "/queue/rotational") d['scheduler_mode'] = "" scheduler = get_file_content(sysdir + "/queue/scheduler") if scheduler is not None: m = re.match(r".*?(\[(.*)\])", scheduler) if m: d['scheduler_mode'] = m.group(2) d['sectors'] = get_file_content(sysdir + "/size") if not d['sectors']: d['sectors'] = 0 d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size") if not d['sectorsize']: d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512) d['size'] = bytes_to_human(float(d['sectors']) * 512.0) d['host'] = "" # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7). 
m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir) if m and pcidata: pciid = m.group(1) did = re.escape(pciid) m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE) if m: d['host'] = m.group(1) self.get_holders(d, sysdir) device_facts['devices'][diskname] = d return device_facts def get_uptime_facts(self): uptime_facts = {} uptime_file_content = get_file_content('/proc/uptime') if uptime_file_content: uptime_seconds_string = uptime_file_content.split(' ')[0] uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string)) return uptime_facts def _find_mapper_device_name(self, dm_device): dm_prefix = '/dev/dm-' mapper_device = dm_device if dm_device.startswith(dm_prefix): dmsetup_cmd = self.module.get_bin_path('dmsetup', True) mapper_prefix = '/dev/mapper/' rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) if rc == 0: mapper_device = mapper_prefix + dm_name.rstrip() return mapper_device def get_lvm_facts(self): """ Get LVM Facts if running as root and lvm utils are available """ lvm_facts = {} if os.getuid() == 0 and self.module.get_bin_path('vgs'): lvm_util_options = '--noheadings --nosuffix --units g --separator ,' vgs_path = self.module.get_bin_path('vgs') # vgs fields: VG #PV #LV #SN Attr VSize VFree vgs = {} if vgs_path: rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options)) for vg_line in vg_lines.splitlines(): items = vg_line.strip().split(',') vgs[items[0]] = {'size_g': items[-2], 'free_g': items[-1], 'num_lvs': items[2], 'num_pvs': items[1]} lvs_path = self.module.get_bin_path('lvs') # lvs fields: # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert lvs = {} if lvs_path: rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options)) for lv_line in lv_lines.splitlines(): items = lv_line.strip().split(',') lvs[items[0]] = {'size_g': items[3], 'vg': items[1]} pvs_path = self.module.get_bin_path('pvs') # pvs fields: PV VG #Fmt #Attr PSize PFree pvs = {} if pvs_path: rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options)) for pv_line in pv_lines.splitlines(): items = pv_line.strip().split(',') pvs[self._find_mapper_device_name(items[0])] = { 'size_g': items[4], 'free_g': items[5], 'vg': items[1]} lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs} return lvm_facts class LinuxHardwareCollector(HardwareCollector): _platform = 'Linux' _fact_class = LinuxHardware required_facts = set(['platform'])
gpl-3.0
-7,721,708,395,090,663,000
39.020882
128
0.513537
false
TakeshiTseng/ryu
ryu/services/protocols/bgp/application.py
2
8351
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Defines bases classes to create a BGP application. """ import logging import os from ryu import cfg from ryu.lib import hub from ryu.utils import load_source from ryu.base.app_manager import RyuApp from ryu.services.protocols.bgp.base import add_bgp_error_metadata from ryu.services.protocols.bgp.base import BGPSException from ryu.services.protocols.bgp.base import BIN_ERROR from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER from ryu.services.protocols.bgp.net_ctrl import NC_RPC_BIND_IP from ryu.services.protocols.bgp.net_ctrl import NC_RPC_BIND_PORT from ryu.services.protocols.bgp.operator.ssh import SSH_CLI_CONTROLLER from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT from ryu.services.protocols.bgp.rtconf.common import ( DEFAULT_REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_STALEPATH_TIME) from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4 from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6 LOG = logging.getLogger('bgpspeaker.application') CONF = cfg.CONF['bgp-app'] @add_bgp_error_metadata(code=BIN_ERROR, sub_code=1, def_desc='Unknown bootstrap exception.') class ApplicationException(BGPSException): """ Specific Base exception related to `BSPSpeaker`. """ pass def validate_rpc_host(ip): """ Validates the given ip for use as RPC server address. """ if not is_valid_ipv4(ip) and not is_valid_ipv6(ip): raise ApplicationException( desc='Invalid RPC ip address: %s' % ip) return ip def load_config(config_file): """ Validates the given file for use as the settings file for BGPSpeaker and loads the configuration from the given file as a module instance. """ if not config_file or not os.path.isfile(config_file): raise ApplicationException( desc='Invalid configuration file: %s' % config_file) # Loads the configuration from the given file, if available. 
try: return load_source('bgpspeaker.application.settings', config_file) except Exception as e: raise ApplicationException(desc=str(e)) class RyuBGPSpeaker(RyuApp): def __init__(self, *args, **kwargs): super(RyuBGPSpeaker, self).__init__(*args, **kwargs) self.config_file = CONF.config_file # BGPSpeaker instance (not instantiated yet) self.speaker = None def start(self): super(RyuBGPSpeaker, self).start() # If configuration file was provided and loaded successfully, we start # BGPSpeaker using the given settings. # If no configuration file is provided or if any minimum required # setting is missing, BGPSpeaker will not be started. if self.config_file: LOG.debug('Loading config file %s...', self.config_file) settings = load_config(self.config_file) # Configure logging settings, if available. if hasattr(settings, 'LOGGING'): # Not implemented yet. LOG.debug('Loading LOGGING settings... (NOT implemented yet)') # from logging.config import dictConfig # logging_settings = dictConfig(settings.LOGGING) # Configure BGP settings, if available. if hasattr(settings, 'BGP'): LOG.debug('Loading BGP settings...') self._start_speaker(settings.BGP) # Configure SSH settings, if available. if hasattr(settings, 'SSH'): LOG.debug('Loading SSH settings...') hub.spawn(SSH_CLI_CONTROLLER.start, **settings.SSH) # Start RPC server with the given RPC settings. rpc_settings = { NC_RPC_BIND_PORT: CONF.rpc_port, NC_RPC_BIND_IP: validate_rpc_host(CONF.rpc_host), } return hub.spawn(NET_CONTROLLER.start, **rpc_settings) def _start_speaker(self, settings): """ Starts BGPSpeaker using the given settings. """ # Settings for starting BGPSpeaker bgp_settings = {} # Get required settings. try: bgp_settings['as_number'] = settings.get(LOCAL_AS) bgp_settings['router_id'] = settings.get(ROUTER_ID) except KeyError as e: raise ApplicationException( desc='Required BGP configuration missing: %s' % e) # Get optional settings. bgp_settings[BGP_SERVER_PORT] = settings.get( BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT) bgp_settings[REFRESH_STALEPATH_TIME] = settings.get( REFRESH_STALEPATH_TIME, DEFAULT_REFRESH_STALEPATH_TIME) bgp_settings[REFRESH_MAX_EOR_TIME] = settings.get( REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_MAX_EOR_TIME) bgp_settings[LABEL_RANGE] = settings.get( LABEL_RANGE, DEFAULT_LABEL_RANGE) # Create BGPSpeaker instance. LOG.debug('Starting BGPSpeaker...') self.speaker = BGPSpeaker(**bgp_settings) # Add neighbors. LOG.debug('Adding neighbors...') self._add_neighbors(settings.get('neighbors', [])) # Add VRFs. LOG.debug('Adding VRFs...') self._add_vrfs(settings.get('vrfs', [])) # Add Networks LOG.debug('Adding routes...') self._add_routes(settings.get('routes', [])) def _add_neighbors(self, settings): """ Add BGP neighbors from the given settings. All valid neighbors are loaded. Miss-configured neighbors are ignored and errors are logged. """ for neighbor_settings in settings: LOG.debug('Adding neighbor settings: %s', neighbor_settings) try: self.speaker.neighbor_add(**neighbor_settings) except RuntimeConfigError as e: LOG.exception(e) def _add_vrfs(self, settings): """ Add BGP VRFs from the given settings. All valid VRFs are loaded. Miss-configured VRFs are ignored and errors are logged. """ for vrf_settings in settings: LOG.debug('Adding VRF settings: %s', vrf_settings) try: self.speaker.vrf_add(**vrf_settings) except RuntimeConfigError as e: LOG.exception(e) def _add_routes(self, settings): """ Add BGP routes from given settings. All valid routes are loaded. Miss-configured routes are ignored and errors are logged. 
""" for route_settings in settings: if 'prefix' in route_settings: prefix_add = self.speaker.prefix_add elif 'route_type' in route_settings: prefix_add = self.speaker.evpn_prefix_add else: LOG.debug('Skip invalid route settings: %s', route_settings) continue LOG.debug('Adding route settings: %s', route_settings) try: prefix_add(**route_settings) except RuntimeConfigError as e: LOG.exception(e)
apache-2.0
-6,652,366,837,627,358,000
36.28125
78
0.651179
false
jjlee9/openthread
tools/harness-automation/cases/leader_9_2_11.py
16
1877
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import unittest from autothreadharness.harness_case import HarnessCase class Leader_9_2_11(HarnessCase): role = HarnessCase.ROLE_LEADER case = '9 2 11' golden_devices_required = 4 def on_dialog(self, dialog, title): pass if __name__ == '__main__': unittest.main()
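# ---------------------------------------------------------------------
# Editor's sketch (not part of the original file): sibling cases in
# this harness differ only in their class attributes. A hypothetical
# router-role variant of the same test might look like the following
# (the role constant, case number, and device count here are
# assumptions for illustration, not taken from the source tree):
#
# class Router_9_2_11(HarnessCase):
#     role = HarnessCase.ROLE_ROUTER
#     case = '9 2 11'
#     golden_devices_required = 4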
bsd-3-clause
839,902,037,799,989,900
42.651163
77
0.76292
false
Azure/azure-sdk-for-python
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/_configuration.py
1
3209
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy from .._version import VERSION if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class MicrosoftStorageSyncConfiguration(Configuration): """Configuration for MicrosoftStorageSync. Note that all parameters used to create this instance are saved as instance attributes. :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. :type subscription_id: str """ def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any ) -> None: if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") super(MicrosoftStorageSyncConfiguration, self).__init__(**kwargs) self.credential = credential self.subscription_id = subscription_id self.api_version = "2020-09-01" self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'mgmt-storagesync/{}'.format(VERSION)) self._configure(**kwargs) def _configure( self, **kwargs: Any ) -> None: self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) self.authentication_policy = kwargs.get('authentication_policy') if self.credential and not self.authentication_policy: self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
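# ---------------------------------------------------------------------
# Editor's usage sketch (not part of the generated file): the
# configuration object is normally built for you by the generated
# client, but it can be constructed directly. DefaultAzureCredential
# comes from the separate azure-identity package (an assumed
# dependency here), and the subscription id below is a placeholder.
def _configuration_example():
    from azure.identity.aio import DefaultAzureCredential
    config = MicrosoftStorageSyncConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    assert config.api_version == "2020-09-01"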
mit
-6,422,053,421,144,741,000
46.895522
134
0.681209
false
antlarr/picard
picard/ui/options/genres.py
2
3074
# -*- coding: utf-8 -*- # # Picard, the next-generation MusicBrainz tagger # Copyright (C) 2008 Lukáš Lalinský # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from picard import config from picard.ui.options import ( OptionsPage, register_options_page, ) from picard.ui.ui_options_genres import Ui_GenresOptionsPage class GenresOptionsPage(OptionsPage): NAME = "genres" TITLE = N_("Genres") PARENT = "metadata" SORT_ORDER = 20 ACTIVE = True options = [ config.BoolOption("setting", "use_genres", False), config.IntOption("setting", "max_genres", 5), config.IntOption("setting", "min_genre_usage", 90), config.TextOption("setting", "ignore_genres", "seen live, favorites, fixme, owned"), config.TextOption("setting", "join_genres", ""), config.BoolOption("setting", "only_my_genres", False), config.BoolOption("setting", "artists_genres", False), config.BoolOption("setting", "folksonomy_tags", False), ] def __init__(self, parent=None): super().__init__(parent) self.ui = Ui_GenresOptionsPage() self.ui.setupUi(self) def load(self): self.ui.use_genres.setChecked(config.setting["use_genres"]) self.ui.max_genres.setValue(config.setting["max_genres"]) self.ui.min_genre_usage.setValue(config.setting["min_genre_usage"]) self.ui.join_genres.setEditText(config.setting["join_genres"]) self.ui.ignore_genres.setText(config.setting["ignore_genres"]) self.ui.only_my_genres.setChecked(config.setting["only_my_genres"]) self.ui.artists_genres.setChecked(config.setting["artists_genres"]) self.ui.folksonomy_tags.setChecked(config.setting["folksonomy_tags"]) def save(self): config.setting["use_genres"] = self.ui.use_genres.isChecked() config.setting["max_genres"] = self.ui.max_genres.value() config.setting["min_genre_usage"] = self.ui.min_genre_usage.value() config.setting["join_genres"] = self.ui.join_genres.currentText() config.setting["ignore_genres"] = self.ui.ignore_genres.text() config.setting["only_my_genres"] = self.ui.only_my_genres.isChecked() config.setting["artists_genres"] = self.ui.artists_genres.isChecked() config.setting["folksonomy_tags"] = self.ui.folksonomy_tags.isChecked() register_options_page(GenresOptionsPage)
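# ---------------------------------------------------------------------
# Editor's sketch (not part of Picard): other Picard code reads these
# settings through the same config.setting mapping used by load() and
# save() above. The helper below is illustrative only; the setting
# keys are the ones registered in GenresOptionsPage.options.
def _genre_settings_snapshot():
    keys = ("use_genres", "max_genres", "min_genre_usage",
            "join_genres", "ignore_genres", "only_my_genres",
            "artists_genres", "folksonomy_tags")
    # Snapshot the genre-related settings this page manages.
    return {key: config.setting[key] for key in keys}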
gpl-2.0
5,159,729,941,301,588,000
40.5
92
0.688049
false
VincentVW/OIPA
OIPA/iati/updater.py
3
3879
from iati.models import Sector, BudgetType, DescriptionType import ujson import os import os.path class SectorUpdater(): def update_unesco_sectors(self): base = os.path.dirname(os.path.abspath(__file__)) location = base + "/data_backup/unesco_sectors.json" json_data = open(location) unesco_sectors = ujson.load(json_data) for cr in unesco_sectors: try: code = int(cr) name = unesco_sectors[cr]['name'] if Sector.objects.filter(code=code).exists(): the_sector = Sector.objects.get(code=code) the_sector.name = name else: the_sector = Sector(code=code, name=name) the_sector.save() except Exception as e: print "error in update_country_sectors" + str(type) print e.args return False json_data.close() return True def update_rain_sectors(self): base = os.path.dirname(os.path.abspath(__file__)) location = base + "/data_backup/rain_sectors.json" json_data = open(location) rain_sectors = ujson.load(json_data) for cr in rain_sectors: try: code = int(cr) name = rain_sectors[cr]['name'] if Sector.objects.filter(code=code).exists(): the_sector = Sector.objects.get(code=code) the_sector.name = name else: the_sector = Sector(code=code, name=name) the_sector.save() except Exception as e: print "error in update_rain_sectors" + str(type) print e.args return False json_data.close() base = os.path.dirname(os.path.abspath(__file__)) location = base + "/data_backup/rain_budget_types.json" json_data = open(location) rain_sectors = ujson.load(json_data) for cr in rain_sectors: try: code = int(cr) name = rain_sectors[cr]['name'] if BudgetType.objects.filter(code=code).exists(): the_sector = BudgetType.objects.get(code=code) the_sector.name = name else: the_sector = BudgetType(code=code, name=name, language="en") the_sector.save() except Exception as e: print "error in update_rain_sectors" + str(type) print e.args return False json_data.close() base = os.path.dirname(os.path.abspath(__file__)) location = base + "/data_backup/rain_description_types.json" json_data = open(location) rain_sectors = ujson.load(json_data) for cr in rain_sectors: try: code = int(cr) name = rain_sectors[cr]['name'] if DescriptionType.objects.filter(code=code).exists(): the_sector = DescriptionType.objects.get(code=code) the_sector.name = name else: the_sector = DescriptionType(code=code, name=name, description="RAIN description") the_sector.save() except Exception as e: print "error in update_rain_sectors" + str(type) print e.args return False json_data.close() return True
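# ---------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): the updater
# expects a configured Django environment, since it writes through the
# iati models. A typical invocation from a shell or management command:
def _run_all_updates():
    updater = SectorUpdater()
    if not updater.update_unesco_sectors():
        print "unesco sector update failed"
    if not updater.update_rain_sectors():
        print "rain sector update failed"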
agpl-3.0
-2,873,489,906,242,336,000
31.596639
106
0.472544
false
paxapy/zulip
analytics/management/commands/analyze_mit.py
4
3812
from __future__ import absolute_import from __future__ import print_function from typing import Any from optparse import make_option from django.core.management.base import BaseCommand, CommandParser from zerver.models import Recipient, Message from zerver.lib.timestamp import timestamp_to_datetime import datetime import time import logging def compute_stats(log_level): # type: (int) -> None logger = logging.getLogger() logger.setLevel(log_level) one_week_ago = timestamp_to_datetime(time.time()) - datetime.timedelta(weeks=1) mit_query = Message.objects.filter(sender__realm__domain="mit.edu", recipient__type=Recipient.STREAM, pub_date__gt=one_week_ago) for bot_sender_start in ["imap.", "rcmd.", "sys."]: mit_query = mit_query.exclude(sender__email__startswith=(bot_sender_start)) # Filtering for "/" covers tabbott/extra@ and all the daemon/foo bots. mit_query = mit_query.exclude(sender__email__contains=("/")) mit_query = mit_query.exclude(sender__email__contains=("aim.com")) mit_query = mit_query.exclude( sender__email__in=["rss@mit.edu", "bash@mit.edu", "apache@mit.edu", "bitcoin@mit.edu", "lp@mit.edu", "clocks@mit.edu", "root@mit.edu", "nagios@mit.edu", "www-data|local-realm@mit.edu"]) user_counts = {} # type: Dict[str, Dict[str, int]] for m in mit_query.select_related("sending_client", "sender"): email = m.sender.email user_counts.setdefault(email, {}) user_counts[email].setdefault(m.sending_client.name, 0) user_counts[email][m.sending_client.name] += 1 total_counts = {} # type: Dict[str, int] total_user_counts = {} # type: Dict[str, int] for email, counts in user_counts.items(): total_user_counts.setdefault(email, 0) for client_name, count in counts.items(): total_counts.setdefault(client_name, 0) total_counts[client_name] += count total_user_counts[email] += count logging.debug("%40s | %10s | %s" % ("User", "Messages", "Percentage Zulip")) top_percents = {} # type: Dict[int, float] for size in [10, 25, 50, 100, 200, len(total_user_counts.keys())]: top_percents[size] = 0.0 for i, email in enumerate(sorted(total_user_counts.keys(), key=lambda x: -total_user_counts[x])): percent_zulip = round(100 - (user_counts[email].get("zephyr_mirror", 0)) * 100. / total_user_counts[email], 1) for size in top_percents.keys(): top_percents.setdefault(size, 0) if i < size: top_percents[size] += (percent_zulip * 1.0 / size) logging.debug("%40s | %10s | %s%%" % (email, total_user_counts[email], percent_zulip)) logging.info("") for size in sorted(top_percents.keys()): logging.info("Top %6s | %s%%" % (size, round(top_percents[size], 1))) grand_total = sum(total_counts.values()) print(grand_total) logging.info("%15s | %s" % ("Client", "Percentage")) for client in total_counts.keys(): logging.info("%15s | %s%%" % (client, round(100. * total_counts[client] / grand_total, 1))) class Command(BaseCommand): help = "Compute statistics on MIT Zephyr usage." def add_arguments(self, parser): # type: (CommandParser) -> None parser.add_argument('--verbose', default=False, action='store_true') def handle(self, *args, **options): # type: (*Any, **Any) -> None level = logging.INFO if options["verbose"]: level = logging.DEBUG compute_stats(level)
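# ---------------------------------------------------------------------
# Editor's usage sketch (not part of the original command): this is
# normally run as ``python manage.py analyze_mit [--verbose]``, but
# compute_stats() can also be called directly once Django settings
# are configured.
def _example_invocation():
    compute_stats(logging.DEBUG)  # equivalent to passing --verbose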
apache-2.0
6,711,362,981,282,459,000
42.816092
99
0.590766
false
tszym/ansible
lib/ansible/modules/network/ordnance/ordnance_config.py
8
12513
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = """
---
module: ordnance_config
version_added: "2.3"
author: "Alexander Turner (alex.turner@ordnance.io)"
short_description: Manage Ordnance configuration sections
description:
  - Ordnance router configurations use a simple block indent file syntax
    for segmenting configuration into sections. This module provides
    an implementation for working with these configuration sections in
    a deterministic way.
options:
  lines:
    description:
      - The ordered set of commands that should be configured in the
        section. The commands must be the exact same commands as found
        in the device running-config. Be sure to note the configuration
        command syntax as some commands are automatically modified by the
        device config parser.
    required: false
    default: null
    aliases: ['commands']
  parents:
    description:
      - The ordered set of parents that uniquely identify the section
        the commands should be checked against. If the parents argument
        is omitted, the commands are checked against the set of top
        level or global commands.
    required: false
    default: null
  src:
    description:
      - Specifies the source path to the file that contains the configuration
        or configuration template to load. The path to the source file can
        either be the full path on the Ansible control host or a relative
        path from the playbook or role root directory. This argument is
        mutually exclusive with I(lines).
    required: false
    default: null
  before:
    description:
      - The ordered set of commands to push on to the command stack if
        a change needs to be made. This allows the playbook designer
        the opportunity to perform configuration commands prior to pushing
        any changes without affecting how the set of commands are matched
        against the system.
    required: false
    default: null
  after:
    description:
      - The ordered set of commands to append to the end of the command
        stack if a change needs to be made. Just like with I(before) this
        allows the playbook designer to append a set of commands to be
        executed after the command set.
    required: false
    default: null
  match:
    description:
      - Instructs the module on the way to perform the matching of
        the set of commands against the current device config. If
        match is set to I(line), commands are matched line by line. If
        match is set to I(strict), command lines are matched with respect
        to position. If match is set to I(exact), command lines
        must be an equal match. Finally, if match is set to I(none), the
        module will not attempt to compare the source configuration with
        the running configuration on the remote device.
    required: false
    default: line
    choices: ['line', 'strict', 'exact', 'none']
  replace:
    description:
      - Instructs the module on the way to perform the configuration
        on the device. If the replace argument is set to I(line) then
        the modified lines are pushed to the device in configuration
        mode. If the replace argument is set to I(block) then the entire
        command block is pushed to the device in configuration mode if any
        line is not correct.
    required: false
    default: line
    choices: ['line', 'block']
  multiline_delimiter:
    description:
      - This argument is used when pushing a multiline configuration
        element to the Ordnance router. It specifies the character to use
        as the delimiting character. This only applies to the
        configuration action.
    required: false
    default: "@"
  backup:
    description:
      - This argument will cause the module to create a full backup of
        the current C(running-config) from the remote device before any
        changes are made. The backup file is written to the C(backup)
        folder in the playbook root directory. If the directory does not
        exist, it is created.
    required: false
    default: no
    choices: ['yes', 'no']
  config:
    description:
      - The C(config) argument allows the playbook designer to supply
        the base configuration to be used to validate configuration
        changes necessary. If this argument is provided, the module
        will not download the running-config from the remote node.
    required: false
    default: null
  defaults:
    description:
      - This argument specifies whether or not to collect all defaults
        when getting the remote device running config. When enabled,
        the module will get the current config by issuing the command
        C(show running-config all).
    required: false
    default: no
    choices: ['yes', 'no']
  save:
    description:
      - The C(save) argument instructs the module to save the running-
        config to the startup-config at the conclusion of the module
        running. If check mode is specified, this argument is ignored.
    required: false
    default: no
    choices: ['yes', 'no']
"""

EXAMPLES = """
---
# Note: examples below use the following provider dict to handle
#       transport and authentication to the node.
vars:
  cli:
    host: "{{ inventory_hostname }}"
    username: RouterName
    password: password
    transport: cli

---
- name: configure top level configuration
  ordnance_config:
    lines: hostname {{ inventory_hostname }}
    provider: "{{ cli }}"

- name: configure interface settings
  ordnance_config:
    lines:
      - description test interface
      - ip address 172.31.1.1 255.255.255.0
    parents: interface Ethernet1
    provider: "{{ cli }}"

- name: configure bgp router
  ordnance_config:
    lines:
      - neighbor 1.1.1.1 remote-as 1234
      - network 10.0.0.0/24
    parents: router bgp 65001
    provider: "{{ cli }}"
"""

RETURN = """
updates:
  description: The set of commands that will be pushed to the remote device
  returned: Only when lines is specified.
  type: list
  sample: ['...', '...']
backup_path:
  description: The full path to the backup file
  returned: when backup is yes
  type: string
  sample: /playbooks/ansible/backup/ordnance_config.2016-07-16@22:28:34
"""

import re
import time
import traceback

from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.netcli import Command
from ansible.module_utils.ordnance import get_config
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native


def check_args(module, warnings):
    if module.params['multiline_delimiter']:
        if len(module.params['multiline_delimiter']) != 1:
            module.fail_json(msg='multiline_delimiter value can only be a '
                                 'single character')
    # 'force' is not declared in argument_spec, so guard the lookup
    if module.params.get('force'):
        warnings.append('The force argument is deprecated, please use '
                        'match=none instead. This argument will be '
                        'removed in the future')


def extract_banners(config):
    banners = {}
    banner_cmds = re.findall(r'^banner (\w+)', config, re.M)
    for cmd in banner_cmds:
        regex = r'banner %s \^C(.+?)(?=\^C)' % cmd
        match = re.search(regex, config, re.S)
        if match:
            key = 'banner %s' % cmd
            banners[key] = match.group(1).strip()

    for cmd in banner_cmds:
        regex = r'banner %s \^C(.+?)(?=\^C)' % cmd
        match = re.search(regex, config, re.S)
        if match:
            config = config.replace(str(match.group(1)), '')

    config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config)
    return (config, banners)


def diff_banners(want, have):
    candidate = {}
    for key, value in iteritems(want):
        if value != have.get(key):
            candidate[key] = value
    return candidate


def load_banners(module, banners):
    delimiter = module.params['multiline_delimiter']
    for key, value in iteritems(banners):
        key += ' %s' % delimiter
        for cmd in ['config terminal', key, value, delimiter, 'end']:
            cmd += '\r'
            module.connection.shell.shell.sendall(cmd)
        time.sleep(1)
        module.connection.shell.receive()


def get_config(module, result):
    contents = module.params['config']
    if not contents:
        defaults = module.params['defaults']
        contents = module.config.get_config(include_defaults=defaults)

    contents, banners = extract_banners(contents)
    return NetworkConfig(indent=1, contents=contents), banners


def get_candidate(module):
    candidate = NetworkConfig(indent=1)
    banners = {}

    if module.params['src']:
        src, banners = extract_banners(module.params['src'])
        candidate.load(src)

    elif module.params['lines']:
        parents = module.params['parents'] or list()
        candidate.add(module.params['lines'], parents=parents)

    return candidate, banners


def run(module, result):
    match = module.params['match']
    replace = module.params['replace']
    path = module.params['parents']

    candidate, want_banners = get_candidate(module)

    if match != 'none':
        config, have_banners = get_config(module, result)
        path = module.params['parents']
        configobjs = candidate.difference(config, path=path, match=match,
                                          replace=replace)
    else:
        configobjs = candidate.items
        have_banners = {}

    banners = diff_banners(want_banners, have_banners)

    if configobjs or banners:
        commands = dumps(configobjs, 'commands').split('\n')

        if module.params['lines']:
            if module.params['before']:
                commands[:0] = module.params['before']

            if module.params['after']:
                commands.extend(module.params['after'])

        result['updates'] = commands
        result['banners'] = banners

        # send the configuration commands to the device and merge
        # them with the current running config
        if not module.check_mode:
            if commands:
                module.config(commands)
            if banners:
                load_banners(module, banners)

        result['changed'] = True

    if module.params['save']:
        if not module.check_mode:
            module.config.save_config()
        result['changed'] = True


def main():
    """ main entry point for module execution
    """
    argument_spec = dict(
        src=dict(type='path'),

        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),

        before=dict(type='list'),
        after=dict(type='list'),

        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        multiline_delimiter=dict(default='@'),

        config=dict(),
        defaults=dict(type='bool', default=False),

        backup=dict(type='bool', default=False),
        save=dict(default=False, type='bool'),
    )

    mutually_exclusive = [('lines', 'src')]

    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines'])]

    module = NetworkModule(argument_spec=argument_spec,
                           connect_on_load=False,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)

    # 'force' is not declared in argument_spec, so guard the lookup
    if module.params.get('force') is True:
        module.params['match'] = 'none'

    warnings = list()
    check_args(module, warnings)

    result = dict(changed=False, warnings=warnings)

    if module.params['backup']:
        result['__backup__'] = module.config.get_config()

    try:
        run(module, result)
    except NetworkError as e:
        module.disconnect()
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    module.disconnect()
    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
5724727704387658000
32.637097
92
0.640294
false
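The subtlest part of ordnance_config.py above is extract_banners, which strips free-form banner bodies out of the config so line-oriented diffing is not confused by them. Below is a minimal, self-contained sketch of the same approach; the SAMPLE_CONFIG text is invented for illustration, and the regexes mirror the ones in the module.

import re

# Invented sample running-config fragment; Ordnance/IOS-style banners are
# wrapped in ^C delimiters.
SAMPLE_CONFIG = """hostname router1
banner motd ^C
Authorized access only!
^C
interface Ethernet1
"""

def extract_banners(config):
    # Capture each "banner <type>" body, then blank it out of the config so
    # that line-based comparison only sees the marker, not the banner text.
    banners = {}
    for cmd in re.findall(r'^banner (\w+)', config, re.M):
        match = re.search(r'banner %s \^C(.+?)(?=\^C)' % cmd, config, re.S)
        if match:
            banners['banner %s' % cmd] = match.group(1).strip()
            config = config.replace(match.group(1), '')
    config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config)
    return config, banners

cleaned, banners = extract_banners(SAMPLE_CONFIG)
print(banners)  # {'banner motd': 'Authorized access only!'}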
github-borat/cinder
cinder/api/middleware/fault.py
1
3078
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import webob.dec
import webob.exc

from cinder.api.openstack import wsgi
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import wsgi as base_wsgi

LOG = logging.getLogger(__name__)


class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""

    _status_to_type = {}

    @staticmethod
    def status_to_type(status):
        if not FaultWrapper._status_to_type:
            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                FaultWrapper._status_to_type[clazz.code] = clazz
        return FaultWrapper._status_to_type.get(
            status, webob.exc.HTTPInternalServerError)()

    def _error(self, inner, req):
        if not isinstance(inner, exception.QuotaError):
            LOG.exception(_("Caught error: %s"), unicode(inner))

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            msg = (inner.msg if isinstance(inner, exception.CinderException)
                   else unicode(inner))
            params = {'exception': inner.__class__.__name__,
                      'explanation': msg}
            outer.explanation = _('%(exception)s: %(explanation)s') % params
        return wsgi.Fault(outer)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
apache-2.0
-8071283626662000000
38.461538
78
0.659194
false
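FaultWrapper.status_to_type above builds its status-to-exception map lazily by walking the webob.exc class hierarchy on first use. Here is a minimal, runnable sketch of that pattern; walk_class_hierarchy is a simplified version of the cinder.utils helper, and the HTTP* classes are hypothetical stand-ins for webob.exc.

class HTTPError(Exception):
    code = None

class HTTPBadRequest(HTTPError):
    code = 400

class HTTPNotFound(HTTPError):
    code = 404

class HTTPInternalServerError(HTTPError):
    code = 500

def walk_class_hierarchy(clazz):
    # Yield every transitive subclass of clazz, depth-first.
    for subclass in clazz.__subclasses__():
        for subsub in walk_class_hierarchy(subclass):
            yield subsub
        yield subclass

_status_to_type = {}

def status_to_type(status):
    # Populate the map once, then fall back to 500 for unknown status
    # codes -- the same shape as the middleware's static method above.
    if not _status_to_type:
        for clazz in walk_class_hierarchy(HTTPError):
            _status_to_type[clazz.code] = clazz
    return _status_to_type.get(status, HTTPInternalServerError)()

print(type(status_to_type(404)).__name__)  # HTTPNotFound
print(type(status_to_type(418)).__name__)  # HTTPInternalServerError (fallback)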
Hybrid-Cloud/badam
patches_tool/aws_patch/aws_deps/libcloud/test/compute/test_vcl.py
1
4978
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import sys

from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import xmlrpclib

from libcloud.compute.drivers.vcl import VCLNodeDriver as VCL
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import VCL_PARAMS


class VCLTests(unittest.TestCase):

    def setUp(self):
        VCL.connectionCls.conn_classes = (
            VCLMockHttp, VCLMockHttp)
        VCLMockHttp.type = None
        self.driver = VCL(*VCL_PARAMS)

    def test_list_nodes(self):
        node = self.driver.list_nodes(ipaddr='192.168.1.1')[0]
        self.assertEqual(node.name, 'CentOS 5.4 Base (32 bit VM)')
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertEqual(node.extra['pass'], 'ehkNGW')

    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(image.id, '8')

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 1)

    def test_create_node(self):
        image = self.driver.list_images()[0]
        node = self.driver.create_node(image=image)
        self.assertEqual(node.id, '51')

    def test_destroy_node(self):
        node = self.driver.list_nodes(ipaddr='192.168.1.1')[0]
        self.assertTrue(self.driver.destroy_node(node))

    def test_ex_update_node_access(self):
        node = self.driver.list_nodes(ipaddr='192.168.1.1')[0]
        node = self.driver.ex_update_node_access(node, ipaddr='192.168.1.2')
        self.assertEqual(node.name, 'CentOS 5.4 Base (32 bit VM)')
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertEqual(node.extra['pass'], 'ehkNGW')

    def test_ex_extend_request_time(self):
        node = self.driver.list_nodes(ipaddr='192.168.1.1')[0]
        self.assertTrue(self.driver.ex_extend_request_time(node, 60))

    def test_ex_get_request_end_time(self):
        node = self.driver.list_nodes(ipaddr='192.168.1.1')[0]
        self.assertEqual(
            self.driver.ex_get_request_end_time(node),
            1334168100
        )


class VCLMockHttp(MockHttp):
    fixtures = ComputeFileFixtures('vcl')

    def _get_method_name(self, type, use_param, qs, path):
        return "_xmlrpc"

    def _xmlrpc(self, method, url, body, headers):
        params, meth_name = xmlrpclib.loads(body)
        if self.type:
            meth_name = "%s_%s" % (meth_name, self.type)
        return getattr(self, meth_name)(method, url, body, headers)

    def XMLRPCgetImages(self, method, url, body, headers):
        body = self.fixtures.load('XMLRPCgetImages.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def XMLRPCextendRequest(self, method, url, body, headers):
        body = self.fixtures.load('XMLRPCextendRequest.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def XMLRPCgetRequestIds(self, method, url, body, headers):
        body = self.fixtures.load(
            'XMLRPCgetRequestIds.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def XMLRPCgetRequestStatus(self, method, url, body, headers):
        body = self.fixtures.load(
            'XMLRPCgetRequestStatus.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def XMLRPCendRequest(self, method, url, body, headers):
        body = self.fixtures.load(
            'XMLRPCendRequest.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def XMLRPCaddRequest(self, method, url, body, headers):
        body = self.fixtures.load(
            'XMLRPCaddRequest.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def XMLRPCgetRequestConnectData(self, method, url, body, headers):
        body = self.fixtures.load(
            'XMLRPCgetRequestConnectData.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-2.0
-6919046569781226000
37.19685
76
0.652873
false
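The interesting trick in VCLMockHttp above is _xmlrpc: every request routes through one handler, which decodes the XML-RPC request body to recover the method name and then dispatches to a mock method of the same name. A small sketch of the decode step, assuming the Python 3 stdlib module (libcloud's py3 shim exposes the same xmlrpclib API):

import xmlrpc.client as xmlrpclib

# Build a request body the way a real client would, then decode it the way
# the mock does. The parameter value is an arbitrary example.
body = xmlrpclib.dumps(('192.168.1.1',), methodname='XMLRPCgetRequestIds')
params, meth_name = xmlrpclib.loads(body)

print(meth_name)  # XMLRPCgetRequestIds
print(params)     # ('192.168.1.1',)
# The mock then calls getattr(self, meth_name)(...) to serve the matching
# XML fixture file.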
markwash/hoke
hoke/commands/show_new.py
1
1378
from .. import display
from .. import models
from .. import states


def get_command():
    return Command()


class Command(object):
    name = 'new'
    help = ('List new blueprints')

    def add_arguments(self, parser):
        pass

    def execute(self, hoke_db, args):
        bps = self.get_new_bps(hoke_db)
        bps.sort(key=lambda x: x.date_created)
        self.display(bps)

    def get_new_bps(self, hoke_db):
        bps = []
        for raw_bp in hoke_db.list_blueprints():
            bp = models.blueprint_from_raw(raw_bp)
            new_check = states.get_new_check()
            if new_check.check(bp):
                bps.append(bp)
        return bps

    def display(self, bps):
        print "There are {} new blueprints".format(len(bps))
        print
        first = True
        for bp in bps:
            if first:
                first = False
            else:
                print
            print u"{}:".format(bp.title)
            print u"    Owner: {}".format(bp.owner)
            print u"    Assignee: {}".format(bp.assignee)
            print u"    Date Created: {}".format(
                bp.date_created.date().isoformat())
            print u"    URL: {}".format(bp.url)
            print u"    Summary:"
            display.print_summary(bp.summary, prefix='        ')
        print
        print "Displayed {} new blueprints".format(len(bps))
bsd-2-clause
-8687910759248600000
27.122449
60
0.524673
false
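Command.execute above sorts blueprints oldest-first by date_created before displaying them. A stand-in demo of that ordering contract; the Blueprint namedtuple is hypothetical and only mimics the attributes of hoke's models:

from collections import namedtuple
from datetime import datetime

Blueprint = namedtuple('Blueprint', 'title date_created')
bps = [Blueprint('b', datetime(2014, 3, 1)),
       Blueprint('a', datetime(2014, 1, 5))]

# Same key function as Command.execute uses.
bps.sort(key=lambda x: x.date_created)
print([bp.title for bp in bps])  # ['a', 'b'] -- oldest first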
opencord/voltha
common/utils/id_generation.py
1
4021
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

""" ID generation utils """

from uuid import uuid4

BROADCAST_CORE_ID = hex(0xFFFF)[2:]


def get_next_core_id(current_id_in_hex_str):
    """
    :param current_id_in_hex_str: a hex string of the maximum core id
    assigned without the leading 0x characters
    :return: current_id_in_hex_str + 1 in hex string
    """
    if not current_id_in_hex_str or current_id_in_hex_str == '':
        return '0001'
    else:
        return format(int(current_id_in_hex_str, 16) + 1, '04x')


def create_cluster_logical_device_ids(core_id, switch_id):
    """
    Creates a logical device id and an OpenFlow datapath id that is unique
    across the Voltha cluster. The returned logical device id represents
    a 64 bits integer where the lower 48 bits is the switch id and the
    upper 16 bits is the core id. For the datapath id the core id is set
    to '0000' as it is not used for voltha core routing.
    :param core_id: string
    :param switch_id: int
    :return: cluster logical device id and OpenFlow datapath id
    """
    switch_id = format(switch_id, '012x')
    core_in_hex = format(int(core_id, 16), '04x')
    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
    dpid_id = '{}{}'.format('0000', switch_id[-12:])
    return ld_id, int(dpid_id, 16)


def is_broadcast_core_id(id):
    assert id and len(id) == 16
    return id[:4] == BROADCAST_CORE_ID


def create_empty_broadcast_id():
    """
    Returns an empty broadcast id (ffff000000000000). The id is used to
    dispatch xPON objects across all the Voltha instances.
    :return: An empty broadcast id
    """
    return '{}{}'.format(BROADCAST_CORE_ID, '0' * 12)


def create_cluster_id():
    """
    Returns an id that is common across all voltha instances. The id
    is a str of 64 bits. The lower 48 bits refers to an id specific to
    that object while the upper 16 bits refers to a broadcast core_id.
    :return: A common id across all Voltha instances
    """
    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])


def create_cluster_device_id(core_id):
    """
    Creates a device id that is unique across the Voltha cluster.
    The device id is a str of 64 bits. The lower 48 bits refers to the
    device id while the upper 16 bits refers to the core id.
    :param core_id: string
    :return: cluster device id
    """
    return '{}{}'.format(format(int(core_id), '04x'), uuid4().hex[:12])


def get_core_id_from_device_id(device_id):
    # Device id is a string and the first 4 characters represent the core_id
    assert device_id and len(device_id) == 16
    # Get the leading 4 hexs and remove leading 0's
    return device_id[:4]


def get_core_id_from_logical_device_id(logical_device_id):
    """
    Logical Device id is a string and the first 4 characters represent the
    core_id
    :param logical_device_id:
    :return: core_id string
    """
    assert logical_device_id and len(logical_device_id) == 16
    # Get the leading 4 hexs and remove leading 0's
    return logical_device_id[:4]


def get_core_id_from_datapath_id(datapath_id):
    """
    datapath id is a uint64 where:
        - low 48 bits  -> switch_id
        - high 16 bits -> core id
    :param datapath_id:
    :return: core_id string
    """
    assert datapath_id
    # Get the hex string and remove the '0x' prefix
    id_in_hex_str = hex(datapath_id)[2:]
    assert len(id_in_hex_str) > 12
    return id_in_hex_str[:-12]
apache-2.0
-7519366933427644000
33.663793
78
0.667993
false
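Since the id layout documented above (upper 16 bits core id, lower 48 bits switch or device id, rendered as 16 hex digits) is easy to get wrong, here is a short runnable walk-through of composing and decomposing the ids; the core_id and switch_id values are arbitrary examples:

# Compose a logical device id and datapath id the way
# create_cluster_logical_device_ids does above.
core_id = '0001'     # 16-bit core id as a hex string
switch_id = 0xabc    # 48-bit switch id as an int

switch_hex = format(switch_id, '012x')
ld_id = '{}{}'.format(format(int(core_id, 16), '04x'), switch_hex)
dpid = int('0000' + switch_hex, 16)

print(ld_id)      # 0001000000000abc
print(ld_id[:4])  # 0001 -> what get_core_id_from_logical_device_id returns
print(hex(dpid))  # 0xabc -> core bits zeroed, as for the OpenFlow datapath id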
wagavulin/arrow
cpp/build-support/cpplint.py
6
249597
#!/usr/bin/env python # # Copyright (c) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Does google-lint on c++ files. The goal of this script is to identify places in the code that *may* be in non-compliance with google style. It does not attempt to fix up these problems -- the point is to educate. It does also not attempt to find all problems, or to ensure that everything it does find is legitimately a problem. In particular, we can get very confused by /* and // inside strings! We do a small hack, which is to ignore //'s with "'s after them on the same line, but it is far from perfect (in either direction). """ import codecs import copy import getopt import glob import itertools import math # for log import os import re import sre_compile import string import sys import unicodedata import xml.etree.ElementTree # if empty, use defaults _header_extensions = set([]) # if empty, use defaults _valid_extensions = set([]) # Files with any of these extensions are considered to be # header files (and will undergo different style checks). # This set can be extended by using the --headers # option (also supported in CPPLINT.cfg) def GetHeaderExtensions(): if not _header_extensions: return set(['h', 'hpp', 'hxx', 'h++', 'cuh']) return _header_extensions # The allowed extensions for file names # This is set by --extensions flag def GetAllExtensions(): if not _valid_extensions: return GetHeaderExtensions().union(set(['c', 'cc', 'cpp', 'cxx', 'c++', 'cu'])) return _valid_extensions def GetNonHeaderExtensions(): return GetAllExtensions().difference(GetHeaderExtensions()) _USAGE = """ Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit] [--filter=-x,+y,...] [--counting=total|toplevel|detailed] [--repository=path] [--root=subdir] [--linelength=digits] [--recursive] [--exclude=path] [--headers=ext1,ext2] [--extensions=hpp,cpp,...] <file> [file] ... 
The style guidelines this tries to follow are those in https://google.github.io/styleguide/cppguide.html Every problem is given a confidence score from 1-5, with 5 meaning we are certain of the problem, and 1 meaning it could be a legitimate construct. This will miss some errors, and is not a substitute for a code review. To suppress false-positive errors of a certain category, add a 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) suppresses errors of all categories on that line. The files passed in will be linted; at least one file must be provided. Default linted extensions are %s. Other file types will be ignored. Change the extensions with the --extensions flag. Flags: output=emacs|eclipse|vs7|junit By default, the output is formatted to ease emacs parsing. Output compatible with eclipse (eclipse), Visual Studio (vs7), and JUnit XML parsers such as those used in Jenkins and Bamboo may also be used. Other formats are unsupported. verbose=# Specify a number 0-5 to restrict errors to certain verbosity levels. Errors with lower verbosity levels have lower confidence and are more likely to be false positives. quiet Supress output other than linting errors, such as information about which files have been processed and excluded. filter=-x,+y,... Specify a comma-separated list of category-filters to apply: only error messages whose category names pass the filters will be printed. (Category names are printed with the message and look like "[whitespace/indent]".) Filters are evaluated left to right. "-FOO" and "FOO" means "do not print categories that start with FOO". "+FOO" means "do print categories that start with FOO". Examples: --filter=-whitespace,+whitespace/braces --filter=whitespace,runtime/printf,+runtime/printf_format --filter=-,+build/include_what_you_use To see a list of all the categories used in cpplint, pass no arg: --filter= counting=total|toplevel|detailed The total number of errors found is always printed. If 'toplevel' is provided, then the count of errors in each of the top-level categories like 'build' and 'whitespace' will also be printed. If 'detailed' is provided, then a count is provided for each category like 'build/class'. repository=path The top level directory of the repository, used to derive the header guard CPP variable. By default, this is determined by searching for a path that contains .git, .hg, or .svn. When this flag is specified, the given path is used instead. This option allows the header guard CPP variable to remain consistent even if members of a team have different repository root directories (such as when checking out a subdirectory with SVN). In addition, users of non-mainstream version control systems can use this flag to ensure readable header guard CPP variables. Examples: Assuming that Alice checks out ProjectName and Bob checks out ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then with no --repository flag, the header guard CPP variable will be: Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_ Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_ If Alice uses the --repository=trunk flag and Bob omits the flag or uses --repository=. then the header guard CPP variable will be: Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_ Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_ root=subdir The root directory used for deriving header guard CPP variables. 
This directory is relative to the top level directory of the repository which by default is determined by searching for a directory that contains .git, .hg, or .svn but can also be controlled with the --repository flag. If the specified directory does not exist, this flag is ignored. Examples: Assuming that src is the top level directory of the repository, the header guard CPP variables for src/chrome/browser/ui/browser.h are: No flag => CHROME_BROWSER_UI_BROWSER_H_ --root=chrome => BROWSER_UI_BROWSER_H_ --root=chrome/browser => UI_BROWSER_H_ linelength=digits This is the allowed line length for the project. The default value is 80 characters. Examples: --linelength=120 recursive Search for files to lint recursively. Each directory given in the list of files to be linted is replaced by all files that descend from that directory. Files with extensions not in the valid extensions list are excluded. exclude=path Exclude the given path from the list of files to be linted. Relative paths are evaluated relative to the current directory and shell globbing is performed. This flag can be provided multiple times to exclude multiple files. Examples: --exclude=one.cc --exclude=src/*.cc --exclude=src/*.cc --exclude=test/*.cc extensions=extension,extension,... The allowed file extensions that cpplint will check Examples: --extensions=%s headers=extension,extension,... The allowed header extensions that cpplint will consider to be header files (by default, only files with extensions %s will be assumed to be headers) Examples: --headers=%s cpplint.py supports per-directory configurations specified in CPPLINT.cfg files. CPPLINT.cfg file can contain a number of key=value pairs. Currently the following options are supported: set noparent filter=+filter1,-filter2,... exclude_files=regex linelength=80 root=subdir "set noparent" option prevents cpplint from traversing directory tree upwards looking for more .cfg files in parent directories. This option is usually placed in the top-level project directory. The "filter" option is similar in function to --filter flag. It specifies message filters in addition to the |_DEFAULT_FILTERS| and those specified through --filter command-line flag. "exclude_files" allows to specify a regular expression to be matched against a file name. If the expression matches, the file is skipped and not run through the linter. "linelength" specifies the allowed line length for the project. The "root" option is similar in function to the --root flag (see example above). CPPLINT.cfg has an effect on files in the same directory and all subdirectories, unless overridden by a nested configuration file. Example file: filter=-build/include_order,+build/include_alpha exclude_files=.*\\.cc The above example disables build/include_order warning and enables build/include_alpha as well as excludes all .cc from being processed by linter, in the current directory (where the .cfg file is located) and all subdirectories. """ % (list(GetAllExtensions()), ','.join(list(GetAllExtensions())), GetHeaderExtensions(), ','.join(GetHeaderExtensions())) # We categorize each error message we print. Here are the categories. # We want an explicit list so we can list them all in cpplint --filter=. # If you add a new error message with a new category, add it to the list # here! cpplint_unittest.py should tell you if you forget to do this. 
_ERROR_CATEGORIES = [ 'build/class', 'build/c++11', 'build/c++14', 'build/c++tr1', 'build/deprecated', 'build/endif_comment', 'build/explicit_make_pair', 'build/forward_decl', 'build/header_guard', 'build/include', 'build/include_subdir', 'build/include_alpha', 'build/include_order', 'build/include_what_you_use', 'build/namespaces_literals', 'build/namespaces', 'build/printf_format', 'build/storage_class', 'legal/copyright', 'readability/alt_tokens', 'readability/braces', 'readability/casting', 'readability/check', 'readability/constructors', 'readability/fn_size', 'readability/inheritance', 'readability/multiline_comment', 'readability/multiline_string', 'readability/namespace', 'readability/nolint', 'readability/nul', 'readability/strings', 'readability/todo', 'readability/utf8', 'runtime/arrays', 'runtime/casting', 'runtime/explicit', 'runtime/int', 'runtime/init', 'runtime/invalid_increment', 'runtime/member_string_references', 'runtime/memset', 'runtime/indentation_namespace', 'runtime/operator', 'runtime/printf', 'runtime/printf_format', 'runtime/references', 'runtime/string', 'runtime/threadsafe_fn', 'runtime/vlog', 'whitespace/blank_line', 'whitespace/braces', 'whitespace/comma', 'whitespace/comments', 'whitespace/empty_conditional_body', 'whitespace/empty_if_body', 'whitespace/empty_loop_body', 'whitespace/end_of_line', 'whitespace/ending_newline', 'whitespace/forcolon', 'whitespace/indent', 'whitespace/line_length', 'whitespace/newline', 'whitespace/operators', 'whitespace/parens', 'whitespace/semicolon', 'whitespace/tab', 'whitespace/todo', ] # These error categories are no longer enforced by cpplint, but for backwards- # compatibility they may still appear in NOLINT comments. _LEGACY_ERROR_CATEGORIES = [ 'readability/streams', 'readability/function', ] # The default state of the category filter. This is overridden by the --filter= # flag. By default all errors are on, so only add here categories that should be # off by default (i.e., categories that must be enabled by the --filter= flags). # All entries here should start with a '-' or '+', as in the --filter= flag. _DEFAULT_FILTERS = ['-build/include_alpha'] # The default list of categories suppressed for C (not C++) files. _DEFAULT_C_SUPPRESSED_CATEGORIES = [ 'readability/casting', ] # The default list of categories suppressed for Linux Kernel files. _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [ 'whitespace/tab', ] # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. 
# C++ headers _CPP_HEADERS = frozenset([ # Legacy 'algobase.h', 'algo.h', 'alloc.h', 'builtinbuf.h', 'bvector.h', 'complex.h', 'defalloc.h', 'deque.h', 'editbuf.h', 'fstream.h', 'function.h', 'hash_map', 'hash_map.h', 'hash_set', 'hash_set.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip.h', 'iostream.h', 'istream.h', 'iterator.h', 'list.h', 'map.h', 'multimap.h', 'multiset.h', 'ostream.h', 'pair.h', 'parsestream.h', 'pfstream.h', 'procbuf.h', 'pthread_alloc', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', 'set.h', 'slist', 'slist.h', 'stack.h', 'stdiostream.h', 'stl_alloc.h', 'stl_relops.h', 'streambuf.h', 'stream.h', 'strfile.h', 'strstream.h', 'tempbuf.h', 'tree.h', 'type_traits.h', 'vector.h', # 17.6.1.2 C++ library headers 'algorithm', 'array', 'atomic', 'bitset', 'chrono', 'codecvt', 'complex', 'condition_variable', 'deque', 'exception', 'forward_list', 'fstream', 'functional', 'future', 'initializer_list', 'iomanip', 'ios', 'iosfwd', 'iostream', 'istream', 'iterator', 'limits', 'list', 'locale', 'map', 'memory', 'mutex', 'new', 'numeric', 'ostream', 'queue', 'random', 'ratio', 'regex', 'scoped_allocator', 'set', 'sstream', 'stack', 'stdexcept', 'streambuf', 'string', 'strstream', 'system_error', 'thread', 'tuple', 'typeindex', 'typeinfo', 'type_traits', 'unordered_map', 'unordered_set', 'utility', 'valarray', 'vector', # 17.6.1.2 C++ headers for C library facilities 'cassert', 'ccomplex', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes', 'ciso646', 'climits', 'clocale', 'cmath', 'csetjmp', 'csignal', 'cstdalign', 'cstdarg', 'cstdbool', 'cstddef', 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctgmath', 'ctime', 'cuchar', 'cwchar', 'cwctype', ]) # Type names _TYPES = re.compile( r'^(?:' # [dcl.type.simple] r'(char(16_t|32_t)?)|wchar_t|' r'bool|short|int|long|signed|unsigned|float|double|' # [support.types] r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|' # [cstdint.syn] r'(u?int(_fast|_least)?(8|16|32|64)_t)|' r'(u?int(max|ptr)_t)|' r')$') # These headers are excluded from [build/include] and [build/include_order] # checks: # - Anything not following google file name conventions (containing an # uppercase character, such as Python.h or nsStringAPI.h, for example). # - Lua headers. _THIRD_PARTY_HEADERS_PATTERN = re.compile( r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') # Pattern for matching FileInfo.BaseName() against test file name _test_suffixes = ['_test', '_regtest', '_unittest'] _TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$' # Pattern that matches only complete whitespace, possibly across multiple lines. _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL) # Assertion macros. These are defined in base/logging.h and # testing/base/public/gunit.h. 
_CHECK_MACROS = [ 'DCHECK', 'CHECK', 'EXPECT_TRUE', 'ASSERT_TRUE', 'EXPECT_FALSE', 'ASSERT_FALSE', ] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE _CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS]) for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'), ('<=', 'LE'), ('<', 'LT')]: _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]: _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. # # Digraphs (such as '%:') are not included here since it's a mess to # match those on a word boundary. _ALT_TOKEN_REPLACEMENT = { 'and': '&&', 'bitor': '|', 'or': '||', 'xor': '^', 'compl': '~', 'bitand': '&', 'and_eq': '&=', 'or_eq': '|=', 'xor_eq': '^=', 'not': '!', 'not_eq': '!=' } # Compile regular expression that matches all the above keywords. The "[ =()]" # bit is meant to avoid matching these keywords outside of boolean expressions. # # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') # These constants define types of headers for use with # _IncludeState.CheckNextIncludeOrder(). _C_SYS_HEADER = 1 _CPP_SYS_HEADER = 2 _LIKELY_MY_HEADER = 3 _POSSIBLE_MY_HEADER = 4 _OTHER_HEADER = 5 # These constants define the current inline assembly state _NO_ASM = 0 # Outside of inline assembly block _INSIDE_ASM = 1 # Inside inline assembly block _END_ASM = 2 # Last line of inline assembly block _BLOCK_ASM = 3 # The whole block is an inline assembly block # Match start of assembly blocks _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' r'(?:\s+(volatile|__volatile__))?' r'\s*[{(]') # Match strings that indicate we're working on a C (not C++) file. _SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|' r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))') # Match string that indicates we're working on a Linux Kernel file. _SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)') _regexp_compile_cache = {} # {str, set(int)}: a map from error categories to sets of linenumbers # on which those errors are expected and should be suppressed. _error_suppressions = {} # The root directory used for deriving header guard CPP variable. # This is set by --root flag. _root = None # The top level repository directory. If set, _root is calculated relative to # this directory instead of the directory containing version control artifacts. # This is set by the --repository flag. _repository = None # Files to exclude from linting. This is set by the --exclude flag. _excludes = None # Whether to supress PrintInfo messages _quiet = False # The allowed line length of files. # This is set by --linelength flag. 
_line_length = 80 try: xrange(1, 0) except NameError: # -- pylint: disable=redefined-builtin xrange = range try: unicode except NameError: # -- pylint: disable=redefined-builtin basestring = unicode = str try: long(2) except NameError: # -- pylint: disable=redefined-builtin long = int if sys.version_info < (3,): # -- pylint: disable=no-member # BINARY_TYPE = str itervalues = dict.itervalues iteritems = dict.iteritems else: # BINARY_TYPE = bytes itervalues = dict.values iteritems = dict.items def unicode_escape_decode(x): if sys.version_info < (3,): return codecs.unicode_escape_decode(x)[0] else: return x # {str, bool}: a map from error categories to booleans which indicate if the # category should be suppressed for every line. _global_error_suppressions = {} def ParseNolintSuppressions(filename, raw_line, linenum, error): """Updates the global list of line error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler. """ matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) if matched: if matched.group(1): suppressed_line = linenum + 1 else: suppressed_line = linenum category = matched.group(2) if category in (None, '(*)'): # => "suppress all" _error_suppressions.setdefault(None, set()).add(suppressed_line) else: if category.startswith('(') and category.endswith(')'): category = category[1:-1] if category in _ERROR_CATEGORIES: _error_suppressions.setdefault(category, set()).add(suppressed_line) elif category not in _LEGACY_ERROR_CATEGORIES: error(filename, linenum, 'readability/nolint', 5, 'Unknown NOLINT error category: %s' % category) def ProcessGlobalSuppresions(lines): """Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. """ for line in lines: if _SEARCH_C_FILE.search(line): for category in _DEFAULT_C_SUPPRESSED_CATEGORIES: _global_error_suppressions[category] = True if _SEARCH_KERNEL_FILE.search(line): for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES: _global_error_suppressions[category] = True def ResetNolintSuppressions(): """Resets the set of NOLINT suppressions to empty.""" _error_suppressions.clear() _global_error_suppressions.clear() def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression. """ return (_global_error_suppressions.get(category, False) or linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set())) def Match(pattern, s): """Matches the string with the pattern, caching the compiled regexp.""" # The regexp compilation caching is inlined in both Match and Search for # performance reasons; factoring it out into a separate function turns out # to be noticeably expensive. 
if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].match(s) def ReplaceAll(pattern, rep, s): """Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements) """ if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].sub(rep, s) def Search(pattern, s): """Searches the string for the pattern, caching the compiled regexp.""" if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].search(s) def _IsSourceExtension(s): """File extension (excluding dot) matches a source file extension.""" return s in GetNonHeaderExtensions() class _IncludeState(object): """Tracks line numbers for includes, and the order in which includes appear. include_list contains list of lists of (header, line number) pairs. It's a lists of lists rather than just one flat list to make it easier to update across preprocessor boundaries. Call CheckNextIncludeOrder() once for each header in the file, passing in the type constants defined above. Calls in an illegal order will raise an _IncludeError with an appropriate error message. """ # self._section will move monotonically through this set. If it ever # needs to move backwards, CheckNextIncludeOrder will raise an error. _INITIAL_SECTION = 0 _MY_H_SECTION = 1 _C_SECTION = 2 _CPP_SECTION = 3 _OTHER_H_SECTION = 4 _TYPE_NAMES = { _C_SYS_HEADER: 'C system header', _CPP_SYS_HEADER: 'C++ system header', _LIKELY_MY_HEADER: 'header this file implements', _POSSIBLE_MY_HEADER: 'header this file may implement', _OTHER_HEADER: 'other header', } _SECTION_NAMES = { _INITIAL_SECTION: "... nothing. (This can't be an error.)", _MY_H_SECTION: 'a header this file implements', _C_SECTION: 'C system header', _CPP_SECTION: 'C++ system header', _OTHER_H_SECTION: 'other header', } def __init__(self): self.include_list = [[]] self._section = None self._last_header = None self.ResetSection('') def FindHeader(self, header): """Check if a header has already been included. Args: header: header to check. Returns: Line number of previous occurrence, or -1 if the header has not been seen before. """ for section_list in self.include_list: for f in section_list: if f[0] == header: return f[1] return -1 def ResetSection(self, directive): """Reset section checking for preprocessor directive. Args: directive: preprocessor directive (e.g. "if", "else"). """ # The name of the current section. self._section = self._INITIAL_SECTION # The path of last found header. self._last_header = '' # Update list of includes. Note that we never pop from the # include list. if directive in ('if', 'ifdef', 'ifndef'): self.include_list.append([]) elif directive in ('else', 'elif'): self.include_list[-1] = [] def SetLastHeader(self, header_path): self._last_header = header_path def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. 
""" return header_path.replace('-inl.h', '.h').replace('-', '_').lower() def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): """Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order. """ # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): return False return True def CheckNextIncludeOrder(self, header_type): """Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong. """ error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return '' class _CppLintState(object): """Maintains module-wide state..""" def __init__(self): self.verbose_level = 1 # global setting. self.error_count = 0 # global count of reported errors # filters to apply when emitting error messages self.filters = _DEFAULT_FILTERS[:] # backup of filter list. Used to restore the state after each file. self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? 
self.errors_by_category = {} # string to int dict storing error counts # output format: # "emacs" - format that emacs can parse (default) # "eclipse" - format that eclipse can parse # "vs7" - format that Microsoft Visual Studio 7 can parse # "junit" - format that Jenkins, Bamboo, etc can parse self.output_format = 'emacs' # For JUnit output, save errors and failures until the end so that they # can be written into the XML self._junit_errors = [] self._junit_failures = [] def SetOutputFormat(self, output_format): """Sets the output format for errors.""" self.output_format = output_format def SetVerboseLevel(self, level): """Sets the module's verbosity, and returns the previous setting.""" last_verbose_level = self.verbose_level self.verbose_level = level return last_verbose_level def SetCountingStyle(self, counting_style): """Sets the module's counting options.""" self.counting = counting_style def SetFilters(self, filters): """Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" """ # Default filters always have less priority than the flag ones. self.filters = _DEFAULT_FILTERS[:] self.AddFilters(filters) def AddFilters(self, filters): """ Adds more filters to the existing list of error-message filters. """ for filt in filters.split(','): clean_filt = filt.strip() if clean_filt: self.filters.append(clean_filt) for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' ' (%s does not)' % filt) def BackupFilters(self): """ Saves the current filter list to backup storage.""" self._filters_backup = self.filters[:] def RestoreFilters(self): """ Restores filters previously backed up.""" self.filters = self._filters_backup[:] def ResetErrorCounts(self): """Sets the module's error statistic back to zero.""" self.error_count = 0 self.errors_by_category = {} def IncrementErrorCount(self, category): """Bumps the module's error statistic.""" self.error_count += 1 if self.counting in ('toplevel', 'detailed'): if self.counting != 'detailed': category = category.split('/')[0] if category not in self.errors_by_category: self.errors_by_category[category] = 0 self.errors_by_category[category] += 1 def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in sorted(iteritems(self.errors_by_category)): self.PrintInfo('Category \'%s\' errors found: %d\n' % (category, count)) if self.error_count > 0: self.PrintInfo('Total errors found: %d\n' % self.error_count) def PrintInfo(self, message): if not _quiet and self.output_format != 'junit': sys.stderr.write(message) def PrintError(self, message): if self.output_format == 'junit': self._junit_errors.append(message) else: sys.stderr.write(message) def AddJUnitFailure(self, filename, linenum, message, category, confidence): self._junit_failures.append((filename, linenum, message, category, confidence)) def FormatJUnitXML(self): num_errors = len(self._junit_errors) num_failures = len(self._junit_failures) testsuite = xml.etree.ElementTree.Element('testsuite') testsuite.attrib['name'] = 'cpplint' testsuite.attrib['errors'] = str(num_errors) testsuite.attrib['failures'] = str(num_failures) if num_errors == 0 
and num_failures == 0: testsuite.attrib['tests'] = str(1) xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed') else: testsuite.attrib['tests'] = str(num_errors + num_failures) if num_errors > 0: testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase') testcase.attrib['name'] = 'errors' error = xml.etree.ElementTree.SubElement(testcase, 'error') error.text = '\n'.join(self._junit_errors) if num_failures > 0: # Group failures by file failed_file_order = [] failures_by_file = {} for failure in self._junit_failures: failed_file = failure[0] if failed_file not in failed_file_order: failed_file_order.append(failed_file) failures_by_file[failed_file] = [] failures_by_file[failed_file].append(failure) # Create a testcase for each file for failed_file in failed_file_order: failures = failures_by_file[failed_file] testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase') testcase.attrib['name'] = failed_file failure = xml.etree.ElementTree.SubElement(testcase, 'failure') template = '{0}: {1} [{2}] [{3}]' texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures] failure.text = '\n'.join(texts) xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n' return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8') _cpplint_state = _CppLintState() def _OutputFormat(): """Gets the module's output format.""" return _cpplint_state.output_format def _SetOutputFormat(output_format): """Sets the module's output format.""" _cpplint_state.SetOutputFormat(output_format) def _VerboseLevel(): """Returns the module's verbosity setting.""" return _cpplint_state.verbose_level def _SetVerboseLevel(level): """Sets the module's verbosity, and returns the previous setting.""" return _cpplint_state.SetVerboseLevel(level) def _SetCountingStyle(level): """Sets the module's counting options.""" _cpplint_state.SetCountingStyle(level) def _Filters(): """Returns the module's list of output filters, as a list.""" return _cpplint_state.filters def _SetFilters(filters): """Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.SetFilters(filters) def _AddFilters(filters): """Adds more filter overrides. Unlike _SetFilters, this function does not reset the current list of filters available. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.AddFilters(filters) def _BackupFilters(): """ Saves the current filter list to backup storage.""" _cpplint_state.BackupFilters() def _RestoreFilters(): """ Restores filters previously backed up.""" _cpplint_state.RestoreFilters() class _FunctionState(object): """Tracks current function name and the number of lines in its body.""" _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. def __init__(self): self.in_a_function = False self.lines_in_function = 0 self.current_function = '' def Begin(self, function_name): """Start analyzing function body. Args: function_name: The name of the function being tracked. 
""" self.in_a_function = True self.lines_in_function = 0 self.current_function = function_name def Count(self): """Count line in current function body.""" if self.in_a_function: self.lines_in_function += 1 def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if not self.in_a_function: return if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger)) def End(self): """Stop analyzing function body.""" self.in_a_function = False class _IncludeError(Exception): """Indicates a problem with the include order in a file.""" pass class FileInfo(object): """Provides utility functions for filenames. FileInfo provides easy access to the components of a file's path relative to the project root. """ def __init__(self, filename): self._filename = filename def FullName(self): """Make Windows paths like Unix.""" return os.path.abspath(self._filename).replace('\\', '/') def RepositoryName(self): r"""FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. """ fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) # If the user specified a repository path, it exists, and the file is # contained in it, use the specified repository path if _repository: repo = FileInfo(_repository).FullName() root_dir = project_dir while os.path.exists(root_dir): # allow case insensitive compare on Windows if os.path.normcase(root_dir) == os.path.normcase(repo): return os.path.relpath(fullname, root_dir).replace('\\', '/') one_up_dir = os.path.dirname(root_dir) if one_up_dir == root_dir: break root_dir = one_up_dir if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. 
root_dir = current_dir = os.path.dirname(fullname) while current_dir != os.path.dirname(current_dir): if (os.path.exists(os.path.join(current_dir, ".git")) or os.path.exists(os.path.join(current_dir, ".hg")) or os.path.exists(os.path.join(current_dir, ".svn"))): root_dir = current_dir current_dir = os.path.dirname(current_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname def Split(self): """Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension). """ googlename = self.RepositoryName() project, rest = os.path.split(googlename) return (project,) + os.path.splitext(rest) def BaseName(self): """File base name - text after the final slash, before the final period.""" return self.Split()[1] def Extension(self): """File extension - text following the final period, includes that period.""" return self.Split()[2] def NoExtension(self): """Returns the file path without the extension.""" return '/'.join(self.Split()[0:2]) def IsSource(self): """File has a source file extension.""" return _IsSourceExtension(self.Extension()[1:]) def _ShouldPrintError(category, confidence, linenum): """If confidence >= verbose, category passes filter and is not suppressed.""" # There are three ways we might decide not to print an error message: # a "NOLINT(category)" comment appears in the source, # the verbosity level isn't high enough, or the filters filter it out. if IsErrorSuppressedByNolint(category, linenum): return False if confidence < _cpplint_state.verbose_level: return False is_filtered = False for one_filter in _Filters(): if one_filter.startswith('-'): if category.startswith(one_filter[1:]): is_filtered = True elif one_filter.startswith('+'): if category.startswith(one_filter[1:]): is_filtered = False else: assert False # should have been checked for in SetFilter. if is_filtered: return False return True def Error(filename, linenum, category, confidence, message): """Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message.
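Example (illustrative; output shown for the default output format, assuming the message survives the verbosity and filter checks): Error('foo.cc', 42, 'whitespace/indent', 3, 'Indent by 2 spaces') # writes "foo.cc:42: Indent by 2 spaces [whitespace/indent] [3]" to stderr.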
""" if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': _cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'junit': _cpplint_state.AddJUnitFailure(filename, linenum, message, category, confidence) else: final_message = '%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence) sys.stderr.write(final_message) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') # Match a single C style comment on the same line. _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/' # Matches multi-line C style comments. # This RE is a little bit more complicated than one might expect, because we # have to take care of space removals tools so we can handle comments inside # statements better. # The current rule is: We only clear spaces from both sides when we're at the # end of the line. Otherwise, we try to remove spaces from the right side, # if this doesn't work we try on left side but only if there's a non-character # on the right. _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' + _RE_PATTERN_C_COMMENTS + r'\s+|' + r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' + _RE_PATTERN_C_COMMENTS + r')') def IsCppString(line): """Does line terminate so, that the next symbol is in string constant. This function does not consider single-line nor multi-line comments. Args: line: is a partial line of code starting from the 0..n. Returns: True, if next character appended to 'line' is inside a string constant. """ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 def CleanseRawStrings(raw_lines): """Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings. """ delimiter = None lines_without_raw_strings = [] for line in raw_lines: if delimiter: # Inside a raw string, look for the end end = line.find(delimiter) if end >= 0: # Found the end of the string, match leading space for this # line and resume copying the original lines, and also insert # a "" on the last line. leading_space = Match(r'^(\s*)\S', line) line = leading_space.group(1) + '""' + line[end + len(delimiter):] delimiter = None else: # Haven't found the end yet, append a blank line. line = '""' # Look for beginning of a raw string, and replace them with # empty strings. This is done in a loop to handle multiple raw # strings on the same line. while delimiter is None: # Look for beginning of a raw string. # See 2.14.15 [lex.string] for syntax. # # Once we have matched a raw string, we check the prefix of the # line to make sure that the line is not part of a single line # comment. It's done this way because we remove raw strings # before removing comments as opposed to removing comments # before removing raw strings. 
This is because there are some # cpplint checks that require the comments to be preserved, but # we don't want to check comments that are inside raw strings. matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) if (matched and not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//', matched.group(1))): delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) if end >= 0: # Raw string ended on same line line = (matched.group(1) + '""' + matched.group(3)[end + len(delimiter):]) delimiter = None else: # Start of a multi-line raw string line = matched.group(1) + '""' else: break lines_without_raw_strings.append(line) # TODO(unknown): if delimiter is not None here, we might want to # emit a warning for unterminated string. return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): """Find the beginning marker for a multiline comment.""" while lineix < len(lines): if lines[lineix].strip().startswith('/*'): # Only return this marker if the comment goes beyond this line if lines[lineix].strip().find('*/', 2) < 0: return lineix lineix += 1 return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having dummy /**/ comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '/**/' def RemoveMultiLineComments(filename, lines, error): """Removes multiline (c-style) comments from lines.""" lineix = 0 while lineix < len(lines): lineix_begin = FindNextMultiLineCommentStart(lines, lineix) if lineix_begin >= len(lines): return lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) if lineix_end >= len(lines): error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) lineix = lineix_end + 1 def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) class CleansedLines(object): """Holds 4 copies of all lines with different preprocessing applied to them. 1) elided member contains lines without strings and comments. 2) lines member contains lines without comments. 3) raw_lines member contains all the lines without processing. 4) lines_without_raw_strings member is the same as raw_lines, but with C++11 raw strings removed. All these members are of <type 'list'>, and of the same length.
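Example (illustrative): for the input line 'const char* s = "hi"; // greet', raw_lines keeps it verbatim, lines drops the // comment, and elided additionally collapses the string literal, leaving 'const char* s = "";'.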
""" def __init__(self, lines): self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) for linenum in range(len(self.lines_without_raw_strings)): self.lines.append(CleanseComments( self.lines_without_raw_strings[linenum])) elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) self.elided.append(CleanseComments(elided)) def NumLines(self): """Returns the number of lines represented.""" return self.num_lines @staticmethod def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. """ if _RE_PATTERN_INCLUDE.match(elided): return elided # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) # Replace quoted strings and digit separators. Both single quotes # and double quotes are processed in the same loop, otherwise # nested quotes wouldn't work. collapsed = '' while True: # Find the first quote character match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break head, quote, tail = match.groups() if quote == '"': # Collapse double quoted strings second_quote = tail.find('"') if second_quote >= 0: collapsed += head + '""' elided = tail[second_quote + 1:] else: # Unmatched double quote, don't bother processing the rest # of the line since this is probably a multiline string. collapsed += elided break else: # Found single quote, check nearby text to eliminate digit separators. # # There is no special handling for floating point here, because # the integer/fractional/exponent parts would all be parsed # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: second_quote = tail.find('\'') if second_quote >= 0: collapsed += head + "''" elided = tail[second_quote + 1:] else: # Unmatched single quote collapsed += elided break return collapsed def FindEndOfExpressionInLine(line, startpos, stack): """Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line) """ for i in xrange(startpos, len(line)): char = line[i] if char in '([{': # Found start of parenthesized expression, push to expression stack stack.append(char) elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator if stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) elif i > 0 and Search(r'\boperator\s*$', line[0:i]): # operator<, don't add to stack continue else: # Tentative start of template argument list stack.append('<') elif char in ')]}': # Found end of parenthesized expression. 
# # If we are currently expecting a matching '>', the pending '<' # must have been an operator. Remove them from expression stack. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) if ((stack[-1] == '(' and char == ')') or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}')): stack.pop() if not stack: return (i + 1, None) else: # Mismatched parentheses return (-1, None) elif char == '>': # Found potential end of template argument list. # Ignore "->" and operator functions if (i > 0 and (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): continue # Pop the stack if there is a matching '<'. Otherwise, ignore # this '>' since it must be an operator. if stack: if stack[-1] == '<': stack.pop() if not stack: return (i + 1, None) elif char == ';': # Found something that looks like the end of a statement. If we are # currently expecting a '>', the matching '<' must have been an # operator, since a template argument list should not contain # statements. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) # Did not find end of expression or unbalanced parentheses on this line return (-1, stack) def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) # Check first line (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) if end_pos > -1: return (line, linenum, end_pos) # Continue scanning forward while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) if end_pos > -1: return (line, linenum, end_pos) # Did not find end of expression before end of file, give up return (line, clean_lines.NumLines(), -1) def FindStartOfExpressionInLine(line, endpos, stack): """Find position at the matching start of current expression. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differ by 1. Args: line: a CleansedLines line. endpos: start searching at this position. stack: nesting stack at endpos. Returns: On finding matching start: (index at matching start, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at beginning of this line) """ i = endpos while i >= 0: char = line[i] if char in ')]}': # Found end of expression, push to expression stack stack.append(char) elif char == '>': # Found potential end of template argument list.
# # Ignore it if it's a "->" or ">=" or "operator>" if (i > 0 and (line[i - 1] == '-' or Match(r'\s>=\s', line[i - 1:]) or Search(r'\boperator\s*$', line[0:i]))): i -= 1 else: stack.append('>') elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator i -= 1 else: # If there is a matching '>', we can pop the expression stack. # Otherwise, ignore this '<' since it must be an operator. if stack and stack[-1] == '>': stack.pop() if not stack: return (i, None) elif char in '([{': # Found start of expression. # # If there are any unmatched '>' on the stack, they must be # operators. Remove those. while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) if ((char == '(' and stack[-1] == ')') or (char == '[' and stack[-1] == ']') or (char == '{' and stack[-1] == '}')): stack.pop() if not stack: return (i, None) else: # Mismatched parentheses return (-1, None) elif char == ';': # Found something that looks like the end of a statement. If we are # currently expecting a '<', the matching '>' must have been an # operator, since a template argument list should not contain # statements. while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) i -= 1 return (-1, stack) def ReverseCloseExpression(clean_lines, linenum, pos): """If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if line[pos] not in ')}]>': return (line, 0, -1) # Check last line (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) if start_pos > -1: return (line, linenum, start_pos) # Continue scanning backward while stack and linenum > 0: linenum -= 1 line = clean_lines.elided[linenum] (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) if start_pos > -1: return (line, linenum, start_pos) # Did not find start of expression before beginning of file, give up return (line, 0, -1) def CheckForCopyright(filename, lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a # dummy line at the front. for line in range(1, min(len(lines), 11)): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found error(filename, 0, 'legal/copyright', 5, 'No copyright message found. ' 'You should have a line: "Copyright [year] <Copyright Owner>"') def GetIndentLevel(line): """Return the number of leading spaces in line. Args: line: A string to check. Returns: An integer count of leading spaces, possibly zero. """ indent = Match(r'^( *)\S', line) if indent: return len(indent.group(1)) else: return 0 def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores the original filename in case cpplint is invoked from Emacs's # flymake.
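# Illustrative example (hypothetical path): for "chrome/browser/ui/tab.h" # relative to the repository root, the code below yields # "CHROME_BROWSER_UI_TAB_H_": non-alphanumeric characters become '_', the # result is upper-cased, and a trailing '_' is appended.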
filename = re.sub(r'_flymake\.h$', '.h', filename) filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) # Replace 'c++' with 'cpp'. filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: suffix = os.sep # On Windows using directory separator will leave us with # "bogus escape error" unless we properly escape regex. if suffix == '\\': suffix += '\\' file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root) return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' def CheckForHeaderGuard(filename, clean_lines, error): """Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found. """ # Don't check for header guards if there are error suppression # comments somewhere in this file. # # Because this is silencing a warning for a nonexistent line, we # only support the very specific NOLINT(build/header_guard) syntax, # and not the general NOLINT or NOLINT(*) syntax. raw_lines = clean_lines.lines_without_raw_strings for i in raw_lines: if Search(r'//\s*NOLINT\(build/header_guard\)', i): return # Allow pragma once instead of header guards for i in raw_lines: if Search(r'^\s*#pragma\s+once', i): return cppvar = GetHeaderGuardCPPVariable(filename) ifndef = '' ifndef_linenum = 0 define = '' endif = '' endif_linenum = 0 for linenum, line in enumerate(raw_lines): linesplit = line.split() if len(linesplit) >= 2: # find the first occurrence of #ifndef and #define, save arg if not ifndef and linesplit[0] == '#ifndef': # set ifndef to the header guard presented on the #ifndef line. ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == '#define': define = linesplit[1] # find the last occurrence of #endif, save entire line if line.startswith('#endif'): endif = line endif_linenum = linenum if not ifndef or not define or ifndef != define: error(filename, 0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ # for backward compatibility. if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, '#ifndef header guard has wrong style, please use: %s' % cppvar) # Check for "//" comments on endif line. ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error) match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) if match: if match.group(1) == '_': # Issue low severity warning for deprecated double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, '#endif line should be "#endif // %s"' % cppvar) return # Didn't find the corresponding "//" comment. If this file does not # contain any "//" comments at all, it could be that the compiler # only wants "/**/" comments, look for those instead. 
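# Illustrative example: in a file containing no "//" comments at all, an # endif written as "#endif /* FOO_BAR_H_ */" is accepted by the scan below, # while a bare "#endif" still triggers the build/header_guard error at the # end of this function.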
no_single_line_comments = True for i in xrange(1, len(raw_lines) - 1): line = raw_lines[i] if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): no_single_line_comments = False break if no_single_line_comments: match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) if match: if match.group(1) == '_': # Low severity warning for double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, '#endif line should be "#endif /* %s */"' % cppvar) return # Didn't find anything error(filename, endif_linenum, 'build/header_guard', 5, '#endif line should be "#endif // %s"' % cppvar) def CheckHeaderFileIncluded(filename, include_state, error): """Logs an error if a source file does not include its header.""" # Do not check test files fileinfo = FileInfo(filename) if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()): return for ext in GetHeaderExtensions(): basefilename = filename[0:len(filename) - len(fileinfo.Extension())] headerfile = basefilename + '.' + ext if not os.path.exists(headerfile): continue headername = FileInfo(headerfile).RepositoryName() first_include = None for section_list in include_state.include_list: for f in section_list: if headername in f[0] or f[0] in headername: return if not first_include: first_include = f[1] error(filename, first_include, 'build/include', 5, '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)) def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if unicode_escape_decode('\ufffd') in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') def CheckForNewlineAtEOF(filename, lines, error): """Logs an error if there is no newline char at the end of the file. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ # The array lines() was created by adding two newlines to the # original file (go figure), then splitting on \n. # To verify that the file ends in \n, we just have to make sure the # last-but-two element of lines() exists and is empty. if len(lines) < 3 or lines[-2]: error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.') def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. 
We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. ' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.') # (non-threadsafe name, thread-safe alternative, validation pattern) # # The validation pattern is used to eliminate false positives such as: # _rand(); // false positive due to substring match. # ->rand(); // some member function rand(). # ACMRandom rand(seed); // some variable named rand. # ISAACRandom rand(); // another variable named rand. # # Basically we require the return value of these functions to be used # in some expression context on the same line by matching on some # operator before the function name. This eliminates constructors and # member function calls. _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' _THREADING_LIST = ( ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), ) def CheckPosixThreading(filename, clean_lines, linenum, error): """Checks for calls to thread-unsafe functions. Much code was originally written without consideration for multi-threading. Engineers also tend to rely on their old experience, having learned POSIX before the threading extensions were added. These checks guide engineers toward the thread-safe functions (when using POSIX directly). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: # Additional pattern matching check to confirm that this is the # function we are looking for if Search(pattern, line): error(filename, linenum, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_func + '...) instead of ' + single_thread_func + '...)
for improved thread safety.') def CheckVlogArguments(filename, clean_lines, linenum, error): """Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.') # Matches invalid increment: *count++, which moves pointer instead of # incrementing a value. _RE_PATTERN_INVALID_INCREMENT = re.compile( r'^\s*\*\w+(\+\+|--);') def CheckInvalidIncrement(filename, clean_lines, linenum, error): """Checks for invalid increment *count++. For example, the following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving the pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if _RE_PATTERN_INVALID_INCREMENT.match(line): error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).') def IsMacroDefinition(clean_lines, linenum): if Search(r'^#define', clean_lines[linenum]): return True if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): return True return False def IsForwardClassDeclaration(clean_lines, linenum): return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) class _BlockInfo(object): """Stores information about a generic block of code.""" def __init__(self, linenum, seen_open_brace): self.starting_linenum = linenum self.seen_open_brace = seen_open_brace self.open_parentheses = 0 self.inline_asm = _NO_ASM self.check_namespace_indentation = False def CheckBegin(self, filename, clean_lines, linenum, error): """Run checks that apply to text up to the opening brace. This is mostly for checking the text after the class identifier and the "{", usually where the base class is specified. For other blocks, there isn't much to check, so we always pass. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def CheckEnd(self, filename, clean_lines, linenum, error): """Run checks that apply to text after the closing brace. This is mostly used for checking end of namespace comments. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def IsBlockInfo(self): """Returns true if this block is a _BlockInfo. This is convenient for verifying that an object is an instance of a _BlockInfo, but not an instance of any of the derived classes. Returns: True for this class, False for derived classes.
""" return self.__class__ == _BlockInfo class _ExternCInfo(_BlockInfo): """Stores information about an 'extern "C"' block.""" def __init__(self, linenum): _BlockInfo.__init__(self, linenum, True) class _ClassInfo(_BlockInfo): """Stores information about a class.""" def __init__(self, name, class_or_struct, clean_lines, linenum): _BlockInfo.__init__(self, linenum, False) self.name = name self.is_derived = False self.check_namespace_indentation = True if class_or_struct == 'struct': self.access = 'public' self.is_struct = True else: self.access = 'private' self.is_struct = False # Remember initial indentation level for this class. Using raw_lines here # instead of elided to account for leading comments. self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) # Try to find the end of the class. This will be confused by things like: # class A { # } *x = { ... # # But it's still good enough for CheckSectionSpacing. self.last_line = 0 depth = 0 for i in range(linenum, clean_lines.NumLines()): line = clean_lines.elided[i] depth += line.count('{') - line.count('}') if not depth: self.last_line = i break def CheckBegin(self, filename, clean_lines, linenum, error): # Look for a bare ':' if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): self.is_derived = True def CheckEnd(self, filename, clean_lines, linenum, error): # If there is a DISALLOW macro, it should appear near the end of # the class. seen_last_thing_in_class = False for i in xrange(linenum - 1, self.starting_linenum, -1): match = Search( r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' + self.name + r'\)', clean_lines.elided[i]) if match: if seen_last_thing_in_class: error(filename, i, 'readability/constructors', 3, match.group(1) + ' should be the last thing in the class') break if not Match(r'^\s*$', clean_lines.elided[i]): seen_last_thing_in_class = True # Check that closing brace is aligned with beginning of the class. # Only do this if the closing brace is indented by only whitespaces. # This means we will not check single-line class definitions. indent = Match(r'^( *)\}', clean_lines.elided[linenum]) if indent and len(indent.group(1)) != self.class_indent: if self.is_struct: parent = 'struct ' + self.name else: parent = 'class ' + self.name error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent) class _NamespaceInfo(_BlockInfo): """Stores information about a namespace.""" def __init__(self, name, linenum): _BlockInfo.__init__(self, linenum, False) self.name = name or '' self.check_namespace_indentation = True def CheckEnd(self, filename, clean_lines, linenum, error): """Check end of namespace comments.""" line = clean_lines.raw_lines[linenum] # Check how many lines is enclosed in this namespace. Don't issue # warning for missing namespace comments if there aren't enough # lines. However, do apply checks if there is already an end of # namespace comment and it's incorrect. # # TODO(unknown): We always want to check end of namespace comments # if a namespace is large, but sometimes we also want to apply the # check if a short namespace contained nontrivial things (something # other than forward declarations). There is currently no logic on # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. if (linenum - self.starting_linenum < 10 and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. 
# # Note that we accept C style "/* */" comments for terminating # namespaces, so that code that terminates namespaces inside # preprocessor macros can be cpplint clean. # # We also accept stuff like "// end of namespace <name>." with the # period at the end. # # Besides these, we don't accept anything else, otherwise we might # get false negatives when the existing comment is a substring of the # expected namespace. if self.name: # Named namespace if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace %s"' % self.name) else: # Anonymous namespace if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): # If "// namespace anonymous" or "// anonymous namespace (more text)", # mention "// anonymous namespace" as an acceptable form if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line): error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"' ' or "// anonymous namespace"') else: error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"') class _PreprocessorInfo(object): """Stores checkpoints of nesting stacks when #if/#else is seen.""" def __init__(self, stack_before_if): # The entire nesting stack before #if self.stack_before_if = stack_before_if # The entire nesting stack up to #else self.stack_before_else = [] # Whether we have already seen #else or #elif self.seen_else = False class NestingState(object): """Holds states related to parsing braces.""" def __init__(self): # Stack for tracking all braces. An object is pushed whenever we # see a "{", and popped when we see a "}". Only 3 types of # objects are possible: # - _ClassInfo: a class or struct. # - _NamespaceInfo: a namespace. # - _BlockInfo: some other type of block. self.stack = [] # Top of the previous stack before each Update(). # # Because the nesting_stack is updated at the end of each line, we # had to do some convoluted checks to find out what the current # scope is at the beginning of the line. This check is simplified by # saving the previous top of the nesting stack. # # We could save the full stack, but we only need the top. Copying # the full nesting stack would slow down cpplint by ~10%. self.previous_stack_top = [] # Stack of _PreprocessorInfo objects. self.pp_stack = [] def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace def InNamespaceBody(self): """Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _NamespaceInfo) def InExternC(self): """Check if we are currently one level inside an 'extern "C"' block. Returns: True if top of the stack is an extern block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ExternCInfo) def InClassDeclaration(self): """Check if we are currently one level inside a class or struct declaration. Returns: True if top of the stack is a class/struct, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ClassInfo) def InAsmBlock(self): """Check if we are currently one level inside an inline ASM block.
Returns: True if the top of the stack is a block containing inline ASM. """ return self.stack and self.stack[-1].inline_asm != _NO_ASM def InTemplateArgumentList(self, clean_lines, linenum, pos): """Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. Returns: True if (linenum, pos) is inside template arguments. """ while linenum < clean_lines.NumLines(): # Find the earliest character that might indicate a template argument line = clean_lines.elided[linenum] match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) if not match: linenum += 1 pos = 0 continue token = match.group(1) pos += len(match.group(0)) # These things do not look like template argument list: # class Suspect { # class Suspect x; } if token in ('{', '}', ';'): return False # These things look like template argument list: # template <class Suspect> # template <class Suspect = default_value> # template <class Suspect[]> # template <class Suspect...> if token in ('>', '=', '[', ']', '.'): return True # Check if token is an unmatched '<'. # If not, move on to the next character. if token != '<': pos += 1 if pos >= len(line): linenum += 1 pos = 0 continue # We can't be sure if we just find a single '<', and need to # find the matching '>'. (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) if end_pos < 0: # Not sure if template argument list or syntax error in file return False linenum = end_line pos = end_pos return False def UpdatePreprocessor(self, line): """Update preprocessor stack. We need to handle preprocessors due to classes like this: #ifdef SWIG struct ResultDetailsPageElementExtensionPoint { #else struct ResultDetailsPageElementExtensionPoint : public Extension { #endif We make the following assumptions (good enough for most files): - Preprocessor condition evaluates to true from #if up to first #else/#elif/#endif. - Preprocessor condition evaluates to false from #else/#elif up to #endif. We still perform lint checks on these lines, but these do not affect nesting stack. Args: line: current line to check. """ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): # Beginning of #if block, save the nesting stack here. The saved # stack will allow us to restore the parsing state in the #else case. self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) elif Match(r'^\s*#\s*(else|elif)\b', line): # Beginning of #else block if self.pp_stack: if not self.pp_stack[-1].seen_else: # This is the first #else or #elif block. Remember the # whole nesting stack up to this point. This is what we # keep after the #endif. self.pp_stack[-1].seen_else = True self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) # Restore the stack to how it was before the #if self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) else: # TODO(unknown): unexpected #else, issue warning? pass elif Match(r'^\s*#\s*endif\b', line): # End of #if or #else blocks. if self.pp_stack: # If we saw an #else, we will need to restore the nesting # stack to its former state before the #else, otherwise we # will just continue from where we left off. if self.pp_stack[-1].seen_else: # Here we can just use a shallow copy since we are the last # reference to it. self.stack = self.pp_stack[-1].stack_before_else # Drop the corresponding #if self.pp_stack.pop() else: # TODO(unknown): unexpected #endif, issue warning? 
pass # TODO(unknown): Update() is too long, but we will refactor later. def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remember the top of the previous nesting stack. # # The stack is always pushed/popped and not modified in place, so # we can just do a shallow copy instead of copy.deepcopy. Using # deepcopy would slow down cpplint by ~28%. if self.stack: self.previous_stack_top = self.stack[-1] else: self.previous_stack_top = None # Update pp_stack self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same-line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if the keyword isn't followed by whitespace; # this is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; class_decl_match = Match( r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?' r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' r'(.*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): # We do not want to accept classes that are actually template arguments: # template <class Ignore1, # class Ignore2 = Default<Args>, # template <Args> class Ignore3> # void Function() {}; # # To avoid template argument cases, we scan forward and look for # an unmatched '>'. If we see one, assume we are inside a # template argument list. end_declaration = len(class_decl_match.group(1)) if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): self.stack.append(_ClassInfo( class_decl_match.group(3), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(4) # If we have not yet seen the opening brace for the innermost block, # run checks here.
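# Illustrative example: for the line "class Foo : public Bar {", a # _ClassInfo for Foo was pushed above with seen_open_brace still False, so # CheckBegin below inspects the text before the brace (here ": public Bar") # and marks Foo as a derived class.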
if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespace. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If the namespace or class hasn't seen an opening brace yet, mark # the namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True elif Match(r'^extern\s*"[^"]*"\s*\{', line): self.stack.append(_ExternCInfo(linenum)) else: self.stack.append(_BlockInfo(linenum, True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop the stack for these. if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2) def InnermostClass(self): """Get class info on the top of the stack. Returns: A _ClassInfo object if we are inside a class, or None otherwise. """ for i in range(len(self.stack), 0, -1): classinfo = self.stack[i - 1] if isinstance(classinfo, _ClassInfo): return classinfo return None def CheckCompletedBlocks(self, filename, error): """Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found. """ # Note: This test can result in false positives if #ifdef constructs # get in the way of brace matching. See the testBuildClass test in # cpplint_unittest.py for an example of this. for obj in self.stack: if isinstance(obj, _ClassInfo): error(filename, obj.starting_linenum, 'build/class', 5, 'Failed to find complete declaration of class %s' % obj.name) elif isinstance(obj, _NamespaceInfo): error(filename, obj.starting_linenum, 'build/namespaces', 5, 'Failed to find complete declaration of namespace %s' % obj.name) def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. Complain about several constructs which gcc-2 accepts, but which are not standard C++.
Warning about these in lint is one way to ease the transition to new compilers. - put storage class first (e.g. "static const" instead of "const static"). - "%lld" instead of "%qd" in printf-type functions. - "%1$d" is non-standard in printf-type functions. - "\%" is an undefined character escape sequence. - text after #endif is not allowed. - invalid inner-style forward declaration. - >? and <? operators, and their >?= and <?= cousins. Additionally, check for constructor/destructor style violations and reference members, as it is very convenient to do so while checking for gcc-2 compliance. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message """ # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[linenum] if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') if Search(r'printf\s*\(.*".*%\d+\$', line): error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') if Search(r'("|\').*\\(%|\[|\(|{)', line): error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. line = clean_lines.elided[linenum] if Search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line): error(filename, linenum, 'build/storage_class', 5, 'Storage-class specifier (static, extern, typedef, etc) should be ' 'at the beginning of the declaration.') if Match(r'\s*#\s*endif\s*[^/\s]+', line): error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.') if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): # TODO(unknown): Could it be expanded safely to arbitrary references, # without triggering too many false positives? The first # attempt triggered 5 warnings for mostly benign code in the regtest, hence # the restriction. # Here's the original regexp, for the reference: # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.') # Everything else in this function operates on class declarations. # Return early if the top of the nesting stack is not a class, or if # the class head is not completed yet.
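# Illustrative examples (hypothetical) for the explicit-constructor checks # below: # class Foo { Foo(int x); }; -> flagged: should be marked explicit # class Foo { explicit Foo(int x); }; -> OK # class Foo { Foo(const Foo& other); }; -> OK (copy constructor)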
classinfo = nesting_state.InnermostClass() if not classinfo or not classinfo.seen_open_brace: return # The class may have been declared with namespace or classname qualifiers. # The constructor and destructor will not have those qualifiers. base_classname = classinfo.name.split('::')[-1] # Look for single-argument constructors that aren't marked explicit. # Technically a valid construct, but against style. explicit_constructor_match = Match( r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line) if explicit_constructor_match: is_marked_explicit = explicit_constructor_match.group(1) if not explicit_constructor_match.group(2): constructor_args = [] else: constructor_args = explicit_constructor_match.group(2).split(',') # collapse arguments so that commas in template parameter lists and function # argument parameter lists don't split arguments in two i = 0 while i < len(constructor_args): constructor_arg = constructor_args[i] while (constructor_arg.count('<') > constructor_arg.count('>') or constructor_arg.count('(') > constructor_arg.count(')')): constructor_arg += ',' + constructor_args[i + 1] del constructor_args[i + 1] constructor_args[i] = constructor_arg i += 1 variadic_args = [arg for arg in constructor_args if '&&...' in arg] defaulted_args = [arg for arg in constructor_args if '=' in arg] noarg_constructor = (not constructor_args or # empty arg list # 'void' arg specifier (len(constructor_args) == 1 and constructor_args[0].strip() == 'void')) onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg not noarg_constructor) or # all but at most one arg defaulted (len(constructor_args) >= 1 and not noarg_constructor and len(defaulted_args) >= len(constructor_args) - 1) or # variadic arguments with zero or one argument (len(constructor_args) <= 2 and len(variadic_args) >= 1)) initializer_list_constructor = bool( onearg_constructor and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) copy_constructor = bool( onearg_constructor and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), constructor_args[0].strip())) if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor): if defaulted_args or variadic_args: error(filename, linenum, 'runtime/explicit', 5, 'Constructors callable with one argument ' 'should be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 5, 'Single-parameter constructors should be marked explicit.') elif is_marked_explicit and not onearg_constructor: if noarg_constructor: error(filename, linenum, 'runtime/explicit', 5, 'Zero-parameter constructors should not be marked explicit.') def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): """Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Since function calls often occur inside if/for/while/switch # expressions - which have their own, more liberal conventions - we # first see if we should be looking inside such an expression for a # function call, to which we can apply more strict standards. 
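# Illustrative examples (hypothetical) of what the checks below flag: # f( 3, 4 ) -> "Extra space after ( in function call" # f (3, 4) -> "Extra space before ( in function call" # Control-flow uses such as "if (x) {" are exempt because keywords like # if/for/while are filtered out first.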
fncall = line # if there's no control flow construct, look at whole line for pattern in (r'\bif\s*\((.*)\)\s*{', r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break # Except in if/for/while/switch, there should never be space # immediately inside parens (eg "f( 3, 4 )"). We make an exception # for nested parens ( (a+b) + c ). Likewise, there should never be # a space before a ( when it's a function argument. I assume it's a # function argument when the char before the whitespace is legal in # a function name (alnum + _) and we're not starting a macro. Also ignore # pointers and references to arrays and functions coz they're too tricky: # we use a very simple way to recognize these: # " (something)(maybe-something)" or # " (something)(maybe-something," or # " (something)[something]" # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if (Search(r'\w\s+\(', fncall) and not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and not Search(r'\bcase\s+\(', fncall)): # TODO(unknown): Space after an operator function seem to be a common # error, silence those for now by restricting them to highest verbosity. if Search(r'\boperator_*\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain if Search(r'[^)]\s+\)\s*[^{\s]', fncall): # If the closing parenthesis is preceded by only whitespaces, # try to give a more descriptive error message. if Search(r'^\s+\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )') def IsBlankLine(line): """Returns true if the given line is blank. We consider a line to be blank if the line is empty or consists of only white spaces. Args: line: A line of a string. Returns: True, if the given line is blank. 
""" return not line or line.isspace() def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error): is_namespace_indent_item = ( len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and nesting_state.previous_stack_top == nesting_state.stack[-2]) if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, clean_lines.elided, line): CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error) def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): """Reports for long function bodies. For an overview why this is done, see: https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ lines = clean_lines.lines line = lines[linenum] joined_line = '' starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... match_result = Match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( not Match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False for start_linenum in range(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() if Search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore elif Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count() # Count non-blank/non-comment lines. _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') def CheckComment(line, filename, linenum, next_line_start, error): """Checks for common mistakes in comments. Args: line: The line in question. filename: The name of the current file. linenum: The number of the line to check. next_line_start: The first non-whitespace column of the next line. error: The function to call with any errors found. """ commentpos = line.find('//') if commentpos != -1: # Check if the // may be in quotes. 
    # If so, ignore it
    if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')

      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')

        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')

        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness
        #   -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')

      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the // unless
      # it's a /// or //! Doxygen comment.
      if (Match(r'//[^ ]*\w', comment) and
          not Match(r'(///|//\!)(\s+|$)', comment)):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')


def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks for improper use of DISALLOW* macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
  if not matched:
    return
  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
    if nesting_state.stack[-1].access != 'private':
      error(filename, linenum, 'readability/constructors', 3,
            '%s must be in the private: section' % matched.group(1))
  else:
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration. We could issue a warning here, but it
    # probably resulted in a compiler error already.
    pass


def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank line,
  don't end a function with a blank line, don't add a blank line after
  public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
""" # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw = clean_lines.lines_without_raw_strings line = raw[linenum] # Before nixing comments, check if the line is blank for no good # reason. This includes the first line after a block is opened, and # blank lines at the end of a function (ie, right before a line like '}' # # Skip all the blank line checks if we are immediately inside a # namespace body. In other words, don't issue blank line warnings # for this block: # namespace { # # } # # A warning about missing end of namespace comments will be issued instead. # # Also skip blank line checks for 'extern "C"' blocks, which are formatted # like namespaces. if (IsBlankLine(line) and not nesting_state.InNamespaceBody() and not nesting_state.InExternC()): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') # TODO(unknown): Don't complain if line before blank line, and line after, # both start with alnums and are indented the same amount. # This ignores whitespace at the start of a namespace block # because those are not usually indented. if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: # OK, we have a blank line at the start of a code block. Before we # complain, we check if it is an exception to the rule: The previous # non-empty line has the parameters of a function header that are indented # 4 spaces (because they did not fit in a 80 column line when placed on # the same line as the function name). We also check for the case where # the previous line is indented 6 spaces, which may happen when the # initializers of a constructor do not fit into a 80 column line. exception = False if Match(r' {6}\w', prev_line): # Initializer list? # We are looking for the opening column of initializer list, which # should be indented 4 spaces to cause 6 space indentation afterwards. search_position = linenum-2 while (search_position >= 0 and Match(r' {6}\w', elided[search_position])): search_position -= 1 exception = (search_position >= 0 and elided[search_position][:5] == ' :') else: # Search for the function arguments or an initializer list. We use a # simple heuristic here: If the line is indented 4 spaces; and we have a # closing paren, without the opening paren, followed by an opening brace # or colon (for initializer lists) we assume that it is the last line of # a function header. If we have a colon indented 4 spaces, it is an # initializer list. 
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', prev_line) or Match(r' {4}:', prev_line)) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block ' 'should be deleted.') # Ignore blank lines at the end of a block in a long if-else # chain, like this: # if (condition1) { # // Something followed by a blank line # # } else if (condition2) { # // Something else # } if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if (next_line and Match(r'\s*}', next_line) and next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block ' 'should be deleted.') matched = Match(r'\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) # Next, check comments next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] next_line_start = len(next_line) - len(next_line.lstrip()) CheckComment(line, filename, linenum, next_line_start, error) # get rid of comments and strings line = clean_lines.elided[linenum] # You shouldn't have spaces before your brackets, except maybe after # 'delete []' or 'return []() {};' if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') # In range-based for, we wanted spaces before and after the colon, but # not around "::" tokens that might appear. if (Search(r'for *\(.*[^:]:[^: ]', line) or Search(r'for *\(.*[^: ]:[^:]', line)): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') def CheckOperatorSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line)) and not Search(r'\b(if|while|for) ', line) # Operators taken from [lex.operators] in C++11 standard. and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) and not Search(r'operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. 
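  # E.g. (illustrative) "a<=b" and "x||y" are flagged below, while
  # "a <= b" and "x || y" are not.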
  #
  # If the operator is followed by a comma, assume it's being used in a
  # macro context and don't do any checks. This avoids false
  # positives.
  #
  # Note that && is not included here. This is because there are too
  # many false positives due to RValue references.
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag it if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces. Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])',
                 line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything. This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))


def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # No spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should be either zero or one space inside the parens.
# We don't want: "if ( foo)" or "if ( foo )". # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. match = Search(r'\b(if|for|while|switch)\s*' r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): error(filename, linenum, 'whitespace/parens', 5, 'Mismatching spaces inside () in %s' % match.group(1)) if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, 'Should have zero or one spaces inside ( and ) in %s' % match.group(1)) def CheckCommaSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ raw = clean_lines.lines_without_raw_strings line = clean_lines.elided[linenum] # You should always have a space after a comma (either as fn arg or operator) # # This does not apply when the non-space character following the # comma is another comma, since the only time when that happens is # for empty macro arguments. # # We run this check in two passes: first pass on elided lines to # verify that lines contain missing whitespaces, second pass on raw # lines to confirm that those missing whitespaces are not due to # elided comments. if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and Search(r',[^,\s]', raw[linenum])): error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,') # You should always have a space after a semicolon # except for few corner cases # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more # space after ; if Search(r';[^\s};\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') def _IsType(clean_lines, nesting_state, expr): """Check if expression looks like a type name, returns true if so. Args: clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. expr: The expression to check. Returns: True, if token looks like a type. """ # Keep only the last token in the expression last_word = Match(r'^.*(\b\S+)$', expr) if last_word: token = last_word.group(1) else: token = expr # Match native types and stdint types if _TYPES.match(token): return True # Try a bit harder to match templated types. Walk up the nesting # stack until we find something that resembles a typename # declaration for what we are looking for. typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) + r'\b') block_index = len(nesting_state.stack) - 1 while block_index >= 0: if isinstance(nesting_state.stack[block_index], _NamespaceInfo): return False # Found where the opening brace is. We want to scan from this # line up to the beginning of the function, minus a few lines. # template <typename Type1, // stop scanning here # ...> # class C # : public ... 
    #       {  // start scanning here
    last_line = nesting_state.stack[block_index].starting_linenum

    next_block_start = 0
    if block_index > 0:
      next_block_start = nesting_state.stack[block_index - 1].starting_linenum
    first_line = last_line
    while first_line >= next_block_start:
      if clean_lines.elided[first_line].find('template') >= 0:
        break
      first_line -= 1
    if first_line < next_block_start:
      # Didn't find any "template" keyword before reaching the next block,
      # there are probably no template things to check for this block
      block_index -= 1
      continue

    # Look for typename in the specified range
    for i in xrange(first_line, last_line + 1, 1):
      if Search(typename_pattern, clean_lines.elided[i]):
        return True
    block_index -= 1

  return False


def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for horizontal spacing near braces.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces when they are delimiting blocks, classes, namespaces etc.
  # And since you should never have braces at the beginning of a line,
  # this is an easy test. Except that braces used for initialization don't
  # follow the same rule; we often don't want spaces before those.
  match = Match(r'^(.*[^ ({>]){', line)

  if match:
    # Try a bit harder to check for brace initialization. This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<". We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    leading_text = match.group(1)
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    # We also suppress warnings for `uint64_t{expression}` etc., as the style
    # guide recommends brace initialization for integral types to avoid
    # overflow/truncation.
    if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
        and not _IsType(clean_lines, nesting_state, leading_text)):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
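  # E.g. (illustrative) "}else {" is flagged; "} else {" is not.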
if Search(r'}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. if Search(r':\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search(r'^\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.') elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.') def IsDecltype(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise. """ (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if start_col < 0: return False if Search(r'\bdecltype\s*$', text[0:start_col]): return True return False def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): """Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found. """ # Skip checks if the class is small, where small means 25 lines or less. # 25 lines seems like a good cutoff since that's the usual height of # terminals, and any class that can't fit in one screen can't really # be considered "small". # # Also skip checks if we are on the first line. This accounts for # classes that look like # class Foo { public: ... }; # # If we didn't find the end of the class, last_line would be zero, # and the check will be skipped by the first condition. if (class_info.last_line - class_info.starting_linenum <= 24 or linenum <= class_info.starting_linenum): return matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: # Issue warning if the line before public/protected/private was # not a blank line, but don't do this if the previous line contains # "class" or "struct". This can happen two ways: # - We are at the beginning of the class. # - We are forward-declaring an inner class that is semantically # private, but needed to be public for implementation reasons. # Also ignores cases where the previous line ends with a backslash as can be # common when defining classes in C macros. prev_line = clean_lines.lines[linenum - 1] if (not IsBlankLine(prev_line) and not Search(r'\b(class|struct)\b', prev_line) and not Search(r'\\$', prev_line)): # Try a bit harder to find the beginning of the class. 
      # This is to account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))


def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements. The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is
    the first non-blank line. The second is the line number of that line,
    or -1 if this is the first non-blank line.
  """
  prevlinenum = linenum - 1
  while prevlinenum >= 0:
    prevline = clean_lines.elided[prevlinenum]
    if not IsBlankLine(prevline):     # if not a blank line...
      return (prevline, prevlinenum)
    prevlinenum -= 1
  return ('', -1)


def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]  # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is
    # using braces in a block to explicitly create a new scope, which is
    # commonly used to control the lifetime of stack-allocated variables.
    # Braces are also used for brace initializers inside function calls.
    # We don't detect this perfectly: we just don't complain if the last
    # non-whitespace character on the previous non-blank line is ',', ';',
    # ':', '(', '{', or '}', or if the previous line starts a preprocessor
    # block. We also allow a brace on the following line if it is part of
    # an array initialization and would not fit within the 80 character
    # limit of the preceding line.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline) and
        not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
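  # E.g. (illustrative) the opening brace of
  #   } else if (some_condition &&
  #              another_condition) {
  # lands on a different line than the "else if" itself.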
if Search(r'else if\s*\(', line): # could be multi-line if brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') # Likewise, an else should never have the else clause on the same line if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if Match(r'\s*do [^\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Check single-line if/else bodies. The style guide says 'curly braces are not # required for single-line statements'. We additionally allow multi-line, # single statements, but we reject anything with more than one semicolon in # it. This means that the first semicolon after the if should be at the end of # its line, and the line after that should have an indent level equal to or # lower than the if. We also check for ambiguous if/else nesting without # braces. if_else_match = Search(r'\b(if\s*\(|else\b)', line) if if_else_match and not Match(r'\s*#', line): if_indent = GetIndentLevel(line) endline, endlinenum, endpos = line, linenum, if_else_match.end() if_match = Search(r'\bif\s*\(', line) if if_match: # This could be a multiline if condition, so find the end first. pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) # Check for an opening brace, either directly after the if or on the next # line. If found, this isn't a single-statement conditional. if (not Match(r'\s*{', endline[endpos:]) and not (Match(r'\s*$', endline[endpos:]) and endlinenum < (len(clean_lines.elided) - 1) and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): while (endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]): endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] # We allow a mix of whitespace and closing braces (e.g. for one-liner # methods) and a single \ after the semicolon (for macros) endpos = endline.find(';') if not Match(r';[\s}]*(\\?)$', endline[endpos:]): # Semicolon isn't the last character, there's something trailing. # Output a warning if the semicolon is not contained inside # a lambda expression. if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: # Make sure the next line is dedented next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) # With ambiguous nested if statements, this will error out on the # if that *doesn't* match the else, regardless of whether it's the # inner one or outer one. 
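          # E.g. (illustrative) the classic dangling-else:
          #   if (a)
          #     if (b)
          #       DoSomething();
          #   else              // binds to the inner if, not the outer
          #     DoSomethingElse();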
if (if_match and Match(r'\s*else\b', next_line) and next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. ' 'Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. 
# # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas # - alignas specifier with anonymous structs # - decltype closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or Search(r'\bdecltype$', line_prefix) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. # We need to check the line forward for NOLINT raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1, error) ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum, error) error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") def CheckEmptyBlockBody(filename, clean_lines, linenum, error): """Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Search for loop keywords at the beginning of the line. Because only # whitespaces are allowed before the keywords, this will also ignore most # do-while-loops, since those lines should start with closing brace. # # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: # Find the end of the conditional expression. (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. 
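    # E.g. (illustrative) "while (cond);" draws an empty-loop-body
    # warning here, while "while (cond) ;" is left to the
    # whitespace/semicolon check instead.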
if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') # Check for if statements that have completely empty bodies (no comments) # and no else clauses. if end_pos >= 0 and matched.group(1) == 'if': # Find the position of the opening { for the if statement. # Return without logging an error if it has no brackets. opening_linenum = end_linenum opening_line_fragment = end_line[end_pos:] # Loop until EOF or find anything that's not whitespace or opening {. while not Search(r'^\s*\{', opening_line_fragment): if Search(r'^(?!\s*$)', opening_line_fragment): # Conditional has no brackets. return opening_linenum += 1 if opening_linenum == len(clean_lines.elided): # Couldn't find conditional's opening { or any code before EOF. return opening_line_fragment = clean_lines.elided[opening_linenum] # Set opening_line (opening_line_fragment may not be entire opening line). opening_line = clean_lines.elided[opening_linenum] # Find the position of the closing }. opening_pos = opening_line_fragment.find('{') if opening_linenum == end_linenum: # We need to make opening_pos relative to the start of the entire line. opening_pos += end_pos (closing_line, closing_linenum, closing_pos) = CloseExpression( clean_lines, opening_linenum, opening_pos) if closing_pos < 0: return # Now construct the body of the conditional. This consists of the portion # of the opening line after the {, all lines until the closing line, # and the portion of the closing line before the }. if (clean_lines.raw_lines[opening_linenum] != CleanseComments(clean_lines.raw_lines[opening_linenum])): # Opening line ends with a comment, so conditional isn't empty. return if closing_linenum > opening_linenum: # Opening line after the {. Ignore comments here since we checked above. bodylist = list(opening_line[opening_pos+1:]) # All lines until closing line, excluding closing line, with comments. bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum]) # Closing line before the }. Won't (and can't) have comments. bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1]) body = '\n'.join(bodylist) else: # If statement has brackets and fits on a single line. body = opening_line[opening_pos+1:closing_pos-1] # Check if the body is empty if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body): return # The body is empty. Now make sure there's not an else clause. current_linenum = closing_linenum current_line_fragment = closing_line[closing_pos:] # Loop until EOF or find anything that's not whitespace or else clause. while Search(r'^\s*$|^(?=\s*else)', current_line_fragment): if Search(r'^(?=\s*else)', current_line_fragment): # Found an else clause, so don't log an error. return current_linenum += 1 if current_linenum == len(clean_lines.elided): break current_line_fragment = clean_lines.elided[current_linenum] # The body is empty and there's no else clause until EOF or other code. error(filename, end_linenum, 'whitespace/empty_if_body', 4, ('If statement had no body and no else clause')) def FindCheckMacro(line): """Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found. """ for macro in _CHECK_MACROS: i = line.find(macro) if i >= 0: # Find opening parenthesis. 
      # Do a regular expression match here
      # to make sure that we are matching the expected CHECK macro, as
      # opposed to some other macro that happens to contain the CHECK
      # substring.
      matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
      if not matched:
        continue
      return (macro, len(matched.group(1)))
  return (None, -1)


def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return

  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return

  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return

  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]

  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators. This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand. Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible. Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)

  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return

  # Check that rhs does not contain logical operators. We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return

  # At least one of the operands must be a constant literal. This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
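  # E.g. (illustrative) CHECK(x == 42) suggests CHECK_EQ(x, 42),
  # while CHECK(x == y) is left alone since neither side is a literal.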
lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) # Instead of: # Consider using CHECK_EQ instead of CHECK(a == b) # # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) def CheckAltTokens(filename, clean_lines, linenum, error): """Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) def GetLineWidth(line): """Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line) def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] prev = raw_lines[linenum - 1] if linenum > 0 else '' if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. 
# NOTE: here are the conditions rob pike used for his tests. Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. # We also don't check for lines that look like continuation lines # (of lines ending in double quotes, commas, equals, or angle brackets) # because the rules for how to indent those are non-trivial. if (not Search(r'[",=><] *$', prev) and (initial_spaces == 1 or initial_spaces == 3) and not Match(scope_or_label_pattern, cleansed_line) and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # Check if the line is a header guard. is_header_guard = False if file_extension in GetHeaderExtensions(): cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. # # The "$Id:...$" comment may also get very long without it being the # developers fault. # # Doxygen documentation copying can get pretty long when using an overloaded # function declaration if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^\s*//\s*[^\s]*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)): line_width = GetLineWidth(line) if line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # allow simple single line lambdas not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}', line) and # for loops are allowed two ;'s (and may run over two lines). 
cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') # Matches the first component of a filename delimited by -s and _s. That is: # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in itertools.chain( ('%s.%s' % (test_suffix.lstrip('_'), ext) for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())), ('%s.%s' % (suffix, ext) for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0] def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. 
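  # E.g. (illustrative) <string> is classified as _CPP_SYS_HEADER and
  # <stdio.h> as _C_SYS_HEADER, matching the doctest examples above.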
is_cpp_h = include in _CPP_HEADERS # Headers with C++ extensions shouldn't be considered C system headers if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']: is_system = False if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) target_dir_pub = os.path.normpath(target_dir + '/../public') target_dir_pub = target_dir_pub.replace('\\', '/') if target_base == include_base and ( include_dir == target_dir or include_dir == target_dir_pub): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): """Check rules that are applicable to #include lines. Strings on #include lines are NOT removed from elided line, to make certain tasks easier. However, to prevent false positives, checks applicable to #include lines in CheckLanguage must be put here. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. include_state: An _IncludeState instance in which the headers are inserted. error: The function to call with any errors found. """ fileinfo = FileInfo(filename) line = clean_lines.lines[linenum] # "include" should use the new style "foo/bar.h" instead of just "bar.h" # Only do this check if the included header follows google naming # conventions. If not, assume that it's a 3rd party API that # requires special include conventions. # # We also make an exception for Lua headers, which follow google # naming convention but not the include convention. match = Match(r'#include\s*"([^/]+\.h)"', line) if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): error(filename, linenum, 'build/include_subdir', 4, 'Include the directory when naming .h files') # we shouldn't include a file more than once. actually, there are a # handful of instances where doing so is okay, but in general it's # not. match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) is_system = (match.group(1) == '<') duplicate_line = include_state.FindHeader(include) if duplicate_line >= 0: error(filename, linenum, 'build/include', 4, '"%s" already included at %s:%s' % (include, filename, duplicate_line)) return for extension in GetNonHeaderExtensions(): if (include.endswith('.' + extension) and os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)): error(filename, linenum, 'build/include', 4, 'Do not include .' 
              + extension + ' files from other packages')
        return

    if not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      #   1) for foo.cc, foo.h  (preferred location)
      #   2) c system files
      #   3) cpp system files
      #   4) for foo.cc, foo.h  (deprecated location)
      #   5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)


def _GetTextInside(text, start_pattern):
  r"""Retrieves all the text between matching open and close parentheses.

  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for the text like
    printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation
  symbol.

  Args:
    text: The text to extract from. Its comments and strings must be elided.
          It can be a single line or span multiple lines.
    start_pattern: The regexp string indicating where to start extracting
                   the text.
  Returns:
    The extracted text.
    None if either the opening string or ending punctuation could not be found.
  """
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and currently use inferior regexp
  # matching).

  # Maps opening punctuation to the matching closing punctuation.
  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
  closing_punctuation = set(itervalues(matching_punctuation))

  # Find the position to start extracting text.
  match = re.search(start_pattern, text, re.M)
  if not match:  # start_pattern not found in text.
    return None
  start_position = match.end(0)

  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
  # Stack of closing punctuation we expect to have in text after position.
  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
  position = start_position
  while punctuation_stack and position < len(text):
    if text[position] == punctuation_stack[-1]:
      punctuation_stack.pop()
    elif text[position] in closing_punctuation:
      # A closing punctuation without a matching opening one.
      return None
    elif text[position] in matching_punctuation:
      punctuation_stack.append(matching_punctuation[text[position]])
    position += 1
  if punctuation_stack:
    # Opening punctuation left without matching closing punctuation.
    return None
  # All punctuation matched.
  return text[start_position:position - 1]


# Patterns for matching call-by-reference parameters.
# # Supports nested templates up to 2 levels deep using this messy pattern: # < (?: < (?: < [^<>]* # > # | [^<>] )* # > # | [^<>] )* # > _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* _RE_PATTERN_TYPE = ( r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' r'(?:\w|' r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' r'::)+') # A call-by-reference parameter ends with '& identifier'. _RE_PATTERN_REF_PARAM = re.compile( r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') # A call-by-const-reference parameter either ends with 'const& identifier' # or looks like 'const type& identifier' when 'type' is atomic. _RE_PATTERN_CONST_REF_PARAM = ( r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') # Stream types. _RE_PATTERN_REF_STREAM_PARAM = ( r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')') def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) if match: include_state.ResetSection(match.group(1)) # Perform other checks now that we are sure that this is not an include line CheckCasts(filename, clean_lines, linenum, error) CheckGlobalStatic(filename, clean_lines, linenum, error) CheckPrintf(filename, clean_lines, linenum, error) if file_extension in GetHeaderExtensions(): # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes declare or disable copy/assign # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! 
+double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple lines to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    if Search(r'\bliterals\b', line):
      error(filename, linenum, 'build/namespaces_literals', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')
    else:
      error(filename, linenum, 'build/namespaces', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch-all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
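      # Illustrative example (names are hypothetical): for
      #   int a[sizeof foo * 2];
      # the non-empty size tokens are 'sizeof', 'foo' and '2'; seeing
      # 'sizeof' makes us skip 'foo', and '2' then matches the digit rule,
      # so the size is treated as a compile-time constant and no error is
      # reported.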
if tok.startswith('sizeof'): skip_next = True continue is_const = False break if not is_const: error(filename, linenum, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. if (file_extension in GetHeaderExtensions() and Search(r'\bnamespace\s*{', line) and line[-1] != '\\'): error(filename, linenum, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.') def CheckGlobalStatic(filename, clean_lines, linenum, error): """Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access, and # also because globals can be destroyed when some threads are still running. # TODO(unknown): Generalize this to also find static unique_ptr instances. # TODO(unknown): File bugs for clang-tidy to find these. match = Match( r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +' r'([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function<Type>(... # string Class<Type>::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))): if Search(r'\bconst\b', line): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string ' 'instead: "%schar%s %s[]".' % (match.group(1), match.group(2) or '', match.group(3))) else: error(filename, linenum, 'runtime/string', 4, 'Static/global string variables are not permitted.') if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') def CheckPrintf(filename, clean_lines, linenum, error): """Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # When snprintf is used, the second argument shouldn't be a literal. 
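  # For example (illustrative sketch of what the check below flags):
  #   snprintf(buf, 10, "%s", s);           // flagged: literal size
  #   snprintf(buf, sizeof(buf), "%s", s);  // OK
  #   snprintf(NULL, 0, "%s", s);           // OK: used to compute a size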
  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if match and match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (match.group(1), match.group(2)))

  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % match.group(1))


def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function with "override" virt-specifier.
  """
  # Scan back a few lines for start of current function
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
    if match:
      # Look for "override" after the matching closing parenthesis
      line, _, closing_paren = CloseExpression(
          clean_lines, i, len(match.group(1)))
      return (closing_paren >= 0 and
              Search(r'\boverride\b', line[closing_paren:]))
  return False


def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Scan back a few lines for start of current function
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
      return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
  return False


def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)

    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False

  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False


def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
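  # e.g. (illustrative, hypothetical names):
  #   Foo::Foo(int x)
  #       : a_(x),
  #         b_(y & mask) {
  # when checking the last line, the scan below finds the earlier line
  # starting with ':' and returns early, so the bitwise '&' is not
  # misreported as a non-const reference parameter.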
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
        not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))


def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using a conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces. This is a fast way to
    # silence the common case where the function type is the first
    # template argument. False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)>   // bracket + no space = false positive
    #   value < double(42)         // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
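    # e.g. (illustrative) "char(name)[kSize];" is a declarator with
    # redundant parentheses, not a cast, so a bracket right after the
    # closing parenthesis tells us to bail out.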
if Match(r'\([^()]+\)\s*\[', match.group(3)): return # Other things to ignore: # - Function pointers # - Casts to pointer types # - Placement new # - Alias declarations matched_funcptr = match.group(3) if (matched_new_or_template is None and not (matched_funcptr and (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr) or matched_funcptr.startswith('(*)'))) and not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and not Search(r'new\(\S+\)\s*' + matched_type, line)): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) if not expecting_function: CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. # # Some non-identifier character is required before the '&' for the # expression to be recognized as a cast. These are casts: # expression = &static_cast<int*>(temporary()); # function(&(int*)(temporary())); # # This is not a cast: # reference_type&(int* function_param); match = Search( r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) if match: # Try a better error message when the & is bound to something # dereferenced by the casted pointer, as opposed to the casted # pointer itself. parenthesis_error = False match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) if match: _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) if x1 >= 0 and clean_lines.elided[y1][x1] == '(': _, y2, x2 = CloseExpression(clean_lines, y1, x1) if x2 >= 0: extended_line = clean_lines.elided[y2][x2:] if y2 < clean_lines.NumLines() - 1: extended_line += clean_lines.elided[y2 + 1] if Match(r'\s*(?:->|\[)', extended_line): parenthesis_error = True if parenthesis_error: error(filename, linenum, 'readability/casting', 4, ('Are you taking an address of something dereferenced ' 'from a cast? Wrapping the dereferenced expression in ' 'parentheses will make the binding more obvious')) else: error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): """Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise. 
""" line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) if context.endswith(' operator++') or context.endswith(' operator--'): return False # A single unnamed argument for a function tends to look like old style cast. # If we see those, don't issue warnings for deprecated casts. remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): return False # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True def ExpectingFunctionArgs(clean_lines, linenum): """Checks whether where function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types. """ line = clean_lines.elided[linenum] return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or (linenum >= 2 and (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]) or Search(r'\bstd::m?function\s*\<\s*$', clean_lines.elided[linenum - 1])))) _HEADERS_CONTAINING_TEMPLATES = ( ('<deque>', ('deque',)), ('<functional>', ('unary_function', 'binary_function', 'plus', 'minus', 'multiplies', 'divides', 'modulus', 'negate', 'equal_to', 'not_equal_to', 'greater', 'less', 'greater_equal', 'less_equal', 'logical_and', 'logical_or', 'logical_not', 'unary_negate', 'not1', 'binary_negate', 'not2', 'bind1st', 'bind2nd', 'pointer_to_unary_function', 'pointer_to_binary_function', 'ptr_fun', 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', 'mem_fun_ref_t', 'const_mem_fun_t', 'const_mem_fun1_t', 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', 'mem_fun_ref', )), ('<limits>', ('numeric_limits',)), ('<list>', ('list',)), ('<map>', ('map', 'multimap',)), ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr', 'unique_ptr', 'weak_ptr')), ('<queue>', ('queue', 'priority_queue',)), ('<set>', ('set', 'multiset',)), ('<stack>', ('stack',)), ('<string>', ('char_traits', 'basic_string',)), ('<tuple>', ('tuple',)), ('<unordered_map>', ('unordered_map', 'unordered_multimap')), ('<unordered_set>', ('unordered_set', 'unordered_multiset')), ('<utility>', ('pair',)), ('<vector>', ('vector',)), # gcc extensions. 
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )

_HEADERS_MAYBE_TEMPLATES = (
    ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
                     'transform',
                    )),
    ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
    )

_RE_PATTERN_STRING = re.compile(r'\bstring\b')

_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
  for _template in _templates:
    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
    # type::max().
    _re_pattern_headers_maybe_templates.append(
        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
         _template,
         _header))

# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))


def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file. We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the source (e.g. .cc) file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  fileinfo_cc = FileInfo(filename_cc)
  if fileinfo_cc.Extension().lstrip('.') not in GetNonHeaderExtensions():
    return (False, '')

  fileinfo_h = FileInfo(filename_h)
  if fileinfo_h.Extension().lstrip('.') not in GetHeaderExtensions():
    return (False, '')

  filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
  if matched_test_suffix:
    filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]

  filename_cc = filename_cc.replace('/public/', '/')
  filename_cc = filename_cc.replace('/internal/', '/')

  filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
  if filename_h.endswith('-inl'):
    filename_h = filename_h[:-len('-inl')]
  filename_h = filename_h.replace('/public/', '/')
  filename_h = filename_h.replace('/internal/', '/')

  files_belong_to_same_module = filename_cc.endswith(filename_h)
  common_path = ''
  if files_belong_to_same_module:
    common_path = filename_cc[:-len(filename_h)]
  return files_belong_to_same_module, common_path


def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  headerfile = None
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  linenum = 0
  for line in headerfile:
    linenum += 1
    clean_line = CleanseComments(line)
    match = _RE_PATTERN_INCLUDE.search(clean_line)
    if match:
      include = match.group(2)
      include_dict.setdefault(include, linenum)
  return True


def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports missing STL includes.

  This function will output warnings to make sure you are including the headers
  necessary for the STL containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  for linenum in range(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_headers_maybe_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following check is just a speed-up; no semantics are changed.
    if '<' not in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      matched = pattern.search(line)
      if matched:
        # Don't warn about IWYU in non-STL namespaces:
        # (We check only the first match per line; good enough.)
        prefix = line[:matched.start()]
        if prefix.endswith('std::') or not prefix.endswith('::'):
          required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can
  # be found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
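  # e.g. (illustrative) with abs_filename '/absolute/path/to/base/sysinfo.cc'
  # and a recorded include 'base/sysinfo.h', FilesBelongToSameModule returns
  # (True, '/absolute/path/to/'), and UpdateIncludeState then reads
  # '/absolute/path/to/base/sysinfo.h' to pick up the includes it provides.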
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if not header_found:
    for extension in GetNonHeaderExtensions():
      if filename.endswith('.' + extension):
        return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in sorted(required, key=required.__getitem__):
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)


_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
  if match:
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')


def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
  if not virtual: return

  # Ignore "virtual" keywords that are near access-specifiers. These
  # are only used in class base-specifier and do not apply to member
  # functions.
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
    return

  # Ignore the "virtual" keyword from virtual base classes. Usually
  # there is a colon on the same line in these cases (virtual base
  # classes are rare in google3 because multiple inheritance is rare).
  if Match(r'^.*[^:]:[^:].*$', line): return

  # Look for the next opening parenthesis. This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
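  # e.g. (illustrative) for
  #   virtual void Foo(int a,
  #                    int b) override;
  # the loop below scans up to three lines for the '(' opening the
  # parameter list, matches parentheses to find its end, and the later
  # loop then spots the redundant "override" after it.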
end_col = -1 end_line = -1 start_col = len(virtual.group(2)) for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): line = clean_lines.elided[start_line][start_col:] parameter_list = Match(r'^([^(]*)\(', line) if parameter_list: # Match parentheses to find the end of the parameter list (_, end_line, end_col) = CloseExpression( clean_lines, start_line, start_col + len(parameter_list.group(1))) break start_col = 0 if end_col < 0: return # Couldn't find end of parameter list, give up # Look for "override" or "final" after the parameter list # (possibly on the next few lines). for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): line = clean_lines.elided[i][end_col:] match = Search(r'\b(override|final)\b', line) if match: error(filename, linenum, 'readability/inheritance', 4, ('"virtual" is redundant since function is ' 'already declared as "%s"' % match.group(1))) # Set end_col to check whole lines after we are done with the # first line. end_col = 0 if Search(r'[^\w]\s*$', line): break def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): """Check if line contains a redundant "override" or "final" virt-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Look for closing parenthesis nearby. We need one to confirm where # the declarator ends and where the virt-specifier starts to avoid # false positives. line = clean_lines.elided[linenum] declarator_end = line.rfind(')') if declarator_end >= 0: fragment = line[declarator_end:] else: if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0: fragment = line else: return # Check that at most one of "override" or "final" is present, not both if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): error(filename, linenum, 'readability/inheritance', 4, ('"override" is redundant since function is ' 'already declared as "final"')) # Returns true if we are at a new block, and it is directly # inside of a namespace. def IsBlockInNameSpace(nesting_state, is_forward_declaration): """Checks that the new block is directly in a namespace. Args: nesting_state: The _NestingState object that contains info about our state. is_forward_declaration: If the class is a forward declared class. Returns: Whether or not the new block is directly in a namespace. """ if is_forward_declaration: return len(nesting_state.stack) >= 1 and ( isinstance(nesting_state.stack[-1], _NamespaceInfo)) return (len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.stack[-2], _NamespaceInfo)) def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum): """This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace. 
""" is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum) if not (is_namespace_indent_item or is_forward_declaration): return False # If we are in a macro, we do not want to check the namespace indentation. if IsMacroDefinition(raw_lines_no_comments, linenum): return False return IsBlockInNameSpace(nesting_state, is_forward_declaration) # Call this method if the line is directly inside of a namespace. # If the line above is blank (excluding comments) or the start of # an inner namespace, it cannot be indented. def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, error): line = raw_lines_no_comments[linenum] if Match(r'^\s+', line): error(filename, linenum, 'runtime/indentation_namespace', 4, 'Do not indent within a namespace') def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=None): """Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) if nesting_state.InAsmBlock(): return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) if extra_check_functions: for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error) def FlagCxx11Features(filename, clean_lines, linenum, error): """Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) # Flag unapproved C++ TR1 headers. 
if include and include.group(1).startswith('tr1/'): error(filename, linenum, 'build/c++tr1', 5, ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1)) # Flag unapproved C++11 headers. if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name) def FlagCxx14Features(filename, clean_lines, linenum, error): """Flag those C++14 features that we restrict. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) # Flag unapproved C++14 headers. if include and include.group(1) in ('scoped_allocator', 'shared_mutex'): error(filename, linenum, 'build/c++14', 5, ('<%s> is an unapproved C++14 header.') % include.group(1)) def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=None): """Performs lint checks and reports any errors to the given error function. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ lines = (['// marker so line numbers and indices both start at 1'] + lines + ['// marker so line numbers end in a known way']) include_state = _IncludeState() function_state = _FunctionState() nesting_state = NestingState() ResetNolintSuppressions() CheckForCopyright(filename, lines, error) ProcessGlobalSuppresions(lines) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) if file_extension in GetHeaderExtensions(): CheckForHeaderGuard(filename, clean_lines, error) for line in range(clean_lines.NumLines()): ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions) FlagCxx11Features(filename, clean_lines, line, error) nesting_state.CheckCompletedBlocks(filename, error) CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) # Check that the .cc file has included its header if it exists. 
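  # e.g. (illustrative) when processing foo.cc, this warns if a matching
  # foo.h exists but foo.cc never includes it.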
  if _IsSourceExtension(file_extension):
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)


def ProcessConfigOverrides(filename):
  """Loads the configuration files and processes the config overrides.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """

  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                _cpplint_state.PrintInfo('Ignoring "%s": file excluded by '
                    '"%s". File path component "%s" matches pattern "%s"\n' %
                    (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              _cpplint_state.PrintError('Line length must be numeric.')
          elif name == 'extensions':
            global _valid_extensions
            try:
              extensions = [ext.strip() for ext in val.split(',')]
              _valid_extensions = set(extensions)
            except ValueError:
              sys.stderr.write('Extensions should be a comma-separated list '
                               'of values; for example: extensions=hpp,cpp\n'
                               'This could not be parsed: "%s"' % (val,))
          elif name == 'headers':
            global _header_extensions
            try:
              extensions = [ext.strip() for ext in val.split(',')]
              _header_extensions = set(extensions)
            except ValueError:
              sys.stderr.write('Extensions should be a comma-separated list '
                               'of values; for example: extensions=hpp,cpp\n'
                               'This could not be parsed: "%s"' % (val,))
          elif name == 'root':
            global _root
            _root = val
          else:
            _cpplint_state.PrintError(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      _cpplint_state.PrintError(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True


def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report. Every error of confidence
    >= verbose_level will be reported. 0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line.
Each function takes 4 arguments: filename, clean_lines, line, error """ _SetVerboseLevel(vlevel) _BackupFilters() if not ProcessConfigOverrides(filename): _RestoreFilters() return lf_lines = [] crlf_lines = [] try: # Support the UNIX convention of using "-" for stdin. Note that # we are not opening the file with universal newline support # (which codecs doesn't support anyway), so the resulting lines do # contain trailing '\r' characters if we are reading a file that # has CRLF endings. # If after the split a trailing '\r' is present, it is removed # below. if filename == '-': lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n') else: lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') # Remove trailing '\r'. # The -1 accounts for the extra trailing blank line we get from split() for linenum in range(len(lines) - 1): if lines[linenum].endswith('\r'): lines[linenum] = lines[linenum].rstrip('\r') crlf_lines.append(linenum + 1) else: lf_lines.append(linenum + 1) except IOError: _cpplint_state.PrintError( "Skipping input '%s': Can't open for reading\n" % filename) _RestoreFilters() return # Note, if no dot is found, this will give the entire filename as the ext. file_extension = filename[filename.rfind('.') + 1:] # When reading from stdin, the extension is unknown, so no cpplint tests # should rely on the extension. if filename != '-' and file_extension not in GetAllExtensions(): _cpplint_state.PrintError('Ignoring %s; not a valid file name ' '(%s)\n' % (filename, ', '.join(GetAllExtensions()))) else: ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) # If end-of-line sequences are a mix of LF and CR-LF, issue # warnings on the lines with CR. # # Don't issue any warnings if all lines are uniformly LF or CR-LF, # since critique can handle these just fine, and the style guide # doesn't dictate a particular end of line sequence. # # We can't depend on os.linesep to determine what the desired # end-of-line sequence should be, since that will return the # server-side end-of-line sequence. if lf_lines and crlf_lines: # Warn on every line with CR. An alternative approach might be to # check whether the file is mostly CRLF or just LF, and warn on the # minority, we bias toward LF here since most tools prefer LF. for linenum in crlf_lines: Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\r (^M) found; better to use only \\n') _cpplint_state.PrintInfo('Done processing %s\n' % filename) _RestoreFilters() def PrintUsage(message): """Prints a brief usage string and exits, optionally with an error message. Args: message: The optional error message. """ sys.stderr.write(_USAGE) if message: sys.exit('\nFATAL ERROR: ' + message) else: sys.exit(0) def PrintCategories(): """Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter. """ sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) sys.exit(0) def ParseArguments(args): """Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint. 
""" try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'repository=', 'linelength=', 'extensions=', 'exclude=', 'headers=', 'quiet', 'recursive']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' counting_style = '' recursive = False for (opt, val) in opts: if opt == '--help': PrintUsage(None) elif opt == '--output': if val not in ('emacs', 'vs7', 'eclipse', 'junit'): PrintUsage('The only allowed output formats are emacs, vs7, eclipse ' 'and junit.') output_format = val elif opt == '--verbose': verbosity = int(val) elif opt == '--filter': filters = val if not filters: PrintCategories() elif opt == '--counting': if val not in ('total', 'toplevel', 'detailed'): PrintUsage('Valid counting options are total, toplevel, and detailed') counting_style = val elif opt == '--root': global _root _root = val elif opt == '--repository': global _repository _repository = val elif opt == '--linelength': global _line_length try: _line_length = int(val) except ValueError: PrintUsage('Line length must be digits.') elif opt == '--exclude': global _excludes if not _excludes: _excludes = set() _excludes.update(glob.glob(val)) elif opt == '--extensions': global _valid_extensions try: _valid_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma seperated list.') elif opt == '--headers': global _header_extensions try: _header_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma seperated list.') elif opt == '--recursive': recursive = True elif opt == '--quiet': global _quiet _quiet = True if not filenames: PrintUsage('No files were specified.') if recursive: filenames = _ExpandDirectories(filenames) if _excludes: filenames = _FilterExcludedFiles(filenames) _SetOutputFormat(output_format) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) return filenames def _ExpandDirectories(filenames): """Searches a list of filenames and replaces directories in the list with all files descending from those directories. Files with extensions not in the valid extensions list are excluded. Args: filenames: A list of files or directories Returns: A list of all files that are members of filenames or descended from a directory in filenames """ expanded = set() for filename in filenames: if not os.path.isdir(filename): expanded.add(filename) continue for root, _, files in os.walk(filename): for loopfile in files: fullname = os.path.join(root, loopfile) if fullname.startswith('.' + os.path.sep): fullname = fullname[len('.' + os.path.sep):] expanded.add(fullname) filtered = [] for filename in expanded: if os.path.splitext(filename)[1][1:] in GetAllExtensions(): filtered.append(filename) return filtered def _FilterExcludedFiles(filenames): """Filters out files listed in the --exclude command line switch. File paths in the switch are evaluated relative to the current working directory """ exclude_paths = [os.path.abspath(f) for f in _excludes] return [f for f in filenames if os.path.abspath(f) not in exclude_paths] def main(): filenames = ParseArguments(sys.argv[1:]) backup_err = sys.stderr try: # Change stderr to write with replacement characters so we don't die # if we try to print something containing non-ASCII characters. 
sys.stderr = codecs.StreamReader(sys.stderr, 'replace') _cpplint_state.ResetErrorCounts() for filename in filenames: ProcessFile(filename, _cpplint_state.verbose_level) _cpplint_state.PrintErrorCounts() if _cpplint_state.output_format == 'junit': sys.stderr.write(_cpplint_state.FormatJUnitXML()) finally: sys.stderr = backup_err sys.exit(_cpplint_state.error_count > 0) if __name__ == '__main__': main()
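
# A minimal, hypothetical CPPLINT.cfg, illustrating only the option names
# that ProcessConfigOverrides above actually parses; the concrete values
# here are made up for the sketch:
#
#   set noparent
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*_test\.cc
#   linelength=100
#   root=src
#
# "set noparent" stops the upward directory walk, "filter" entries accumulate
# across directories (outer directories having the least priority), and
# "exclude_files" is a regex matched against a single path component, as the
# comments in that function explain.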
apache-2.0
-861,955,295,882,756,000
37.547799
97
0.639988
false
uclouvain/osis
attribution/views/charge_repartition/update.py
1
2133
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from django.utils.translation import gettext_lazy as _ from attribution.forms.attributions import LecturingAttributionChargeForm, PracticalAttributionChargeForm from attribution.views.learning_unit.update import UpdateAttributionView class EditChargeRepartition(UpdateAttributionView): permission_required = 'base.can_add_charge_repartition' template_name = "attribution/charge_repartition/add_charge_repartition_inner.html" form_classes = { "lecturing_charge_form": LecturingAttributionChargeForm, "practical_charge_form": PracticalAttributionChargeForm } def get_success_message(self, forms): return _("Repartition modified for %(tutor)s (%(function)s)") %\ {"tutor": self.attribution.tutor.person, "function": _(self.attribution.get_function_display())}
agpl-3.0
-7,869,994,393,758,727,000
48.581395
105
0.677298
false
pieleric/odemis
src/odemis/acq/align/test/autofocus_test.py
2
26024
# -*- coding: utf-8 -*- ''' Created on 25 April 2014 @author: Kimon Tsitsikas Copyright © 2013-2014 Kimon Tsitsikas, Delmic This file is part of Odemis. Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. ''' from __future__ import division from concurrent.futures._base import CancelledError import logging import numpy from odemis.dataio import tiff from odemis import model, acq import odemis from odemis.acq import align, stream from odemis.acq.align import autofocus from odemis.acq.align.autofocus import Sparc2AutoFocus, MTD_BINARY from odemis.dataio import hdf5 from odemis.util import test, timeout, img import os from scipy import ndimage import time import unittest from odemis.acq import path # logging.basicConfig(format=" - %(levelname)s \t%(message)s") logging.getLogger().setLevel(logging.DEBUG) # _frm = "%(asctime)s %(levelname)-7s %(module)-15s: %(message)s" # logging.getLogger().handlers[0].setFormatter(logging.Formatter(_frm)) CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/" SECOM_CONFIG = CONFIG_PATH + "sim/secom-focus-test.odm.yaml" SPARC_CONFIG = CONFIG_PATH + "sim/sparc2-focus-test.odm.yaml" SPARC2_FOCUS_CONFIG = CONFIG_PATH + "sim/sparc2-ded-focus-test-sim.odm.yaml" SPARC2_FOCUS2_CONFIG = CONFIG_PATH + "sim/sparc2-4spec-sim.odm.yaml" TEST_IMAGE_PATH = os.path.dirname(__file__) class TestAutofocus(unittest.TestCase): """ Test autofocus functions """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SECOM_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ebeam = model.getComponent(role="e-beam") cls.sed = model.getComponent(role="se-detector") cls.ccd = model.getComponent(role="ccd") cls.focus = model.getComponent(role="focus") cls.efocus = model.getComponent(role="ebeam-focus") cls.light = model.getComponent(role="light") cls.light_filter = model.getComponent(role="filter") # The good focus positions are at the start up positions cls._opt_good_focus = cls.focus.position.value["z"] cls._sem_good_focus = cls.efocus.position.value["z"] @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") def test_measure_focus(self): """ Test MeasureFocus """ data = hdf5.read_data(os.path.dirname(__file__) + "/grid_10x10.h5") C, T, Z, Y, X = data[0].shape data[0].shape = Y, X input = data[0] prev_res = autofocus.MeasureSEMFocus(input) for i in range(1, 10, 1): blur = ndimage.gaussian_filter(input, sigma=i) res = autofocus.MeasureSEMFocus(blur) self.assertGreater(prev_res, res) prev_res = res @timeout(1000) def test_autofocus_opt(self): """ Test AutoFocus on CCD """ # The way to measure focus is a bit different between CCD and SEM focus = self.focus ebeam = self.ebeam ccd = self.ccd focus.moveAbs({"z": self._opt_good_focus - 400e-6}).result() ccd.exposureTime.value = 
ccd.exposureTime.range[0] future_focus = align.AutoFocus(ccd, ebeam, focus) foc_pos, foc_lev = future_focus.result(timeout=900) self.assertAlmostEqual(foc_pos, self._opt_good_focus, 3) self.assertGreater(foc_lev, 0) @timeout(1000) def test_autofocus_sem(self): """ Test AutoFocus on e-beam """ self.efocus.moveAbs({"z": self._sem_good_focus - 100e-06}).result() self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0] future_focus = align.AutoFocus(self.sed, self.ebeam, self.efocus) foc_pos, foc_lev = future_focus.result(timeout=900) self.assertAlmostEqual(foc_pos, self._sem_good_focus, 3) self.assertGreater(foc_lev, 0) @timeout(1000) def test_autofocus_sem_hint(self): """ Test AutoFocus on e-beam with a hint """ self.efocus.moveAbs({"z": self._sem_good_focus + 200e-06}).result() self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0] # We don't give exactly the good focus position, to make it a little harder future_focus = align.AutoFocus(self.sed, self.ebeam, self.efocus, good_focus=self._sem_good_focus + 100e-9) foc_pos, foc_lev = future_focus.result(timeout=900) self.assertAlmostEqual(foc_pos, self._sem_good_focus, 3) self.assertGreater(foc_lev, 0) class TestSparc2AutoFocus(unittest.TestCase): """ Test Sparc2Autofocus for sp-ccd backend : SPARC2_FOCUS_CONFIG """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SPARC2_FOCUS_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ccd = model.getComponent(role="ccd") cls.spccd = model.getComponent(role="sp-ccd") cls.focus = model.getComponent(role="focus") cls.bl = model.getComponent(role="brightlight") cls.spgr = model.getComponent(role="spectrograph") cls.spgr_ded = model.getComponent(role="spectrograph-dedicated") cls.aligner = model.getComponent(role="fiber-aligner") cls.microscope = model.getMicroscope() cls.optmngr = path.OpticalPathManager(cls.microscope) cls.specline_ccd = stream.BrightfieldStream("Spectrograph_line_ccd", cls.ccd, cls.ccd.data, cls.bl) cls.specline_spccd = stream.BrightfieldStream("Spectrograph_line_spccd", cls.spccd, cls.spccd.data, cls.bl) # The good focus position is the start up position cls._good_focus = cls.focus.position.value["z"] @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") self.opm = acq.path.OpticalPathManager(model.getMicroscope()) # Speed it up self.ccd.exposureTime.value = self.ccd.exposureTime.range[0] self.spccd.exposureTime.value = self.spccd.exposureTime.range[0] @timeout(1000) def test_one_det(self): """ Test AutoFocus Spectrometer on SP-CCD """ self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) f = Sparc2AutoFocus("spec-fiber-focus", self.optmngr, [self.specline_spccd], True) time.sleep(5) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) res = f.result(timeout=1000) for (g, d), fpos in res.items(): self.assertEqual(d.role, self.spccd.role) self.assertAlmostEqual(fpos, self._good_focus, 3) self.assertEqual(len(res.keys()), len(self.spgr.axes["grating"].choices)) @timeout(100) def 
test_cancel(self): """ Test cancelling does cancel (relatively quickly) """ self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) f = Sparc2AutoFocus("spec-fiber-focus", self.optmngr, [self.specline_spccd], True) time.sleep(5) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) cancelled = f.cancel() self.assertTrue(cancelled) self.assertTrue(f.cancelled()) with self.assertRaises(CancelledError): res = f.result(timeout=900) @timeout(1000) def test_multi_det(self): """ Test AutoFocus Spectrometer with multiple detectors """ # Note: a full procedure would start by setting the slit to the smallest position # (cf optical path mode "spec-focus") and activating an energy source specline_mul = [self.specline_ccd, self.specline_spccd] self.focus.moveAbs({"z": self._good_focus + 400e-6}).result() logging.debug("print the result of self.focus.moveAbs %s", self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) f = Sparc2AutoFocus("spec-fiber-focus", self.optmngr, specline_mul, True) time.sleep(5) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertIn(d.role, (self.ccd.role, self.spccd.role)) if d.role is self.ccd.role: self.assertAlmostEqual(fpos, self._good_focus, 3) if d.role is self.spccd.role: self.assertAlmostEqual(fpos, self._good_focus, 3) # We expect an entry for each combination grating/detector self.assertEqual(len(res.keys()), len(self.spgr.axes["grating"].choices)) class TestSparc2AutoFocus_2(unittest.TestCase): """ Test Sparc2Autofocus for ccd backend : SPARC2_FOCUS_CONFIG """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SPARC2_FOCUS_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ccd = model.getComponent(role="ccd") cls.spccd = model.getComponent(role="sp-ccd") cls.focus = model.getComponent(role="focus") cls.spgr = model.getComponent(role="spectrograph") cls.spgr_ded = model.getComponent(role="spectrograph-dedicated") cls.bl = model.getComponent(role="brightlight") cls.microscope = model.getMicroscope() cls.optmngr = path.OpticalPathManager(cls.microscope) cls.specline_ccd = stream.BrightfieldStream("Spectrograph_line_ccd", cls.ccd, cls.ccd.data, cls.bl) cls.specline_spccd = stream.BrightfieldStream ("Spectrograph line_spccd", cls.spccd, cls.spccd.data, cls.bl) # The good focus position is the start up position cls._good_focus = cls.focus.position.value["z"] @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") self.opm = acq.path.OpticalPathManager(model.getMicroscope()) # Speed it up self.ccd.exposureTime.value = self.ccd.exposureTime.range[0] self.spccd.exposureTime.value = self.spccd.exposureTime.range[0] @timeout(1000) def 
test_one_det(self): """ Test AutoFocus Spectrometer on CCD """ self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.spccd.set_image(new_img) f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True) time.sleep(5) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.spccd.set_image(new_img) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertEqual(d.role, self.ccd.role) self.assertAlmostEqual(fpos, self._good_focus, 3) self.assertEqual(len(res.keys()), len(self.spgr_ded.axes["grating"].choices)) @timeout(100) def test_cancel(self): """ Test cancelling does cancel (relatively quickly) """ self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.spccd.set_image(new_img) f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True) time.sleep(5) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.spccd.set_image(new_img) cancelled = f.cancel() self.assertTrue(cancelled) self.assertTrue(f.cancelled()) with self.assertRaises(CancelledError): res = f.result(timeout=900) @timeout(1000) def test_multi_det(self): """ Test AutoFocus Spectrometer with multiple detectors """ # Note: a full procedure would start by setting the slit to the smallest position # (cf optical path mode "spec-focus") and activating an energy source specline_mul = [self.specline_ccd, self.specline_spccd] self.focus.moveAbs({"z": self._good_focus + 400e-6}).result() data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.spccd.set_image(new_img) f = Sparc2AutoFocus("spec-focus", self.optmngr, specline_mul, True) time.sleep(5) data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.spccd.set_image(new_img) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertIn(d.role, (self.ccd.role, self.spccd.role)) if d.role is self.ccd.role: self.assertAlmostEqual(fpos, self._good_focus, 3) if d.role is self.spccd.role: self.assertAlmostEqual(fpos, self._good_focus, 3) # We expect an entry for each combination grating/detector self.assertEqual(len(res.keys()), len(self.spgr_ded.axes["grating"].choices)) class TestSparc2AutoFocus_3(unittest.TestCase): """ Test Sparc2Autofocus for in case of 4 detectors backend : SPARC2_FOCUS2_CONFIG """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SPARC2_FOCUS2_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ccd = model.getComponent(role="ccd0") cls.spccd = model.getComponent(role="sp-ccd3") cls.focus = model.getComponent(role="focus") cls.spgr = model.getComponent(role="spectrograph") cls.spgr_ded = model.getComponent(role="spectrograph-dedicated") cls.bl = model.getComponent(role="brightlight") cls.microscope = model.getMicroscope() cls.optmngr = path.OpticalPathManager(cls.microscope) 
cls.specline_ccd = stream.BrightfieldStream("Spectrograph_line_ccd", cls.ccd, cls.ccd.data, cls.bl) cls.specline_spccd = stream.BrightfieldStream ("Spectrograph line_spccd", cls.spccd, cls.spccd.data, cls.bl) # The good focus position is the start up position cls._good_focus = cls.focus.position.value["z"] @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") self.opm = acq.path.OpticalPathManager(model.getMicroscope()) # Speed it up self.ccd.exposureTime.value = self.ccd.exposureTime.range[0] self.spccd.exposureTime.value = self.spccd.exposureTime.range[0] @timeout(1000) def test_spectrograph(self): """ Test AutoFocus Spectrometer on CCD """ f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertIn(d.role, {"ccd0", "sp-ccd1"}) self.assertEqual(len(res.keys()), 2*len(self.spgr_ded.axes["grating"].choices)) def test_ded_spectrograph(self): """ Test AutoFocus Spectrometer on CCD """ f = Sparc2AutoFocus("spec-fiber-focus", self.optmngr, [self.specline_spccd], True) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertIn(d.role, {"sp-ccd2", "sp-ccd3"}) self.assertEqual(len(res.keys()), 2*len(self.spgr_ded.axes["grating"].choices)) @timeout(100) def test_cancel(self): """ Test cancelling does cancel (relatively quickly) """ self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True) time.sleep(5) cancelled = f.cancel() self.assertTrue(cancelled) self.assertTrue(f.cancelled()) with self.assertRaises(CancelledError): res = f.result(timeout=900) class TestAutofocusSpectrometer(unittest.TestCase): """ Test autofocus spectrometer function """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SPARC_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ccd = model.getComponent(role="ccd") cls.spccd = model.getComponent(role="sp-ccd") cls.focus = model.getComponent(role="focus") cls.spgr = model.getComponent(role="spectrograph") cls.light = model.getComponent(role="brightlight") cls.selector = model.getComponent(role="spec-det-selector") # The good focus position is the start up position cls._good_focus = cls.focus.position.value["z"] @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") # Speed it up self.ccd.exposureTime.value = self.ccd.exposureTime.range[0] self.spccd.exposureTime.value = self.spccd.exposureTime.range[0] @timeout(1000) def test_one_det(self): """ Test AutoFocus Spectrometer on CCD """ self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() f = align.AutoFocusSpectrometer(self.spgr, self.focus, self.ccd) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertIs(d, self.ccd) self.assertAlmostEqual(fpos, self._good_focus, 3) self.assertEqual(len(res.keys()), len(self.spgr.axes["grating"].choices)) @timeout(100) def test_cancel(self): """ Test cancelling does cancel (relatively quickly) """ self.focus.moveAbs({"z": self._good_focus + 400e-6}).result() f = align.AutoFocusSpectrometer(self.spgr, self.focus, [self.ccd]) time.sleep(2) 
f.cancel() self.assertTrue(f.cancelled()) with self.assertRaises(CancelledError): res = f.result(timeout=900) @timeout(1000) def test_multi_det(self): """ Test AutoFocus Spectrometer with multiple detectors """ # Note: a full procedure would start by setting the slit to the smallest position # (cf optical path mode "spec-focus") and activating an energy source self.focus.moveAbs({"z": self._good_focus + 400e-6}).result() f = align.AutoFocusSpectrometer(self.spgr, self.focus, [self.ccd, self.spccd], self.selector) res = f.result(timeout=900) for (g, d), fpos in res.items(): self.assertIn(d, (self.ccd, self.spccd)) # Only check that the focus is correct with the CCD as the simulator # doesn't actually connects the focus position to the spccd image # (so the image is always the same, and the autofocus procedure # picks a random position) if d is self.ccd: self.assertAlmostEqual(fpos, self._good_focus, 3) # The number of entries depend on the implementation. For now, we expect # an entry for each combination grating/detector ngs = len(self.spgr.axes["grating"].choices) nds = 2 self.assertEqual(len(res), ngs * nds) class TestAutofocus1d(unittest.TestCase): """ Test autofocus functions on 1 line CCD. """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SPARC2_FOCUS_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ccd = model.getComponent(role="ccd") cls.spectrometer = model.getComponent(role="spectrometer-integrated") cls.focus = model.getComponent(role="focus") cls._good_focus = cls.focus.position.value["z"] @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") @timeout(1000) def test_autofocus_spect(self): """ Test AutoFocus on 1 line CCD for example spectrum. """ # Make sure the image is the example spectrum image, in case this test runs after test_autofocus_slit. data = hdf5.read_data(os.path.dirname(odemis.__file__) + "/driver/sparc-spec-sim.h5") new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() f = align.AutoFocus(self.spectrometer, None, self.focus, method=MTD_BINARY) foc_pos, foc_lev = f.result(timeout=900) logging.debug("Found focus at {} good focus at {}".format(foc_pos, self._good_focus)) # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps. numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5) @timeout(1000) def test_autofocus_slit(self): """ Test AutoFocus on 1 line CCD for an image of a slit. """ # Change image to slit image. data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff")) new_img = img.ensure2DImage(data[0]) self.ccd.set_image(new_img) self.spectrometer.binning.value = (4, 64) self.focus.moveAbs({"z": self._good_focus - 200e-6}).result() f = align.AutoFocus(self.spectrometer, None, self.focus, method=MTD_BINARY) foc_pos, foc_lev = f.result(timeout=900) logging.debug("Found focus at {} good focus at {}".format(foc_pos, self._good_focus)) # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps. 
numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5) self.focus.moveAbs({"z": self._good_focus + 400e-6}).result() f = align.AutoFocus(self.spectrometer, None, self.focus, method=MTD_BINARY) foc_pos, foc_lev = f.result(timeout=900) logging.debug("Found focus at {} good focus at {}".format(foc_pos, self._good_focus)) # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps. numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5) if __name__ == '__main__': unittest.main()
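
# The tests above exercise one calling convention throughout: the autofocus
# routines return a future that is blocked on or cancelled. A distilled
# sketch of that pattern -- "detector", "emitter" and "focus" stand for
# whatever components the running backend provides, so this is illustrative
# rather than runnable on its own:
#
#   f = align.AutoFocus(detector, emitter, focus)  # starts the procedure
#   foc_pos, foc_lev = f.result(timeout=900)       # best position + focus level
#   # ...or, instead of blocking: f.cancel() makes f.result() raise CancelledError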
gpl-2.0
8,850,355,308,528,776,000
37.045322
116
0.628444
false
balanced/status.balancedpayments.com
venv/lib/python2.7/site-packages/distribute-0.6.34-py2.7.egg/setuptools/tests/test_resources.py
23
25002
#!/usr/bin/python # -*- coding: utf-8 -*- # NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove from unittest import TestCase, makeSuite; from pkg_resources import * from setuptools.command.easy_install import get_script_header, is_sh import os, pkg_resources, sys, StringIO, tempfile, shutil try: frozenset except NameError: from sets import ImmutableSet as frozenset def safe_repr(obj, short=False): """ copied from Python2.7""" try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH] + ' [truncated]...' class Metadata(EmptyProvider): """Mock object to return metadata as if from an on-disk distribution""" def __init__(self,*pairs): self.metadata = dict(pairs) def has_metadata(self,name): return name in self.metadata def get_metadata(self,name): return self.metadata[name] def get_metadata_lines(self,name): return yield_lines(self.get_metadata(name)) class DistroTests(TestCase): def testCollection(self): # empty path should produce no distributions ad = Environment([], platform=None, python=None) self.assertEqual(list(ad), []) self.assertEqual(ad['FooPkg'],[]) ad.add(Distribution.from_filename("FooPkg-1.3_1.egg")) ad.add(Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg")) ad.add(Distribution.from_filename("FooPkg-1.2-py2.4.egg")) # Name is in there now self.assertTrue(ad['FooPkg']) # But only 1 package self.assertEqual(list(ad), ['foopkg']) # Distributions sort by version self.assertEqual( [dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2'] ) # Removing a distribution leaves sequence alone ad.remove(ad['FooPkg'][1]) self.assertEqual( [dist.version for dist in ad['FooPkg']], ['1.4','1.2'] ) # And inserting adds them in order ad.add(Distribution.from_filename("FooPkg-1.9.egg")) self.assertEqual( [dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2'] ) ws = WorkingSet([]) foo12 = Distribution.from_filename("FooPkg-1.2-py2.4.egg") foo14 = Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg") req, = parse_requirements("FooPkg>=1.3") # Nominal case: no distros on path, should yield all applicable self.assertEqual(ad.best_match(req,ws).version, '1.9') # If a matching distro is already installed, should return only that ws.add(foo14); self.assertEqual(ad.best_match(req,ws).version, '1.4') # If the first matching distro is unsuitable, it's a version conflict ws = WorkingSet([]); ws.add(foo12); ws.add(foo14) self.assertRaises(VersionConflict, ad.best_match, req, ws) # If more than one match on the path, the first one takes precedence ws = WorkingSet([]); ws.add(foo14); ws.add(foo12); ws.add(foo14); self.assertEqual(ad.best_match(req,ws).version, '1.4') def checkFooPkg(self,d): self.assertEqual(d.project_name, "FooPkg") self.assertEqual(d.key, "foopkg") self.assertEqual(d.version, "1.3-1") self.assertEqual(d.py_version, "2.4") self.assertEqual(d.platform, "win32") self.assertEqual(d.parsed_version, parse_version("1.3-1")) def testDistroBasics(self): d = Distribution( "/some/path", project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32" ) self.checkFooPkg(d) d = Distribution("/some/path") self.assertEqual(d.py_version, sys.version[:3]) self.assertEqual(d.platform, None) def testDistroParse(self): d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg") self.checkFooPkg(d) d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg-info") self.checkFooPkg(d) def testDistroMetadata(self): d = Distribution( "/some/path", 
project_name="FooPkg", py_version="2.4", platform="win32", metadata = Metadata( ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") ) ) self.checkFooPkg(d) def distRequires(self, txt): return Distribution("/foo", metadata=Metadata(('depends.txt', txt))) def checkRequires(self, dist, txt, extras=()): self.assertEqual( list(dist.requires(extras)), list(parse_requirements(txt)) ) def testDistroDependsSimple(self): for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0": self.checkRequires(self.distRequires(v), v) def testResolve(self): ad = Environment([]); ws = WorkingSet([]) # Resolving no requirements -> nothing to install self.assertEqual( list(ws.resolve([],ad)), [] ) # Request something not in the collection -> DistributionNotFound self.assertRaises( DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad ) Foo = Distribution.from_filename( "/foo_dir/Foo-1.2.egg", metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0")) ) ad.add(Foo); ad.add(Distribution.from_filename("Foo-0.9.egg")) # Request thing(s) that are available -> list to activate for i in range(3): targets = list(ws.resolve(parse_requirements("Foo"), ad)) self.assertEqual(targets, [Foo]) map(ws.add,targets) self.assertRaises(VersionConflict, ws.resolve, parse_requirements("Foo==0.9"), ad) ws = WorkingSet([]) # reset # Request an extra that causes an unresolved dependency for "Baz" self.assertRaises( DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad ) Baz = Distribution.from_filename( "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo")) ) ad.add(Baz) # Activation list now includes resolved dependency self.assertEqual( list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz] ) # Requests for conflicting versions produce VersionConflict self.assertRaises( VersionConflict, ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad ) def testDistroDependsOptions(self): d = self.distRequires(""" Twisted>=1.5 [docgen] ZConfig>=2.0 docutils>=0.3 [fastcgi] fcgiapp>=0.1""") self.checkRequires(d,"Twisted>=1.5") self.checkRequires( d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] ) self.checkRequires( d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] ) self.checkRequires( d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), ["docgen","fastcgi"] ) self.checkRequires( d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), ["fastcgi", "docgen"] ) self.assertRaises(UnknownExtra, d.requires, ["foo"]) def testSetuptoolsDistributeCombination(self): # Ensure that installing a 0.7-series setuptools fails. PJE says that # it will not co-exist. 
ws = WorkingSet([]) d = Distribution( "/some/path", project_name="setuptools", version="0.7a1") self.assertRaises(ValueError, ws.add, d) # A 0.6-series is no problem d2 = Distribution( "/some/path", project_name="setuptools", version="0.6c9") ws.add(d2) # a unexisting version needs to work ws = WorkingSet([]) d3 = Distribution( "/some/path", project_name="setuptools") ws.add(d3) class EntryPointTests(TestCase): def assertfields(self, ep): self.assertEqual(ep.name,"foo") self.assertEqual(ep.module_name,"setuptools.tests.test_resources") self.assertEqual(ep.attrs, ("EntryPointTests",)) self.assertEqual(ep.extras, ("x",)) self.assertTrue(ep.load() is EntryPointTests) self.assertEqual( str(ep), "foo = setuptools.tests.test_resources:EntryPointTests [x]" ) def setUp(self): self.dist = Distribution.from_filename( "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]'))) def testBasics(self): ep = EntryPoint( "foo", "setuptools.tests.test_resources", ["EntryPointTests"], ["x"], self.dist ) self.assertfields(ep) def testParse(self): s = "foo = setuptools.tests.test_resources:EntryPointTests [x]" ep = EntryPoint.parse(s, self.dist) self.assertfields(ep) ep = EntryPoint.parse("bar baz= spammity[PING]") self.assertEqual(ep.name,"bar baz") self.assertEqual(ep.module_name,"spammity") self.assertEqual(ep.attrs, ()) self.assertEqual(ep.extras, ("ping",)) ep = EntryPoint.parse(" fizzly = wocka:foo") self.assertEqual(ep.name,"fizzly") self.assertEqual(ep.module_name,"wocka") self.assertEqual(ep.attrs, ("foo",)) self.assertEqual(ep.extras, ()) def testRejects(self): for ep in [ "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2", ]: try: EntryPoint.parse(ep) except ValueError: pass else: raise AssertionError("Should've been bad", ep) def checkSubMap(self, m): self.assertEqual(len(m), len(self.submap_expect)) for key, ep in self.submap_expect.iteritems(): self.assertEqual(repr(m.get(key)), repr(ep)) submap_expect = dict( feature1=EntryPoint('feature1', 'somemodule', ['somefunction']), feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']), feature3=EntryPoint('feature3', 'this.module', extras=['something']) ) submap_str = """ # define features for blah blah feature1 = somemodule:somefunction feature2 = another.module:SomeClass [extra1,extra2] feature3 = this.module [something] """ def testParseList(self): self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str)) self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar") self.assertRaises(ValueError, EntryPoint.parse_group, "x", ["foo=baz", "foo=bar"]) def testParseMap(self): m = EntryPoint.parse_map({'xyz':self.submap_str}) self.checkSubMap(m['xyz']) self.assertEqual(m.keys(),['xyz']) m = EntryPoint.parse_map("[xyz]\n"+self.submap_str) self.checkSubMap(m['xyz']) self.assertEqual(m.keys(),['xyz']) self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"]) self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str) class RequirementsTests(TestCase): def testBasics(self): r = Requirement.parse("Twisted>=1.2") self.assertEqual(str(r),"Twisted>=1.2") self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')") self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ())) self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ())) self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ())) self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ())) self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ())) self.assertNotEqual(r, 
Requirement.parse("Twisted[extras]>=1.2")) def testOrdering(self): r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ()) r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ()) self.assertEqual(r1,r2) self.assertEqual(str(r1),str(r2)) self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2") def testBasicContains(self): r = Requirement("Twisted", [('>=','1.2')], ()) foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg") twist11 = Distribution.from_filename("Twisted-1.1.egg") twist12 = Distribution.from_filename("Twisted-1.2.egg") self.assertTrue(parse_version('1.2') in r) self.assertTrue(parse_version('1.1') not in r) self.assertTrue('1.2' in r) self.assertTrue('1.1' not in r) self.assertTrue(foo_dist not in r) self.assertTrue(twist11 not in r) self.assertTrue(twist12 in r) def testAdvancedContains(self): r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5") for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'): self.assertTrue(v in r, (v,r)) for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'): self.assertTrue(v not in r, (v,r)) def testOptionsAndHashing(self): r1 = Requirement.parse("Twisted[foo,bar]>=1.2") r2 = Requirement.parse("Twisted[bar,FOO]>=1.2") r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0") self.assertEqual(r1,r2) self.assertEqual(r1,r3) self.assertEqual(r1.extras, ("foo","bar")) self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized self.assertEqual(hash(r1), hash(r2)) self.assertEqual( hash(r1), hash(("twisted", ((">=",parse_version("1.2")),), frozenset(["foo","bar"]))) ) def testVersionEquality(self): r1 = Requirement.parse("foo==0.3a2") r2 = Requirement.parse("foo!=0.3a4") d = Distribution.from_filename self.assertTrue(d("foo-0.3a4.egg") not in r1) self.assertTrue(d("foo-0.3a1.egg") not in r1) self.assertTrue(d("foo-0.3a4.egg") not in r2) self.assertTrue(d("foo-0.3a2.egg") in r1) self.assertTrue(d("foo-0.3a2.egg") in r2) self.assertTrue(d("foo-0.3a3.egg") in r2) self.assertTrue(d("foo-0.3a5.egg") in r2) def testDistributeSetuptoolsOverride(self): # Plain setuptools or distribute mean we return distribute. self.assertEqual( Requirement.parse('setuptools').project_name, 'distribute') self.assertEqual( Requirement.parse('distribute').project_name, 'distribute') # setuptools lower than 0.7 means distribute self.assertEqual( Requirement.parse('setuptools==0.6c9').project_name, 'distribute') self.assertEqual( Requirement.parse('setuptools==0.6c10').project_name, 'distribute') self.assertEqual( Requirement.parse('setuptools>=0.6').project_name, 'distribute') self.assertEqual( Requirement.parse('setuptools < 0.7').project_name, 'distribute') # setuptools 0.7 and higher means setuptools. 
self.assertEqual( Requirement.parse('setuptools == 0.7').project_name, 'setuptools') self.assertEqual( Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools') self.assertEqual( Requirement.parse('setuptools >= 0.7').project_name, 'setuptools') class ParseTests(TestCase): def testEmptyParse(self): self.assertEqual(list(parse_requirements('')), []) def testYielding(self): for inp,out in [ ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']), (['x\n\n','y'], ['x','y']), ]: self.assertEqual(list(pkg_resources.yield_lines(inp)),out) def testSplitting(self): self.assertEqual( list( pkg_resources.split_sections(""" x [Y] z a [b ] # foo c [ d] [q] v """ ) ), [(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])] ) self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo")) def testSafeName(self): self.assertEqual(safe_name("adns-python"), "adns-python") self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker") self.assertNotEqual(safe_name("peak.web"), "peak-web") def testSafeVersion(self): self.assertEqual(safe_version("1.2-1"), "1.2-1") self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha") self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521") self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker") self.assertEqual(safe_version("peak.web"), "peak.web") def testSimpleRequirements(self): self.assertEqual( list(parse_requirements('Twis-Ted>=1.2-1')), [Requirement('Twis-Ted',[('>=','1.2-1')], ())] ) self.assertEqual( list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')), [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())] ) self.assertEqual( Requirement.parse("FooBar==1.99a3"), Requirement("FooBar", [('==','1.99a3')], ()) ) self.assertRaises(ValueError,Requirement.parse,">=2.3") self.assertRaises(ValueError,Requirement.parse,"x\\") self.assertRaises(ValueError,Requirement.parse,"x==2 q") self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2") self.assertRaises(ValueError,Requirement.parse,"#") def testVersionEquality(self): def c(s1,s2): p1, p2 = parse_version(s1),parse_version(s2) self.assertEqual(p1,p2, (s1,s2,p1,p2)) c('0.4', '0.4.0') c('0.4.0.0', '0.4.0') c('0.4.0-0', '0.4-0') c('0pl1', '0.0pl1') c('0pre1', '0.0c1') c('0.0.0preview1', '0c1') c('0.0c1', '0rc1') c('1.2a1', '1.2.a.1'); c('1.2...a', '1.2a') def testVersionOrdering(self): def c(s1,s2): p1, p2 = parse_version(s1),parse_version(s2) self.assertTrue(p1<p2, (s1,s2,p1,p2)) c('2.1','2.1.1') c('2.1.0','2.10') c('2a1','2b0') c('2b1','2c0') c('2a1','2.1') c('2.3a1', '2.3') c('2.1-1', '2.1-2') c('2.1-1', '2.1.1') c('2.1', '2.1.1-1') c('2.1', '2.1pl4') c('2.1a0-20040501', '2.1') c('1.1', '02.1') c('A56','B27') c('3.2', '3.2.pl0') c('3.2-1', '3.2pl1') c('3.2pl1', '3.2pl1-1') c('0.4', '4.0') c('0.0.4', '0.4.0') c('0pl1', '0.4pl1') c('2.1dev','2.1a0') c('2.1.0rc1','2.1.0') c('2.1.0','2.1.0-rc0') c('2.1.0','2.1.0-a') c('2.1.0','2.1.0-alpha') c('2.1.0','2.1.0-foo') c('1.0','1.0-1') c('1.0-1','1.0.1') c('1.0a','1.0b') c('1.0dev','1.0rc1') c('1.0pre','1.0') c('1.0pre','1.0') c('1.0a','1.0-a') c('1.0rc1','1.0-rc1') torture =""" 0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1 0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2 0.77.2-1 0.77.1-1 0.77.0-1 """.split() for p,v1 in enumerate(torture): for v2 in torture[p+1:]: c(v2,v1) class ScriptHeaderTests(TestCase): non_ascii_exe = '/Users/José/bin/python' def test_get_script_header(self): if not 
sys.platform.startswith('java') or not is_sh(sys.executable):
            # This test is for non-Jython platforms
            self.assertEqual(get_script_header('#!/usr/local/bin/python'),
                             '#!%s\n' % os.path.normpath(sys.executable))
            self.assertEqual(get_script_header('#!/usr/bin/python -x'),
                             '#!%s -x\n' % os.path.normpath(sys.executable))
            self.assertEqual(get_script_header('#!/usr/bin/python',
                                               executable=self.non_ascii_exe),
                             '#!%s -x\n' % self.non_ascii_exe)

    def test_get_script_header_jython_workaround(self):
        # This test doesn't work with Python 3 in some locales
        if (sys.version_info >= (3,) and
                os.environ.get("LC_CTYPE") in (None, "C", "POSIX")):
            return

        class java:
            class lang:
                class System:
                    @staticmethod
                    def getProperty(property):
                        return ""
        sys.modules["java"] = java

        platform = sys.platform
        sys.platform = 'java1.5.0_13'
        stdout = sys.stdout
        try:
            # A mock sys.executable that uses a shebang line (this file)
            exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
            self.assertEqual(
                get_script_header('#!/usr/local/bin/python', executable=exe),
                '#!/usr/bin/env %s\n' % exe)

            # Ensure we generate what is basically a broken shebang line
            # when there's options, with a warning emitted
            sys.stdout = sys.stderr = StringIO.StringIO()
            self.assertEqual(get_script_header('#!/usr/bin/python -x',
                                               executable=exe),
                             '#!%s -x\n' % exe)
            self.assertTrue('Unable to adapt shebang line' in
                            sys.stdout.getvalue())
            sys.stdout = sys.stderr = StringIO.StringIO()
            self.assertEqual(get_script_header('#!/usr/bin/python',
                                               executable=self.non_ascii_exe),
                             '#!%s -x\n' % self.non_ascii_exe)
            self.assertTrue('Unable to adapt shebang line' in
                            sys.stdout.getvalue())
        finally:
            del sys.modules["java"]
            sys.platform = platform
            sys.stdout = stdout


class NamespaceTests(TestCase):

    def setUp(self):
        self._ns_pkgs = pkg_resources._namespace_packages.copy()
        self._tmpdir = tempfile.mkdtemp(prefix="tests-distribute-")
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
        self._prev_sys_path = sys.path[:]
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))

    def tearDown(self):
        shutil.rmtree(self._tmpdir)
        pkg_resources._namespace_packages = self._ns_pkgs.copy()
        sys.path = self._prev_sys_path[:]

    def _assertIn(self, member, container):
        """ assertIn and assertTrue do not exist in Python 2.3"""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(standardMsg)

    def test_two_levels_deep(self):
        """
        Test nested namespace packages
        Create namespace packages in the following tree:
            site-packages-1/pkg1/pkg2
            site-packages-2/pkg1/pkg2
        Check both are in the _namespace_packages dict and that their
        __path__ is correct
        """
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
        ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
        for site in ["site-pkgs", "site-pkgs2"]:
            pkg1_init = open(os.path.join(self._tmpdir, site, "pkg1",
                                          "__init__.py"), "w")
            pkg1_init.write(ns_str)
            pkg1_init.close()
            pkg2_init = open(os.path.join(self._tmpdir, site, "pkg1", "pkg2",
                                          "__init__.py"), "w")
            pkg2_init.write(ns_str)
            pkg2_init.close()
        import pkg1
        self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
        try:
            import pkg1.pkg2
        except ImportError, e:
            self.fail("Distribute tried to import the parent namespace package")
        # check the _namespace_packages dict
        self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"]) # check the __path__ attribute contains both paths self.assertEqual(pkg1.pkg2.__path__, [ os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"), os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2") ])
mit
-722,911,614,495,337,900
36.937785
94
0.553258
false
absperf/wagtailapproval
wagtailapproval/wagtail_hooks.py
1
1367
from __future__ import (absolute_import, division, print_function, unicode_literals) from django.conf.urls import include, url from wagtail.wagtailcore import hooks from wagtail.wagtailcore.models import Page from . import urls from .menu import ApprovalAdminMenuItem, ApprovalMenuItem from .models import ApprovalPipeline, ApprovalStep @hooks.register('register_admin_urls') def register_admin_urls(): return [url(r'^approval/', include(urls, namespace='wagtailapproval'))] @hooks.register('register_admin_menu_item') def register_approval_menu_item(): return ApprovalMenuItem() @hooks.register('register_admin_menu_item') def register_approval_admin_menu_item(): return ApprovalAdminMenuItem() @hooks.register('after_create_page') def take_ownership_if_necessary(request, page): '''Checks the request user and takes ownership of the page if it is created by an owned user''' user = request.user # Do not take control of our pipeline or step types if not isinstance(page.specific, (ApprovalPipeline, ApprovalStep)): for step in ApprovalStep.objects.filter(group__in=user.groups.all()): # We do this to enforce that ApprovalTickets only grab the base # Page object, not the subclass step.take_ownership(Page.objects.get(pk=page.pk)) step.save()
bsd-2-clause
-6,330,125,383,709,285,000
34.051282
79
0.719093
false
marble/Toolchain_RenderDocumentation
12-Get-ready-for-the-project/run_05-Acquire-lock.py
1
4882
#!/usr/bin/env python from __future__ import print_function from __future__ import absolute_import import tct import time import os import sys params = tct.readjson(sys.argv[1]) facts = tct.readjson(params['factsfile']) milestones = tct.readjson(params['milestonesfile']) reason = '' resultfile = params['resultfile'] result = tct.readjson(resultfile) toolname = params['toolname'] toolname_pure = params['toolname_pure'] workdir = params['workdir'] loglist = result['loglist'] = result.get('loglist', []) exitcode = CONTINUE = 0 # ================================================== # Make a copy of milestones for later inspection? # -------------------------------------------------- if 0 or milestones.get('debug_always_make_milestones_snapshot'): tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1]) # ================================================== # Helper functions # -------------------------------------------------- def lookup(D, *keys, **kwdargs): result = tct.deepget(D, *keys, **kwdargs) loglist.append((keys, result)) return result # ================================================== # define # -------------------------------------------------- talk = milestones.get('talk', 1) lockfile = None lockfile_time = None lockfile_planned = None lockfile_planned_time = None lockfile_planned_age = None lockfile_removed = None lockfile_create_logstamp = None unixtime = None # ================================================== # Check params # -------------------------------------------------- if exitcode == CONTINUE: loglist.append('CHECK PARAMS') lockfile_name = lookup(milestones, 'lockfile_name') toolchain_temp_home = lookup(params, 'toolchain_temp_home') if not (lockfile_name and toolchain_temp_home): exitcode = 22 reason = 'Bad PARAMS or nothing to do' if exitcode == CONTINUE: loglist.append('PARAMS are ok') else: loglist.append('PROBLEM with params') # ================================================== # work # -------------------------------------------------- if exitcode == CONTINUE: lockfile_ttl_seconds = milestones.get('lockfile_ttl_seconds', 3600) lockfile_planned = os.path.join(toolchain_temp_home, lockfile_name) if os.path.exists(lockfile_planned): loglist.append('lockfile_planned exists') lockfile_planned_time = int(os.path.getmtime(lockfile_planned)) loglist.append(('lockfile_planned_time', lockfile_planned_time)) unixtime = int(time.time()) loglist.append(('unixtime', unixtime)) lockfile_planned_age = unixtime - lockfile_planned_time loglist.append(('lockfile_planned_age', lockfile_planned_age)) if talk: print('is locked since %s seconds, will wait until %s' % (lockfile_planned_age, lockfile_ttl_seconds)) # seconds if lockfile_planned_age >= lockfile_ttl_seconds: os.remove(lockfile_planned) if os.path.exists(lockfile_planned): exitcode = 22 reason = 'lockfile_planned already exists' else: lockfile_removed = lockfile_planned if talk: print('unlock because of age') if exitcode == CONTINUE: if os.path.exists(lockfile_planned): loglist.append('lockfile_planned still exists') exitcode = 22 reason = 'lockfile_planned exists but shouldn\'t' if exitcode == CONTINUE: tct.writejson(facts, lockfile_planned) lockfile_create_logstamp = tct.logstamp_finegrained() lockfile = lockfile_planned lockfile_time = int(os.path.getmtime(lockfile)) loglist.append(('lockfile_time', lockfile_time)) # ================================================== # Set MILESTONE # -------------------------------------------------- if lockfile: result['MILESTONES'].append({'lockfile': lockfile}) if 
lockfile_time:
    result['MILESTONES'].append({'lockfile_time': lockfile_time})

if lockfile_planned:
    result['MILESTONES'].append({'lockfile_planned': lockfile_planned})

if lockfile_planned_age:
    result['MILESTONES'].append({'lockfile_planned_age': lockfile_planned_age})

if lockfile_planned_time:
    result['MILESTONES'].append({'lockfile_planned_time': lockfile_planned_time})

if lockfile_removed:
    result['MILESTONES'].append({'lockfile_removed': lockfile_removed})

if lockfile_create_logstamp:
    result['MILESTONES'].append({'lockfile_create_logstamp': lockfile_create_logstamp})

# ==================================================
# save result
# --------------------------------------------------

tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)

# ==================================================
# Return with proper exitcode
# --------------------------------------------------

sys.exit(exitcode)
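
# The work section above implements, in essence, a TTL-guarded lock file:
# honour an existing lock only while it is younger than lockfile_ttl_seconds,
# break it otherwise, then write the new lock. A stand-alone sketch of that
# pattern (function and argument names are illustrative; the real script
# additionally stores the facts JSON in the lock file via tct.writejson):
#
#   import os, time
#
#   def acquire(lockfile, ttl=3600):
#       if os.path.exists(lockfile):
#           if time.time() - os.path.getmtime(lockfile) < ttl:
#               return False              # still validly locked by someone else
#           os.remove(lockfile)           # stale -> break the lock
#       open(lockfile, 'w').close()       # take the lock
#       return True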
mit
-407,474,331,921,038,700
30.294872
114
0.569439
false
gooddata/openstack-nova
nova/tests/unit/api/openstack/compute/test_networks.py
2
32459
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
import datetime
import math

import iso8601
import mock
import netaddr
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
import webob

from nova.api.openstack.compute import networks as networks_v21
from nova.api.openstack.compute import networks_associate \
    as networks_associate_v21
import nova.context
from nova import exception
from nova.network import manager
from nova.network.neutronv2 import api as neutron
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
import nova.utils

CONF = cfg.CONF

FAKE_NETWORK_PROJECT_ID = '6133f8b603924f45bc0c9e21f6df12fa'

UTC = iso8601.UTC
FAKE_NETWORKS = [
    {
        'bridge': 'br100', 'vpn_public_port': 1000,
        'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0',
        'updated_at': datetime.datetime(2011, 8, 16, 9, 26, 13, 48257,
                                        tzinfo=UTC),
        'id': 1, 'uuid': uuids.network_1,
        'cidr_v6': None, 'deleted_at': None,
        'gateway': '10.0.0.1', 'label': 'mynet_0',
        'project_id': FAKE_NETWORK_PROJECT_ID, 'rxtx_base': None,
        'vpn_private_address': '10.0.0.2', 'deleted': False,
        'vlan': 100, 'broadcast': '10.0.0.7',
        'netmask': '255.255.255.248', 'injected': False,
        'cidr': '10.0.0.0/29',
        'vpn_public_address': '127.0.0.1', 'multi_host': False,
        'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
        'gateway_v6': None, 'netmask_v6': None, 'priority': None,
        'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525,
                                        tzinfo=UTC),
        'mtu': None, 'dhcp_server': '10.0.0.1',
        'enable_dhcp': True, 'share_address': False,
    },
    {
        'bridge': 'br101', 'vpn_public_port': 1001,
        'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0',
        'updated_at': None,
        'id': 2, 'cidr_v6': None,
        'uuid': uuids.network_2, 'deleted_at': None,
        'gateway': '10.0.0.9', 'label': 'mynet_1',
        'project_id': None,
        'vpn_private_address': '10.0.0.10', 'deleted': False,
        'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None,
        'netmask': '255.255.255.248', 'injected': False,
        'cidr': '10.0.0.10/29',
        'vpn_public_address': None, 'multi_host': False,
        'dns1': None, 'dns2': None, 'host': None,
        'gateway_v6': None, 'netmask_v6': None, 'priority': None,
        'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495,
                                        tzinfo=UTC),
        'mtu': None, 'dhcp_server': '10.0.0.9',
        'enable_dhcp': True, 'share_address': False,
    },
]

FAKE_USER_NETWORKS = [
    {
        'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248',
        'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None,
        'dns2': None, 'cidr_v6': None, 'gateway_v6': None,
        'label': 'mynet_0', 'netmask_v6': None, 'uuid': uuids.network_1,
    },
    {
        'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248',
        'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None,
        'dns2': None, 'cidr_v6': None, 'gateway_v6': None,
        'label': 'mynet_1', 'netmask_v6': None, 'uuid': uuids.network_2,
    },
]

NEW_NETWORK = {
    "network": {
        "bridge_interface": "eth0",
        "cidr": "10.20.105.0/24",
        "label": "new net 111",
        "vlan_start": 111,
        "multi_host": False,
        'dhcp_server': '10.0.0.1',
        'enable_dhcp': True,
        'share_address': False,
    }
}


class FakeNetworkAPI(object):

    _sentinel = object()

    def __init__(self):
        self.networks = copy.deepcopy(FAKE_NETWORKS)

    def delete(self, context, network_id):
        if network_id == 'always_delete':
            return True
        if network_id == -1:
            raise exception.NetworkInUse(network_id=network_id)
        for i, network in enumerate(self.networks):
            if network['id'] == network_id:
                del self.networks[0]
                return True
        raise exception.NetworkNotFoundForUUID(uuid=network_id)

    def disassociate(self, context, network_uuid):
        for network in self.networks:
            if network.get('uuid') == network_uuid:
                network['project_id'] = None
                return True
        raise exception.NetworkNotFound(network_id=network_uuid)

    def associate(self, context, network_uuid, host=_sentinel,
                  project=_sentinel):
        for network in self.networks:
            if network.get('uuid') == network_uuid:
                if host is not FakeNetworkAPI._sentinel:
                    network['host'] = host
                if project is not FakeNetworkAPI._sentinel:
                    network['project_id'] = project
                return True
        raise exception.NetworkNotFound(network_id=network_uuid)

    def add_network_to_project(self, context, project_id,
                               network_uuid=None):
        if network_uuid:
            for network in self.networks:
                if network.get('project_id', None) is None:
                    network['project_id'] = project_id
                    return
            return
        for network in self.networks:
            if network.get('uuid') == network_uuid:
                network['project_id'] = project_id
                return

    def get_all(self, context):
        return self._fake_db_network_get_all(context, project_only=True)

    def _fake_db_network_get_all(self, context, project_only="allow_none"):
        project_id = context.project_id
        nets = self.networks
        if nova.context.is_user_context(context) and project_only:
            if project_only == 'allow_none':
                nets = [n for n in self.networks
                        if (n['project_id'] == project_id or
                            n['project_id'] is None)]
            else:
                nets = [n for n in self.networks
                        if n['project_id'] == project_id]
        objs = [objects.Network._from_db_object(context,
                                                objects.Network(),
                                                net)
                for net in nets]
        return objects.NetworkList(objects=objs)

    def get(self, context, network_id):
        for network in self.networks:
            if network.get('uuid') == network_id:
                if 'injected' in network and network['injected'] is None:
                    # NOTE: This is a workaround for passing unit tests.
                    # When using nova-network, 'injected' value should be
                    # boolean because of the definition of objects.Network().
                    # However, 'injected' value can be None if neutron.
                    # So here changes the value to False just for passing
                    # following _from_db_object().
                    network['injected'] = False
                return objects.Network._from_db_object(context,
                                                       objects.Network(),
                                                       network)
        raise exception.NetworkNotFound(network_id=network_id)

    def create(self, context, **kwargs):
        subnet_bits = int(math.ceil(math.log(kwargs.get(
            'network_size', CONF.network_size), 2)))
        fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
        prefixlen_v4 = 32 - subnet_bits
        subnets_v4 = list(fixed_net_v4.subnet(
            prefixlen_v4,
            count=kwargs.get('num_networks', CONF.num_networks)))
        new_networks = []
        new_id = max((net['id'] for net in self.networks))
        for index, subnet_v4 in enumerate(subnets_v4):
            new_id += 1
            net = {'id': new_id, 'uuid': uuids.fake}
            net['cidr'] = str(subnet_v4)
            net['netmask'] = str(subnet_v4.netmask)
            net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1])
            net['broadcast'] = str(subnet_v4.broadcast)
            net['dhcp_start'] = str(subnet_v4[2])
            for key in FAKE_NETWORKS[0]:
                net.setdefault(key, kwargs.get(key))
            new_networks.append(net)
        self.networks += new_networks
        return new_networks


# NOTE(vish): tests that network create Exceptions actually return
#             the proper error responses
class NetworkCreateExceptionsTestV21(test.TestCase):
    validation_error = exception.ValidationError

    class PassthroughAPI(object):
        def __init__(self):
            self.network_manager = manager.FlatDHCPManager()

        def create(self, *args, **kwargs):
            if kwargs['label'] == 'fail_NetworkNotCreated':
                raise exception.NetworkNotCreated(req='fake_fail')
            return self.network_manager.create_networks(*args, **kwargs)

    def setUp(self):
        super(NetworkCreateExceptionsTestV21, self).setUp()
        self._setup()
        fakes.stub_out_networking(self)
        self.new_network = copy.deepcopy(NEW_NETWORK)

    def _setup(self):
        self.req = fakes.HTTPRequest.blank('')
        self.controller = networks_v21.NetworkController(self.PassthroughAPI())

    def test_network_create_bad_vlan(self):
        self.new_network['network']['vlan_start'] = 'foo'
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_no_cidr(self):
        del self.new_network['network']['cidr']
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_no_label(self):
        del self.new_network['network']['label']
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_label_too_long(self):
        self.new_network['network']['label'] = "x" * 256
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_invalid_fixed_cidr(self):
        self.new_network['network']['fixed_cidr'] = 'foo'
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_invalid_start(self):
        self.new_network['network']['allowed_start'] = 'foo'
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_bad_cidr(self):
        self.new_network['network']['cidr'] = '128.0.0.0/900'
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_handle_network_not_created(self):
        self.new_network['network']['label'] = 'fail_NetworkNotCreated'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req,
                          body=self.new_network)

    @mock.patch.object(objects.NetworkList, 'get_all')
    def test_network_create_cidr_conflict(self, mock_get_all):
        def fake_get_all(context):
            ret = objects.NetworkList(context=context, objects=[])
            net = objects.Network(cidr='10.0.0.0/23')
            ret.objects.append(net)
            return ret
        mock_get_all.side_effect = fake_get_all

        self.new_network['network']['cidr'] = '10.0.0.0/24'
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.create, self.req,
                          body=self.new_network)

    def test_network_create_vlan_conflict(self):

        @staticmethod
        def get_all(context):
            ret = objects.NetworkList(context=context, objects=[])
            net = objects.Network(cidr='10.0.0.0/24', vlan=100)
            ret.objects.append(net)
            return ret

        def fake_create(context):
            raise exception.DuplicateVlan(vlan=100)

        self.stub_out('nova.objects.NetworkList.get_all', get_all)
        self.stub_out('nova.objects.Network.create', fake_create)
        self.new_network['network']['cidr'] = '20.0.0.0/24'
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.create, self.req,
                          body=self.new_network)


class NetworksTestV21(test.NoDBTestCase):
    validation_error = exception.ValidationError

    def setUp(self):
        super(NetworksTestV21, self).setUp()
        self.fake_network_api = FakeNetworkAPI()
        self._setup()
        fakes.stub_out_networking(self)
        self.new_network = copy.deepcopy(NEW_NETWORK)
        self.non_admin_req = fakes.HTTPRequest.blank(
            '', project_id=fakes.FAKE_PROJECT_ID)
        self.admin_req = fakes.HTTPRequest.blank(
            '', project_id=fakes.FAKE_PROJECT_ID, use_admin_context=True)

    def _setup(self):
        self.controller = networks_v21.NetworkController(
            self.fake_network_api)
        self.neutron_ctrl = networks_v21.NetworkController(
            neutron.API())
        self.req = fakes.HTTPRequest.blank(
            '', project_id=fakes.FAKE_PROJECT_ID)

    def _check_status(self, res, method, code):
        self.assertEqual(method.wsgi_code, code)

    @staticmethod
    def network_uuid_to_id(network):
        network['id'] = network['uuid']
        del network['uuid']

    def test_network_list_all_as_user(self):
        self.maxDiff = None
        res_dict = self.controller.index(self.non_admin_req)
        self.assertEqual(res_dict, {'networks': []})

        project_id = self.req.environ["nova.context"].project_id
        cxt = self.req.environ["nova.context"]
        uuid = FAKE_NETWORKS[0]['uuid']
        self.fake_network_api.associate(context=cxt,
                                        network_uuid=uuid,
                                        project=project_id)
        res_dict = self.controller.index(self.non_admin_req)
        expected = [copy.deepcopy(FAKE_USER_NETWORKS[0])]
        for network in expected:
            self.network_uuid_to_id(network)
        self.assertEqual({'networks': expected}, res_dict)

    def test_network_list_all_as_admin(self):
        res_dict = self.controller.index(self.admin_req)
        expected = copy.deepcopy(FAKE_NETWORKS)
        for network in expected:
            self.network_uuid_to_id(network)
        self.assertEqual({'networks': expected}, res_dict)

    def test_network_disassociate(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        disassociate = self.controller._disassociate_host_and_project
        res = disassociate(self.req, uuid, {'disassociate': None})
        self._check_status(res, disassociate, 202)
        self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
        self.assertIsNone(self.fake_network_api.networks[0]['host'])

    def test_network_disassociate_not_found(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._disassociate_host_and_project,
                          self.req, 100, {'disassociate': None})

    def test_network_get_as_user(self):
        uuid = FAKE_USER_NETWORKS[0]['uuid']
        res_dict = self.controller.show(self.non_admin_req, uuid)
        expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])}
        self.network_uuid_to_id(expected['network'])
        self.assertEqual(expected, res_dict)

    def test_network_get_as_admin(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        res_dict = self.controller.show(self.admin_req, uuid)
        expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])}
        self.network_uuid_to_id(expected['network'])
        self.assertEqual(expected, res_dict)

    def test_network_get_not_found(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, self.req, 100)

    def test_network_delete(self):
        delete_method = self.controller.delete
        res = delete_method(self.req, 1)
        self._check_status(res, delete_method, 202)

    def test_network_delete_not_found(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, self.req, 100)

    def test_network_delete_in_use(self):
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.delete, self.req, -1)

    def test_network_add(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        add = self.controller.add
        res = add(self.req, body={'id': uuid})
        self._check_status(res, add, 202)
        res_dict = self.controller.show(self.admin_req, uuid)
        self.assertEqual(res_dict['network']['project_id'],
                         fakes.FAKE_PROJECT_ID)

    @mock.patch('nova.tests.unit.api.openstack.compute.test_networks.'
                'FakeNetworkAPI.add_network_to_project',
                side_effect=exception.NoMoreNetworks)
    def test_network_add_no_more_networks_fail(self, mock_add):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add,
                          self.req, body={'id': uuid})

    @mock.patch('nova.tests.unit.api.openstack.compute.test_networks.'
                'FakeNetworkAPI.add_network_to_project',
                side_effect=exception.
                NetworkNotFoundForUUID(uuid=fakes.FAKE_PROJECT_ID))
    def test_network_add_network_not_found_networks_fail(self, mock_add):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add,
                          self.req, body={'id': uuid})

    def test_network_add_network_without_body(self):
        self.assertRaises(self.validation_error, self.controller.add,
                          self.req, body=None)

    def test_network_add_network_with_invalid_id(self):
        self.assertRaises(exception.ValidationError, self.controller.add,
                          self.req, body={'id': 123})

    def test_network_add_network_with_extra_arg(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(exception.ValidationError, self.controller.add,
                          self.req, body={'id': uuid, 'extra_arg': 123})

    def test_network_add_network_with_none_id(self):
        add = self.controller.add
        res = add(self.req, body={'id': None})
        self._check_status(res, add, 202)

    def test_network_create(self):
        res_dict = self.controller.create(self.req, body=self.new_network)
        self.assertIn('network', res_dict)
        uuid = res_dict['network']['id']
        res_dict = self.controller.show(self.req, uuid)
        self.assertTrue(res_dict['network']['label'].
                        startswith(NEW_NETWORK['network']['label']))

    def test_network_create_large(self):
        self.new_network['network']['cidr'] = '128.0.0.0/4'
        res_dict = self.controller.create(self.req, body=self.new_network)
        self.assertEqual(res_dict['network']['cidr'],
                         self.new_network['network']['cidr'])

    def test_network_neutron_disassociate_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.neutron_ctrl._disassociate_host_and_project,
                          self.req, uuid, {'disassociate': None})


class NetworksAssociateTestV21(test.NoDBTestCase):

    def setUp(self):
        super(NetworksAssociateTestV21, self).setUp()
        self.fake_network_api = FakeNetworkAPI()
        self._setup()
        fakes.stub_out_networking(self)
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)

    def _setup(self):
        self.controller = networks_v21.NetworkController(self.fake_network_api)
        self.associate_controller = networks_associate_v21\
            .NetworkAssociateActionController(self.fake_network_api)
        self.neutron_assoc_ctrl = (
            networks_associate_v21.NetworkAssociateActionController(
                neutron.API()))
        self.req = fakes.HTTPRequest.blank('')

    def _check_status(self, res, method, code):
        self.assertEqual(method.wsgi_code, code)

    def test_network_disassociate_host_only(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        disassociate = self.associate_controller._disassociate_host_only
        res = disassociate(
            self.req, uuid, {'disassociate_host': None})
        self._check_status(res, disassociate, 202)
        self.assertIsNotNone(self.fake_network_api.networks[0]['project_id'])
        self.assertIsNone(self.fake_network_api.networks[0]['host'])

    def test_network_disassociate_project_only(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        disassociate = self.associate_controller._disassociate_project_only
        res = disassociate(self.req, uuid, {'disassociate_project': None})
        self._check_status(res, disassociate, 202)
        self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
        self.assertIsNotNone(self.fake_network_api.networks[0]['host'])

    def test_network_disassociate_project_network_delete(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        disassociate = self.associate_controller._disassociate_project_only
        res = disassociate(
            self.req, uuid, {'disassociate_project': None})
        self._check_status(res, disassociate, 202)
        self.assertIsNone(self.fake_network_api.networks[1]['project_id'])

        delete = self.controller.delete
        res = delete(self.req, 1)

        # NOTE: On v2.1 code, delete method doesn't return anything and
        # the status code is decorated on wsgi_code of the method.
        self.assertIsNone(res)
        self.assertEqual(202, delete.wsgi_code)

    def test_network_associate_project_delete_fail(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.delete, req, -1)

    def test_network_associate_with_host(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        associate = self.associate_controller._associate_host
        res = associate(self.req, uuid, body={'associate_host': "TestHost"})
        self._check_status(res, associate, 202)
        res_dict = self.controller.show(self.admin_req, uuid)
        self.assertEqual(res_dict['network']['host'], 'TestHost')

    def test_network_neutron_associate_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.neutron_assoc_ctrl._associate_host,
                          self.req, uuid, body={'associate_host': "TestHost"})

    def _test_network_neutron_associate_host_validation_failed(self, body):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(exception.ValidationError,
                          self.associate_controller._associate_host,
                          self.req, uuid, body=body)

    def test_network_neutron_associate_host_non_string(self):
        self._test_network_neutron_associate_host_validation_failed(
            {'associate_host': 123})

    def test_network_neutron_associate_host_empty_body(self):
        self._test_network_neutron_associate_host_validation_failed({})

    def test_network_neutron_associate_bad_associate_host_key(self):
        self._test_network_neutron_associate_host_validation_failed(
            {'badassociate_host': "TestHost"})

    def test_network_neutron_associate_host_extra_arg(self):
        self._test_network_neutron_associate_host_validation_failed(
            {'associate_host': "TestHost", 'extra_arg': "extra_arg"})

    def test_network_neutron_disassociate_project_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.neutron_assoc_ctrl._disassociate_project_only,
                          self.req, uuid, {'disassociate_project': None})

    def test_network_neutron_disassociate_host_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.neutron_assoc_ctrl._disassociate_host_only,
                          self.req, uuid, {'disassociate_host': None})


class NetworksEnforcementV21(test.NoDBTestCase):

    def setUp(self):
        super(NetworksEnforcementV21, self).setUp()
        self.controller = networks_v21.NetworkController()
        self.req = fakes.HTTPRequest.blank('')

    def test_show_policy_failed(self):
        rule_name = 'os_compute_api:os-networks:view'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.show, self.req, fakes.FAKE_UUID)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_policy_failed(self):
        rule_name = 'os_compute_api:os-networks:view'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.index, self.req)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_create_policy_failed(self):
        rule_name = 'os_compute_api:os-networks'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.create, self.req, body=NEW_NETWORK)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_delete_policy_failed(self):
        rule_name = 'os_compute_api:os-networks'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.delete, self.req, fakes.FAKE_UUID)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_add_policy_failed(self):
        rule_name = 'os_compute_api:os-networks'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.add, self.req,
            body={'id': fakes.FAKE_UUID})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_disassociate_policy_failed(self):
        rule_name = 'os_compute_api:os-networks'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._disassociate_host_and_project,
            self.req, fakes.FAKE_UUID, body={'network': {}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())


class NetworksAssociateEnforcementV21(test.NoDBTestCase):

    def setUp(self):
        super(NetworksAssociateEnforcementV21, self).setUp()
        self.controller = (networks_associate_v21.
                           NetworkAssociateActionController())
        self.req = fakes.HTTPRequest.blank('')

    def test_disassociate_host_policy_failed(self):
        rule_name = 'os_compute_api:os-networks-associate'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._disassociate_host_only,
            self.req, fakes.FAKE_UUID, body={'disassociate_host': {}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_disassociate_project_only_policy_failed(self):
        rule_name = 'os_compute_api:os-networks-associate'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._disassociate_project_only,
            self.req, fakes.FAKE_UUID, body={'disassociate_project': {}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_disassociate_host_only_policy_failed(self):
        rule_name = 'os_compute_api:os-networks-associate'
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._associate_host,
            self.req, fakes.FAKE_UUID, body={'associate_host': 'fake_host'})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())


class NetworksDeprecationTest(test.NoDBTestCase):

    def setUp(self):
        super(NetworksDeprecationTest, self).setUp()
        self.controller = networks_v21.NetworkController()
        self.req = fakes.HTTPRequest.blank('', version='2.36')

    def test_all_api_return_not_found(self):
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.show, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.delete, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.index, self.req)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller._disassociate_host_and_project,
            self.req, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.add, self.req, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.create, self.req, {})


class NetworksAssociateDeprecationTest(test.NoDBTestCase):

    def setUp(self):
        super(NetworksAssociateDeprecationTest, self).setUp()
        self.controller = networks_associate_v21\
            .NetworkAssociateActionController()
        self.req = fakes.HTTPRequest.blank('', version='2.36')

    def test_all_api_return_not_found(self):
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller._associate_host, self.req, fakes.FAKE_UUID, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller._disassociate_project_only,
            self.req, fakes.FAKE_UUID, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller._disassociate_host_only,
            self.req, fakes.FAKE_UUID, {})
apache-2.0
9,182,814,938,588,251,000
41.319426
79
0.59512
false
wang1352083/pythontool
python-2.7.12-lib/test/test_dictviews.py
6
9607
import copy
import pickle
import unittest
import collections
from test import test_support


class DictSetTest(unittest.TestCase):

    def test_constructors_not_callable(self):
        kt = type({}.viewkeys())
        self.assertRaises(TypeError, kt, {})
        self.assertRaises(TypeError, kt)
        it = type({}.viewitems())
        self.assertRaises(TypeError, it, {})
        self.assertRaises(TypeError, it)
        vt = type({}.viewvalues())
        self.assertRaises(TypeError, vt, {})
        self.assertRaises(TypeError, vt)

    def test_dict_keys(self):
        d = {1: 10, "a": "ABC"}
        keys = d.viewkeys()
        self.assertEqual(len(keys), 2)
        self.assertEqual(set(keys), set([1, "a"]))
        self.assertEqual(keys, set([1, "a"]))
        self.assertNotEqual(keys, set([1, "a", "b"]))
        self.assertNotEqual(keys, set([1, "b"]))
        self.assertNotEqual(keys, set([1]))
        self.assertNotEqual(keys, 42)
        self.assertIn(1, keys)
        self.assertIn("a", keys)
        self.assertNotIn(10, keys)
        self.assertNotIn("Z", keys)
        self.assertEqual(d.viewkeys(), d.viewkeys())
        e = {1: 11, "a": "def"}
        self.assertEqual(d.viewkeys(), e.viewkeys())
        del e["a"]
        self.assertNotEqual(d.viewkeys(), e.viewkeys())

    def test_dict_items(self):
        d = {1: 10, "a": "ABC"}
        items = d.viewitems()
        self.assertEqual(len(items), 2)
        self.assertEqual(set(items), set([(1, 10), ("a", "ABC")]))
        self.assertEqual(items, set([(1, 10), ("a", "ABC")]))
        self.assertNotEqual(items, set([(1, 10), ("a", "ABC"), "junk"]))
        self.assertNotEqual(items, set([(1, 10), ("a", "def")]))
        self.assertNotEqual(items, set([(1, 10)]))
        self.assertNotEqual(items, 42)
        self.assertIn((1, 10), items)
        self.assertIn(("a", "ABC"), items)
        self.assertNotIn((1, 11), items)
        self.assertNotIn(1, items)
        self.assertNotIn((), items)
        self.assertNotIn((1,), items)
        self.assertNotIn((1, 2, 3), items)
        self.assertEqual(d.viewitems(), d.viewitems())
        e = d.copy()
        self.assertEqual(d.viewitems(), e.viewitems())
        e["a"] = "def"
        self.assertNotEqual(d.viewitems(), e.viewitems())

    def test_dict_mixed_keys_items(self):
        d = {(1, 1): 11, (2, 2): 22}
        e = {1: 1, 2: 2}
        self.assertEqual(d.viewkeys(), e.viewitems())
        self.assertNotEqual(d.viewitems(), e.viewkeys())

    def test_dict_values(self):
        d = {1: 10, "a": "ABC"}
        values = d.viewvalues()
        self.assertEqual(set(values), set([10, "ABC"]))
        self.assertEqual(len(values), 2)

    def test_dict_repr(self):
        d = {1: 10, "a": "ABC"}
        self.assertIsInstance(repr(d), str)
        r = repr(d.viewitems())
        self.assertIsInstance(r, str)
        self.assertTrue(r == "dict_items([('a', 'ABC'), (1, 10)])" or
                        r == "dict_items([(1, 10), ('a', 'ABC')])")
        r = repr(d.viewkeys())
        self.assertIsInstance(r, str)
        self.assertTrue(r == "dict_keys(['a', 1])" or
                        r == "dict_keys([1, 'a'])")
        r = repr(d.viewvalues())
        self.assertIsInstance(r, str)
        self.assertTrue(r == "dict_values(['ABC', 10])" or
                        r == "dict_values([10, 'ABC'])")

    def test_keys_set_operations(self):
        d1 = {'a': 1, 'b': 2}
        d2 = {'b': 3, 'c': 2}
        d3 = {'d': 4, 'e': 5}
        self.assertEqual(d1.viewkeys() & d1.viewkeys(), {'a', 'b'})
        self.assertEqual(d1.viewkeys() & d2.viewkeys(), {'b'})
        self.assertEqual(d1.viewkeys() & d3.viewkeys(), set())
        self.assertEqual(d1.viewkeys() & set(d1.viewkeys()), {'a', 'b'})
        self.assertEqual(d1.viewkeys() & set(d2.viewkeys()), {'b'})
        self.assertEqual(d1.viewkeys() & set(d3.viewkeys()), set())
        self.assertEqual(d1.viewkeys() & tuple(d1.viewkeys()), {'a', 'b'})

        self.assertEqual(d1.viewkeys() | d1.viewkeys(), {'a', 'b'})
        self.assertEqual(d1.viewkeys() | d2.viewkeys(), {'a', 'b', 'c'})
        self.assertEqual(d1.viewkeys() | d3.viewkeys(), {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.viewkeys() | set(d1.viewkeys()), {'a', 'b'})
        self.assertEqual(d1.viewkeys() | set(d2.viewkeys()),
                         {'a', 'b', 'c'})
        self.assertEqual(d1.viewkeys() | set(d3.viewkeys()),
                         {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.viewkeys() | (1, 2), {'a', 'b', 1, 2})

        self.assertEqual(d1.viewkeys() ^ d1.viewkeys(), set())
        self.assertEqual(d1.viewkeys() ^ d2.viewkeys(), {'a', 'c'})
        self.assertEqual(d1.viewkeys() ^ d3.viewkeys(),
                         {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.viewkeys() ^ set(d1.viewkeys()), set())
        self.assertEqual(d1.viewkeys() ^ set(d2.viewkeys()), {'a', 'c'})
        self.assertEqual(d1.viewkeys() ^ set(d3.viewkeys()),
                         {'a', 'b', 'd', 'e'})
        self.assertEqual(d1.viewkeys() ^ tuple(d2.keys()), {'a', 'c'})

        self.assertEqual(d1.viewkeys() - d1.viewkeys(), set())
        self.assertEqual(d1.viewkeys() - d2.viewkeys(), {'a'})
        self.assertEqual(d1.viewkeys() - d3.viewkeys(), {'a', 'b'})
        self.assertEqual(d1.viewkeys() - set(d1.viewkeys()), set())
        self.assertEqual(d1.viewkeys() - set(d2.viewkeys()), {'a'})
        self.assertEqual(d1.viewkeys() - set(d3.viewkeys()), {'a', 'b'})
        self.assertEqual(d1.viewkeys() - (0, 1), {'a', 'b'})

    def test_items_set_operations(self):
        d1 = {'a': 1, 'b': 2}
        d2 = {'a': 2, 'b': 2}
        d3 = {'d': 4, 'e': 5}
        self.assertEqual(
            d1.viewitems() & d1.viewitems(), {('a', 1), ('b', 2)})
        self.assertEqual(d1.viewitems() & d2.viewitems(), {('b', 2)})
        self.assertEqual(d1.viewitems() & d3.viewitems(), set())
        self.assertEqual(d1.viewitems() & set(d1.viewitems()),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.viewitems() & set(d2.viewitems()), {('b', 2)})
        self.assertEqual(d1.viewitems() & set(d3.viewitems()), set())

        self.assertEqual(d1.viewitems() | d1.viewitems(),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.viewitems() | d2.viewitems(),
                         {('a', 1), ('a', 2), ('b', 2)})
        self.assertEqual(d1.viewitems() | d3.viewitems(),
                         {('a', 1), ('b', 2), ('d', 4), ('e', 5)})
        self.assertEqual(d1.viewitems() | set(d1.viewitems()),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.viewitems() | set(d2.viewitems()),
                         {('a', 1), ('a', 2), ('b', 2)})
        self.assertEqual(d1.viewitems() | set(d3.viewitems()),
                         {('a', 1), ('b', 2), ('d', 4), ('e', 5)})

        self.assertEqual(d1.viewitems() ^ d1.viewitems(), set())
        self.assertEqual(d1.viewitems() ^ d2.viewitems(),
                         {('a', 1), ('a', 2)})
        self.assertEqual(d1.viewitems() ^ d3.viewitems(),
                         {('a', 1), ('b', 2), ('d', 4), ('e', 5)})

        self.assertEqual(d1.viewitems() - d1.viewitems(), set())
        self.assertEqual(d1.viewitems() - d2.viewitems(), {('a', 1)})
        self.assertEqual(d1.viewitems() - d3.viewitems(),
                         {('a', 1), ('b', 2)})
        self.assertEqual(d1.viewitems() - set(d1.viewitems()), set())
        self.assertEqual(d1.viewitems() - set(d2.viewitems()), {('a', 1)})
        self.assertEqual(d1.viewitems() - set(d3.viewitems()),
                         {('a', 1), ('b', 2)})

    def test_recursive_repr(self):
        d = {}
        d[42] = d.viewvalues()
        self.assertRaises(RuntimeError, repr, d)

    def test_abc_registry(self):
        d = dict(a=1)

        self.assertIsInstance(d.viewkeys(), collections.KeysView)
        self.assertIsInstance(d.viewkeys(), collections.MappingView)
        self.assertIsInstance(d.viewkeys(), collections.Set)
        self.assertIsInstance(d.viewkeys(), collections.Sized)
        self.assertIsInstance(d.viewkeys(), collections.Iterable)
        self.assertIsInstance(d.viewkeys(), collections.Container)

        self.assertIsInstance(d.viewvalues(), collections.ValuesView)
        self.assertIsInstance(d.viewvalues(), collections.MappingView)
        self.assertIsInstance(d.viewvalues(), collections.Sized)

        self.assertIsInstance(d.viewitems(), collections.ItemsView)
        self.assertIsInstance(d.viewitems(), collections.MappingView)
        self.assertIsInstance(d.viewitems(), collections.Set)
        self.assertIsInstance(d.viewitems(), collections.Sized)
        self.assertIsInstance(d.viewitems(), collections.Iterable)
        self.assertIsInstance(d.viewitems(), collections.Container)

    def test_copy(self):
        d = {1: 10, "a": "ABC"}
        self.assertRaises(TypeError, copy.copy, d.viewkeys())
        self.assertRaises(TypeError, copy.copy, d.viewvalues())
        self.assertRaises(TypeError, copy.copy, d.viewitems())

    def test_pickle(self):
        d = {1: 10, "a": "ABC"}
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises((TypeError, pickle.PicklingError),
                              pickle.dumps, d.viewkeys(), proto)
            self.assertRaises((TypeError, pickle.PicklingError),
                              pickle.dumps, d.viewvalues(), proto)
            self.assertRaises((TypeError, pickle.PicklingError),
                              pickle.dumps, d.viewitems(), proto)


def test_main():
    test_support.run_unittest(DictSetTest)


if __name__ == "__main__":
    test_main()
mit
-857,247,219,054,991,100
43.476852
79
0.539294
false
mattstruble/crusty
crusty/util/enum.py
1
8791
#!/usr/bin/env python
# Copyright (c) 2016 Matt Struble. All Rights Reserved.
#
# Use is subject to license terms.
#
# Author: Matt Struble
# Date: Oct. 15 2016

import inspect


class EnumError(AttributeError):
    pass


class _meta_enum(type):

    def __init__(cls, cls_name, cls_bases, cls_dict):
        """ Automatically creates enumeration based upon classes dictionary.

        Iterates through the object's member list and replaces each member
        variable with an enumerated instance of that variable.
        """
        if cls_name == 'Enum':
            return

        for item in cls_dict:
            if type(cls_dict.get(item)) != staticmethod:  # preserve static methods
                if item[0].isalpha():  # Only enumerate 'public' variables
                    eInstance = EnumInstance(cls_name, item, cls_dict.get(item))
                    setattr(cls, item, eInstance)

        cls._initialized = True

    def __setattr__(cls, attr, value):
        """ Raises EnumError if attempting to change an attribute at runtime. """
        if hasattr(cls, '_initialized'):
            raise EnumError("type object '%s' is an enumeration and cannot be modified at runtime." % cls.__name__)
        super(_meta_enum, cls).__setattr__(attr, value)

    def __iter__(self):
        """ Returns a generator for enum values.

        When iterating the enum, only returns class members whose value are
        instances of EnumInstance.
        """
        return (self.__dict__[item] for item in self.__dict__
                if not callable(item) and isinstance(self.__dict__[item], EnumInstance))

    def __repr__(self):
        s = self.__name__
        if self.__bases__:
            s = s + '(' + ', '.join(map(lambda x : x.__name__, self.__bases__)) + ')'
        return s


class Enum(object):
    """ A user-defined type that consists of a set of named constants that are known as enumerators.

    Examples:
        >>> class Key(Enum):
                A = 0
                B = 1
                C = 2
        >>> print Key.A
        Key.A
        >>> Key.A.value
        0
        >>> Key.A == 0
        True
        >>> Key.A == Key
        True
        >>> Key.A in Key
        True
        >>> for key in Key: print key
        Key.A
        Key.B
        Key.C
        >>> Key.C - Key.B
        1
        (also works for: +, *, /, //, %, divmod(), pow(), **, <<, >>, &, ^, |)

        >>> Test = Enum('Test', 'A', 'B', C=4)
        >>> print Test.A
        Test.A
        >>> Test.A.value
        0
        >>> Test.B.value
        1
        >>> Test.C.value
        4
        >>> Test.A in Test
        True
        >>> Test.A == Test
        True
        >>> Test.A == Key.A
        False
        >>> Test.A in Key
        False
        >>> Key.A in Test
        False
    """
    __metaclass__ = _meta_enum

    def __new__(self, name, *args, **kwargs):
        """ Creates automatic enumerated values for inline declarations.

        Takes in an unspecified length list of strings and/or keyword arguments
        to transform into enumerated values. The basic arguments will be
        automatically assigned (int) values ranging from 0..N based upon their
        order and keyword arguments will maintain their passed in values.

        Parameters
        ----------
        name : string
            Name of the enumeration class to be created. Must start with an alpha value.
        *args : string
            List of arguments to turn into enumerated values.
        **kwargs
            List of keyword arguments to turn into enumerated values.

        Returns
        -------
        Enum:
            A generated enumerated type based upon inputted parameters.

        Raises
        ------
        TypeError
            If name or *args are not string
        EnumError
            If *args begin with a non-alpha value.
        """
        if not isinstance(name, basestring):
            raise TypeError("Enum name must be a string.")
        if not all(isinstance(arg, basestring) for arg in args):
            raise TypeError("Enum unnamed arguments must all be strings.")
        if not all(arg[0].isalpha() for arg in args):
            raise EnumError("Enum arguments must start with an alpha character.")

        enums = dict(zip(args, range(len(args))), **kwargs)
        return type(name, (Enum, ), enums)


class EnumInstance(object):
    """ Instance class for each enumerated value within Enum.

    EnumInstance enables enums to maintain their original class value for
    comparisons and reference while coding.

    Attributes:
        value : Value of the enum variable
    """

    def __init__(self, classname, enumname, value):
        self.__name__ = classname
        self.__classname = classname
        self.__enumname = enumname
        self.__value = value
        self.value = value;
        self._initialized = True

    def __setattr__(self, value, attr):
        """ Raises EnumError if attempting to change an attribute at runtime. """
        if hasattr(self, '_initialized'):
            raise EnumError("type object '%s' is an enumeration and cannot be modified at runtime." % self.__classname)
        self.__dict__[value] = attr

    def __repr__(self):
        return "EnumInstance(%r, %r, %r)" % (self.__classname, self.__enumname, self.__value)

    def __str__(self):
        return "%s.%s" % (self.__classname, self.__enumname)

    def __int__(self):
        return int(self.__value)

    def __float__(self):
        return float(self.__value)

    def __long__(self):
        return long(self.__value)

    def __oct__(self):
        return oct(self.__value)

    def __hex__(self):
        return hex(self.__value)

    def __cmp__(self, other):
        """ Handles class, EnumInstance, and value comparisons for EnumInstance.

        If comparing to Class object it checks to see if the object's name
        matches the enum classname.

        If comparing to EnumInstance object it checks to see if the object's
        name matches the enum classname.

        Otherwise it compares the stored value with the value passed in.
        """
        if inspect.isclass(other) and issubclass(other, Enum):
            return cmp(self.__classname, other.__name__)
        elif isinstance(other, EnumInstance):
            if self.__classname == other.__name__:
                return cmp(self.value, other.value)
            else:
                return cmp(self.__classname, other.__name__)

        return cmp(self.__value, other)

    ## Left operand arithmetic operations
    def __add__(self, other):
        return self.__value + other

    def __sub__(self, other):
        return self.__value - other

    def __mul__(self, other):
        return self.__value * other

    def __div__(self, other):
        return self.__value / other

    def __floordiv__(self, other):
        return self.__value // other

    def __mod__(self, other):
        return self.__value % other

    def __divmod__(self, other):
        return divmod(self.__value, other)

    def __pow__(self, other, modulo=None):
        return pow(self.__value, other, modulo)

    def __lshift__(self, other):
        return self.__value << other

    def __rshift__(self, other):
        return self.__value >> other

    def __and__(self, other):
        return self.__value & other

    def __xor__(self, other):
        return self.__value ^ other

    def __or__(self, other):
        return self.__value | other

    ## Right operand arithmetic operations
    def __radd__(self, other):
        return other + self.__value

    def __rsub__(self, other):
        return other - self.__value

    def __rmul__(self, other):
        return other * self.__value

    def __rdiv__(self, other):
        return other / self.__value

    def __rfloordiv__(self, other):
        return other // self.__value

    def __rmod__(self, other):
        return other % self.__value

    def __rdivmod__(self, other):
        return divmod(other, self.__value)

    def __rpow__(self, other, modulo=None):
        return pow(other, self.__value, modulo)

    def __rlshift__(self, other):
        return other << self.__value

    def __rrshift__(self, other):
        return other >> self.__value

    def __rand__(self, other):
        return other & self.__value

    def __rxor__(self, other):
        return other ^ self.__value

    def __ror__(self, other):
        return other | self.__value
mit
-7,099,585,610,388,234,000
28.8
134
0.538847
false
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.1/Lib/plat-irix5/panel.py
6
6727
# Module 'panel'
#
# Support for the Panel library.
# Uses built-in module 'pnl'.
# Applications should use 'panel.function' instead of 'pnl.function';
# most 'pnl' functions are transparently exported by 'panel',
# but dopanel() is overridden and you have to use this version
# if you want to use callbacks.


import pnl


debug = 0


# Test if an object is a list.
#
def is_list(x):
    return type(x) == type([])


# Reverse a list.
#
def reverse(list):
    res = []
    for item in list:
        res.insert(0, item)
    return res


# Get an attribute of a list, which may itself be another list.
# Don't use 'prop' for name.
#
def getattrlist(list, name):
    for item in list:
        if item and is_list(item) and item[0] == name:
            return item[1:]
    return []


# Get a property of a list, which may itself be another list.
#
def getproplist(list, name):
    for item in list:
        if item and is_list(item) and item[0] == 'prop':
            if len(item) > 1 and item[1] == name:
                return item[2:]
    return []


# Test if an actuator description contains the property 'end-of-group'
#
def is_endgroup(list):
    x = getproplist(list, 'end-of-group')
    return (x and x[0] == '#t')


# Neatly display an actuator definition given as S-expression
# the prefix string is printed before each line.
#
def show_actuator(prefix, a):
    for item in a:
        if not is_list(item):
            print prefix, item
        elif item and item[0] == 'al':
            print prefix, 'Subactuator list:'
            for a in item[1:]:
                show_actuator(prefix + '    ', a)
        elif len(item) == 2:
            print prefix, item[0], '=>', item[1]
        elif len(item) == 3 and item[0] == 'prop':
            print prefix, 'Prop', item[1], '=>',
            print item[2]
        else:
            print prefix, '?', item


# Neatly display a panel.
#
def show_panel(prefix, p):
    for item in p:
        if not is_list(item):
            print prefix, item
        elif item and item[0] == 'al':
            print prefix, 'Actuator list:'
            for a in item[1:]:
                show_actuator(prefix + '    ', a)
        elif len(item) == 2:
            print prefix, item[0], '=>', item[1]
        elif len(item) == 3 and item[0] == 'prop':
            print prefix, 'Prop', item[1], '=>',
            print item[2]
        else:
            print prefix, '?', item


# Exception raised by build_actuator or build_panel.
#
panel_error = 'panel error'


# Dummy callback used to initialize the callbacks.
#
def dummy_callback(arg):
    pass


# Assign attributes to members of the target.
# Attribute names in exclist are ignored.
# The member name is the attribute name prefixed with the prefix.
#
def assign_members(target, attrlist, exclist, prefix):
    for item in attrlist:
        if is_list(item) and len(item) == 2 and item[0] not in exclist:
            name, value = item[0], item[1]
            ok = 1
            if value[0] in '-0123456789':
                value = eval(value)
            elif value[0] == '"':
                value = value[1:-1]
            elif value == 'move-then-resize':
                # Strange default set by Panel Editor...
                ok = 0
            else:
                print 'unknown value', value, 'for', name
                ok = 0
            if ok:
                lhs = 'target.' + prefix + name
                stmt = lhs + '=' + `value`
                if debug: print 'exec', stmt
                try:
                    exec stmt + '\n'
                except KeyboardInterrupt:  # Don't catch this!
                    raise KeyboardInterrupt
                except:
                    print 'assign failed:', stmt


# Build a real actuator from an actuator description.
# Return a pair (actuator, name).
#
def build_actuator(descr):
    namelist = getattrlist(descr, 'name')
    if namelist:
        # Assume it is a string
        actuatorname = namelist[0][1:-1]
    else:
        actuatorname = ''
    type = descr[0]
    if type[:4] == 'pnl_': type = type[4:]
    act = pnl.mkact(type)
    act.downfunc = act.activefunc = act.upfunc = dummy_callback
    #
    assign_members(act, descr[1:], ['al', 'data', 'name'], '')
    #
    # Treat actuator-specific data
    #
    datalist = getattrlist(descr, 'data')
    prefix = ''
    if type[-4:] == 'puck':
        prefix = 'puck_'
    elif type == 'mouse':
        prefix = 'mouse_'
    assign_members(act, datalist, [], prefix)
    #
    return act, actuatorname


# Build all sub-actuators and add them to the super-actuator.
# The super-actuator must already have been added to the panel.
# Sub-actuators with defined names are added as members to the panel
# so they can be referenced as p.name.
#
# Note: I have no idea how panel.endgroup() works when applied
# to a sub-actuator.
#
def build_subactuators(panel, super_act, al):
    #
    # This is nearly the same loop as below in build_panel(),
    # except a call is made to addsubact() instead of addact().
    #
    for a in al:
        act, name = build_actuator(a)
        act.addsubact(super_act)
        if name:
            stmt = 'panel.' + name + ' = act'
            if debug: print 'exec', stmt
            exec stmt + '\n'
        if is_endgroup(a):
            panel.endgroup()
        sub_al = getattrlist(a, 'al')
        if sub_al:
            build_subactuators(panel, act, sub_al)
    #
    # Fix the actuator to which whe just added subactuators.
    # This can't hurt (I hope) and is needed for the scroll actuator.
    #
    super_act.fixact()


# Build a real panel from a panel definition.
# Return a panel object p, where for each named actuator a, p.name is a
# reference to a.
#
def build_panel(descr):
    #
    # Sanity check
    #
    if (not descr) or descr[0] != 'panel':
        raise panel_error, 'panel description must start with "panel"'
    #
    if debug: show_panel('', descr)
    #
    # Create an empty panel
    #
    panel = pnl.mkpanel()
    #
    # Assign panel attributes
    #
    assign_members(panel, descr[1:], ['al'], '')
    #
    # Look for actuator list
    #
    al = getattrlist(descr, 'al')
    #
    # The order in which actuators are created is important
    # because of the endgroup() operator.
    # Unfortunately the Panel Editor outputs the actuator list
    # in reverse order, so we reverse it here.
    #
    al = reverse(al)
    #
    for a in al:
        act, name = build_actuator(a)
        act.addact(panel)
        if name:
            stmt = 'panel.' + name + ' = act'
            exec stmt + '\n'
        if is_endgroup(a):
            panel.endgroup()
        sub_al = getattrlist(a, 'al')
        if sub_al:
            build_subactuators(panel, act, sub_al)
    #
    return panel


# Wrapper around pnl.dopanel() which calls call-back functions.
#
def my_dopanel():
    # Extract only the first 4 elements to allow for future expansion
    a, down, active, up = pnl.dopanel()[:4]
    if down:
        down.downfunc(down)
    if active:
        active.activefunc(active)
    if up:
        up.upfunc(up)
    return a


# Create one or more panels from a description file (S-expressions)
# generated by the Panel Editor.
#
def defpanellist(file):
    import panelparser
    descrlist = panelparser.parse_file(open(file, 'r'))
    panellist = []
    for descr in descrlist:
        panellist.append(build_panel(descr))
    return panellist


# Import everything from built-in method pnl, so the user can always
# use panel.foo() instead of pnl.foo().
# This gives *no* performance penalty once this module is imported.
#
from pnl import *  # for export

dopanel = my_dopanel  # override pnl.dopanel
mit
8,022,840,077,382,427,000
22.939502
71
0.658689
false
amoretti86/auxiliary-particle-filter
apf_fhn.py
1
6209
'''
An implementation of the Fully Adapted Auxiliary Particle Filter as described by
Whiteley and Johansen Chapter 3 Algorithm 2 on page 5 of Recent Developments in
Auxiliary Particle Filtering: http://www.maths.bris.ac.uk/~manpw/apf_chapter.pdf

The algorithm is taylored to perform inference in nonlinear dynamical systems and
supports the Fitzhugh Nagumo model (or Van der Pol Oscillator)

by Antonio Moretti - amoretti@cs.columbia.edu
'''

import math
import numpy as np
import scipy as sp
from scipy.stats import norm
from scipy import misc


def make_mvn_pdf(mu, sigma):
    ''' creates a multivariate gaussian pdf '''
    def f(x):
        return sp.stats.multivariate_normal.pdf(x, mu, sigma)
    return f


def make_poisson(k):
    ''' creates a multivariate poisson pmf '''
    def f(theta):
        prob = 1
        for i in range(len(k)):
            prob *= sp.stats.poisson.pmf(k[i], np.exp(theta[i]))
        return prob
    return f


def fhn(Y, deltat, I):
    ''' first order euler discretization of the fitzhugh nagumo differential equations '''
    y1 = Y[0]
    y2 = Y[1]
    return [y1 + (y1 - (y1**3)/3 - y2 + I)*deltat,
            y2 + (0.08*(y1 + 0.7 - 0.8*y2))*deltat]


def integrate_gaussian(grid, weight, mean, std, f):
    ''' Performs gaussian quadrature via hermite polynomials '''
    sq2 = np.sqrt(2)
    zz = sq2*std*grid + mean
    value = np.dot(f(zz), weight)/np.sqrt(np.pi)
    return value


def bivariate_gauss_hermite(xt, wt, mean, T, gfunc, XX, YY):
    '''
    Performs two dimensional Gauss-Hermite Quadrature with a
    change of measure to account for mu and sigma
    '''
    import scipy as sp
    from scipy import linalg
    mat = np.array([XX.flatten(), YY.flatten()]).T
    grid_trans = np.sqrt(2)*np.dot(mat, T) + mean
    geval = np.asarray([gfunc(xx) for xx in grid_trans]).reshape([len(xt), len(xt)])
    c = 1/(np.pi)
    y_marginal = np.zeros(len(xt))
    for idx in xrange(len(xt)):
        y_marginal[idx] = np.dot(geval[idx,:], wt)
    theta = np.dot(y_marginal, wt)*c
    return theta


def apf(obs, time, n_particles, n_gridpoints, B, Sigma, Gamma, x_0, I_ext):
    '''
    Implements the Auxiliary Particle Filter as described by Whiteley and Johansen
    Algorithm 2 on page 5: http://www.maths.bris.ac.uk/~manpw/apf_chapter.pdf

    Input:
        [obs]          : a time x dimension matrix representing a time series of observed signals
        [time]         : a scalar representing the corresponding time length
        [n_particles]  : a scalar representing the number of particles to use in the simulation
        [n_gridpoints] : a scalar representing the number of grid points or nodes to use for the quadrature
        [B]            : a 2x2 propagator matrix for the dynamics
        [Sigma]        : a 2x2 covariance matrix
        [Gamma]        : a 2x2 covariance matrix
        [x_0]          : a 2x1 vector of representing the initial value of the signal
        [I_ext]        : a scalar representing input current magnitude

    Output:
        [W] : an n_particles x time matrix of weights
        [X] : an n_particles x time x dimension tensor of trajectories
        [k] : an n_particles x time matrix of the posterior integral for each particle at each time point

    Average the trajectory tensor (X) across particles to approximate the
    functional integral. Smooth the resulting signal to remove noise.
    '''

    assert(len(obs) == time)

    # Initialize variables
    dimension = 2
    n_gridpoints = n_gridpoints
    X = np.zeros((n_particles, time, dimension))
    W = np.zeros((n_particles, time))
    k = np.zeros((n_particles, time))
    proposal_covariance_matrix = 0.075*np.eye(dimension)
    delta_t = 0.25

    # Define gridpoints as roots of the hermite polynomials
    [xt, wt] = np.polynomial.hermite.hermgauss(n_gridpoints)

    # TO DO: generalize the computation of posterior integral
    # Define our mesh for numerical integration
    XX = np.tile(xt.reshape([1, len(xt)]), [len(xt), 1])
    YY = np.tile(xt.reshape([len(xt), 1]), [1, len(xt)])

    # Compute Cholesky decomposition of Sigma
    T = sp.linalg.sqrtm(Sigma)

    # Sample particles and weights at time 1
    import pdb
    for i in range(0, n_particles):
        X[i,0,:] = np.random.randn(1,dimension)[0]
        g = make_mvn_pdf(x_0, Gamma)(obs[0,:])  # (np.dot(B,X[i,0,:])).ravel()
        nu = make_mvn_pdf(x_0, Sigma)(X[i,0,:])
        q = make_mvn_pdf(np.zeros(dimension), proposal_covariance_matrix)(X[i,0,:])
        # initialize weights
        W[i,0] = g*nu/q

    # main loop of program at time > 1
    for t in range(1, time):

        # Update weights and propagate particles based on postrior integral
        for i in range(n_particles):
            # Compute the posterior integral p(y_n | x_{n-1})
            # Define the mean of the Gaussian
            g_mean = np.dot(B,X[i,t,:])
            g_int_func = make_mvn_pdf(g_mean,Gamma)
            # Call our quadrature subroutine
            k[i,t] = bivariate_gauss_hermite(xt, wt, fhn(X[i,t-1,:], delta_t, I_ext), T, g_int_func, XX, YY)
            # Reweight particles
            W[i,t-1] = W[i,t-1]*k[i,t]

        # Resample
        Xprime = np.random.choice(n_particles, n_particles, p = W[:,t-1]/np.sum(W[:,t-1]), replace = True)
        Xtilde = [X[i,t-1,:] for i in Xprime]

        # Reset weights and particles
        for i in range(n_particles):
            # Select new particles
            X[i,t-1,:] = Xtilde[i]
            # Resample particles and reset weights
            X[i,t,:] = np.random.randn(1,dimension)[0] + X[i,t-1,:]
            # Update proposal and target distributions
            reshaped_g_mean = np.dot(B,X[i,t,:]).ravel()
            g = make_mvn_pdf(reshaped_g_mean,Gamma)(obs[t,:])
            q = make_mvn_pdf(X[i,t-1,:],proposal_covariance_matrix)(X[i,t,:])
            f = make_mvn_pdf(fhn(X[i,t-1,:],delta_t,I_ext),Sigma)(X[i,t,:])
            # Update weights
            W[i,t] = (g*f)/(k[i,t]*q)

        print "time: ", t

    return W, X, k
mit
-2,725,166,112,573,761,500
38.050314
120
0.598003
false
Dark-Bob/mro
mro/routine.py
1
5006
import mro.connection
import mro.helpers
import collections


class RoutineParameter:

    def __init__(self, name, data_type, mode):
        self.name = name
        self.data_type = data_type
        self.mode = mode

    def __repr__(self):
        return f"RoutineParameter({self.name}, {self.data_type}, {self.mode})"


class Routine:

    def __init__(self, name, in_parameters, out_parameters, return_type, routine_type):
        self.name = name
        self.in_parameters = in_parameters
        self.out_parameters = out_parameters
        self.return_type = return_type
        self.execute = 'call' if routine_type == 'PROCEDURE' else 'select * from'
        self.base_command = f"{self.execute} {self.name}({{}})"
        if return_type == 'void':
            self.return_function = self._return_void
        elif return_type == 'record':
            self.return_function = self._return_record
            self.out_type = collections.namedtuple(f"{name}_out", ' '.join([p.name for p in self.out_parameters]))
        else:
            self.return_function = self._return_scalar

    def __call__(self, *args, **kwargs):
        parameter_format = ', '.join(['%s' for x in args])
        if len(kwargs) > 0:
            parameter_format += ', ' + ', '.join([f"{k} := %s" for k in kwargs.keys()])
        parameters = args + tuple(kwargs.values())
        command = self.base_command.format(parameter_format)
        connection = mro.connection.connection
        cursor = connection.cursor()
        cursor.execute(command, parameters)
        connection.commit()
        return self.return_function(cursor)

    def _return_void(self, cursor):
        return None

    def _return_scalar(self, cursor):
        return next(cursor)[0]

    def _return_record(self, cursor):
        objs = []
        for row in cursor:
            objs.append(self.out_type(*row))
        return objs


def _create_routines(connection):
    cursor = connection.cursor()
    cursor.execute("select * from information_schema.routines where routine_type in ('PROCEDURE', 'FUNCTION') and routine_schema = 'public'")
    connection.commit()
    column_name_index_map = mro.helpers.create_column_name_index_map(cursor)
    for routine in cursor:
        routine_name = routine[column_name_index_map['routine_name']]
        routine_type = routine[column_name_index_map['routine_type']]
        specific_name = routine[column_name_index_map['specific_name']]
        cursor2 = connection.cursor()
        cursor2.execute(f"select * from information_schema.parameters where specific_name='{specific_name}' and parameter_mode <> 'OUT' order by ordinal_position;")
        connection.commit()
        in_parameters = []
        cim = mro.helpers.create_column_name_index_map(cursor2)
        for parameter in cursor2:
            in_parameters.append(RoutineParameter(parameter[cim['parameter_name']], parameter[cim['data_type']], parameter[cim['parameter_mode']]))
        # Using postgres specific tables because this information is not available in the information schema
        command = """
        with r as (
            select proallargtypes, proargnames, proargmodes, prorettype
            from pg_proc
            where proname='{}'
        ),
        proallargtypes_expanded as (
            select a.index, a.t as oid
            from r, unnest(proallargtypes) with ordinality as a(t, index)
        ),
        proargnames_expanded as (
            select a.index, a.n as name
            from r, unnest(proargnames) with ordinality as a(n, index)
        ),
        proargmodes_expanded as (
            select a.index, a.m as mode
            from r, unnest(proargmodes) with ordinality as a(m, index)
        ),
        p as (
            select proallargtypes_expanded.index, oid, name, mode
            from proallargtypes_expanded
            join proargnames_expanded on proallargtypes_expanded.index = proargnames_expanded.index
            join proargmodes_expanded on proallargtypes_expanded.index = proargmodes_expanded.index
        ),
        params as (
            select p.index, p.oid, p.name, typname as data_type, p.mode
            from p
            join pg_type t on p.oid = t.oid
        ),
        outputs as (
            select index, oid, name, data_type, 'OUT' as mode
            from params
            where mode in ('o', 'b', 't')
        )
        select * from outputs order by index""".format(routine[column_name_index_map['routine_name']])
        cursor2 = connection.cursor()
        cursor2.execute(command)
        connection.commit()
        out_parameters = []
        cim = mro.helpers.create_column_name_index_map(cursor2)
        for parameter in cursor2:
            out_parameters.append(RoutineParameter(parameter[cim['name']], parameter[cim['data_type']], parameter[cim['mode']]))
        command = """
        with r as (
            select prorettype from pg_proc where proname='{}'
        )
        select typname from pg_type t join r on t.oid = r.prorettype""".format(routine[column_name_index_map['routine_name']])
        cursor2 = connection.cursor()
        cursor2.execute(command)
        connection.commit()
        return_type = next(cursor2)[0]
        routine = Routine(routine_name, in_parameters, out_parameters, return_type, routine_type)
        setattr(mro, routine_name, routine)
mit
-7,127,849,894,131,664,000
41.07563
262
0.655813
false