Dataset schema (column name, dtype, observed values):

column          dtype     observed values
repo_name       string    length 5 to 100
path            string    length 4 to 299
copies          string    length 1 to 5
size            string    length 4 to 7
content         string    length 666 to 1.03M
license         string    15 distinct values
hash            int64     -9,223,351,895,964,839,000 to 9,223,297,778B
line_mean       float64   3.17 to 100
line_max        int64     7 to 1k
alpha_frac      float64   0.25 to 0.98
autogenerated   bool      1 class
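For orientation, here is a minimal sketch in plain Python of what a single row of this dataset looks like once the columns above are filled in. The values are copied from the first record below; the content field really holds the complete source file and is truncated here, and the reading of line_mean, line_max and alpha_frac as per-line length statistics and the alphabetic-character fraction of content is an assumption, not something the dump states.

```python
# Illustrative sketch only: one dataset row as a plain Python dict.
# Values are copied from the first record below; "content" is truncated.
example_row = {
    "repo_name": "joachimmetz/plaso",
    "path": "plaso/parsers/asl.py",
    "copies": "2",
    "size": "11003",
    "content": '# -*- coding: utf-8 -*- """The Apple System Log (ASL) file parser.""" ...',
    "license": "apache-2.0",
    "hash": 137836419511823490,
    # Assumed meaning: mean/max line length and alphabetic fraction of "content".
    "line_mean": 31.943114,
    "line_max": 80,
    "alpha_frac": 0.669908,
    "autogenerated": False,
}

print(example_row["repo_name"], example_row["path"], example_row["license"])
```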
repo_name: joachimmetz/plaso
path: plaso/parsers/asl.py
copies: 2
size: 11003
content:
# -*- coding: utf-8 -*- """The Apple System Log (ASL) file parser.""" import os from dfdatetime import posix_time as dfdatetime_posix_time from dfdatetime import semantic_time as dfdatetime_semantic_time from plaso.containers import events from plaso.containers import time_events from plaso.lib import definitions from plaso.lib import dtfabric_helper from plaso.lib import errors from plaso.lib import specification from plaso.parsers import interface from plaso.parsers import manager class ASLEventData(events.EventData): """Apple System Log (ASL) event data. Attributes: computer_name (str): name of the host. extra_information (str): extra fields associated to the event. facility (str): facility. group_id (int): group identifier (GID). level (str): level of criticality of the event. message_id (int): message identifier. message (str): message of the event. pid (int): process identifier (PID). read_uid (int): user identifier that can read this file, where -1 represents all. read_gid (int): the group identifier that can read this file, where -1 represents all. record_position (int): position of the event record. sender (str): sender or process that created the event. user_sid (str): user identifier (UID). """ DATA_TYPE = 'mac:asl:event' def __init__(self): """Initializes event data.""" super(ASLEventData, self).__init__(data_type=self.DATA_TYPE) self.computer_name = None self.extra_information = None self.facility = None self.group_id = None self.level = None self.message_id = None self.message = None self.pid = None self.read_gid = None self.read_uid = None self.record_position = None self.sender = None self.user_sid = None class ASLFileEventData(events.EventData): """Apple System Log (ASL) file event data. Attributes: format_version (int): ASL file format version. is_dirty (bool): True if the last log entry offset does not match value in file header and the file is considered dirty. """ DATA_TYPE = 'mac:asl:file' def __init__(self): """Initializes event data.""" super(ASLFileEventData, self).__init__(data_type=self.DATA_TYPE) self.format_version = None self.is_dirty = None class ASLParser(interface.FileObjectParser, dtfabric_helper.DtFabricHelper): """Parser for Apple System Log (ASL) files.""" NAME = 'asl_log' DATA_FORMAT = 'Apple System Log (ASL) file' _DEFINITION_FILE = os.path.join( os.path.dirname(__file__), 'asl.yaml') # Most significant bit of a 64-bit string offset. _STRING_OFFSET_MSB = 1 << 63 def _ParseRecord(self, parser_mediator, file_object, record_offset): """Parses a record and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. Returns: int: next record offset. Raises: ParseError: if the record cannot be parsed. 
""" record_map = self._GetDataTypeMap('asl_record') try: record, record_data_size = self._ReadStructureFromFileObject( file_object, record_offset, record_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse record at offset: 0x{0:08x} with error: ' '{1!s}').format(record_offset, exception)) hostname = self._ParseRecordString( file_object, record.hostname_string_offset) sender = self._ParseRecordString( file_object, record.sender_string_offset) facility = self._ParseRecordString( file_object, record.facility_string_offset) message = self._ParseRecordString( file_object, record.message_string_offset) file_offset = record_offset + record_data_size additional_data_size = record.data_size + 6 - record_data_size if additional_data_size % 8 != 0: raise errors.ParseError( 'Invalid record additional data size: {0:d}.'.format( additional_data_size)) additional_data = self._ReadData( file_object, file_offset, additional_data_size) extra_fields = {} for additional_data_offset in range(0, additional_data_size - 8, 16): record_extra_field = self._ParseRecordExtraField( additional_data[additional_data_offset:], file_offset) file_offset += 16 name = self._ParseRecordString( file_object, record_extra_field.name_string_offset) value = self._ParseRecordString( file_object, record_extra_field.value_string_offset) if name is not None: extra_fields[name] = value # TODO: implement determine previous record offset event_data = ASLEventData() event_data.computer_name = hostname event_data.extra_information = ', '.join([ '{0:s}: {1!s}'.format(name, value) for name, value in sorted(extra_fields.items())]) event_data.facility = facility event_data.group_id = record.group_identifier event_data.level = record.alert_level event_data.message_id = record.message_identifier event_data.message = message event_data.pid = record.process_identifier event_data.read_gid = record.real_group_identifier event_data.read_uid = record.real_user_identifier event_data.record_position = record_offset event_data.sender = sender # Note that the user_sid value is expected to be a string. event_data.user_sid = '{0:d}'.format(record.user_identifier) timestamp = ( (record.written_time * 1000000000) + record.written_time_nanoseconds) date_time = dfdatetime_posix_time.PosixTimeInNanoseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) return record.next_record_offset def _ParseRecordExtraField(self, byte_stream, file_offset): """Parses a record extra field. Args: byte_stream (bytes): byte stream. file_offset (int): offset of the record extra field relative to the start of the file. Returns: asl_record_extra_field: record extra field. Raises: ParseError: if the record extra field cannot be parsed. """ extra_field_map = self._GetDataTypeMap('asl_record_extra_field') try: record_extra_field = self._ReadStructureFromByteStream( byte_stream, file_offset, extra_field_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse record extra field at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) return record_extra_field def _ParseRecordString(self, file_object, string_offset): """Parses a record string. Args: file_object (file): file-like object. string_offset (int): offset of the string relative to the start of the file. Returns: str: record string or None if string offset is 0. 
Raises: ParseError: if the record string cannot be parsed. """ if string_offset == 0: return None if string_offset & self._STRING_OFFSET_MSB: if (string_offset >> 60) != 8: raise errors.ParseError('Invalid inline record string flag.') string_size = (string_offset >> 56) & 0x0f if string_size >= 8: raise errors.ParseError('Invalid inline record string size.') string_data = bytes(bytearray([ string_offset >> (8 * byte_index) & 0xff for byte_index in range(6, -1, -1)])) try: return string_data[:string_size].decode('utf-8') except UnicodeDecodeError as exception: raise errors.ParseError( 'Unable to decode inline record string with error: {0!s}.'.format( exception)) record_string_map = self._GetDataTypeMap('asl_record_string') try: record_string, _ = self._ReadStructureFromFileObject( file_object, string_offset, record_string_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse record string at offset: 0x{0:08x} with error: ' '{1!s}').format(string_offset, exception)) return record_string.string.rstrip('\x00') @classmethod def GetFormatSpecification(cls): """Retrieves the format specification. Returns: FormatSpecification: format specification. """ format_specification = specification.FormatSpecification(cls.NAME) format_specification.AddNewSignature( b'ASL DB\x00\x00\x00\x00\x00\x00', offset=0) return format_specification def ParseFileObject(self, parser_mediator, file_object): """Parses an ASL file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed. """ file_header_map = self._GetDataTypeMap('asl_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse file header with error: {0!s}'.format( exception)) is_dirty = False file_size = file_object.get_size() if file_header.first_log_entry_offset > 0: last_log_entry_offset = 0 file_offset = file_header.first_log_entry_offset while file_offset < file_size: last_log_entry_offset = file_offset try: file_offset = self._ParseRecord( parser_mediator, file_object, file_offset) except errors.ParseError as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse record with error: {0!s}'.format(exception)) return if file_offset == 0: break if last_log_entry_offset != file_header.last_log_entry_offset: is_dirty = True parser_mediator.ProduceRecoveryWarning( 'last log entry offset does not match value in file header.') event_data = ASLFileEventData() event_data.format_version = file_header.format_version event_data.is_dirty = is_dirty if file_header.creation_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=file_header.creation_time) else: date_time = dfdatetime_semantic_time.NotSet() event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) manager.ParsersManager.RegisterParser(ASLParser)
license: apache-2.0
hash: 137,836,419,511,823,490
line_mean: 31.943114
line_max: 80
alpha_frac: 0.669908
autogenerated: false

repo_name: FireWRT/OpenWrt-Firefly-Libraries
path: staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/ctypes/test/test_parameters.py
copies: 80
size: 6196
content:
import unittest, sys from ctypes.test import need_symbol class SimpleTypesTestCase(unittest.TestCase): def setUp(self): import ctypes try: from _ctypes import set_conversion_mode except ImportError: pass else: self.prev_conv_mode = set_conversion_mode("ascii", "strict") def tearDown(self): try: from _ctypes import set_conversion_mode except ImportError: pass else: set_conversion_mode(*self.prev_conv_mode) def test_subclasses(self): from ctypes import c_void_p, c_char_p # ctypes 0.9.5 and before did overwrite from_param in SimpleType_new class CVOIDP(c_void_p): def from_param(cls, value): return value * 2 from_param = classmethod(from_param) class CCHARP(c_char_p): def from_param(cls, value): return value * 4 from_param = classmethod(from_param) self.assertEqual(CVOIDP.from_param("abc"), "abcabc") self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc") @need_symbol('c_wchar_p') def test_subclasses_c_wchar_p(self): from ctypes import c_wchar_p class CWCHARP(c_wchar_p): def from_param(cls, value): return value * 3 from_param = classmethod(from_param) self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc") # XXX Replace by c_char_p tests def test_cstrings(self): from ctypes import c_char_p, byref # c_char_p.from_param on a Python String packs the string # into a cparam object s = b"123" self.assertIs(c_char_p.from_param(s)._obj, s) # new in 0.9.1: convert (encode) unicode to ascii self.assertEqual(c_char_p.from_param(b"123")._obj, b"123") self.assertRaises(TypeError, c_char_p.from_param, "123\377") self.assertRaises(TypeError, c_char_p.from_param, 42) # calling c_char_p.from_param with a c_char_p instance # returns the argument itself: a = c_char_p(b"123") self.assertIs(c_char_p.from_param(a), a) @need_symbol('c_wchar_p') def test_cw_strings(self): from ctypes import byref, c_wchar_p c_wchar_p.from_param("123") self.assertRaises(TypeError, c_wchar_p.from_param, 42) self.assertRaises(TypeError, c_wchar_p.from_param, b"123\377") pa = c_wchar_p.from_param(c_wchar_p("123")) self.assertEqual(type(pa), c_wchar_p) def test_int_pointers(self): from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer LPINT = POINTER(c_int) ## p = pointer(c_int(42)) ## x = LPINT.from_param(p) x = LPINT.from_param(pointer(c_int(42))) self.assertEqual(x.contents.value, 42) self.assertEqual(LPINT(c_int(42)).contents.value, 42) self.assertEqual(LPINT.from_param(None), None) if c_int != c_long: self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42))) self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42))) self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42))) def test_byref_pointer(self): # The from_param class method of POINTER(typ) classes accepts what is # returned by byref(obj), it type(obj) == typ from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref LPINT = POINTER(c_int) LPINT.from_param(byref(c_int(42))) self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22))) if c_int != c_long: self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22))) self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22))) def test_byref_pointerpointer(self): # See above from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref LPLPINT = POINTER(POINTER(c_int)) LPLPINT.from_param(byref(pointer(c_int(42)))) self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22)))) if c_int != c_long: self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22)))) self.assertRaises(TypeError, LPLPINT.from_param, 
byref(pointer(c_uint(22)))) def test_array_pointers(self): from ctypes import c_short, c_uint, c_int, c_long, POINTER INTARRAY = c_int * 3 ia = INTARRAY() self.assertEqual(len(ia), 3) self.assertEqual([ia[i] for i in range(3)], [0, 0, 0]) # Pointers are only compatible with arrays containing items of # the same type! LPINT = POINTER(c_int) LPINT.from_param((c_int*3)()) self.assertRaises(TypeError, LPINT.from_param, c_short*3) self.assertRaises(TypeError, LPINT.from_param, c_long*3) self.assertRaises(TypeError, LPINT.from_param, c_uint*3) def test_noctypes_argtype(self): import _ctypes_test from ctypes import CDLL, c_void_p, ArgumentError func = CDLL(_ctypes_test.__file__)._testfunc_p_p func.restype = c_void_p # TypeError: has no from_param method self.assertRaises(TypeError, setattr, func, "argtypes", (object,)) class Adapter(object): def from_param(cls, obj): return None func.argtypes = (Adapter(),) self.assertEqual(func(None), None) self.assertEqual(func(object()), None) class Adapter(object): def from_param(cls, obj): return obj func.argtypes = (Adapter(),) # don't know how to convert parameter 1 self.assertRaises(ArgumentError, func, object()) self.assertEqual(func(c_void_p(42)), 42) class Adapter(object): def from_param(cls, obj): raise ValueError(obj) func.argtypes = (Adapter(),) # ArgumentError: argument 1: ValueError: 99 self.assertRaises(ArgumentError, func, 99) ################################################################ if __name__ == '__main__': unittest.main()
license: gpl-2.0
hash: 4,392,718,686,375,580,000
line_mean: 34.204545
line_max: 88
alpha_frac: 0.596998
autogenerated: false

repo_name: SamReha/Concepts-Game
path: languages/pt-br.py
copies: 88
size: 7249
content:
# -*- coding: utf-8 -*- { '!langcode!': 'pt-br', '!langname!': 'Português (do Brasil)', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN', '%s %%{row} deleted': '%s linhas apagadas', '%s %%{row} updated': '%s linhas atualizadas', '%s selected': '%s selecionado', '%Y-%m-%d': '%d-%m-%Y', '%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S', 'About': 'Sobre', 'Access Control': 'Controle de Acesso', 'Administrative Interface': 'Interface Administrativa', '@markmin\x01An error occured, please [[reload %s]] the page': 'Ocorreu um erro, por favor [[reload %s]] a página', 'Administrative interface': 'Interface administrativa', 'Ajax Recipes': 'Receitas de Ajax', 'appadmin is disabled because insecure channel': 'Administração desativada porque o canal não é seguro', 'Are you sure you want to delete this object?': 'Você está certo que deseja apagar este objeto?', 'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis', 'Buy this book': 'Compre o livro', 'cache': 'cache', 'Cache': 'Cache', 'Cache Keys': 'Chaves de cache', 'Cannot be empty': 'Não pode ser vazio', 'change password': 'modificar senha', 'Check to delete': 'Marque para apagar', 'Clear CACHE?': 'Limpar CACHE?', 'Clear DISK': 'Limpar DISCO', 'Clear RAM': 'Limpar memória RAM', 'Client IP': 'IP do cliente', 'Community': 'Comunidade', 'Components and Plugins': 'Componentes e Plugins', 'Controller': 'Controlador', 'Copyright': 'Copyright', 'Current request': 'Requisição atual', 'Current response': 'Resposta atual', 'Current session': 'Sessão atual', 'customize me!': 'Personalize-me!', 'data uploaded': 'dados enviados', 'Database': 'banco de dados', 'Database %s select': 'Selecionar banco de dados %s', 'db': 'bd', 'DB Model': 'Modelo BD', 'Delete:': 'Apagar:', 'Demo': 'Demo', 'Deployment Recipes': 'Receitas de deploy', 'Description': 'Descrição', 'design': 'projeto', 'DISK': 'DISK', 'Disk Cache Keys': 'Chaves do Cache de Disco', 'Disk Cleared': 'Disco Limpo', 'Documentation': 'Documentação', "Don't know what to do?": "Não sabe o que fazer?", 'done!': 'concluído!', 'Download': 'Download', 'E-mail': 'E-mail', 'Edit': 'Editar', 'Edit current record': 'Editar o registro atual', 'edit profile': 'editar perfil', 'Edit This App': 'Editar esta aplicação', 'Email and SMS': 'Email e SMS', 'Errors': 'Erros', 'Enter an integer between %(min)g and %(max)g': 'Informe um valor inteiro entre %(min)g e %(max)g', 'export as csv file': 'exportar como um arquivo csv', 'FAQ': 'Perguntas frequentes', 'First name': 'Nome', 'Forms and Validators': 'Formulários e Validadores', 'Free Applications': 'Aplicações gratuitas', 'Group ID': 'ID do Grupo', 'Groups': 'Grupos', 'Hello World': 'Olá Mundo', 'Home': 'Principal', 'How did you get here?': 'Como você chegou aqui?', 'import': 'importar', 'Import/Export': 'Importar/Exportar', 'Index': 'Início', 'insert new': 'inserir novo', 'insert new %s': 'inserir novo %s', 'Internal State': 'Estado Interno', 'Introduction': 'Introdução', 'Invalid email': 'Email inválido', 'Invalid Query': 'Consulta Inválida', 'invalid request': 'requisição inválida', 'Key': 'Chave', 'Last name': 'Sobrenome', 'Layout': 'Layout', 'Layout Plugins': 'Plugins de Layout', 'Layouts': 'Layouts', 'Live chat': 'Chat ao vivo', 'Live Chat': 'Chat ao vivo', 'login': 'Entrar', 'Login': 'Autentique-se', 'logout': 'Sair', 'Lost Password': 'Esqueceu sua senha?', 'lost 
password?': 'esqueceu sua senha?', 'Main Menu': 'Menu Principal', 'Manage Cache': 'Gerenciar Cache', 'Menu Model': 'Modelo de Menu', 'My Sites': 'Meus sites', 'Name': 'Nome', 'New Record': 'Novo Registro', 'new record inserted': 'novo registro inserido', 'next 100 rows': 'próximas 100 linhas', 'No databases in this application': 'Não há bancos de dados nesta aplicação', 'Object or table name': 'Nome do objeto do da tabela', 'Online examples': 'Exemplos online', 'or import from csv file': 'ou importar de um arquivo csv', 'Origin': 'Origem', 'Other Plugins': 'Outros Plugins', 'Other Recipes': 'Outras Receitas', 'Overview': 'Visão Geral', 'Password': 'Senha', 'Plugins': 'Plugins', 'Powered by': 'Desenvolvido com', 'Preface': 'Prefácio', 'previous 100 rows': '100 linhas anteriores', 'Python': 'Python', 'Query:': 'Consulta:', 'Quick Examples': 'Exemplos rápidos', 'RAM': 'RAM', 'RAM Cache Keys': 'RAM Cache Keys', 'Ram Cleared': 'Ram Cleared', 'Recipes': 'Receitas', 'Record': 'Registro', 'record does not exist': 'registro não existe', 'Record ID': 'ID do Registro', 'Record id': 'id do registro', 'Register': 'Registre-se', 'register': 'Registre-se', 'Registration key': 'Chave de registro', 'Reset Password key': 'Resetar chave de senha', 'Resources': 'Recursos', 'Role': 'Papel', 'Registration identifier': 'Idenficador de registro', 'Rows in Table': 'Linhas na tabela', 'Rows selected': 'Linhas selecionadas', 'Semantic': 'Semântico', 'Services': 'Serviço', 'Size of cache:': 'Tamanho do cache:', 'state': 'estado', 'Statistics': 'Estatísticas', 'Stylesheet': 'Folha de estilo', 'submit': 'enviar', 'Support': 'Suporte', 'Sure you want to delete this object?': 'Está certo(a) que deseja apagar este objeto?', 'Table': 'Tabela', 'Table name': 'Nome da tabela', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.', 'The Core': 'The Core', 'The output of the file is a dictionary that was rendered by the view %s': 'A saída do arquivo é um dicionário que foi apresentado pela visão %s', 'The Views': 'As views', 'This App': 'Esta aplicação', 'This email already has an account': 'Este email já tem uma conta', 'This is a copy of the scaffolding application': 'Isto é uma cópia da aplicação modelo', 'Time in Cache (h:m:s)': 'Tempo em Cache (h:m:s)', 'Timestamp': 'Timestamp', 'Twitter': 'Twitter', 'unable to parse csv file': 'não foi possível analisar arquivo csv', 'Update:': 'Atualizar:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) 
para NOT para construir consultas mais complexas.', 'User ID': 'ID do Usuário', 'User Voice': 'Opinião dos usuários', 'Videos': 'Vídeos', 'View': 'Visualização', 'Web2py': 'Web2py', 'Welcome': 'Bem-vindo', 'Welcome %s': 'Bem-vindo %s', 'Welcome to web2py': 'Bem-vindo ao web2py', 'Welcome to web2py!': 'Bem-vindo ao web2py!', 'Which called the function %s located in the file %s': 'Que chamou a função %s localizada no arquivo %s', 'You are successfully running web2py': 'Você está executando o web2py com sucesso', 'You are successfully running web2py.': 'Você está executando o web2py com sucesso.', 'You can modify this application and adapt it to your needs': 'Você pode modificar esta aplicação e adaptá-la às suas necessidades', 'You visited the url %s': 'Você acessou a url %s', 'Working...': 'Trabalhando...', }
license: gpl-2.0
hash: 7,410,207,669,485,902,000
line_mean: 39.693182
line_max: 290
alpha_frac: 0.683608
autogenerated: false

repo_name: ubic135/odoo-design
path: addons/auth_signup/__openerp__.py
copies: 313
size: 1571
content:
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
    'name': 'Signup',
    'description': """
Allow users to sign up and reset their password
===============================================
    """,
    'author': 'OpenERP SA',
    'version': '1.0',
    'category': 'Authentication',
    'website': 'https://www.odoo.com',
    'installable': True,
    'auto_install': True,
    'depends': [
        'base_setup',
        'email_template',
        'web',
    ],
    'data': [
        'auth_signup_data.xml',
        'res_config.xml',
        'res_users_view.xml',
        'views/auth_signup_login.xml',
    ],
    'bootstrap': True,
}
license: agpl-3.0
hash: 9,124,591,899,266,322,000
line_mean: 33.152174
line_max: 78
alpha_frac: 0.552514
autogenerated: false

repo_name: dlazz/ansible
path: lib/ansible/plugins/connection/funcd.py
copies: 59
size: 3533
content:
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    author: Michael Scherer (@msherer) <misc@zarb.org>
    connection: funcd
    short_description: Use funcd to connect to target
    description:
        - This transport permits you to use Ansible over Func.
        - For people who have already setup func and that wish to play with ansible,
          this permit to move gradually to ansible without having to redo completely
          the setup of the network.
    version_added: "1.1"
    options:
      remote_addr:
        description:
            - The path of the chroot you want to access.
        default: inventory_hostname
        vars:
            - name: ansible_host
            - name: ansible_func_host
"""

HAVE_FUNC = False
try:
    import func.overlord.client as fc
    HAVE_FUNC = True
except ImportError:
    pass

import os
import tempfile
import shutil

from ansible.errors import AnsibleError
from ansible.utils.display import Display

display = Display()


class Connection(object):
    ''' Func-based connections '''

    has_pipelining = False

    def __init__(self, runner, host, port, *args, **kwargs):
        self.runner = runner
        self.host = host
        # port is unused, this go on func
        self.port = port

    def connect(self, port=None):
        if not HAVE_FUNC:
            raise AnsibleError("func is not installed")

        self.client = fc.Client(self.host)
        return self

    def exec_command(self, cmd, become_user=None, sudoable=False,
                     executable='/bin/sh', in_data=None):
        ''' run a command on the remote minion '''

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # totally ignores privlege escalation
        display.vvv("EXEC %s" % (cmd), host=self.host)
        p = self.client.command.run(cmd)[self.host]
        return (p[0], p[1], p[2])

    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''

        out_path = self._normalize_path(out_path, '/')
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self.client.local.copyfile.send(in_path, out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from remote to local '''

        in_path = self._normalize_path(in_path, '/')
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        # need to use a tmp dir due to difference of semantic for getfile
        # ( who take a # directory as destination) and fetch_file, who
        # take a file directly
        tmpdir = tempfile.mkdtemp(prefix="func_ansible")
        self.client.local.getfile.get(in_path, tmpdir)
        shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), out_path)
        shutil.rmtree(tmpdir)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
license: gpl-3.0
hash: 8,316,919,214,908,818,000
line_mean: 33.300971
line_max: 110
alpha_frac: 0.638834
autogenerated: false

repo_name: chamakov/namebench
path: nb_third_party/dns/rdtypes/ANY/CNAME.py
copies: 248
size: 1092
content:
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import dns.rdtypes.nsbase

class CNAME(dns.rdtypes.nsbase.NSBase):
    """CNAME record

    Note: although CNAME is officially a singleton type, dnspython allows
    non-singleton CNAME rdatasets because such sets have been commonly
    used by BIND and other nameservers for load balancing."""
    pass
license: apache-2.0
hash: 5,879,144,980,193,845,000
line_mean: 44.5
line_max: 73
alpha_frac: 0.774725
autogenerated: false

repo_name: alfredoavanzosc/odoomrp-wip-1
path: sale_product_variants/__openerp__.py
copies: 14
size: 1775
content:
# -*- encoding: utf-8 -*-
##############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    "name": "Sale - Product variants",
    "version": "1.0",
    "depends": [
        "product",
        "sale",
        "product_variants_no_automatic_creation",
    ],
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "contributors": [
        "Mikel Arregi <mikelarregi@avanzosc.es>",
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <ajuaristio@gmail.com>",
    ],
    "category": "Sales Management",
    "website": "http://www.odoomrp.com",
    "summary": "Product variants in sale management",
    "data": [
        "security/ir.model.access.csv",
        "security/sale_product_variants_security.xml",
        'views/res_config_view.xml',
        "views/sale_view.xml",
    ],
    "installable": True,
    "post_init_hook": "assign_product_template",
}
license: agpl-3.0
hash: -8,752,300,256,465,916,000
line_mean: 35.979167
line_max: 78
alpha_frac: 0.578592
autogenerated: false

repo_name: titasakgm/brc-stock
path: openerp/addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/null_queries/models.py
copies: 16
size: 1789
content:
from django.db import models

class Poll(models.Model):
    question = models.CharField(max_length=200)

    def __unicode__(self):
        return u"Q: %s " % self.question

class Choice(models.Model):
    poll = models.ForeignKey(Poll)
    choice = models.CharField(max_length=200)

    def __unicode__(self):
        return u"Choice: %s in poll %s" % (self.choice, self.poll)

__test__ = {'API_TESTS':"""
# Regression test for the use of None as a query value. None is interpreted as
# an SQL NULL, but only in __exact queries.

# Set up some initial polls and choices
>>> p1 = Poll(question='Why?')
>>> p1.save()
>>> c1 = Choice(poll=p1, choice='Because.')
>>> c1.save()
>>> c2 = Choice(poll=p1, choice='Why Not?')
>>> c2.save()

# Exact query with value None returns nothing ("is NULL" in sql, but every 'id'
# field has a value).
>>> Choice.objects.filter(choice__exact=None)
[]

Excluding the previous result returns everything.
>>> Choice.objects.exclude(choice=None).order_by('id')
[<Choice: Choice: Because. in poll Q: Why? >, <Choice: Choice: Why Not? in poll Q: Why? >]

# Valid query, but fails because foo isn't a keyword
>>> Choice.objects.filter(foo__exact=None)
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'foo' into field. Choices are: choice, id, poll

# Can't use None on anything other than __exact
>>> Choice.objects.filter(id__gt=None)
Traceback (most recent call last):
...
ValueError: Cannot use None as a query value

# Can't use None on anything other than __exact
>>> Choice.objects.filter(foo__gt=None)
Traceback (most recent call last):
...
ValueError: Cannot use None as a query value

# Related managers use __exact=None implicitly if the object hasn't been saved.
>>> p2 = Poll(question="How?")
>>> p2.choice_set.all()
[]
"""}
license: agpl-3.0
hash: -7,627,443,570,640,662,000
line_mean: 29.322034
line_max: 90
alpha_frac: 0.681386
autogenerated: false

repo_name: gsmaxwell/phase_offset_rx
path: gr-digital/python/qa_probe_density.py
copies: 7
size: 2141
content:
#!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest
import digital_swig as digital

class test_probe_density(gr_unittest.TestCase):

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        src_data = [0, 1, 0, 1]
        expected_data = 1
        src = gr.vector_source_b (src_data)
        op = digital.probe_density_b(1)
        self.tb.connect (src, op)
        self.tb.run ()

        result_data = op.density()
        self.assertEqual (expected_data, result_data)

    def test_002(self):
        src_data = [1, 1, 1, 1]
        expected_data = 1
        src = gr.vector_source_b (src_data)
        op = digital.probe_density_b(0.01)
        self.tb.connect (src, op)
        self.tb.run ()

        result_data = op.density()
        self.assertEqual (expected_data, result_data)

    def test_003(self):
        src_data = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
        expected_data = 0.95243
        src = gr.vector_source_b (src_data)
        op = digital.probe_density_b(0.01)
        self.tb.connect (src, op)
        self.tb.run ()

        result_data = op.density()
        print result_data
        self.assertAlmostEqual (expected_data, result_data, 5)

if __name__ == '__main__':
    gr_unittest.run(test_probe_density, "test_probe_density.xml")
license: gpl-3.0
hash: -9,129,614,604,184,167,000
line_mean: 29.15493
line_max: 70
alpha_frac: 0.633349
autogenerated: false

repo_name: rnestler/servo
path: tests/wpt/mozilla/tests/webgl/conformance-2.0.0/deqp/functional/gles3/uniformbuffers/uniformbuffers_test_generator.py
copies: 51
size: 3333
content:
#!/usr/bin/env python # Copyright (c) 2016 The Khronos Group Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and/or associated documentation files (the # "Materials"), to deal in the Materials without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Materials, and to # permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. """ Generator for uniformbuffers* tests. This file needs to be run in its folder. """ import sys _DO_NOT_EDIT_WARNING = """<!-- This file is auto-generated from uniformbuffers_test_generator.py DO NOT EDIT! --> """ _HTML_TEMPLATE = """<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>WebGL Uniform Block Conformance Tests</title> <link rel="stylesheet" href="../../../../resources/js-test-style.css"/> <script src="../../../../js/js-test-pre.js"></script> <script src="../../../../js/webgl-test-utils.js"></script> <script src="../../../../closure-library/closure/goog/base.js"></script> <script src="../../../deqp-deps.js"></script> <script>goog.require('functional.gles3.es3fUniformBlockTests');</script> </head> <body> <div id="description"></div> <div id="console"></div> <canvas id="canvas" width="200" height="100"> </canvas> <script> var wtu = WebGLTestUtils; var gl = wtu.create3DContext('canvas', null, 2); functional.gles3.es3fUniformBlockTests.run([%(start)s, %(end)s]); </script> </body> </html> """ _GROUPS = [ 'single_basic_type', 'single_basic_array', 'single_struct', 'single_struct_array', 'single_nested_struct', 'single_nested_struct_array', 'instance_array_basic_type', 'multi_basic_types', 'multi_nested_struct', 'random', ] def GenerateFilename(group): """Generate test filename.""" filename = group filename += ".html" return filename def WriteTest(filename, start, end): """Write one test.""" file = open(filename, "wb") file.write(_DO_NOT_EDIT_WARNING) file.write(_HTML_TEMPLATE % { 'start': start, 'end': end }) file.close def GenerateTests(): """Generate all tests.""" filelist = [] for ii in range(len(_GROUPS)): filename = GenerateFilename(_GROUPS[ii]) filelist.append(filename) WriteTest(filename, ii, ii + 1) return filelist def GenerateTestList(filelist): file = open("00_test_list.txt", "wb") file.write('\n'.join(filelist)) file.close def main(argv): """This is the main function.""" filelist = GenerateTests() GenerateTestList(filelist) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
license: mpl-2.0
hash: 1,367,308,292,002,420,500
line_mean: 27.982609
line_max: 73
alpha_frac: 0.691269
autogenerated: false

repo_name: alu042/edx-platform
path: common/djangoapps/student/tests/test_credit.py
copies: 28
size: 8077
content:
""" Tests for credit courses on the student dashboard. """ import unittest import datetime from mock import patch import pytz from django.conf import settings from django.core.urlresolvers import reverse from django.test.utils import override_settings from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.models import CourseEnrollmentAttribute from student.tests.factories import UserFactory, CourseEnrollmentFactory from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider, CreditEligibility from openedx.core.djangoapps.credit import api as credit_api TEST_CREDIT_PROVIDER_SECRET_KEY = "931433d583c84ca7ba41784bad3232e6" @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') @override_settings(CREDIT_PROVIDER_SECRET_KEYS={ "hogwarts": TEST_CREDIT_PROVIDER_SECRET_KEY, }) @patch.dict(settings.FEATURES, {"ENABLE_CREDIT_ELIGIBILITY": True}) class CreditCourseDashboardTest(ModuleStoreTestCase): """ Tests for credit courses on the student dashboard. """ USERNAME = "ron" PASSWORD = "mobiliarbus" PROVIDER_ID = "hogwarts" PROVIDER_NAME = "Hogwarts School of Witchcraft and Wizardry" PROVIDER_STATUS_URL = "http://credit.example.com/status" def setUp(self): """Create a course and an enrollment. """ super(CreditCourseDashboardTest, self).setUp() # Create a user and log in self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) result = self.client.login(username=self.USERNAME, password=self.PASSWORD) self.assertTrue(result, msg="Could not log in") # Create a course and configure it as a credit course self.course = CourseFactory() CreditCourse.objects.create(course_key=self.course.id, enabled=True) # pylint: disable=no-member # Configure a credit provider CreditProvider.objects.create( provider_id=self.PROVIDER_ID, display_name=self.PROVIDER_NAME, provider_status_url=self.PROVIDER_STATUS_URL, enable_integration=True, ) # Configure a single credit requirement (minimum passing grade) credit_api.set_credit_requirements( self.course.id, # pylint: disable=no-member [ { "namespace": "grade", "name": "grade", "display_name": "Final Grade", "criteria": { "min_grade": 0.8 } } ] ) # Enroll the user in the course as "verified" self.enrollment = CourseEnrollmentFactory( user=self.user, course_id=self.course.id, # pylint: disable=no-member mode="verified" ) def test_not_eligible_for_credit(self): # The user is not yet eligible for credit, so no additional information should be displayed on the dashboard. response = self._load_dashboard() self.assertNotContains(response, "credit-eligibility-msg") self.assertNotContains(response, "purchase-credit-btn") def test_eligible_for_credit(self): # Simulate that the user has completed the only requirement in the course # so the user is eligible for credit. self._make_eligible() # The user should have the option to purchase credit response = self._load_dashboard() self.assertContains(response, "credit-eligibility-msg") self.assertContains(response, "purchase-credit-btn") # Move the eligibility deadline so it's within 30 days eligibility = CreditEligibility.objects.get(username=self.USERNAME) eligibility.deadline = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=29) eligibility.save() # The user should still have the option to purchase credit, # but there should also be a message urging the user to purchase soon. 
response = self._load_dashboard() self.assertContains(response, "credit-eligibility-msg") self.assertContains(response, "purchase-credit-btn") def test_purchased_credit(self): # Simulate that the user has purchased credit, but has not # yet initiated a request to the credit provider self._make_eligible() self._purchase_credit() response = self._load_dashboard() self.assertContains(response, "credit-request-not-started-msg") def test_purchased_credit_and_request_pending(self): # Simulate that the user has purchased credit and initiated a request, # but we haven't yet heard back from the credit provider. self._make_eligible() self._purchase_credit() self._initiate_request() # Expect that the user's status is "pending" response = self._load_dashboard() self.assertContains(response, "credit-request-pending-msg") def test_purchased_credit_and_request_approved(self): # Simulate that the user has purchased credit and initiated a request, # and had that request approved by the credit provider self._make_eligible() self._purchase_credit() request_uuid = self._initiate_request() self._set_request_status(request_uuid, "approved") # Expect that the user's status is "approved" response = self._load_dashboard() self.assertContains(response, "credit-request-approved-msg") def test_purchased_credit_and_request_rejected(self): # Simulate that the user has purchased credit and initiated a request, # and had that request rejected by the credit provider self._make_eligible() self._purchase_credit() request_uuid = self._initiate_request() self._set_request_status(request_uuid, "rejected") # Expect that the user's status is "approved" response = self._load_dashboard() self.assertContains(response, "credit-request-rejected-msg") def test_credit_status_error(self): # Simulate an error condition: the user has a credit enrollment # but no enrollment attribute indicating which provider the user # purchased credit from. self._make_eligible() self._purchase_credit() CourseEnrollmentAttribute.objects.all().delete() # Expect an error message response = self._load_dashboard() self.assertContains(response, "credit-error-msg") def _load_dashboard(self): """Load the student dashboard and return the HttpResponse. """ return self.client.get(reverse("dashboard")) def _make_eligible(self): """Make the user eligible for credit in the course. """ credit_api.set_credit_requirement_status( self.USERNAME, self.course.id, # pylint: disable=no-member "grade", "grade", status="satisfied", reason={ "final_grade": 0.95 } ) def _purchase_credit(self): """Purchase credit from a provider in the course. """ self.enrollment.mode = "credit" self.enrollment.save() # pylint: disable=no-member CourseEnrollmentAttribute.objects.create( enrollment=self.enrollment, namespace="credit", name="provider_id", value=self.PROVIDER_ID, ) def _initiate_request(self): """Initiate a request for credit from a provider. """ request = credit_api.create_credit_request( self.course.id, # pylint: disable=no-member self.PROVIDER_ID, self.USERNAME ) return request["parameters"]["request_uuid"] def _set_request_status(self, uuid, status): """Set the status of a request for credit, simulating the notification from the provider. """ credit_api.update_credit_request_status(uuid, self.PROVIDER_ID, status)
license: agpl-3.0
hash: -928,681,149,202,278,400
line_mean: 37.831731
line_max: 117
alpha_frac: 0.653213
autogenerated: false

repo_name: snnn/tensorflow
path: tensorflow/contrib/losses/python/losses/loss_ops.py
copies: 19
size: 28132
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Loss operations for use in neural networks. Note: All the losses are added to the `GraphKeys.LOSSES` collection. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.framework.python.ops import add_arg_scope from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.deprecation import deprecated_argument_lookup __all__ = [ "absolute_difference", "add_loss", "cosine_distance", "compute_weighted_loss", "get_losses", "get_regularization_losses", "get_total_loss", "hinge_loss", "log_loss", "mean_pairwise_squared_error", "mean_squared_error", "sigmoid_cross_entropy", "softmax_cross_entropy", "sparse_softmax_cross_entropy" ] def _scale_losses(losses, weights): """Computes the scaled loss. Args: losses: A `Tensor` of size [batch_size, d1, ... dN]. weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN]. The `losses` are reduced (tf.reduce_sum) until its dimension matches that of `weights` at which point the reduced `losses` are element-wise multiplied by `weights` and a final reduce_sum is computed on the result. Conceptually, this operation is equivalent to broadcasting (tiling) `weights` to be the same size as `losses`, performing an element-wise multiplication, and summing the result. Returns: A scalar tf.float32 `Tensor` whose value represents the sum of the scaled `losses`. """ # First, compute the sum of the losses over all elements: start_index = max(0, weights.get_shape().ndims) reduction_indices = list(range(start_index, losses.get_shape().ndims)) reduced_losses = math_ops.reduce_sum( losses, reduction_indices=reduction_indices) reduced_losses = math_ops.multiply(reduced_losses, weights) return math_ops.reduce_sum(reduced_losses) def _safe_div(numerator, denominator, name="value"): """Computes a safe divide which returns 0 if the denominator is zero. Note that the function contains an additional conditional check that is necessary for avoiding situations where the loss is zero causing NaNs to creep into the gradient computation. Args: numerator: An arbitrary `Tensor`. denominator: A `Tensor` whose shape matches `numerator` and whose values are assumed to be non-negative. name: An optional name for the returned op. Returns: The element-wise value of the numerator divided by the denominator. 
""" return array_ops.where( math_ops.greater(denominator, 0), math_ops.div(numerator, array_ops.where( math_ops.equal(denominator, 0), array_ops.ones_like(denominator), denominator)), array_ops.zeros_like(numerator), name=name) def _safe_mean(losses, num_present): """Computes a safe mean of the losses. Args: losses: A tensor whose elements contain individual loss measurements. num_present: The number of measurable losses in the tensor. Returns: A scalar representing the mean of the losses. If `num_present` is zero, then zero is returned. """ total_loss = math_ops.reduce_sum(losses) return _safe_div(total_loss, num_present) @deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.") def compute_weighted_loss(losses, weights=1.0, scope=None): """Computes the weighted loss. Args: losses: A tensor of size [batch_size, d1, ... dN]. weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N. scope: the scope for the operations performed in computing the loss. Returns: A scalar `Tensor` that returns the weighted loss. Raises: ValueError: If `weights` is `None` or the shape is not compatible with `losses`, or if the number of dimensions (rank) of either `losses` or `weights` is missing. """ with ops.name_scope(scope, "weighted_loss", [losses, weights]): losses = ops.convert_to_tensor(losses) input_dtype = losses.dtype losses = math_ops.to_float(losses) weights = math_ops.to_float(ops.convert_to_tensor(weights)) if losses.get_shape().ndims is None: raise ValueError("losses.get_shape().ndims cannot be None") weights_shape = weights.get_shape() if weights_shape.ndims is None: raise ValueError("weights.get_shape().ndims cannot be None") if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1): weights = array_ops.squeeze(weights, [-1]) total_loss = _scale_losses(losses, weights) num_present = _num_present(losses, weights) mean_loss = _safe_mean(total_loss, num_present) # convert the result back to the input type mean_loss = math_ops.cast(mean_loss, input_dtype) add_loss(mean_loss) return mean_loss def _num_present(losses, weights, per_batch=False): """Computes the number of elements in the loss function induced by `weights`. A given weights tensor induces different numbers of usable elements in the `losses` tensor. The `weights` tensor is broadcast across `losses` for all possible dimensions. For example, if `losses` is a tensor of dimension [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in effect, tiled to match the size of `losses`. Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: A tensor of size [batch_size, d1, ... dN]. weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If `per_batch` is True, the value is returned as a tensor of size [batch_size]. Otherwise, a single scalar tensor is returned. 
""" # If weights is a scalar, its easy to compute: if weights.get_shape().ndims == 0: batch_size = array_ops.reshape( array_ops.slice(array_ops.shape(losses), [0], [1]), []) num_per_batch = math_ops.div( math_ops.to_float(array_ops.size(losses)), math_ops.to_float(batch_size)) num_per_batch = array_ops.where( math_ops.equal(weights, 0), 0.0, num_per_batch) num_per_batch = math_ops.multiply( array_ops.ones(array_ops.reshape(batch_size, [1])), num_per_batch) return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch) # First, count the number of nonzero weights: if weights.get_shape().ndims >= 1: reduction_indices = list(range(1, weights.get_shape().ndims)) num_nonzero_per_batch = math_ops.reduce_sum( math_ops.to_float(math_ops.not_equal(weights, 0)), reduction_indices=reduction_indices) # Next, determine the number of elements that weights would broadcast to: broadcast_dims = array_ops.slice( array_ops.shape(losses), [weights.get_shape().ndims], [-1]) num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims)) num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast) return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch) @deprecated("2016-12-30", "Use tf.losses.add_loss instead.") @add_arg_scope def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES): """Adds a externally defined loss to the collection of losses. Args: loss: A loss `Tensor`. loss_collection: Optional collection to add the loss to. """ if loss_collection: ops.add_to_collection(loss_collection, loss) @deprecated("2016-12-30", "Use tf.losses.get_losses instead.") def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES): """Gets the list of losses from the loss_collection. Args: scope: an optional scope for filtering the losses to return. loss_collection: Optional losses collection. Returns: a list of loss tensors. """ return ops.get_collection(loss_collection, scope) @deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.") def get_regularization_losses(scope=None): """Gets the regularization losses. Args: scope: an optional scope for filtering the losses to return. Returns: A list of regularization losses as Tensors. """ return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope) @deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.") def get_total_loss(add_regularization_losses=True, name="total_loss"): """Returns a tensor whose value represents the total loss. Notice that the function adds the given losses to the regularization losses. Args: add_regularization_losses: A boolean indicating whether or not to use the regularization losses in the sum. name: The name of the returned tensor. Returns: A `Tensor` whose value represents the total loss. Raises: ValueError: if `losses` is not iterable. """ losses = get_losses() if add_regularization_losses: losses += get_regularization_losses() return math_ops.add_n(losses, name=name) @deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.") def absolute_difference(predictions, labels=None, weights=1.0, scope=None): """Adds an Absolute Difference loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [batch_size], then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. 
If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: predictions: The predicted outputs. labels: The ground truth output tensor, same dimensions as 'predictions'. weights: Coefficients for the loss a scalar, a tensor of shape [batch_size] or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the loss value. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. """ with ops.name_scope(scope, "absolute_difference", [predictions, labels, weights]) as scope: predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = math_ops.to_float(predictions) labels = math_ops.to_float(labels) losses = math_ops.abs(math_ops.subtract(predictions, labels)) return compute_weighted_loss(losses, weights, scope=scope) @deprecated("2016-12-30", "Use tf.losses.sigmoid_cross_entropy instead. Note that the order " "of the predictions and labels arguments has been changed.") def sigmoid_cross_entropy(logits, multi_class_labels, weights=1.0, label_smoothing=0, scope=None): """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [`batch_size`], then the loss weights apply to each corresponding sample. If `label_smoothing` is nonzero, smooth the labels towards 1/2: new_multiclass_labels = multiclass_labels * (1 - label_smoothing) + 0.5 * label_smoothing Args: logits: [batch_size, num_classes] logits outputs of the network . multi_class_labels: [batch_size, num_classes] labels in (0, 1). weights: Coefficients for the loss. The tensor must be a scalar, a tensor of shape [batch_size] or shape [batch_size, num_classes]. label_smoothing: If greater than 0 then smooth the labels. scope: The scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the loss value. Raises: ValueError: If the shape of `logits` doesn't match that of `multi_class_labels` or if the shape of `weights` is invalid, or if `weights` is None. """ with ops.name_scope(scope, "sigmoid_cross_entropy_loss", [logits, multi_class_labels, weights]) as scope: logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape()) multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype) if label_smoothing > 0: multi_class_labels = ( multi_class_labels * (1 - label_smoothing) + 0.5 * label_smoothing) losses = nn.sigmoid_cross_entropy_with_logits( labels=multi_class_labels, logits=logits, name="xentropy") return compute_weighted_loss(losses, weights, scope=scope) @deprecated("2016-12-30", "Use tf.losses.softmax_cross_entropy instead. Note that the order " "of the logits and labels arguments has been changed.") def softmax_cross_entropy(logits, onehot_labels, weights=1.0, label_smoothing=0, scope=None): """Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [`batch_size`], then the loss weights apply to each corresponding sample. 
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes: new_onehot_labels = onehot_labels * (1 - label_smoothing) + label_smoothing / num_classes Args: logits: [batch_size, num_classes] logits outputs of the network . onehot_labels: [batch_size, num_classes] one-hot-encoded labels. weights: Coefficients for the loss. The tensor must be a scalar or a tensor of shape [batch_size]. label_smoothing: If greater than 0 then smooth the labels. scope: the scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the mean loss value. Raises: ValueError: If the shape of `logits` doesn't match that of `onehot_labels` or if the shape of `weights` is invalid or if `weights` is None. """ with ops.name_scope(scope, "softmax_cross_entropy_loss", [logits, onehot_labels, weights]) as scope: logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape()) onehot_labels = math_ops.cast(onehot_labels, logits.dtype) if label_smoothing > 0: num_classes = math_ops.cast( array_ops.shape(onehot_labels)[1], logits.dtype) smooth_positives = 1.0 - label_smoothing smooth_negatives = label_smoothing / num_classes onehot_labels = onehot_labels * smooth_positives + smooth_negatives losses = nn.softmax_cross_entropy_with_logits( labels=onehot_labels, logits=logits, name="xentropy") return compute_weighted_loss(losses, weights, scope=scope) @deprecated("2016-12-30", "Use tf.losses.sparse_softmax_cross_entropy instead. Note that " "the order of the logits and labels arguments has been changed.") def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None): """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [`batch_size`], then the loss weights apply to each corresponding sample. Args: logits: [batch_size, num_classes] logits outputs of the network . labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, num_classes)`. weights: Coefficients for the loss. The tensor must be a scalar or a tensor of shape [batch_size] or [batch_size, 1]. scope: the scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the mean loss value. Raises: ValueError: If the shapes of `logits`, `labels`, and `weights` are incompatible, or if `weights` is None. """ with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss", [logits, labels, weights]) as scope: labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]]) losses = nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name="xentropy") return compute_weighted_loss(losses, weights, scope=scope) @deprecated("2016-12-30", "Use tf.losses.log_loss instead. Note that the order of the " "predictions and labels arguments has been changed.") def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None): """Adds a Log Loss term to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [batch_size], then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. 
Args: predictions: The predicted outputs. labels: The ground truth output tensor, same dimensions as 'predictions'. weights: Coefficients for the loss a scalar, a tensor of shape [batch_size] or a tensor whose shape matches `predictions`. epsilon: A small increment to add to avoid taking a log of zero. scope: The scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the loss value. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. """ with ops.name_scope(scope, "log_loss", [predictions, labels, weights]) as scope: predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = math_ops.to_float(predictions) labels = math_ops.to_float(labels) losses = -math_ops.multiply( labels, math_ops.log(predictions + epsilon)) - math_ops.multiply( (1 - labels), math_ops.log(1 - predictions + epsilon)) return compute_weighted_loss(losses, weights, scope=scope) @deprecated("2016-12-30", "Use tf.losses.hinge_loss instead. Note that the order of the " "logits and labels arguments has been changed, and to stay " "unweighted, reduction=Reduction.NONE") def hinge_loss(logits, labels=None, scope=None): """Method that returns the loss tensor for hinge loss. Args: logits: The logits, a float tensor. Note that logits are assumed to be unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive (resp. negative) binary prediction. labels: The ground truth output tensor. Its shape should match the shape of logits. The values of the tensor are expected to be 0.0 or 1.0. Internally the {0,1} labels are converted to {-1,1} when calculating the hinge loss. scope: The scope for the operations performed in computing the loss. Returns: An unweighted `Tensor` of same shape as `logits` and `labels` representing the loss values across the batch. Raises: ValueError: If the shapes of `logits` and `labels` don't match. """ with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope: logits.get_shape().assert_is_compatible_with(labels.get_shape()) # We first need to convert binary labels to -1/1 labels (as floats). labels = math_ops.to_float(labels) all_ones = array_ops.ones_like(labels) labels = math_ops.subtract(2 * labels, all_ones) return nn_ops.relu( math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) @deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.") def mean_squared_error(predictions, labels=None, weights=1.0, scope=None): """Adds a Sum-of-Squares loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [batch_size], then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: predictions: The predicted outputs. labels: The ground truth output tensor, same dimensions as 'predictions'. weights: Coefficients for the loss a scalar, a tensor of shape [batch_size] or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the loss value. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. 
""" with ops.name_scope(scope, "mean_squared_error", [predictions, labels, weights]) as scope: predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = math_ops.to_float(predictions) labels = math_ops.to_float(labels) losses = math_ops.square(math_ops.subtract(predictions, labels)) return compute_weighted_loss(losses, weights, scope=scope) @deprecated("2016-12-30", "Use tf.losses.mean_pairwise_squared_error instead. Note that the " "order of the predictions and labels arguments has been changed.") def mean_pairwise_squared_error(predictions, labels=None, weights=1.0, scope=None): """Adds a pairwise-errors-squared loss to the training procedure. Unlike `mean_squared_error`, which is a measure of the differences between corresponding elements of `predictions` and `labels`, `mean_pairwise_squared_error` is a measure of the differences between pairs of corresponding elements of `predictions` and `labels`. For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are three pairs of differences are summed to compute the loss: loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3 Note that since the inputs are of size [batch_size, d0, ... dN], the corresponding pairs are computed within each batch sample but not across samples within a batch. For example, if `predictions` represents a batch of 16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs is drawn from each image, but not across images. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size [batch_size], then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. Args: predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN] where N+1 is the total number of dimensions in `predictions`. labels: The ground truth output tensor, whose shape must match the shape of the `predictions` tensor. weights: Coefficients for the loss a scalar, a tensor of shape [batch_size] or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. Returns: A scalar `Tensor` representing the loss value. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. 
""" with ops.name_scope(scope, "mean_pairwise_squared_error", [predictions, labels, weights]) as scope: predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = math_ops.to_float(predictions) labels = math_ops.to_float(labels) weights = math_ops.to_float(ops.convert_to_tensor(weights)) diffs = math_ops.subtract(predictions, labels) # Need to verify here since the function doesn't use compute_weighted_loss if diffs.get_shape().ndims is None: raise ValueError("diffs.get_shape().ndims cannot be None") if weights.get_shape().ndims is None: raise ValueError("weights.get_shape().ndims cannot be None") reduction_indices = list(range(1, diffs.get_shape().ndims)) sum_squares_diff_per_batch = math_ops.reduce_sum( math_ops.square(diffs), reduction_indices=reduction_indices) num_present_per_batch = _num_present(diffs, weights, per_batch=True) term1 = 2.0 * _safe_div(sum_squares_diff_per_batch, num_present_per_batch) sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices) term2 = 2.0 * _safe_div( math_ops.square(sum_diff), math_ops.square(num_present_per_batch)) loss = _scale_losses(term1 - term2, weights) mean_loss = array_ops.where( math_ops.reduce_sum(num_present_per_batch) > 0, loss, array_ops.zeros_like(loss), name="value") add_loss(mean_loss) return mean_loss @deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.") @deprecated_args(None, "dim is deprecated, use axis instead", "dim") def cosine_distance(predictions, labels=None, axis=None, weights=1.0, scope=None, dim=None): """Adds a cosine-distance loss to the training procedure. Note that the function assumes that `predictions` and `labels` are already unit-normalized. Args: predictions: An arbitrary matrix. labels: A `Tensor` whose shape matches 'predictions' axis: The dimension along which the cosine distance is computed. weights: Coefficients for the loss a scalar, a tensor of shape [batch_size] or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. dim: The old (deprecated) name for `axis`. Returns: A scalar `Tensor` representing the loss value. Raises: ValueError: If `predictions` shape doesn't match `labels` shape, or `weights` is `None`. """ axis = deprecated_argument_lookup( "axis", axis, "dim", dim) if axis is None: raise ValueError("You must specify 'axis'.") with ops.name_scope(scope, "cosine_distance_loss", [predictions, labels, weights]) as scope: predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = math_ops.to_float(predictions) labels = math_ops.to_float(labels) radial_diffs = math_ops.multiply(predictions, labels) losses = 1 - math_ops.reduce_sum( radial_diffs, reduction_indices=[ axis, ]) return compute_weighted_loss(losses, weights, scope=scope)
apache-2.0
-8,104,154,935,110,994,000
40.738872
80
0.688789
false
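The mean_pairwise_squared_error docstring above gives the loss for labels [a, b, c] and predictions [x, y, z] as [((a-b)-(x-y))^2 + ((a-c)-(x-z))^2 + ((b-c)-(y-z))^2] / 3. Below is a minimal plain-NumPy sketch of that arithmetic for a single sample; it only illustrates the formula and is not the TensorFlow implementation, which additionally applies weights and the factor-of-two terms shown in the code.

import numpy as np
from itertools import combinations

def pairwise_squared_error(predictions, labels):
    # average squared difference between all pairs of element differences
    pairs = list(combinations(range(len(labels)), 2))
    total = sum(((labels[i] - labels[j]) - (predictions[i] - predictions[j])) ** 2
                for i, j in pairs)
    return total / len(pairs)

labels = np.array([1.0, 2.0, 4.0])
predictions = np.array([1.5, 2.5, 3.0])
print(pairwise_squared_error(predictions, labels))  # 1.5 for this toy sample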
tjanez/ansible
lib/ansible/module_utils/dellos9.py
47
4862
# # (c) 2015 Peter Sprygada, <psprygada@ansible.com> # # Copyright (c) 2016 Dell Inc. # # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import re from ansible.module_utils.shell import CliBase from ansible.module_utils.network import register_transport, to_list, Command from ansible.module_utils.netcfg import NetworkConfig, ConfigLine def get_config(module): contents = module.params['config'] if not contents: contents = module.config.get_config() module.params['config'] = contents return NetworkConfig(indent=1, contents=contents[0]) else: return NetworkConfig(indent=1, contents=contents) def get_sublevel_config(running_config, module): contents = list() current_config_contents = list() obj = running_config.get_object(module.params['parents']) if obj: contents = obj.children contents[:0] = module.params['parents'] indent = 0 for c in contents: if isinstance(c, str): current_config_contents.append(c.rjust(len(c) + indent, ' ')) if isinstance(c, ConfigLine): current_config_contents.append(c.raw) indent = indent + 1 sublevel_config = '\n'.join(current_config_contents) return sublevel_config class Cli(CliBase): NET_PASSWD_RE = re.compile(r"[\r\n]?password:\s?$", re.I) WARNING_PROMPTS_RE = [ re.compile(r"[\r\n]?\[confirm yes/no\]:\s?$"), re.compile(r"[\r\n]?\[y/n\]:\s?$"), re.compile(r"[\r\n]?\[yes/no\]:\s?$") ] CLI_PROMPTS_RE = [ re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$") ] CLI_ERRORS_RE = [ re.compile(r"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*$"), re.compile(r"% ?Bad secret"), re.compile(r"invalid input", re.I), re.compile(r"(?:incomplete|ambiguous) command", re.I), re.compile(r"connection timed out", re.I), re.compile(r"'[^']' +returned error code: ?\d+"), ] def connect(self, params, **kwargs): super(Cli, self).connect(params, kickstart=False, **kwargs) self.shell.send('terminal length 0') def authorize(self, params, **kwargs): passwd = 
params['auth_pass'] self.run_commands( Command('enable', prompt=self.NET_PASSWD_RE, response=passwd) ) def configure(self, commands, **kwargs): cmds = ['configure terminal'] cmdlist = list() for c in to_list(commands): cmd = Command(c, prompt=self.WARNING_PROMPTS_RE, response='yes') cmdlist.append(cmd) cmds.extend(cmdlist) cmds.append('end') responses = self.execute(cmds) responses.pop(0) return responses def get_config(self, **kwargs): return self.execute(['show running-config']) def load_config(self, commands, **kwargs): return self.configure(commands) def save_config(self): cmdlist = list() cmd = 'copy running-config startup-config' cmdlist.append(Command(cmd, prompt=self.WARNING_PROMPTS_RE, response='yes')) self.execute(cmdlist) Cli = register_transport('cli', default=True)(Cli)
gpl-3.0
8,573,506,676,213,166,000
35.014815
125
0.654669
false
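The Cli class above drives a Dell OS9 terminal by matching prompt and error patterns. The snippet below exercises the first of those prompt regexes against a few made-up prompt strings; the device names are assumptions for illustration, not output captured from a real device.

import re

# same expression as the first entry in CLI_PROMPTS_RE above
cli_prompt = re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$")

for candidate in ("\nswitch01#", "\nswitch01(conf)#", "\nswitch01>", "not a prompt"):
    # search() finds the trailing prompt, if any, at the end of the buffer
    print(repr(candidate), bool(cli_prompt.search(candidate)))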
TaylorOshan/pysal
pysal/contrib/glm/links.py
35
20123
''' Defines the link functions to be used with GLM and GEE families. ''' import numpy as np import scipy.stats FLOAT_EPS = np.finfo(float).eps class Link(object): """ A generic link function for one-parameter exponential family. `Link` does nothing, but lays out the methods expected of any subclass. """ def __call__(self, p): """ Return the value of the link function. This is just a placeholder. Parameters ---------- p : array-like Probabilities Returns ------- g(p) : array-like The value of the link function g(p) = z """ return NotImplementedError def inverse(self, z): """ Inverse of the link function. Just a placeholder. Parameters ---------- z : array-like `z` is usually the linear predictor of the transformed variable in the IRLS algorithm for GLM. Returns ------- g^(-1)(z) : array The value of the inverse of the link function g^(-1)(z) = p """ return NotImplementedError def deriv(self, p): """ Derivative of the link function g'(p). Just a placeholder. Parameters ---------- p : array-like Returns ------- g'(p) : array The value of the derivative of the link function g'(p) """ return NotImplementedError def deriv2(self, p): """Second derivative of the link function g''(p) implemented through numerical differentiation """ from statsmodels.tools.numdiff import approx_fprime_cs # TODO: workaround proplem with numdiff for 1d return np.diag(approx_fprime_cs(p, self.deriv)) def inverse_deriv(self, z): """ Derivative of the inverse link function g^(-1)(z). Notes ----- This reference implementation gives the correct result but is inefficient, so it can be overriden in subclasses. Parameters ---------- z : array-like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : array The value of the derivative of the inverse of the link function """ return 1 / self.deriv(self.inverse(z)) class Logit(Link): """ The logit transform Notes ----- call and derivative use a private method _clean to make trim p by machine epsilon so that p is in (0,1) Alias of Logit: logit = Logit() """ def _clean(self, p): """ Clip logistic values to range (eps, 1-eps) Parameters ----------- p : array-like Probabilities Returns -------- pclip : array Clipped probabilities """ return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS) def __call__(self, p): """ The logit transform Parameters ---------- p : array-like Probabilities Returns ------- z : array Logit transform of `p` Notes ----- g(p) = log(p / (1 - p)) """ p = self._clean(p) return np.log(p / (1. - p)) def inverse(self, z): """ Inverse of the logit transform Parameters ---------- z : array-like The value of the logit transform at `p` Returns ------- p : array Probabilities Notes ----- g^(-1)(z) = exp(z)/(1+exp(z)) """ z = np.asarray(z) t = np.exp(-z) return 1. / (1. + t) def deriv(self, p): """ Derivative of the logit transform Parameters ---------- p: array-like Probabilities Returns ------- g'(p) : array Value of the derivative of logit transform at `p` Notes ----- g'(p) = 1 / (p * (1 - p)) Alias for `Logit`: logit = Logit() """ p = self._clean(p) return 1. / (p * (1 - p)) def inverse_deriv(self, z): """ Derivative of the inverse of the logit transform Parameters ---------- z : array-like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : array The value of the derivative of the inverse of the logit function """ t = np.exp(z) return t/(1 + t)**2 def deriv2(self, p): """ Second derivative of the logit function. 
Parameters ---------- p : array-like probabilities Returns ------- g''(z) : array The value of the second derivative of the logit function """ v = p * (1 - p) return (2*p - 1) / v**2 class logit(Logit): pass class Power(Link): """ The power transform Parameters ---------- power : float The exponent of the power transform Notes ----- Aliases of Power: inverse = Power(power=-1) sqrt = Power(power=.5) inverse_squared = Power(power=-2.) identity = Power(power=1.) """ def __init__(self, power=1.): self.power = power def __call__(self, p): """ Power transform link function Parameters ---------- p : array-like Mean parameters Returns ------- z : array-like Power transform of x Notes ----- g(p) = x**self.power """ z = np.power(p, self.power) return z def inverse(self, z): """ Inverse of the power transform link function Parameters ---------- `z` : array-like Value of the transformed mean parameters at `p` Returns ------- `p` : array Mean parameters Notes ----- g^(-1)(z`) = `z`**(1/`power`) """ p = np.power(z, 1. / self.power) return p def deriv(self, p): """ Derivative of the power transform Parameters ---------- p : array-like Mean parameters Returns -------- g'(p) : array Derivative of power transform of `p` Notes ----- g'(`p`) = `power` * `p`**(`power` - 1) """ return self.power * np.power(p, self.power - 1) def deriv2(self, p): """ Second derivative of the power transform Parameters ---------- p : array-like Mean parameters Returns -------- g''(p) : array Second derivative of the power transform of `p` Notes ----- g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2) """ return self.power * (self.power - 1) * np.power(p, self.power - 2) def inverse_deriv(self, z): """ Derivative of the inverse of the power transform Parameters ---------- z : array-like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)'(z) : array The value of the derivative of the inverse of the power transform function """ return np.power(z, (1 - self.power)/self.power) / self.power class inverse_power(Power): """ The inverse transform Notes ----- g(p) = 1/p Alias of statsmodels.family.links.Power(power=-1.) """ def __init__(self): super(inverse_power, self).__init__(power=-1.) class sqrt(Power): """ The square-root transform Notes ----- g(`p`) = sqrt(`p`) Alias of statsmodels.family.links.Power(power=.5) """ def __init__(self): super(sqrt, self).__init__(power=.5) class inverse_squared(Power): """ The inverse squared transform Notes ----- g(`p`) = 1/(`p`\ \*\*2) Alias of statsmodels.family.links.Power(power=2.) """ def __init__(self): super(inverse_squared, self).__init__(power=-2.) class identity(Power): """ The identity transform Notes ----- g(`p`) = `p` Alias of statsmodels.family.links.Power(power=1.) """ def __init__(self): super(identity, self).__init__(power=1.) class Log(Link): """ The log transform Notes ----- call and derivative call a private method _clean to trim the data by machine epsilon so that p is in (0,1). log is an alias of Log. 
""" def _clean(self, x): return np.clip(x, FLOAT_EPS, np.inf) def __call__(self, p, **extra): """ Log transform link function Parameters ---------- x : array-like Mean parameters Returns ------- z : array log(x) Notes ----- g(p) = log(p) """ x = self._clean(p) return np.log(x) def inverse(self, z): """ Inverse of log transform link function Parameters ---------- z : array The inverse of the link function at `p` Returns ------- p : array The mean probabilities given the value of the inverse `z` Notes ----- g^{-1}(z) = exp(z) """ return np.exp(z) def deriv(self, p): """ Derivative of log transform link function Parameters ---------- p : array-like Mean parameters Returns ------- g'(p) : array derivative of log transform of x Notes ----- g'(x) = 1/x """ p = self._clean(p) return 1. / p def deriv2(self, p): """ Second derivative of the log transform link function Parameters ---------- p : array-like Mean parameters Returns ------- g''(p) : array Second derivative of log transform of x Notes ----- g''(x) = -1/x^2 """ p = self._clean(p) return -1. / p**2 def inverse_deriv(self, z): """ Derivative of the inverse of the log transform link function Parameters ---------- z : array The inverse of the link function at `p` Returns ------- g^(-1)'(z) : array The value of the derivative of the inverse of the log function, the exponential function """ return np.exp(z) class log(Log): """ The log transform Notes ----- log is a an alias of Log. """ pass # TODO: the CDFLink is untested class CDFLink(Logit): """ The use the CDF of a scipy.stats distribution CDFLink is a subclass of logit in order to use its _clean method for the link and its derivative. Parameters ---------- dbn : scipy.stats distribution Default is dbn=scipy.stats.norm Notes ----- The CDF link is untested. """ def __init__(self, dbn=scipy.stats.norm): self.dbn = dbn def __call__(self, p): """ CDF link function Parameters ---------- p : array-like Mean parameters Returns ------- z : array (ppf) inverse of CDF transform of p Notes ----- g(`p`) = `dbn`.ppf(`p`) """ p = self._clean(p) return self.dbn.ppf(p) def inverse(self, z): """ The inverse of the CDF link Parameters ---------- z : array-like The value of the inverse of the link function at `p` Returns ------- p : array Mean probabilities. The value of the inverse of CDF link of `z` Notes ----- g^(-1)(`z`) = `dbn`.cdf(`z`) """ return self.dbn.cdf(z) def deriv(self, p): """ Derivative of CDF link Parameters ---------- p : array-like mean parameters Returns ------- g'(p) : array The derivative of CDF transform at `p` Notes ----- g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`)) """ p = self._clean(p) return 1. / self.dbn.pdf(self.dbn.ppf(p)) def deriv2(self, p): """ Second derivative of the link function g''(p) implemented through numerical differentiation """ from statsmodels.tools.numdiff import approx_fprime p = np.atleast_1d(p) # Note: special function for norm.ppf does not support complex return np.diag(approx_fprime(p, self.deriv, centered=True)) def inverse_deriv(self, z): """ Derivative of the inverse of the CDF transformation link function Parameters ---------- z : array The inverse of the link function at `p` Returns ------- g^(-1)'(z) : array The value of the derivative of the inverse of the logit function """ return 1/self.deriv(self.inverse(z)) class probit(CDFLink): """ The probit (standard normal CDF) transform Notes -------- g(p) = scipy.stats.norm.ppf(p) probit is an alias of CDFLink. 
""" pass class cauchy(CDFLink): """ The Cauchy (standard Cauchy CDF) transform Notes ----- g(p) = scipy.stats.cauchy.ppf(p) cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy """ def __init__(self): super(cauchy, self).__init__(dbn=scipy.stats.cauchy) def deriv2(self, p): """ Second derivative of the Cauchy link function. Parameters ---------- p: array-like Probabilities Returns ------- g''(p) : array Value of the second derivative of Cauchy link function at `p` """ a = np.pi * (p - 0.5) d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3 return d2 class CLogLog(Logit): """ The complementary log-log transform CLogLog inherits from Logit in order to have access to its _clean method for the link and its derivative. Notes ----- CLogLog is untested. """ def __call__(self, p): """ C-Log-Log transform link function Parameters ---------- p : array Mean parameters Returns ------- z : array The CLogLog transform of `p` Notes ----- g(p) = log(-log(1-p)) """ p = self._clean(p) return np.log(-np.log(1 - p)) def inverse(self, z): """ Inverse of C-Log-Log transform link function Parameters ---------- z : array-like The value of the inverse of the CLogLog link function at `p` Returns ------- p : array Mean parameters Notes ----- g^(-1)(`z`) = 1-exp(-exp(`z`)) """ return 1 - np.exp(-np.exp(z)) def deriv(self, p): """ Derivative of C-Log-Log transform link function Parameters ---------- p : array-like Mean parameters Returns ------- g'(p) : array The derivative of the CLogLog transform link function Notes ----- g'(p) = - 1 / ((p-1)*log(1-p)) """ p = self._clean(p) return 1. / ((p - 1) * (np.log(1 - p))) def deriv2(self, p): """ Second derivative of the C-Log-Log ink function Parameters ---------- p : array-like Mean parameters Returns ------- g''(p) : array The second derivative of the CLogLog link function """ p = self._clean(p) fl = np.log(1 - p) d2 = -1 / ((1 - p)**2 * fl) d2 *= 1 + 1 / fl return d2 def inverse_deriv(self, z): """ Derivative of the inverse of the C-Log-Log transform link function Parameters ---------- z : array-like The value of the inverse of the CLogLog link function at `p` Returns ------- g^(-1)'(z) : array The derivative of the inverse of the CLogLog link function """ return np.exp(z - np.exp(z)) class cloglog(CLogLog): """ The CLogLog transform link function. Notes ----- g(`p`) = log(-log(1-`p`)) cloglog is an alias for CLogLog cloglog = CLogLog() """ pass class NegativeBinomial(object): ''' The negative binomial link function Parameters ---------- alpha : float, optional Alpha is the ancillary parameter of the Negative Binomial link function. It is assumed to be nonstochastic. The default value is 1. Permissible values are usually assumed to be in (.01, 2). ''' def __init__(self, alpha=1.): self.alpha = alpha def _clean(self, x): return np.clip(x, FLOAT_EPS, np.inf) def __call__(self, p): ''' Negative Binomial transform link function Parameters ---------- p : array-like Mean parameters Returns ------- z : array The negative binomial transform of `p` Notes ----- g(p) = log(p/(p + 1/alpha)) ''' p = self._clean(p) return np.log(p/(p + 1/self.alpha)) def inverse(self, z): ''' Inverse of the negative binomial transform Parameters ----------- z : array-like The value of the inverse of the negative binomial link at `p`. 
Returns ------- p : array Mean parameters Notes ----- g^(-1)(z) = exp(z)/(alpha*(1-exp(z))) ''' return -1/(self.alpha * (1 - np.exp(-z))) def deriv(self, p): ''' Derivative of the negative binomial transform Parameters ---------- p : array-like Mean parameters Returns ------- g'(p) : array The derivative of the negative binomial transform link function Notes ----- g'(x) = 1/(x+alpha*x^2) ''' return 1/(p + self.alpha * p**2) def deriv2(self,p): ''' Second derivative of the negative binomial link function. Parameters ---------- p : array-like Mean parameters Returns ------- g''(p) : array The second derivative of the negative binomial transform link function Notes ----- g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2 ''' numer = -(1 + 2 * self.alpha * p) denom = (p + self.alpha * p**2)**2 return numer / denom def inverse_deriv(self, z): ''' Derivative of the inverse of the negative binomial transform Parameters ----------- z : array-like Usually the linear predictor for a GLM or GEE model Returns ------- g^(-1)'(z) : array The value of the derivative of the inverse of the negative binomial link ''' t = np.exp(z) return t / (self.alpha * (1-t)**2) class nbinom(NegativeBinomial): """ The negative binomial link function. Notes ----- g(p) = log(p/(p + 1/alpha)) nbinom is an alias of NegativeBinomial. nbinom = NegativeBinomial(alpha=1.) """ pass
bsd-3-clause
-4,901,116,892,870,221,000
20.115425
77
0.476917
false
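The Logit class above documents g(p) = log(p/(1-p)), g^{-1}(z) = exp(z)/(1+exp(z)) and g'(p) = 1/(p(1-p)). The following is a small stand-alone NumPy check of those relationships; it restates the docstring formulas directly rather than importing the module.

import numpy as np

p = np.array([0.1, 0.5, 0.9])
z = np.log(p / (1 - p))          # g(p)
p_back = 1 / (1 + np.exp(-z))    # g^{-1}(z)
print(np.allclose(p, p_back))    # True: the inverse undoes the link

eps = 1e-6
analytic = 1 / (p * (1 - p))     # g'(p) from the docstring
numeric = (np.log((p + eps) / (1 - p - eps)) -
           np.log((p - eps) / (1 - p + eps))) / (2 * eps)  # central difference
print(np.allclose(analytic, numeric, rtol=1e-4))  # True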
athena-voice/athena-voice-client
athena/modules/active/athena_control.py
1
1425
""" Finds and returns the latest bitcoin price Usage Examples: - "What is the price of bitcoin?" - "How much is a bitcoin worth?" """ from athena.classes.module import Module from athena.classes.task import ActiveTask from athena import brain class QuitTask(ActiveTask): def __init__(self): super().__init__(patterns=[r'\b(athena )?(quit|stop)\b.*']) def action(self, text): brain.inst.quit() class ListModulesTask(ActiveTask): def __init__(self): super().__init__(words=['list modules', 'list mods']) def action(self, text): brain.inst.list_mods() class ToggleModuleTask(ActiveTask): def __init__(self): super().__init__(patterns=[r'.*\b(enable|add|disable|remove) (.*)']) self.groups = {1: 'enable', 2: 'module'} def match(self, text): return self.match_and_save_groups(text, self.groups) def action(self, text): mod_name = self.module.lower().strip().replace(' ', '_') if 'disable' in self.enable.lower() or 'remove' in self.enable.lower(): brain.inst.disable_mod(mod_name) else: brain.inst.enable_mod(mod_name) class AthenaControl(Module): def __init__(self): tasks = [QuitTask(), ListModulesTask(), ToggleModuleTask()] super().__init__('athena_control', tasks, priority=3)
gpl-3.0
8,207,512,318,211,203,000
24.886792
79
0.582456
false
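The tasks above are keyed on regular expressions. The lines below run two of those patterns against sample phrases with plain re.match, just to show what the groups capture; the phrases are invented, and the framework's own dispatch logic is not reproduced here.

import re

quit_pattern = re.compile(r'\b(athena )?(quit|stop)\b.*')
toggle_pattern = re.compile(r'.*\b(enable|add|disable|remove) (.*)')

print(bool(quit_pattern.match('athena quit')))   # True
print(bool(quit_pattern.match('please stop')))   # False: match() anchors at the start
m = toggle_pattern.match('please disable athena control')
print(m.group(1), '->', m.group(2))              # disable -> athena control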
jumpstarter-io/neutron
neutron/tests/unit/ryu/test_ryu_plugin.py
9
1570
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron import manager from neutron.tests.unit.ryu import fake_ryu from neutron.tests.unit import test_db_plugin as test_plugin class RyuPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): _plugin_name = 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' def setUp(self): self.ryu_patcher = fake_ryu.patch_fake_ryu_client() self.ryu_patcher.start() super(RyuPluginV2TestCase, self).setUp(self._plugin_name) self.addCleanup(self.ryu_patcher.stop) plugin = manager.NeutronManager.get_plugin() plugin.notifier = mock.Mock() class TestRyuBasicGet(test_plugin.TestBasicGet, RyuPluginV2TestCase): pass class TestRyuV2HTTPResponse(test_plugin.TestV2HTTPResponse, RyuPluginV2TestCase): pass class TestRyuPortsV2(test_plugin.TestPortsV2, RyuPluginV2TestCase): pass class TestRyuNetworksV2(test_plugin.TestNetworksV2, RyuPluginV2TestCase): pass
apache-2.0
3,802,356,748,824,914,000
30.4
78
0.745223
false
TimBuckley/effective_django
tests/custom_pk/fields.py
33
1629
import random import string from django.db import models from django.utils import six from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class MyWrapper(object): def __init__(self, value): self.value = value def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.value) def __str__(self): return self.value def __eq__(self, other): if isinstance(other, self.__class__): return self.value == other.value return self.value == other class MyAutoField(six.with_metaclass(models.SubfieldBase, models.CharField)): def __init__(self, *args, **kwargs): kwargs['max_length'] = 10 super(MyAutoField, self).__init__(*args, **kwargs) def pre_save(self, instance, add): value = getattr(instance, self.attname, None) if not value: value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10))) setattr(instance, self.attname, value) return value def to_python(self, value): if not value: return if not isinstance(value, MyWrapper): value = MyWrapper(value) return value def get_db_prep_save(self, value, connection): if not value: return if isinstance(value, MyWrapper): return six.text_type(value) return value def get_db_prep_value(self, value, connection, prepared=False): if not value: return if isinstance(value, MyWrapper): return six.text_type(value) return value
bsd-3-clause
3,720,848,702,035,211,300
27.086207
81
0.601596
false
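MyAutoField.pre_save above fills a missing primary key with a random ten-letter string; the expression it uses can be run on its own as below (the seed is only there to make the example repeatable).

import random
import string

random.seed(0)  # seeded solely so the example is deterministic
key = ''.join(random.sample(string.ascii_lowercase, 10))
print(key, len(key))  # ten distinct lowercase letters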
CeltonMcGrath/TACTIC
3rd_party/CherryPy/cherrypy/test/modfcgid.py
6
4258
"""Wrapper for mod_fcgid, for use as a CherryPy HTTP server when testing. To autostart fcgid, the "apache" executable or script must be on your system path, or you must override the global APACHE_PATH. On some platforms, "apache" may be called "apachectl", "apache2ctl", or "httpd"--create a symlink to them if needed. You'll also need the WSGIServer from flup.servers. See http://projects.amor.org/misc/wiki/ModPythonGateway KNOWN BUGS ========== 1. Apache processes Range headers automatically; CherryPy's truncated output is then truncated again by Apache. See test_core.testRanges. This was worked around in http://www.cherrypy.org/changeset/1319. 2. Apache does not allow custom HTTP methods like CONNECT as per the spec. See test_core.testHTTPMethods. 3. Max request header and body settings do not work with Apache. 4. Apache replaces status "reason phrases" automatically. For example, CherryPy may set "304 Not modified" but Apache will write out "304 Not Modified" (capital "M"). 5. Apache does not allow custom error codes as per the spec. 6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the Request-URI too early. 7. mod_python will not read request bodies which use the "chunked" transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and mod_python's requestobject.c). 8. Apache will output a "Content-Length: 0" response header even if there's no response entity body. This isn't really a bug; it just differs from the CherryPy default. """ import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import re import sys import time import cherrypy from cherrypy.process import plugins, servers from cherrypy.test import test def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' % cmd) output = firstline + pipeout.read() finally: pipeout.close() return output APACHE_PATH = "httpd" CONF_PATH = "fcgi.conf" conf_fcgid = """ # Apache2 server conf file for testing CherryPy with mod_fcgid. DocumentRoot "%(root)s" ServerName 127.0.0.1 Listen %(port)s LoadModule fastcgi_module modules/mod_fastcgi.dll LoadModule rewrite_module modules/mod_rewrite.so Options ExecCGI SetHandler fastcgi-script RewriteEngine On RewriteRule ^(.*)$ /fastcgi.pyc [L] FastCgiExternalServer "%(server)s" -host 127.0.0.1:4000 """ class ModFCGISupervisor(test.LocalSupervisor): using_apache = True using_wsgi = True template = conf_fcgid def __str__(self): return "FCGI Server on %s:%s" % (self.host, self.port) def start(self, modulename): cherrypy.server.httpserver = servers.FlupFCGIServer( application=cherrypy.tree, bindAddress=('127.0.0.1', 4000)) cherrypy.server.httpserver.bind_addr = ('127.0.0.1', 4000) # For FCGI, we both start apache... self.start_apache() # ...and our local server test.LocalServer.start(self, modulename) def start_apache(self): fcgiconf = CONF_PATH if not os.path.isabs(fcgiconf): fcgiconf = os.path.join(curdir, fcgiconf) # Write the Apache conf file. 
f = open(fcgiconf, 'wb') try: server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1] output = self.template % {'port': self.port, 'root': curdir, 'server': server} output = output.replace('\r\n', '\n') f.write(output) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % fcgiconf) if result: print(result) def stop(self): """Gracefully shutdown a server that is serving forever.""" read_process(APACHE_PATH, "-k stop") test.LocalServer.stop(self) def sync_apps(self): cherrypy.server.httpserver.fcgiserver.application = self.get_app()
epl-1.0
-2,568,828,127,213,745,700
33.33871
77
0.662987
false
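start_apache above renders the conf_fcgid template with a dict through %-formatting. A stripped-down version of that substitution looks like the following; the paths and port are placeholders rather than values from a real test run.

template = '''DocumentRoot "%(root)s"
Listen %(port)s
FastCgiExternalServer "%(server)s" -host 127.0.0.1:4000
'''
# %-formatting with a mapping pulls each named value from the dict
print(template % {'root': '/tmp/www', 'port': 8080, 'server': '/tmp/www/fastcgi.pyc'})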
bergercookie/Pump3000
for_keep/final_code/device_configuration.py
1
1621
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'new_dev.ui' # # Created: Sun Jun 8 13:50:08 2014 # by: pyside-uic 0.2.15 running on PySide 1.2.1 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_Dialog(object): def setupUi(self, Dialog): Dialog.setObjectName("Dialog") Dialog.resize(619, 391) self.horizontalLayout_2 = QtGui.QHBoxLayout(Dialog) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.listWidget = QtGui.QListWidget(Dialog) self.listWidget.setObjectName("listWidget") self.horizontalLayout.addWidget(self.listWidget) self.horizontalLayout_2.addLayout(self.horizontalLayout) self.buttonBox = QtGui.QDialogButtonBox(Dialog) self.buttonBox.setOrientation(QtCore.Qt.Vertical) self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.horizontalLayout_2.addWidget(self.buttonBox) self.retranslateUi(Dialog) QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept) QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject) QtCore.QMetaObject.connectSlotsByName(Dialog) def retranslateUi(self, Dialog): Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
bsd-2-clause
6,336,461,285,662,444,000
42.810811
117
0.719309
false
FrankTsui/robust_rescaled_svm
common.py
1
1636
import numpy as np import matplotlib.pyplot as plt def plot_decision_function(classifier, fea, gnd, title): ''' plot the decision function in 2-d plane classifiers: the svm models fea: array like, shape = (smp_num, fea_num) gnd: array like, shape = (smp_num,) title: title of plot ''' fea_min = fea.min(axis = 0) fea_max = fea.max(axis = 0) mesh_num = 100 # meshgrid xx, yy = np.meshgrid(np.linspace(fea_min[0], fea_max[0], mesh_num), \ np.linspace(fea_min[1], fea_max[1], mesh_num)) Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()], last_model_flag = False) Z_first = Z[:, 0].copy() Z_last = Z[:, -1].copy() Z_first = Z_first.reshape(xx.shape) Z_last = Z_last.reshape(xx.shape) del Z # plot the line, the points leg_svm = plt.contour(xx, yy, Z_first, levels = [0.0], colors = 'k') leg_rsvm = plt.contour(xx, yy, Z_last, levels = [0.0], colors = 'r') posi_index = gnd == 1 nega_index = gnd == -1 marker_size = 70 plt.scatter(fea[:, 0], fea[:, 1], marker = 'o', \ s = classifier.smp_weights_mat[:, -1] * marker_size * 4, c = 'w', alpha = 1.0, edgecolors = 'm', label = 'weights') plt.scatter(fea[posi_index, 0], fea[posi_index, 1], marker = '^', s = marker_size, c = 'g', alpha = 0.8, label = 'posi') plt.scatter(fea[nega_index, 0], fea[nega_index, 1], marker = 'x', s = marker_size, c = 'b', label = 'nega') leg_svm.collections[0].set_label('svm') leg_rsvm.collections[0].set_label('rsvm') plt.legend(loc = 'upper left') plt.axis('on') plt.title(title)
apache-2.0
-7,288,017,175,188,139,000
39.925
124
0.579462
false
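plot_decision_function above scores a classifier on a meshgrid by flattening the grid, evaluating, and reshaping back before contouring. This sketch repeats just that data movement with a toy scoring function standing in for the SVM decision_function.

import numpy as np

xx, yy = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
grid_points = np.c_[xx.ravel(), yy.ravel()]        # shape (25, 2): one row per grid node
scores = grid_points[:, 0] - grid_points[:, 1]     # stand-in for decision_function
Z = scores.reshape(xx.shape)                       # back to the (5, 5) grid for contouring
print(grid_points.shape, Z.shape)                  # (25, 2) (5, 5)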
JeanKossaifi/scikit-learn
sklearn/tree/tests/test_tree.py
48
47506
""" Testing for the tree module (sklearn.tree). """ import pickle from functools import partial from itertools import product import platform import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import raises from sklearn.utils.validation import check_random_state from sklearn.utils.validation import NotFittedError from sklearn.utils.testing import ignore_warnings from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree.tree import SPARSE_SPLITTERS from sklearn.tree._tree import TREE_LEAF from sklearn import datasets from sklearn.preprocessing._weights import _balance_weights CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", ) CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier, splitter="presort-best"), "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor, splitter="presort-best"), "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = [name for name, Tree in ALL_TREES.items() if Tree().splitter in SPARSE_SPLITTERS] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 2, 11, 0, 
0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0) DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "boston": {"X": boston.data, "y": boston.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert_equal(s.node_count, d.node_count, "{0}: inequal number of node ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": inequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": inequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": inequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": inequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": inequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": inequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset. 
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_regression_toy(): # Check regression on a toy dataset. for name, Tree in REG_TREES.items(): reg = Tree(random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.9, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.5, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_boston(): # Check consistency on dataset boston house prices. for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS): reg = Tree(criterion=criterion, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 1, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) # using fewer features reduces the learning ability of this tree, # but reduces training time. reg = Tree(criterion=criterion, max_features=6, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 2, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_probability(): # Predict probabilities using DecisionTreeClassifier. for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure. 
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10, "Failed with {0}".format(name)) assert_equal(n_important, 3, "Failed with {0}".format(name)) X_new = clf.transform(X, threshold="mean") assert_less(0, X_new.shape[1], "Failed with {0}".format(name)) assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name)) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) @raises(ValueError) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() clf.feature_importances_ def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occurs at # high tree depth, we restrict this maximal depth. clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features. 
for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(boston.data, boston.target) assert_equal(reg.max_features_, boston.data.shape[1]) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert_equal(clf.max_features_, 2) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 3) est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) # use values of max_features that are invalid est = TreeEstimator(max_features=10) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=-1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=0.0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=1.5) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features="foobar") assert_raises(ValueError, est.fit, X, y) def test_error(): # Test that it gives proper exception on deficient input. for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() assert_raises(NotFittedError, est.predict_proba, X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample assert_raises(ValueError, est.predict_proba, X2) for name, TreeEstimator in ALL_TREES.items(): # Invalid values for parameters assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=0.51).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] assert_raises(ValueError, est.fit, X, y2) # Test with arrays that are non-contiguous. 
Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() assert_raises(NotFittedError, est.predict, T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) assert_raises(ValueError, est.predict, t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) assert_raises(ValueError, est.predict, X) assert_raises(ValueError, est.apply, X) clf = TreeEstimator() clf.fit(X, y) assert_raises(ValueError, clf.predict, Xt) assert_raises(ValueError, clf.apply, Xt) # apply before fitting est = TreeEstimator() assert_raises(NotFittedError, est.apply, T) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): # Check on dense input for name in ALL_TREES: yield check_min_weight_fraction_leaf, name, "iris" # Check on sparse input for name in SPARSE_TREES: yield check_min_weight_fraction_leaf, name, "multilabel", True def test_pickle(): # Check that tree estimator are pickable for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) serialized_object = pickle.dumps(clf) clf2 = pickle.loads(serialized_object) assert_equal(type(clf2), clf.__class__) score2 = clf2.score(iris.data, iris.target) assert_equal(score, score2, "Failed to generate same score " "after pickling (classification) " "with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(boston.data, boston.target) score = reg.score(boston.data, boston.target) serialized_object = pickle.dumps(reg) reg2 = pickle.loads(serialized_object) assert_equal(type(reg2), reg.__class__) score2 = reg2.score(boston.data, boston.target) assert_equal(score, score2, "Failed to generate same score " "after pickling 
(regression) " "with {0}".format(name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) proba = clf.predict_proba(T) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = clf.predict_log_proba(T) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert_equal(len(clf.n_classes_), 2) assert_equal(len(clf.classes_), 2) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = _balance_weights(unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. 
# Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 149.5) sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check sample weighting raises errors. X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.array(0) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(101) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(99) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, 
             iris.target)
    assert_almost_equal(clf1.feature_importances_,
                        clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)


def test_class_weights():
    for name in CLF_TREES:
        yield check_class_weights, name


def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    TreeClassifier = CLF_TREES[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string
    clf = TreeClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Not a list or preset for multi-output
    clf = TreeClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # Incorrect length list for multi-output
    clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)


def test_class_weight_errors():
    for name in CLF_TREES:
        yield check_class_weight_errors, name


def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        tree = est.tree_
        assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)

    # max_leaf_nodes in (0, 1) should raise ValueError
    est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
    assert_raises(ValueError, est.fit, X, y)
    est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
    assert_raises(ValueError, est.fit, X, y)
    est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
    assert_raises(ValueError, est.fit, X, y)


def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.tree_ assert_greater(tree.max_depth, 1) def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert_true(-2 <= value.flat[0] < 2, 'Array points to arbitrary memory') def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert_equal(est.tree_.max_depth, 0) def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2))) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict(X), 0.5 * np.ones((4, ))) def test_big_input(): # Test if the warning for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert_in("float32", str(e)) def test_realloc(): from sklearn.tree._utils import _realloc_test assert_raises(MemoryError, _realloc_test) def test_huge_allocations(): n_bits = int(platform.architecture()[0].rstrip('bit')) X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(Exception, clf.fit, X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *". 
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(MemoryError, clf.fit, X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "boston"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) def test_sparse_input(): for tree, dataset in product(SPARSE_TREES, ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros")): max_depth = 3 if dataset == "digits" else None yield (check_sparse_input, tree, dataset, max_depth) # Due to numerical instability of MSE and too strict test, we limit the # maximal depth for tree, dataset in product(REG_TREES, ["boston", "reg_small"]): if tree in SPARSE_TREES: yield (check_sparse_input, tree, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_parameters(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_parameters, tree, dataset) def check_sparse_criterion(tree, 
dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check various criterion CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_criterion(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_criterion, tree, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples set n_feature to ease construction of a simultaneous # construction of a csr and csc matrix n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert_greater((X_sparse.data == 0.).sum(), 0) assert_greater((X_sparse_test.data == 0.).sum(), 0) # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) def test_explicit_sparse_zeros(): for tree in SPARSE_TREES: yield (check_explicit_sparse_zeros, tree) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) assert_raises(ValueError, est.predict, [X]) @ignore_warnings def test_1d_input(): for name in ALL_TREES: yield check_raise_error_on_1d_input, name def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight): # Private function to keep pretty printing in nose yielded tests est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 1) est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y, 
sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 0) def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) if TreeEstimator().splitter in SPARSE_SPLITTERS: _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) def test_min_weight_leaf_split_level(): for name in ALL_TREES: yield check_min_weight_leaf_split_level, name def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def test_public_apply(): for name in ALL_TREES: yield (check_public_apply, name) for name in SPARSE_TREES: yield (check_public_apply_sparse, name)
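Outside the test harness above, the estimator behaviour these checks exercise comes down to a handful of calls. A minimal, self-contained sketch on synthetic data (the dataset shape and parameter values are illustrative only, not taken from the test module):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

# Toy problem: 3 informative features out of 10, as in the importance tests.
X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                           n_redundant=0, shuffle=False, random_state=0)

clf = DecisionTreeClassifier(max_depth=5, random_state=0).fit(X, y)

print(clf.predict(X[:5]))         # predicted class labels
print(clf.feature_importances_)   # one non-negative value per feature, sums to 1
print(clf.apply(X[:5]))           # index of the leaf each sample falls into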
bsd-3-clause
3,487,207,254,565,511,000
35.403065
79
0.58862
false
miku/siskin
siskin/test_openurl.py
1
5627
from siskin.openurl import openurl_parameters_from_intermediateschema def test_openurl_from_intermediateschema(): cases = ( ('empty doc', {}, {}), ( 'title only', { 'rft.atitle': 'empty doc' }, { 'ctx_enc': 'info:ofi/enc:UTF-8', 'ctx_ver': 'Z39.88-2004', 'rfr_id': 'info:sid/www.ub.uni-leipzig.de:generator', 'rft.atitle': 'empty doc', 'url_ver': 'Z39.88-2004', }, ), ( 'title and date', { 'rft.atitle': 'title and date', 'rft.date': '2018-10-10', }, { 'ctx_enc': 'info:ofi/enc:UTF-8', 'ctx_ver': 'Z39.88-2004', 'rfr_id': 'info:sid/www.ub.uni-leipzig.de:generator', 'rft.date': '2018-10-10', 'rft.atitle': 'title and date', 'url_ver': 'Z39.88-2004', }, ), ( 'title and date, language', { 'languages': ['eng', 'fra'], 'rft.atitle': 'title and date, language', 'rft.date': '2018-10-10', }, { 'ctx_enc': 'info:ofi/enc:UTF-8', 'ctx_ver': 'Z39.88-2004', 'rfr_id': 'info:sid/www.ub.uni-leipzig.de:generator', 'rft.date': '2018-10-10', 'rft.language': 'eng', 'rft.atitle': 'title and date, language', 'url_ver': 'Z39.88-2004', }, ), ( 'title and date, language, book', { 'languages': ['eng', 'fra'], 'rft.atitle': 'Hello', 'rft.date': '2018-10-10', 'rft.genre': 'book', }, { 'ctx_enc': 'info:ofi/enc:UTF-8', 'ctx_ver': 'Z39.88-2004', 'rfr_id': 'info:sid/www.ub.uni-leipzig.de:generator', 'rft.atitle': 'Hello', 'rft.btitle': 'Hello', 'rft.date': '2018-10-10', 'rft.genre': 'book', 'rft.language': 'eng', 'rft_val_fmt': 'info:ofi/fmt:kev:mtx:book', 'url_ver': 'Z39.88-2004', }, ), ( 'crossref-1', { "finc.format": "ElectronicArticle", "finc.mega_collection": ["Springer Science + Business Media (CrossRef)"], "finc.id": "ai-49-aHR0cDovL2R4LmRvaS5vcmcvMTAuMTAxNi9qLm51cnguMjAwNi4wNS4wMjU", "finc.source_id": "49", "ris.type": "EJOUR", "rft.atitle": "An Analysis of Correlations Among 4 Outcome Scales Employed in Clinical Trials of Patients With Major Depressive Disorder", "rft.epage": "412", "rft.genre": "article", "rft.issn": ["1545-5343"], "rft.issue": "3", "rft.jtitle": "NeuroRX", "rft.tpages": "2", "rft.pages": "411-412", "rft.pub": ["Springer Science + Business Media"], "rft.date": "2006-07-01", "x.date": "2006-07-01T00:00:00Z", "rft.spage": "411", "rft.volume": "3", "authors": [{ "rft.aulast": "JIANG", "rft.aufirst": "Q" }, { "rft.aulast": "AHMED", "rft.aufirst": "S" }, { "rft.aulast": "PEDERSEN", "rft.aufirst": "R" }, { "rft.aulast": "MUSGNUNG", "rft.aufirst": "J" }, { "rft.aulast": "ENTSUAH", "rft.aufirst": "R" }], "doi": "10.1016/j.nurx.2006.05.025", "languages": ["eng"], "url": ["http://dx.doi.org/10.1016/j.nurx.2006.05.025"], "version": "0.9", "x.subjects": ["Pharmacology (medical)"], "x.type": "journal-article" }, { 'ctx_enc': 'info:ofi/enc:UTF-8', 'ctx_ver': 'Z39.88-2004', 'rfr_id': 'info:sid/www.ub.uni-leipzig.de:generator', 'rft.atitle': 'An Analysis of Correlations Among 4 Outcome Scales Employed in Clinical Trials of Patients With Major Depressive Disorder', 'rft.aufirst': 'Q', 'rft.aulast': 'JIANG', 'rft.date': '2006-07-01', 'rft.epage': '412', 'rft.genre': 'article', 'rft.issn': '1545-5343', 'rft.issue': '3', 'rft.jtitle': 'NeuroRX', 'rft.language': 'eng', 'rft.pages': '411-412', 'rft.spage': '411', 'rft.volume': '3', 'rft_id': 'info:doi/10.1016/j.nurx.2006.05.025', 'url_ver': 'Z39.88-2004', }, ), ) for _, doc, want in cases: result = openurl_parameters_from_intermediateschema(doc) assert result == want
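A sketch of how the helper under test is typically consumed; the resolver base URL is a placeholder and the document keys simply mirror the intermediate schema fields used in the cases above (Python 3 assumed):

from urllib.parse import urlencode

from siskin.openurl import openurl_parameters_from_intermediateschema

doc = {
    'rft.atitle': 'Hello',
    'rft.date': '2018-10-10',
    'rft.genre': 'book',
    'languages': ['eng'],
}
params = openurl_parameters_from_intermediateschema(doc)

# Append the parameters to a link resolver endpoint (placeholder URL).
link = 'https://resolver.example.org/openurl?' + urlencode(params)
print(link)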
gpl-3.0
-3,538,089,728,913,000,400
34.16875
154
0.376577
false
barryHub20/ServerAss2
lib/itsdangerous.py
626
31840
# -*- coding: utf-8 -*- """ itsdangerous ~~~~~~~~~~~~ A module that implements various functions to deal with untrusted sources. Mainly useful for web applications. :copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation. :license: BSD, see LICENSE for more details. """ import sys import hmac import zlib import time import base64 import hashlib import operator from datetime import datetime PY2 = sys.version_info[0] == 2 if PY2: from itertools import izip text_type = unicode int_to_byte = chr number_types = (int, long, float) else: from functools import reduce izip = zip text_type = str int_to_byte = operator.methodcaller('to_bytes', 1, 'big') number_types = (int, float) try: import simplejson as json except ImportError: import json class _CompactJSON(object): """Wrapper around simplejson that strips whitespace. """ def loads(self, payload): return json.loads(payload) def dumps(self, obj): return json.dumps(obj, separators=(',', ':')) compact_json = _CompactJSON() # 2011/01/01 in UTC EPOCH = 1293840000 def want_bytes(s, encoding='utf-8', errors='strict'): if isinstance(s, text_type): s = s.encode(encoding, errors) return s def is_text_serializer(serializer): """Checks wheather a serializer generates text or binary.""" return isinstance(serializer.dumps({}), text_type) # Starting with 3.3 the standard library has a c-implementation for # constant time string compares. _builtin_constant_time_compare = getattr(hmac, 'compare_digest', None) def constant_time_compare(val1, val2): """Returns True if the two strings are equal, False otherwise. The time taken is independent of the number of characters that match. Do not use this function for anything else than comparision with known length targets. This is should be implemented in C in order to get it completely right. """ if _builtin_constant_time_compare is not None: return _builtin_constant_time_compare(val1, val2) len_eq = len(val1) == len(val2) if len_eq: result = 0 left = val1 else: result = 1 left = val2 for x, y in izip(bytearray(left), bytearray(val2)): result |= x ^ y return result == 0 class BadData(Exception): """Raised if bad data of any sort was encountered. This is the base for all exceptions that itsdangerous is currently using. .. versionadded:: 0.15 """ message = None def __init__(self, message): Exception.__init__(self, message) self.message = message def __str__(self): return text_type(self.message) if PY2: __unicode__ = __str__ def __str__(self): return self.__unicode__().encode('utf-8') class BadPayload(BadData): """This error is raised in situations when payload is loaded without checking the signature first and an exception happend as a result of that. The original exception that caused that will be stored on the exception as :attr:`original_error`. This can also happen with a :class:`JSONWebSignatureSerializer` that is subclassed and uses a different serializer for the payload than the expected one. .. versionadded:: 0.15 """ def __init__(self, message, original_error=None): BadData.__init__(self, message) #: If available, the error that indicates why the payload #: was not valid. This might be `None`. self.original_error = original_error class BadSignature(BadData): """This error is raised if a signature does not match. As of itsdangerous 0.14 there are helpful attributes on the exception instances. You can also catch down the baseclass :exc:`BadData`. """ def __init__(self, message, payload=None): BadData.__init__(self, message) #: The payload that failed the signature test. 
In some #: situations you might still want to inspect this, even if #: you know it was tampered with. #: #: .. versionadded:: 0.14 self.payload = payload class BadTimeSignature(BadSignature): """Raised for time based signatures that fail. This is a subclass of :class:`BadSignature` so you can catch those down as well. """ def __init__(self, message, payload=None, date_signed=None): BadSignature.__init__(self, message, payload) #: If the signature expired this exposes the date of when the #: signature was created. This can be helpful in order to #: tell the user how long a link has been gone stale. #: #: .. versionadded:: 0.14 self.date_signed = date_signed class BadHeader(BadSignature): """Raised if a signed header is invalid in some form. This only happens for serializers that have a header that goes with the signature. .. versionadded:: 0.24 """ def __init__(self, message, payload=None, header=None, original_error=None): BadSignature.__init__(self, message, payload) #: If the header is actually available but just malformed it #: might be stored here. self.header = header #: If available, the error that indicates why the payload #: was not valid. This might be `None`. self.original_error = original_error class SignatureExpired(BadTimeSignature): """Signature timestamp is older than required max_age. This is a subclass of :exc:`BadTimeSignature` so you can use the baseclass for catching the error. """ def base64_encode(string): """base64 encodes a single bytestring (and is tolerant to getting called with a unicode string). The resulting bytestring is safe for putting into URLs. """ string = want_bytes(string) return base64.urlsafe_b64encode(string).strip(b'=') def base64_decode(string): """base64 decodes a single bytestring (and is tolerant to getting called with a unicode string). The result is also a bytestring. """ string = want_bytes(string, encoding='ascii', errors='ignore') return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4)) def int_to_bytes(num): assert num >= 0 rv = [] while num: rv.append(int_to_byte(num & 0xff)) num >>= 8 return b''.join(reversed(rv)) def bytes_to_int(bytestr): return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0) class SigningAlgorithm(object): """Subclasses of `SigningAlgorithm` have to implement `get_signature` to provide signature generation functionality. """ def get_signature(self, key, value): """Returns the signature for the given key and value""" raise NotImplementedError() def verify_signature(self, key, value, sig): """Verifies the given signature matches the expected signature""" return constant_time_compare(sig, self.get_signature(key, value)) class NoneAlgorithm(SigningAlgorithm): """This class provides a algorithm that does not perform any signing and returns an empty signature. """ def get_signature(self, key, value): return b'' class HMACAlgorithm(SigningAlgorithm): """This class provides signature generation using HMACs.""" #: The digest method to use with the MAC algorithm. This defaults to sha1 #: but can be changed for any other function in the hashlib module. default_digest_method = staticmethod(hashlib.sha1) def __init__(self, digest_method=None): if digest_method is None: digest_method = self.default_digest_method self.digest_method = digest_method def get_signature(self, key, value): mac = hmac.new(key, msg=value, digestmod=self.digest_method) return mac.digest() class Signer(object): """This class can sign bytes and unsign it and validate the signature provided. 
Salt can be used to namespace the hash, so that a signed string is only valid for a given namespace. Leaving this at the default value or re-using a salt value across different parts of your application where the same signed value in one part can mean something different in another part is a security risk. See :ref:`the-salt` for an example of what the salt is doing and how you can utilize it. .. versionadded:: 0.14 `key_derivation` and `digest_method` were added as arguments to the class constructor. .. versionadded:: 0.18 `algorithm` was added as an argument to the class constructor. """ #: The digest method to use for the signer. This defaults to sha1 but can #: be changed for any other function in the hashlib module. #: #: .. versionchanged:: 0.14 default_digest_method = staticmethod(hashlib.sha1) #: Controls how the key is derived. The default is Django style #: concatenation. Possible values are ``concat``, ``django-concat`` #: and ``hmac``. This is used for deriving a key from the secret key #: with an added salt. #: #: .. versionadded:: 0.14 default_key_derivation = 'django-concat' def __init__(self, secret_key, salt=None, sep='.', key_derivation=None, digest_method=None, algorithm=None): self.secret_key = want_bytes(secret_key) self.sep = sep self.salt = 'itsdangerous.Signer' if salt is None else salt if key_derivation is None: key_derivation = self.default_key_derivation self.key_derivation = key_derivation if digest_method is None: digest_method = self.default_digest_method self.digest_method = digest_method if algorithm is None: algorithm = HMACAlgorithm(self.digest_method) self.algorithm = algorithm def derive_key(self): """This method is called to derive the key. If you're unhappy with the default key derivation choices you can override them here. Keep in mind that the key derivation in itsdangerous is not intended to be used as a security method to make a complex key out of a short password. Instead you should use large random secret keys. """ salt = want_bytes(self.salt) if self.key_derivation == 'concat': return self.digest_method(salt + self.secret_key).digest() elif self.key_derivation == 'django-concat': return self.digest_method(salt + b'signer' + self.secret_key).digest() elif self.key_derivation == 'hmac': mac = hmac.new(self.secret_key, digestmod=self.digest_method) mac.update(salt) return mac.digest() elif self.key_derivation == 'none': return self.secret_key else: raise TypeError('Unknown key derivation method') def get_signature(self, value): """Returns the signature for the given value""" value = want_bytes(value) key = self.derive_key() sig = self.algorithm.get_signature(key, value) return base64_encode(sig) def sign(self, value): """Signs the given string.""" return value + want_bytes(self.sep) + self.get_signature(value) def verify_signature(self, value, sig): """Verifies the signature for the given value.""" key = self.derive_key() try: sig = base64_decode(sig) except Exception: return False return self.algorithm.verify_signature(key, value, sig) def unsign(self, signed_value): """Unsigns the given string.""" signed_value = want_bytes(signed_value) sep = want_bytes(self.sep) if sep not in signed_value: raise BadSignature('No %r found in value' % self.sep) value, sig = signed_value.rsplit(sep, 1) if self.verify_signature(value, sig): return value raise BadSignature('Signature %r does not match' % sig, payload=value) def validate(self, signed_value): """Just validates the given signed value. 
Returns `True` if the signature exists and is valid, `False` otherwise.""" try: self.unsign(signed_value) return True except BadSignature: return False class TimestampSigner(Signer): """Works like the regular :class:`Signer` but also records the time of the signing and can be used to expire signatures. The unsign method can rause a :exc:`SignatureExpired` method if the unsigning failed because the signature is expired. This exception is a subclass of :exc:`BadSignature`. """ def get_timestamp(self): """Returns the current timestamp. This implementation returns the seconds since 1/1/2011. The function must return an integer. """ return int(time.time() - EPOCH) def timestamp_to_datetime(self, ts): """Used to convert the timestamp from `get_timestamp` into a datetime object. """ return datetime.utcfromtimestamp(ts + EPOCH) def sign(self, value): """Signs the given string and also attaches a time information.""" value = want_bytes(value) timestamp = base64_encode(int_to_bytes(self.get_timestamp())) sep = want_bytes(self.sep) value = value + sep + timestamp return value + sep + self.get_signature(value) def unsign(self, value, max_age=None, return_timestamp=False): """Works like the regular :meth:`~Signer.unsign` but can also validate the time. See the base docstring of the class for the general behavior. If `return_timestamp` is set to `True` the timestamp of the signature will be returned as naive :class:`datetime.datetime` object in UTC. """ try: result = Signer.unsign(self, value) sig_error = None except BadSignature as e: sig_error = e result = e.payload or b'' sep = want_bytes(self.sep) # If there is no timestamp in the result there is something # seriously wrong. In case there was a signature error, we raise # that one directly, otherwise we have a weird situation in which # we shouldn't have come except someone uses a time-based serializer # on non-timestamp data, so catch that. if not sep in result: if sig_error: raise sig_error raise BadTimeSignature('timestamp missing', payload=result) value, timestamp = result.rsplit(sep, 1) try: timestamp = bytes_to_int(base64_decode(timestamp)) except Exception: timestamp = None # Signature is *not* okay. Raise a proper error now that we have # split the value and the timestamp. if sig_error is not None: raise BadTimeSignature(text_type(sig_error), payload=value, date_signed=timestamp) # Signature was okay but the timestamp is actually not there or # malformed. Should not happen, but well. We handle it nonetheless if timestamp is None: raise BadTimeSignature('Malformed timestamp', payload=value) # Check timestamp is not older than max_age if max_age is not None: age = self.get_timestamp() - timestamp if age > max_age: raise SignatureExpired( 'Signature age %s > %s seconds' % (age, max_age), payload=value, date_signed=self.timestamp_to_datetime(timestamp)) if return_timestamp: return value, self.timestamp_to_datetime(timestamp) return value def validate(self, signed_value, max_age=None): """Just validates the given signed value. Returns `True` if the signature exists and is valid, `False` otherwise.""" try: self.unsign(signed_value, max_age=max_age) return True except BadSignature: return False class Serializer(object): """This class provides a serialization interface on top of the signer. It provides a similar API to json/pickle and other modules but is slightly differently structured internally. 
If you want to change the underlying implementation for parsing and loading you have to override the :meth:`load_payload` and :meth:`dump_payload` functions. This implementation uses simplejson if available for dumping and loading and will fall back to the standard library's json module if it's not available. Starting with 0.14 you do not need to subclass this class in order to switch out or customer the :class:`Signer`. You can instead also pass a different class to the constructor as well as keyword arguments as dictionary that should be forwarded:: s = Serializer(signer_kwargs={'key_derivation': 'hmac'}) .. versionchanged:: 0.14: The `signer` and `signer_kwargs` parameters were added to the constructor. """ #: If a serializer module or class is not passed to the constructor #: this one is picked up. This currently defaults to :mod:`json`. default_serializer = json #: The default :class:`Signer` class that is being used by this #: serializer. #: #: .. versionadded:: 0.14 default_signer = Signer def __init__(self, secret_key, salt=b'itsdangerous', serializer=None, signer=None, signer_kwargs=None): self.secret_key = want_bytes(secret_key) self.salt = want_bytes(salt) if serializer is None: serializer = self.default_serializer self.serializer = serializer self.is_text_serializer = is_text_serializer(serializer) if signer is None: signer = self.default_signer self.signer = signer self.signer_kwargs = signer_kwargs or {} def load_payload(self, payload, serializer=None): """Loads the encoded object. This function raises :class:`BadPayload` if the payload is not valid. The `serializer` parameter can be used to override the serializer stored on the class. The encoded payload is always byte based. """ if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: is_text = is_text_serializer(serializer) try: if is_text: payload = payload.decode('utf-8') return serializer.loads(payload) except Exception as e: raise BadPayload('Could not load the payload because an ' 'exception occurred on unserializing the data', original_error=e) def dump_payload(self, obj): """Dumps the encoded object. The return value is always a bytestring. If the internal serializer is text based the value will automatically be encoded to utf-8. """ return want_bytes(self.serializer.dumps(obj)) def make_signer(self, salt=None): """A method that creates a new instance of the signer to be used. The default implementation uses the :class:`Signer` baseclass. """ if salt is None: salt = self.salt return self.signer(self.secret_key, salt=salt, **self.signer_kwargs) def dumps(self, obj, salt=None): """Returns a signed string serialized with the internal serializer. The return value can be either a byte or unicode string depending on the format of the internal serializer. """ payload = want_bytes(self.dump_payload(obj)) rv = self.make_signer(salt).sign(payload) if self.is_text_serializer: rv = rv.decode('utf-8') return rv def dump(self, obj, f, salt=None): """Like :meth:`dumps` but dumps into a file. The file handle has to be compatible with what the internal serializer expects. """ f.write(self.dumps(obj, salt)) def loads(self, s, salt=None): """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the signature validation fails. 
""" s = want_bytes(s) return self.load_payload(self.make_signer(salt).unsign(s)) def load(self, f, salt=None): """Like :meth:`loads` but loads from a file.""" return self.loads(f.read(), salt) def loads_unsafe(self, s, salt=None): """Like :meth:`loads` but without verifying the signature. This is potentially very dangerous to use depending on how your serializer works. The return value is ``(signature_okay, payload)`` instead of just the payload. The first item will be a boolean that indicates if the signature is okay (``True``) or if it failed. This function never fails. Use it for debugging only and if you know that your serializer module is not exploitable (eg: do not use it with a pickle serializer). .. versionadded:: 0.15 """ return self._loads_unsafe_impl(s, salt) def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None): """Lowlevel helper function to implement :meth:`loads_unsafe` in serializer subclasses. """ try: return True, self.loads(s, salt=salt, **(load_kwargs or {})) except BadSignature as e: if e.payload is None: return False, None try: return False, self.load_payload(e.payload, **(load_payload_kwargs or {})) except BadPayload: return False, None def load_unsafe(self, f, *args, **kwargs): """Like :meth:`loads_unsafe` but loads from a file. .. versionadded:: 0.15 """ return self.loads_unsafe(f.read(), *args, **kwargs) class TimedSerializer(Serializer): """Uses the :class:`TimestampSigner` instead of the default :meth:`Signer`. """ default_signer = TimestampSigner def loads(self, s, max_age=None, return_timestamp=False, salt=None): """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the signature validation fails. If a `max_age` is provided it will ensure the signature is not older than that time in seconds. In case the signature is outdated, :exc:`SignatureExpired` is raised which is a subclass of :exc:`BadSignature`. All arguments are forwarded to the signer's :meth:`~TimestampSigner.unsign` method. """ base64d, timestamp = self.make_signer(salt) \ .unsign(s, max_age, return_timestamp=True) payload = self.load_payload(base64d) if return_timestamp: return payload, timestamp return payload def loads_unsafe(self, s, max_age=None, salt=None): load_kwargs = {'max_age': max_age} load_payload_kwargs = {} return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs) class JSONWebSignatureSerializer(Serializer): """This serializer implements JSON Web Signature (JWS) support. Only supports the JWS Compact Serialization. """ jws_algorithms = { 'HS256': HMACAlgorithm(hashlib.sha256), 'HS384': HMACAlgorithm(hashlib.sha384), 'HS512': HMACAlgorithm(hashlib.sha512), 'none': NoneAlgorithm(), } #: The default algorithm to use for signature generation default_algorithm = 'HS256' default_serializer = compact_json def __init__(self, secret_key, salt=None, serializer=None, signer=None, signer_kwargs=None, algorithm_name=None): Serializer.__init__(self, secret_key, salt, serializer, signer, signer_kwargs) if algorithm_name is None: algorithm_name = self.default_algorithm self.algorithm_name = algorithm_name self.algorithm = self.make_algorithm(algorithm_name) def load_payload(self, payload, return_header=False): payload = want_bytes(payload) if b'.' not in payload: raise BadPayload('No "." 
found in value') base64d_header, base64d_payload = payload.split(b'.', 1) try: json_header = base64_decode(base64d_header) except Exception as e: raise BadHeader('Could not base64 decode the header because of ' 'an exception', original_error=e) try: json_payload = base64_decode(base64d_payload) except Exception as e: raise BadPayload('Could not base64 decode the payload because of ' 'an exception', original_error=e) try: header = Serializer.load_payload(self, json_header, serializer=json) except BadData as e: raise BadHeader('Could not unserialize header because it was ' 'malformed', original_error=e) if not isinstance(header, dict): raise BadHeader('Header payload is not a JSON object', header=header) payload = Serializer.load_payload(self, json_payload) if return_header: return payload, header return payload def dump_payload(self, header, obj): base64d_header = base64_encode(self.serializer.dumps(header)) base64d_payload = base64_encode(self.serializer.dumps(obj)) return base64d_header + b'.' + base64d_payload def make_algorithm(self, algorithm_name): try: return self.jws_algorithms[algorithm_name] except KeyError: raise NotImplementedError('Algorithm not supported') def make_signer(self, salt=None, algorithm=None): if salt is None: salt = self.salt key_derivation = 'none' if salt is None else None if algorithm is None: algorithm = self.algorithm return self.signer(self.secret_key, salt=salt, sep='.', key_derivation=key_derivation, algorithm=algorithm) def make_header(self, header_fields): header = header_fields.copy() if header_fields else {} header['alg'] = self.algorithm_name return header def dumps(self, obj, salt=None, header_fields=None): """Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It also allows for specifying additional fields to be included in the JWS Header. """ header = self.make_header(header_fields) signer = self.make_signer(salt, self.algorithm) return signer.sign(self.dump_payload(header, obj)) def loads(self, s, salt=None, return_header=False): """Reverse of :meth:`dumps`. If requested via `return_header` it will return a tuple of payload and header. """ payload, header = self.load_payload( self.make_signer(salt, self.algorithm).unsign(want_bytes(s)), return_header=True) if header.get('alg') != self.algorithm_name: raise BadHeader('Algorithm mismatch', header=header, payload=payload) if return_header: return payload, header return payload def loads_unsafe(self, s, salt=None, return_header=False): kwargs = {'return_header': return_header} return self._loads_unsafe_impl(s, salt, kwargs, kwargs) class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer): """Works like the regular :class:`JSONWebSignatureSerializer` but also records the time of the signing and can be used to expire signatures. JWS currently does not specify this behavior but it mentions a possibility extension like this in the spec. Expiry date is encoded into the header similarily as specified in `draft-ietf-oauth-json-web-token <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef`_. The unsign method can raise a :exc:`SignatureExpired` method if the unsigning failed because the signature is expired. This exception is a subclass of :exc:`BadSignature`. 
""" DEFAULT_EXPIRES_IN = 3600 def __init__(self, secret_key, expires_in=None, **kwargs): JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs) if expires_in is None: expires_in = self.DEFAULT_EXPIRES_IN self.expires_in = expires_in def make_header(self, header_fields): header = JSONWebSignatureSerializer.make_header(self, header_fields) iat = self.now() exp = iat + self.expires_in header['iat'] = iat header['exp'] = exp return header def loads(self, s, salt=None, return_header=False): payload, header = JSONWebSignatureSerializer.loads( self, s, salt, return_header=True) if 'exp' not in header: raise BadSignature('Missing expiry date', payload=payload) if not (isinstance(header['exp'], number_types) and header['exp'] > 0): raise BadSignature('expiry date is not an IntDate', payload=payload) if header['exp'] < self.now(): raise SignatureExpired('Signature expired', payload=payload, date_signed=self.get_issue_date(header)) if return_header: return payload, header return payload def get_issue_date(self, header): rv = header.get('iat') if isinstance(rv, number_types): return datetime.utcfromtimestamp(int(rv)) def now(self): return int(time.time()) class URLSafeSerializerMixin(object): """Mixed in with a regular serializer it will attempt to zlib compress the string to make it shorter if necessary. It will also base64 encode the string so that it can safely be placed in a URL. """ def load_payload(self, payload): decompress = False if payload.startswith(b'.'): payload = payload[1:] decompress = True try: json = base64_decode(payload) except Exception as e: raise BadPayload('Could not base64 decode the payload because of ' 'an exception', original_error=e) if decompress: try: json = zlib.decompress(json) except Exception as e: raise BadPayload('Could not zlib decompress the payload before ' 'decoding the payload', original_error=e) return super(URLSafeSerializerMixin, self).load_payload(json) def dump_payload(self, obj): json = super(URLSafeSerializerMixin, self).dump_payload(obj) is_compressed = False compressed = zlib.compress(json) if len(compressed) < (len(json) - 1): json = compressed is_compressed = True base64d = base64_encode(json) if is_compressed: base64d = b'.' + base64d return base64d class URLSafeSerializer(URLSafeSerializerMixin, Serializer): """Works like :class:`Serializer` but dumps and loads into a URL safe string consisting of the upper and lowercase character of the alphabet as well as ``'_'``, ``'-'`` and ``'.'``. """ default_serializer = compact_json class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer): """Works like :class:`TimedSerializer` but dumps and loads into a URL safe string consisting of the upper and lowercase character of the alphabet as well as ``'_'``, ``'-'`` and ``'.'``. """ default_serializer = compact_json
apache-2.0
-2,543,009,830,124,408,300
35.513761
81
0.632004
false
felixbb/forseti-security
tests/common/gcp_api/bigquery_test.py
1
5019
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the Bigquery client.""" import mock import httplib2 from googleapiclient.errors import HttpError from google.apputils import basetest from google.cloud.security.common.gcp_api import bigquery as bq from google.cloud.security.common.gcp_api import _base_client as _base_client from google.cloud.security.common.gcp_api import errors as api_errors from tests.common.gcp_api.test_data import fake_bigquery as fbq class BigqueryTestCase(basetest.TestCase): """Test the Bigquery API Client.""" MAX_BIGQUERY_API_CALLS_PER_100_SECONDS = 88888 @mock.patch.object(bq, 'FLAGS') @mock.patch.object(_base_client.BaseClient, '__init__', autospec=True) def setUp(self, mock_base_client, mock_flags): """Set up.""" mock_flags.max_bigquery_api_calls_per_100_seconds = ( self.MAX_BIGQUERY_API_CALLS_PER_100_SECONDS) self.bq_api_client = bq.BigQueryClient() self.http_response = httplib2.Response( {'status': '400', 'content-type': 'application/json'} ) def test_api_client_is_initialized(self): """Test that the api client is initialized.""" self.assertEquals( self.MAX_BIGQUERY_API_CALLS_PER_100_SECONDS, self.bq_api_client.rate_limiter.max_calls) self.assertEquals( bq.BigQueryClient.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS, self.bq_api_client.rate_limiter.period) def test_get_bigquery_projectids_raises(self): mock_bq_stub = mock.MagicMock() self.bq_api_client.service = mock.MagicMock() self.bq_api_client.service.projects.return_value = mock_bq_stub self.bq_api_client._execute = mock.MagicMock( side_effect=HttpError(self.http_response, '{}') ) with self.assertRaises(api_errors.ApiExecutionError): self.bq_api_client.get_bigquery_projectids() def test_get_bigquery_projectids(self): mock_bq_stub = mock.MagicMock() self.bq_api_client.service = mock.MagicMock() self.bq_api_client.service.projects.return_value = mock_bq_stub self.bq_api_client._build_paged_result = mock.MagicMock( return_value=fbq.PROJECTS_LIST_REQUEST_RESPONSE ) return_value = self.bq_api_client.get_bigquery_projectids() self.assertListEqual(return_value, fbq.PROJECTS_LIST_EXPECTED) def test_get_datasets_for_projectid_raises(self): mock_bq_stub = mock.MagicMock() self.bq_api_client.service = mock.MagicMock() self.bq_api_client.service.datasets.return_value = mock_bq_stub self.bq_api_client._execute = mock.MagicMock( side_effect=HttpError(self.http_response, '{}') ) with self.assertRaises(api_errors.ApiExecutionError): self.bq_api_client.get_datasets_for_projectid(fbq.PROJECT_IDS[0]) def test_getdatasets_for_projectid(self): mock_bq_stub = mock.MagicMock() self.bq_api_client.service = mock.MagicMock() self.bq_api_client.service.datasets.return_value = mock_bq_stub self.bq_api_client._build_paged_result = mock.MagicMock( return_value=fbq.DATASETS_LIST_REQUEST_RESPONSE ) return_value = self.bq_api_client.get_datasets_for_projectid('') self.assertListEqual(return_value, fbq.DATASETS_LIST_EXPECTED) def test_get_dataset_access_raises(self): mock_bq_stub = mock.MagicMock() self.bq_api_client.service = 
mock.MagicMock() self.bq_api_client.service.datasets.return_value = mock_bq_stub self.bq_api_client._execute = mock.MagicMock( side_effect=HttpError(self.http_response, '{}') ) with self.assertRaises(api_errors.ApiExecutionError): self.bq_api_client.get_dataset_access(fbq.PROJECT_IDS[0], fbq.DATASET_ID) def test_get_dataset_access(self): mock_bq_stub = mock.MagicMock() self.bq_api_client.service = mock.MagicMock() self.bq_api_client.service.datasets.return_value = mock_bq_stub self.bq_api_client._build_paged_result = mock.MagicMock( return_value=fbq.DATASETS_GET_REQUEST_RESPONSE ) return_value = self.bq_api_client.get_dataset_access('','') self.assertListEqual(return_value, fbq.DATASETS_GET_EXPECTED) if __name__ == '__main__': basetest.main()
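Stripped of the mocks, the client surface exercised by these tests would be driven roughly as follows; this assumes the client can be constructed with default credentials and makes no claim about the exact shape of the returned records (the project and dataset identifiers are placeholders):

from google.cloud.security.common.gcp_api import bigquery as bq

client = bq.BigQueryClient()

# Enumerate projects with BigQuery enabled and list their datasets.
for project_id in client.get_bigquery_projectids():
    datasets = client.get_datasets_for_projectid(project_id)
    print(project_id, datasets)

# Access rules for one known dataset (placeholder identifiers).
access = client.get_dataset_access('my-project-id', 'my_dataset')
print(access)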
apache-2.0
1,886,433,124,228,149,000
37.906977
78
0.664873
false
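The test record above repeatedly follows one pattern: stub the API client's service object with a MagicMock, force the low-level call to raise, and assert that the client wraps the failure in its own error type. A minimal, standard-library-only sketch of that pattern; FakeClient, ApiError and the wrap-and-raise helper are illustrative stand-ins, not part of the file above.

import unittest
from unittest import mock


class ApiError(Exception):
    """Stand-in for the wrapper error (api_errors.ApiExecutionError above)."""


class FakeClient(object):
    """Tiny client that wraps low-level failures, mirroring the tests above."""

    def __init__(self, service):
        self.service = service

    def get_projectids(self):
        try:
            return self.service.projects().list().execute()
        except RuntimeError as error:
            raise ApiError(error)


class FakeClientTest(unittest.TestCase):

    def test_get_projectids_raises(self):
        service = mock.MagicMock()
        # Make the innermost call blow up, as the HttpError side_effect does above.
        service.projects.return_value.list.return_value.execute.side_effect = (
            RuntimeError('boom'))
        with self.assertRaises(ApiError):
            FakeClient(service).get_projectids()


if __name__ == '__main__':
    unittest.main()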
slohse/ansible
setup.py
34
11028
from __future__ import print_function import json import os import os.path import re import sys import warnings from collections import defaultdict from distutils.command.build_scripts import build_scripts as BuildScripts from distutils.command.sdist import sdist as SDist try: from setuptools import setup, find_packages from setuptools.command.build_py import build_py as BuildPy from setuptools.command.install_lib import install_lib as InstallLib from setuptools.command.install_scripts import install_scripts as InstallScripts except ImportError: print("Ansible now needs setuptools in order to build. Install it using" " your package manager (usually python-setuptools) or via pip (pip" " install setuptools).", file=sys.stderr) sys.exit(1) sys.path.insert(0, os.path.abspath('lib')) from ansible.release import __version__, __author__ SYMLINK_CACHE = 'SYMLINK_CACHE.json' def _find_symlinks(topdir, extension=''): """Find symlinks that should be maintained Maintained symlinks exist in the bin dir or are modules which have aliases. Our heuristic is that they are a link in a certain path which point to a file in the same directory. """ symlinks = defaultdict(list) for base_path, dirs, files in os.walk(topdir): for filename in files: filepath = os.path.join(base_path, filename) if os.path.islink(filepath) and filename.endswith(extension): target = os.readlink(filepath) if os.path.dirname(target) == '': link = filepath[len(topdir):] if link.startswith('/'): link = link[1:] symlinks[os.path.basename(target)].append(link) return symlinks def _cache_symlinks(symlink_data): with open(SYMLINK_CACHE, 'w') as f: json.dump(symlink_data, f) def _maintain_symlinks(symlink_type, base_path): """Switch a real file into a symlink""" try: # Try the cache first because going from git checkout to sdist is the # only time we know that we're going to cache correctly with open(SYMLINK_CACHE, 'r') as f: symlink_data = json.load(f) except (IOError, OSError) as e: # IOError on py2, OSError on py3. Both have errno if e.errno == 2: # SYMLINKS_CACHE doesn't exist. Fallback to trying to create the # cache now. Will work if we're running directly from a git # checkout or from an sdist created earlier. symlink_data = {'script': _find_symlinks('bin'), 'library': _find_symlinks('lib', '.py'), } # Sanity check that something we know should be a symlink was # found. We'll take that to mean that the current directory # structure properly reflects symlinks in the git repo if 'ansible-playbook' in symlink_data['script']['ansible']: _cache_symlinks(symlink_data) else: raise RuntimeError( "Pregenerated symlink list was not present and expected " "symlinks in ./bin were missing or broken. " "Perhaps this isn't a git checkout?" 
) else: raise symlinks = symlink_data[symlink_type] for source in symlinks: for dest in symlinks[source]: dest_path = os.path.join(base_path, dest) if not os.path.islink(dest_path): try: os.unlink(dest_path) except OSError as e: if e.errno == 2: # File does not exist which is all we wanted pass os.symlink(source, dest_path) class BuildPyCommand(BuildPy): def run(self): BuildPy.run(self) _maintain_symlinks('library', self.build_lib) class BuildScriptsCommand(BuildScripts): def run(self): BuildScripts.run(self) _maintain_symlinks('script', self.build_dir) class InstallLibCommand(InstallLib): def run(self): InstallLib.run(self) _maintain_symlinks('library', self.install_dir) class InstallScriptsCommand(InstallScripts): def run(self): InstallScripts.run(self) _maintain_symlinks('script', self.install_dir) class SDistCommand(SDist): def run(self): # have to generate the cache of symlinks for release as sdist is the # only command that has access to symlinks from the git repo symlinks = {'script': _find_symlinks('bin'), 'library': _find_symlinks('lib', '.py'), } _cache_symlinks(symlinks) SDist.run(self) def read_file(file_name): """Read file and return its contents.""" with open(file_name, 'r') as f: return f.read() def read_requirements(file_name): """Read requirements file as a list.""" reqs = read_file(file_name).splitlines() if not reqs: raise RuntimeError( "Unable to read requirements from the %s file" "That indicates this copy of the source code is incomplete." % file_name ) return reqs PYCRYPTO_DIST = 'pycrypto' def get_crypto_req(): """Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var. pycrypto or cryptography. We choose a default but allow the user to override it. This translates into pip install of the sdist deciding what package to install and also the runtime dependencies that pkg_resources knows about. 
""" crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip() if crypto_backend == PYCRYPTO_DIST: # Attempt to set version requirements return '%s >= 2.6' % PYCRYPTO_DIST return crypto_backend or None def substitute_crypto_to_req(req): """Replace crypto requirements if customized.""" crypto_backend = get_crypto_req() if crypto_backend is None: return req def is_not_crypto(r): CRYPTO_LIBS = PYCRYPTO_DIST, 'cryptography' return not any(r.lower().startswith(c) for c in CRYPTO_LIBS) return [r for r in req if is_not_crypto(r)] + [crypto_backend] def read_extras(): """Specify any extra requirements for installation.""" extras = dict() extra_requirements_dir = 'packaging/requirements' for extra_requirements_filename in os.listdir(extra_requirements_dir): filename_match = re.search(r'^requirements-(\w*).txt$', extra_requirements_filename) if not filename_match: continue extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename) try: extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines() except RuntimeError: pass return extras def get_dynamic_setup_params(): """Add dynamically calculated setup params to static ones.""" return { # Retrieve the long description from the README 'long_description': read_file('README.rst'), 'install_requires': substitute_crypto_to_req( read_requirements('requirements.txt'), ), 'extras_require': read_extras(), } static_setup_params = dict( # Use the distutils SDist so that symlinks are not expanded # Use a custom Build for the same reason cmdclass={ 'build_py': BuildPyCommand, 'build_scripts': BuildScriptsCommand, 'install_lib': InstallLibCommand, 'install_scripts': InstallScriptsCommand, 'sdist': SDistCommand, }, name='ansible', version=__version__, description='Radically simple IT automation', author=__author__, author_email='info@ansible.com', url='https://ansible.com/', project_urls={ 'Bug Tracker': 'https://github.com/ansible/ansible/issues', 'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible', 'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html', 'Documentation': 'https://docs.ansible.com/ansible/', 'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information', 'Source Code': 'https://github.com/ansible/ansible', }, license='GPLv3+', # Ansible will also make use of a system copy of python-six and # python-selectors2 if installed but use a Bundled copy if it's not. 
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*', package_dir={'': 'lib'}, packages=find_packages('lib'), package_data={ '': [ 'executor/powershell/*.ps1', 'module_utils/csharp/*.cs', 'module_utils/csharp/*/*.cs', 'module_utils/powershell/*.psm1', 'module_utils/powershell/*/*.psm1', 'modules/windows/*.ps1', 'modules/windows/*/*.ps1', 'galaxy/data/*/*.*', 'galaxy/data/*/*/.*', 'galaxy/data/*/*/*.*', 'galaxy/data/*/tests/inventory', 'config/base.yml', 'config/module_defaults.yml', ], }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English', 'Operating System :: POSIX', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: System :: Installation/Setup', 'Topic :: System :: Systems Administration', 'Topic :: Utilities', ], scripts=[ 'bin/ansible', 'bin/ansible-playbook', 'bin/ansible-pull', 'bin/ansible-doc', 'bin/ansible-galaxy', 'bin/ansible-console', 'bin/ansible-connection', 'bin/ansible-vault', 'bin/ansible-config', 'bin/ansible-inventory', ], data_files=[], # Installing as zip files would break due to references to __file__ zip_safe=False ) def main(): """Invoke installation process using setuptools.""" setup_params = dict(static_setup_params, **get_dynamic_setup_params()) ignore_warning_regex = ( r"Unknown distribution option: '(project_urls|python_requires)'" ) warnings.filterwarnings( 'ignore', message=ignore_warning_regex, category=UserWarning, module='distutils.dist', ) setup(**setup_params) warnings.resetwarnings() if __name__ == '__main__': main()
gpl-3.0
-3,264,189,986,342,779,400
33.35514
121
0.609993
false
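The setup.py record above caches symlinks whose targets live in the same directory so they can be recreated after packaging expands them into real files. A small sketch of that discovery heuristic, assuming a bin/-style layout; the function name and the example directory are illustrative.

import os
from collections import defaultdict


def find_same_dir_symlinks(topdir):
    # Map target basename -> list of links (relative to topdir) that point
    # to a file in the same directory, mirroring _find_symlinks() above.
    links = defaultdict(list)
    for base, _dirs, files in os.walk(topdir):
        for name in files:
            path = os.path.join(base, name)
            if os.path.islink(path):
                target = os.readlink(path)
                if os.path.dirname(target) == '':
                    links[os.path.basename(target)].append(
                        os.path.relpath(path, topdir))
    return dict(links)


# Usage: find_same_dir_symlinks('bin') would report, for example,
# {'ansible': ['ansible-playbook', 'ansible-doc', ...]} in an ansible checkout.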
sekikn/incubator-airflow
airflow/contrib/hooks/aws_athena_hook.py
12
1153
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.athena`."""

import warnings

# pylint: disable=unused-import
from airflow.providers.amazon.aws.hooks.athena import AWSAthenaHook  # noqa

warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.athena`.",
    DeprecationWarning,
    stacklevel=2,
)

apache-2.0
727,083,354,184,739,200
37.433333
89
0.766696
false
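The Airflow record above is a pure deprecation shim: it warns on import and re-exports the implementation from its new home. A generic sketch of that pattern for a hypothetical module move; old_pkg and new_pkg are placeholders, not real packages.

"""old_pkg.hooks.example is deprecated; use new_pkg.hooks.example instead."""
import warnings

warnings.warn(
    "This module is deprecated. Please use `new_pkg.hooks.example`.",
    DeprecationWarning,
    stacklevel=2,
)

# Re-export so existing imports keep working (commented out here because the
# placeholder package does not exist):
# from new_pkg.hooks.example import ExampleHook  # noqa: F401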
clumsy/intellij-community
python/lib/Lib/encodings/cp866.py
593
34652
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp866', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A 0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE 0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE 0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE 0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE 0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE 0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE 0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE 0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I 0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I 0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA 0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL 0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM 0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN 0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O 0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE 0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER 0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES 0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE 0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U 0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF 0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA 0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE 0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE 0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA 0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA 0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN 0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU 0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN 0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E 0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU 0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A 0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE 0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE 0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE 0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE 0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE 0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE 0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE 0x00a8: 0x0438, # CYRILLIC SMALL LETTER I 0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I 0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA 0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL 0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM 0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN 0x00ae: 0x043e, # CYRILLIC SMALL LETTER O 0x00af: 0x043f, # CYRILLIC SMALL LETTER PE 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # 
BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER 0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES 0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE 0x00e3: 0x0443, # CYRILLIC SMALL LETTER U 0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF 0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA 0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE 0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE 0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA 0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA 0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN 0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU 0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN 0x00ed: 0x044d, # CYRILLIC SMALL LETTER E 0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU 0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA 0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO 0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO 0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE 0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE 0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI 0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI 0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U 0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x2116, # NUMERO SIGN 0x00fd: 0x00a4, # CURRENCY SIGN 
0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( u'\x00' # 0x0000 -> NULL u'\x01' # 0x0001 -> START OF HEADING u'\x02' # 0x0002 -> START OF TEXT u'\x03' # 0x0003 -> END OF TEXT u'\x04' # 0x0004 -> END OF TRANSMISSION u'\x05' # 0x0005 -> ENQUIRY u'\x06' # 0x0006 -> ACKNOWLEDGE u'\x07' # 0x0007 -> BELL u'\x08' # 0x0008 -> BACKSPACE u'\t' # 0x0009 -> HORIZONTAL TABULATION u'\n' # 0x000a -> LINE FEED u'\x0b' # 0x000b -> VERTICAL TABULATION u'\x0c' # 0x000c -> FORM FEED u'\r' # 0x000d -> CARRIAGE RETURN u'\x0e' # 0x000e -> SHIFT OUT u'\x0f' # 0x000f -> SHIFT IN u'\x10' # 0x0010 -> DATA LINK ESCAPE u'\x11' # 0x0011 -> DEVICE CONTROL ONE u'\x12' # 0x0012 -> DEVICE CONTROL TWO u'\x13' # 0x0013 -> DEVICE CONTROL THREE u'\x14' # 0x0014 -> DEVICE CONTROL FOUR u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x0016 -> SYNCHRONOUS IDLE u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK u'\x18' # 0x0018 -> CANCEL u'\x19' # 0x0019 -> END OF MEDIUM u'\x1a' # 0x001a -> SUBSTITUTE u'\x1b' # 0x001b -> ESCAPE u'\x1c' # 0x001c -> FILE SEPARATOR u'\x1d' # 0x001d -> GROUP SEPARATOR u'\x1e' # 0x001e -> RECORD SEPARATOR u'\x1f' # 0x001f -> UNIT SEPARATOR u' ' # 0x0020 -> SPACE u'!' # 0x0021 -> EXCLAMATION MARK u'"' # 0x0022 -> QUOTATION MARK u'#' # 0x0023 -> NUMBER SIGN u'$' # 0x0024 -> DOLLAR SIGN u'%' # 0x0025 -> PERCENT SIGN u'&' # 0x0026 -> AMPERSAND u"'" # 0x0027 -> APOSTROPHE u'(' # 0x0028 -> LEFT PARENTHESIS u')' # 0x0029 -> RIGHT PARENTHESIS u'*' # 0x002a -> ASTERISK u'+' # 0x002b -> PLUS SIGN u',' # 0x002c -> COMMA u'-' # 0x002d -> HYPHEN-MINUS u'.' # 0x002e -> FULL STOP u'/' # 0x002f -> SOLIDUS u'0' # 0x0030 -> DIGIT ZERO u'1' # 0x0031 -> DIGIT ONE u'2' # 0x0032 -> DIGIT TWO u'3' # 0x0033 -> DIGIT THREE u'4' # 0x0034 -> DIGIT FOUR u'5' # 0x0035 -> DIGIT FIVE u'6' # 0x0036 -> DIGIT SIX u'7' # 0x0037 -> DIGIT SEVEN u'8' # 0x0038 -> DIGIT EIGHT u'9' # 0x0039 -> DIGIT NINE u':' # 0x003a -> COLON u';' # 0x003b -> SEMICOLON u'<' # 0x003c -> LESS-THAN SIGN u'=' # 0x003d -> EQUALS SIGN u'>' # 0x003e -> GREATER-THAN SIGN u'?' 
# 0x003f -> QUESTION MARK u'@' # 0x0040 -> COMMERCIAL AT u'A' # 0x0041 -> LATIN CAPITAL LETTER A u'B' # 0x0042 -> LATIN CAPITAL LETTER B u'C' # 0x0043 -> LATIN CAPITAL LETTER C u'D' # 0x0044 -> LATIN CAPITAL LETTER D u'E' # 0x0045 -> LATIN CAPITAL LETTER E u'F' # 0x0046 -> LATIN CAPITAL LETTER F u'G' # 0x0047 -> LATIN CAPITAL LETTER G u'H' # 0x0048 -> LATIN CAPITAL LETTER H u'I' # 0x0049 -> LATIN CAPITAL LETTER I u'J' # 0x004a -> LATIN CAPITAL LETTER J u'K' # 0x004b -> LATIN CAPITAL LETTER K u'L' # 0x004c -> LATIN CAPITAL LETTER L u'M' # 0x004d -> LATIN CAPITAL LETTER M u'N' # 0x004e -> LATIN CAPITAL LETTER N u'O' # 0x004f -> LATIN CAPITAL LETTER O u'P' # 0x0050 -> LATIN CAPITAL LETTER P u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q u'R' # 0x0052 -> LATIN CAPITAL LETTER R u'S' # 0x0053 -> LATIN CAPITAL LETTER S u'T' # 0x0054 -> LATIN CAPITAL LETTER T u'U' # 0x0055 -> LATIN CAPITAL LETTER U u'V' # 0x0056 -> LATIN CAPITAL LETTER V u'W' # 0x0057 -> LATIN CAPITAL LETTER W u'X' # 0x0058 -> LATIN CAPITAL LETTER X u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y u'Z' # 0x005a -> LATIN CAPITAL LETTER Z u'[' # 0x005b -> LEFT SQUARE BRACKET u'\\' # 0x005c -> REVERSE SOLIDUS u']' # 0x005d -> RIGHT SQUARE BRACKET u'^' # 0x005e -> CIRCUMFLEX ACCENT u'_' # 0x005f -> LOW LINE u'`' # 0x0060 -> GRAVE ACCENT u'a' # 0x0061 -> LATIN SMALL LETTER A u'b' # 0x0062 -> LATIN SMALL LETTER B u'c' # 0x0063 -> LATIN SMALL LETTER C u'd' # 0x0064 -> LATIN SMALL LETTER D u'e' # 0x0065 -> LATIN SMALL LETTER E u'f' # 0x0066 -> LATIN SMALL LETTER F u'g' # 0x0067 -> LATIN SMALL LETTER G u'h' # 0x0068 -> LATIN SMALL LETTER H u'i' # 0x0069 -> LATIN SMALL LETTER I u'j' # 0x006a -> LATIN SMALL LETTER J u'k' # 0x006b -> LATIN SMALL LETTER K u'l' # 0x006c -> LATIN SMALL LETTER L u'm' # 0x006d -> LATIN SMALL LETTER M u'n' # 0x006e -> LATIN SMALL LETTER N u'o' # 0x006f -> LATIN SMALL LETTER O u'p' # 0x0070 -> LATIN SMALL LETTER P u'q' # 0x0071 -> LATIN SMALL LETTER Q u'r' # 0x0072 -> LATIN SMALL LETTER R u's' # 0x0073 -> LATIN SMALL LETTER S u't' # 0x0074 -> LATIN SMALL LETTER T u'u' # 0x0075 -> LATIN SMALL LETTER U u'v' # 0x0076 -> LATIN SMALL LETTER V u'w' # 0x0077 -> LATIN SMALL LETTER W u'x' # 0x0078 -> LATIN SMALL LETTER X u'y' # 0x0079 -> LATIN SMALL LETTER Y u'z' # 0x007a -> LATIN SMALL LETTER Z u'{' # 0x007b -> LEFT CURLY BRACKET u'|' # 0x007c -> VERTICAL LINE u'}' # 0x007d -> RIGHT CURLY BRACKET u'~' # 0x007e -> TILDE u'\x7f' # 0x007f -> DELETE u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA u'\u0426' # 0x0096 -> CYRILLIC 
CAPITAL LETTER TSE u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE u'\u2591' # 0x00b0 -> LIGHT SHADE u'\u2592' # 0x00b1 -> MEDIUM SHADE u'\u2593' # 0x00b2 -> DARK SHADE u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE u'\u256b' # 0x00d7 -> 
BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2588' # 0x00db -> FULL BLOCK u'\u2584' # 0x00dc -> LOWER HALF BLOCK u'\u258c' # 0x00dd -> LEFT HALF BLOCK u'\u2590' # 0x00de -> RIGHT HALF BLOCK u'\u2580' # 0x00df -> UPPER HALF BLOCK u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U u'\xb0' # 0x00f8 -> DEGREE SIGN u'\u2219' # 0x00f9 -> BULLET OPERATOR u'\xb7' # 0x00fa -> MIDDLE DOT u'\u221a' # 0x00fb -> SQUARE ROOT u'\u2116' # 0x00fc -> NUMERO SIGN u'\xa4' # 0x00fd -> CURRENCY SIGN u'\u25a0' # 0x00fe -> BLACK SQUARE u'\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 
0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a4: 0x00fd, # CURRENCY SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO 0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE 0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI 0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U 0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A 0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE 0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE 0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE 0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE 0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE 0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE 0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE 0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I 0x0419: 
0x0089, # CYRILLIC CAPITAL LETTER SHORT I 0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA 0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL 0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM 0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN 0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O 0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE 0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER 0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES 0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE 0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U 0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF 0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA 0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE 0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE 0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA 0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA 0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN 0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU 0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN 0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E 0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU 0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A 0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE 0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE 0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE 0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE 0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE 0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE 0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE 0x0438: 0x00a8, # CYRILLIC SMALL LETTER I 0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I 0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA 0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL 0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM 0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN 0x043e: 0x00ae, # CYRILLIC SMALL LETTER O 0x043f: 0x00af, # CYRILLIC SMALL LETTER PE 0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER 0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES 0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE 0x0443: 0x00e3, # CYRILLIC SMALL LETTER U 0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF 0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA 0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE 0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE 0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA 0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA 0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN 0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU 0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN 0x044d: 0x00ed, # CYRILLIC SMALL LETTER E 0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU 0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA 0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO 0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE 0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI 0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U 0x2116: 0x00fc, # NUMERO SIGN 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX 
DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
apache-2.0
4,318,136,999,309,368,000
48.644699
97
0.611278
false
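The cp866 record above is a generated charmap codec: a decoding table maps each byte to a Unicode code point and an encoding map inverts it. A quick round-trip check using the cp866 codec as shipped in the standard library:

# -*- coding: utf-8 -*-
text = u'Привет, мир'            # Cyrillic sample text
raw = text.encode('cp866')       # bytes in the DOS Cyrillic code page
assert raw.decode('cp866') == text
# Box-drawing bytes map to the table entries shown above (0xc9, 0xcd, 0xbb):
assert b'\xc9\xcd\xbb'.decode('cp866') == u'\u2554\u2550\u2557'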
BackupTheBerlios/espressopp
contrib/mpi4py/mpi4py-1.3/test/test_attributes.py
3
8196
from mpi4py import MPI import mpiunittest as unittest from sys import getrefcount as getrc class BaseTestCommAttr(object): keyval = MPI.KEYVAL_INVALID def tearDown(self): self.comm.Free() if self.keyval != MPI.KEYVAL_INVALID: self.keyval = MPI.Comm.Free_keyval(self.keyval) self.assertEqual(self.keyval, MPI.KEYVAL_INVALID) def testAttr(self, copy_fn=None, delete_fn=None): self.keyval = MPI.Comm.Create_keyval(copy_fn, delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attrval = [1,2,3] rc = getrc(attrval) self.comm.Set_attr(self.keyval, attrval) self.assertEqual(getrc(attrval), rc+1) o = self.comm.Get_attr(self.keyval) self.assertTrue(o is attrval) self.assertEqual(getrc(attrval), rc+2) o = None dupcomm = self.comm.Clone() if copy_fn is True: self.assertEqual(getrc(attrval), rc+2) o = dupcomm.Get_attr(self.keyval) if copy_fn is True: self.assertTrue(o is attrval) self.assertEqual(getrc(attrval), rc+3) elif not copy_fn: self.assertTrue(o is None) self.assertEqual(getrc(attrval), rc+1) dupcomm.Free() o = None self.assertEqual(getrc(attrval), rc+1) self.comm.Delete_attr(self.keyval) self.assertEqual(getrc(attrval), rc) o = self.comm.Get_attr(self.keyval) self.assertTrue(o is None) def testAttrCopyFalse(self): self.testAttr(False) def testAttrCopyTrue(self): self.testAttr(True) def testAttrCopyDelete(self): self.keyval = MPI.Comm.Create_keyval( copy_fn=MPI.Comm.Clone, delete_fn=MPI.Comm.Free) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) comm1 = self.comm dupcomm1 = comm1.Clone() rc = getrc(dupcomm1) comm1.Set_attr(self.keyval, dupcomm1) self.assertTrue(dupcomm1 != MPI.COMM_NULL) self.assertTrue(getrc(dupcomm1), rc+1) comm2 = comm1.Clone() dupcomm2 = comm2.Get_attr(self.keyval) self.assertTrue(dupcomm1 != dupcomm2) self.assertTrue(getrc(dupcomm1), rc+1) self.assertTrue(getrc(dupcomm2), 3) comm2.Free() self.assertTrue(dupcomm2 == MPI.COMM_NULL) self.assertTrue(getrc(dupcomm1), rc+1) self.assertTrue(getrc(dupcomm2), 2) self.comm.Delete_attr(self.keyval) self.assertTrue(dupcomm1 == MPI.COMM_NULL) self.assertTrue(getrc(dupcomm1), rc) class TestCommAttrWorld(BaseTestCommAttr, unittest.TestCase): def setUp(self): self.comm = MPI.COMM_WORLD.Dup() class TestCommAttrSelf(BaseTestCommAttr, unittest.TestCase): def setUp(self): self.comm = MPI.COMM_SELF.Dup() class BaseTestDatatypeAttr(object): keyval = MPI.KEYVAL_INVALID def tearDown(self): self.datatype.Free() if self.keyval != MPI.KEYVAL_INVALID: self.keyval = MPI.Datatype.Free_keyval(self.keyval) self.assertEqual(self.keyval, MPI.KEYVAL_INVALID) def testAttr(self, copy_fn=None, delete_fn=None): self.keyval = MPI.Datatype.Create_keyval(copy_fn, delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attrval = [1,2,3] rc = getrc(attrval) self.datatype.Set_attr(self.keyval, attrval) self.assertEqual(getrc(attrval), rc+1) o = self.datatype.Get_attr(self.keyval) self.assertTrue(o is attrval) self.assertEqual(getrc(attrval), rc+2) o = None dupdatatype = self.datatype.Dup() if copy_fn is True: self.assertEqual(getrc(attrval), rc+2) o = dupdatatype.Get_attr(self.keyval) if copy_fn is True: self.assertTrue(o is attrval) self.assertEqual(getrc(attrval), rc+3) elif not copy_fn: self.assertTrue(o is None) self.assertEqual(getrc(attrval), rc+1) dupdatatype.Free() o = None self.assertEqual(getrc(attrval), rc+1) self.datatype.Delete_attr(self.keyval) self.assertEqual(getrc(attrval), rc) o = self.datatype.Get_attr(self.keyval) self.assertTrue(o is None) def testAttrCopyFalse(self): self.testAttr(False) def testAttrCopyTrue(self): 
self.testAttr(True) def testAttrCopyDelete(self): self.keyval = MPI.Datatype.Create_keyval( copy_fn=MPI.Datatype.Dup, delete_fn=MPI.Datatype.Free) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) datatype1 = self.datatype dupdatatype1 = datatype1.Dup() rc = getrc(dupdatatype1) datatype1.Set_attr(self.keyval, dupdatatype1) self.assertTrue(dupdatatype1 != MPI.DATATYPE_NULL) self.assertTrue(getrc(dupdatatype1), rc+1) datatype2 = datatype1.Dup() dupdatatype2 = datatype2.Get_attr(self.keyval) self.assertTrue(dupdatatype1 != dupdatatype2) self.assertTrue(getrc(dupdatatype1), rc+1) self.assertTrue(getrc(dupdatatype2), 3) datatype2.Free() self.assertTrue(dupdatatype2 == MPI.DATATYPE_NULL) self.assertTrue(getrc(dupdatatype1), rc+1) self.assertTrue(getrc(dupdatatype2), 2) self.datatype.Delete_attr(self.keyval) self.assertTrue(dupdatatype1 == MPI.DATATYPE_NULL) self.assertTrue(getrc(dupdatatype1), rc) class TestDatatypeAttrBYTE(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.datatype = MPI.BYTE.Dup() class TestDatatypeAttrINT(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.datatype = MPI.INT.Dup() class TestDatatypeAttrFLOAT(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.datatype = MPI.FLOAT.Dup() class TestWinAttr(unittest.TestCase): keyval = MPI.KEYVAL_INVALID def setUp(self): self.win = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) def tearDown(self): self.win.Free() if self.keyval != MPI.KEYVAL_INVALID: self.keyval = MPI.Win.Free_keyval(self.keyval) self.assertEqual(self.keyval, MPI.KEYVAL_INVALID) def testAttr(self, copy_fn=None, delete_fn=None): self.keyval = MPI.Win.Create_keyval(copy_fn, delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attrval = [1,2,3] rc = getrc(attrval) self.win.Set_attr(self.keyval, attrval) self.assertEqual(getrc(attrval), rc+1) o = self.win.Get_attr(self.keyval) self.assertTrue(o is attrval) self.assertEqual(getrc(attrval), rc+2) o = None self.assertEqual(getrc(attrval), rc+1) self.win.Delete_attr(self.keyval) self.assertEqual(getrc(attrval), rc) o = self.win.Get_attr(self.keyval) self.assertTrue(o is None) def testAttrCopyDelete(self): self.keyval = MPI.Win.Create_keyval(delete_fn=MPI.Win.Free) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) newwin = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) rc = getrc(newwin) # self.win.Set_attr(self.keyval, newwin) self.assertTrue(newwin != MPI.WIN_NULL) self.assertTrue(getrc(newwin), rc+1) # self.win.Delete_attr(self.keyval) self.assertTrue(newwin == MPI.WIN_NULL) self.assertTrue(getrc(newwin), rc) try: k = MPI.Datatype.Create_keyval() k = MPI.Datatype.Free_keyval(k) except NotImplementedError: del TestDatatypeAttrBYTE del TestDatatypeAttrINT del TestDatatypeAttrFLOAT try: k = MPI.Win.Create_keyval() k = MPI.Win.Free_keyval(k) except NotImplementedError: del TestWinAttr _name, _version = MPI.get_vendor() if (_name == 'Open MPI' and _version <= (1, 5, 1)): if MPI.Query_thread() > MPI.THREAD_SINGLE: del BaseTestCommAttr.testAttrCopyDelete del TestWinAttr.testAttrCopyDelete if __name__ == '__main__': unittest.main()
gpl-3.0
3,733,257,960,122,746,000
31.141176
69
0.630063
false
Simran-B/arangodb
3rdParty/V8-4.3.61/third_party/python_26/Lib/numbers.py
59
10271
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Abstract Base Classes (ABCs) for numbers, according to PEP 3141. TODO: Fill out more detailed documentation on the operators.""" from __future__ import division from abc import ABCMeta, abstractmethod, abstractproperty __all__ = ["Number", "Complex", "Real", "Rational", "Integral"] class Number(object): """All numbers inherit from this class. If you just want to check if an argument x is a number, without caring what kind, use isinstance(x, Number). """ __metaclass__ = ABCMeta __slots__ = () # Concrete numeric types must provide their own hash implementation __hash__ = None ## Notes on Decimal ## ---------------- ## Decimal has all of the methods specified by the Real abc, but it should ## not be registered as a Real because decimals do not interoperate with ## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But, ## abstract reals are expected to interoperate (i.e. R1 + R2 should be ## expected to work if R1 and R2 are both Reals). class Complex(Number): """Complex defines the operations that work on the builtin complex type. In short, those are: a conversion to complex, .real, .imag, +, -, *, /, abs(), .conjugate, ==, and !=. If it is given heterogenous arguments, and doesn't have special knowledge about them, it should fall back to the builtin complex type as described below. """ __slots__ = () @abstractmethod def __complex__(self): """Return a builtin complex instance. Called for complex(self).""" # Will be __bool__ in 3.0. def __nonzero__(self): """True if self != 0. Called for bool(self).""" return self != 0 @abstractproperty def real(self): """Retrieve the real component of this number. This should subclass Real. """ raise NotImplementedError @abstractproperty def imag(self): """Retrieve the real component of this number. This should subclass Real. """ raise NotImplementedError @abstractmethod def __add__(self, other): """self + other""" raise NotImplementedError @abstractmethod def __radd__(self, other): """other + self""" raise NotImplementedError @abstractmethod def __neg__(self): """-self""" raise NotImplementedError @abstractmethod def __pos__(self): """+self""" raise NotImplementedError def __sub__(self, other): """self - other""" return self + -other def __rsub__(self, other): """other - self""" return -self + other @abstractmethod def __mul__(self, other): """self * other""" raise NotImplementedError @abstractmethod def __rmul__(self, other): """other * self""" raise NotImplementedError @abstractmethod def __div__(self, other): """self / other without __future__ division May promote to float. """ raise NotImplementedError @abstractmethod def __rdiv__(self, other): """other / self without __future__ division""" raise NotImplementedError @abstractmethod def __truediv__(self, other): """self / other with __future__ division. Should promote to float when necessary. """ raise NotImplementedError @abstractmethod def __rtruediv__(self, other): """other / self with __future__ division""" raise NotImplementedError @abstractmethod def __pow__(self, exponent): """self**exponent; should promote to float or complex when necessary.""" raise NotImplementedError @abstractmethod def __rpow__(self, base): """base ** self""" raise NotImplementedError @abstractmethod def __abs__(self): """Returns the Real distance from 0. 
Called for abs(self).""" raise NotImplementedError @abstractmethod def conjugate(self): """(x+y*i).conjugate() returns (x-y*i).""" raise NotImplementedError @abstractmethod def __eq__(self, other): """self == other""" raise NotImplementedError def __ne__(self, other): """self != other""" # The default __ne__ doesn't negate __eq__ until 3.0. return not (self == other) Complex.register(complex) class Real(Complex): """To Complex, Real adds the operations that work on real numbers. In short, those are: a conversion to float, trunc(), divmod, %, <, <=, >, and >=. Real also provides defaults for the derived operations. """ __slots__ = () @abstractmethod def __float__(self): """Any Real can be converted to a native float object. Called for float(self).""" raise NotImplementedError @abstractmethod def __trunc__(self): """trunc(self): Truncates self to an Integral. Returns an Integral i such that: * i>0 iff self>0; * abs(i) <= abs(self); * for any Integral j satisfying the first two conditions, abs(i) >= abs(j) [i.e. i has "maximal" abs among those]. i.e. "truncate towards 0". """ raise NotImplementedError def __divmod__(self, other): """divmod(self, other): The pair (self // other, self % other). Sometimes this can be computed faster than the pair of operations. """ return (self // other, self % other) def __rdivmod__(self, other): """divmod(other, self): The pair (self // other, self % other). Sometimes this can be computed faster than the pair of operations. """ return (other // self, other % self) @abstractmethod def __floordiv__(self, other): """self // other: The floor() of self/other.""" raise NotImplementedError @abstractmethod def __rfloordiv__(self, other): """other // self: The floor() of other/self.""" raise NotImplementedError @abstractmethod def __mod__(self, other): """self % other""" raise NotImplementedError @abstractmethod def __rmod__(self, other): """other % self""" raise NotImplementedError @abstractmethod def __lt__(self, other): """self < other < on Reals defines a total ordering, except perhaps for NaN.""" raise NotImplementedError @abstractmethod def __le__(self, other): """self <= other""" raise NotImplementedError # Concrete implementations of Complex abstract methods. def __complex__(self): """complex(self) == complex(float(self), 0)""" return complex(float(self)) @property def real(self): """Real numbers are their real component.""" return +self @property def imag(self): """Real numbers have no imaginary component.""" return 0 def conjugate(self): """Conjugate is a no-op for Reals.""" return +self Real.register(float) class Rational(Real): """.numerator and .denominator should be in lowest terms.""" __slots__ = () @abstractproperty def numerator(self): raise NotImplementedError @abstractproperty def denominator(self): raise NotImplementedError # Concrete implementation of Real's conversion to float. def __float__(self): """float(self) = self.numerator / self.denominator It's important that this conversion use the integer's "true" division rather than casting one side to float before dividing so that ratios of huge integers convert without overflowing. """ return self.numerator / self.denominator class Integral(Rational): """Integral adds a conversion to long and the bit-string operations.""" __slots__ = () @abstractmethod def __long__(self): """long(self)""" raise NotImplementedError def __index__(self): """index(self)""" return long(self) @abstractmethod def __pow__(self, exponent, modulus=None): """self ** exponent % modulus, but maybe faster. 
Accept the modulus argument if you want to support the 3-argument version of pow(). Raise a TypeError if exponent < 0 or any argument isn't Integral. Otherwise, just implement the 2-argument version described in Complex. """ raise NotImplementedError @abstractmethod def __lshift__(self, other): """self << other""" raise NotImplementedError @abstractmethod def __rlshift__(self, other): """other << self""" raise NotImplementedError @abstractmethod def __rshift__(self, other): """self >> other""" raise NotImplementedError @abstractmethod def __rrshift__(self, other): """other >> self""" raise NotImplementedError @abstractmethod def __and__(self, other): """self & other""" raise NotImplementedError @abstractmethod def __rand__(self, other): """other & self""" raise NotImplementedError @abstractmethod def __xor__(self, other): """self ^ other""" raise NotImplementedError @abstractmethod def __rxor__(self, other): """other ^ self""" raise NotImplementedError @abstractmethod def __or__(self, other): """self | other""" raise NotImplementedError @abstractmethod def __ror__(self, other): """other | self""" raise NotImplementedError @abstractmethod def __invert__(self): """~self""" raise NotImplementedError # Concrete implementations of Rational and Real abstract methods. def __float__(self): """float(self) == float(long(self))""" return float(long(self)) @property def numerator(self): """Integers are their own numerators.""" return +self @property def denominator(self): """Integers have a denominator of 1.""" return 1 Integral.register(int) Integral.register(long)
apache-2.0
3,724,814,421,603,377,700
25.268542
80
0.595852
false
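The numbers.py record above defines the numeric tower as ABCs and attaches concrete types with register() rather than inheritance, so isinstance checks work across the whole tower. A short sketch against the modern stdlib numbers module:

import numbers
from fractions import Fraction

assert isinstance(3, numbers.Integral)
assert isinstance(2.5, numbers.Real)
assert isinstance(Fraction(1, 3), numbers.Rational)
assert isinstance(1 + 2j, numbers.Complex)
# complex is registered only as Complex, so ordering operations are not implied:
assert not isinstance(1 + 2j, numbers.Real)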
mgracer48/panda3d
direct/src/distributed/MsgTypes.py
5
5594
"""MsgTypes module: contains distributed object message types""" from direct.showbase.PythonUtil import invertDictLossless MsgName2Id = { # 2 new params: passwd, char bool 0/1 1 = new account # 2 new return values: 129 = not found, 12 = bad passwd, 'CLIENT_LOGIN': 1, 'CLIENT_LOGIN_RESP': 2, 'CLIENT_GET_AVATARS': 3, # Sent by the server when it is dropping the connection deliberately. 'CLIENT_GO_GET_LOST': 4, 'CLIENT_GET_AVATARS_RESP': 5, 'CLIENT_CREATE_AVATAR': 6, 'CLIENT_CREATE_AVATAR_RESP': 7, 'CLIENT_GET_FRIEND_LIST': 10, 'CLIENT_GET_FRIEND_LIST_RESP': 11, 'CLIENT_GET_AVATAR_DETAILS': 14, 'CLIENT_GET_AVATAR_DETAILS_RESP': 15, 'CLIENT_LOGIN_2': 16, 'CLIENT_LOGIN_2_RESP': 17, 'CLIENT_OBJECT_UPDATE_FIELD': 24, 'CLIENT_OBJECT_UPDATE_FIELD_RESP': 24, 'CLIENT_OBJECT_DISABLE': 25, 'CLIENT_OBJECT_DISABLE_RESP': 25, 'CLIENT_OBJECT_DISABLE_OWNER': 26, 'CLIENT_OBJECT_DISABLE_OWNER_RESP': 26, 'CLIENT_OBJECT_DELETE': 27, 'CLIENT_OBJECT_DELETE_RESP': 27, 'CLIENT_SET_ZONE_CMU': 29, 'CLIENT_REMOVE_ZONE': 30, 'CLIENT_SET_AVATAR': 32, 'CLIENT_CREATE_OBJECT_REQUIRED': 34, 'CLIENT_CREATE_OBJECT_REQUIRED_RESP': 34, 'CLIENT_CREATE_OBJECT_REQUIRED_OTHER': 35, 'CLIENT_CREATE_OBJECT_REQUIRED_OTHER_RESP': 35, 'CLIENT_CREATE_OBJECT_REQUIRED_OTHER_OWNER': 36, 'CLIENT_CREATE_OBJECT_REQUIRED_OTHER_OWNER_RESP':36, 'CLIENT_REQUEST_GENERATES': 36, 'CLIENT_DISCONNECT': 37, 'CLIENT_GET_STATE_RESP': 47, 'CLIENT_DONE_INTEREST_RESP': 48, 'CLIENT_DELETE_AVATAR': 49, 'CLIENT_DELETE_AVATAR_RESP': 5, 'CLIENT_HEARTBEAT': 52, 'CLIENT_FRIEND_ONLINE': 53, 'CLIENT_FRIEND_OFFLINE': 54, 'CLIENT_REMOVE_FRIEND': 56, 'CLIENT_CHANGE_PASSWORD': 65, 'CLIENT_SET_NAME_PATTERN': 67, 'CLIENT_SET_NAME_PATTERN_ANSWER': 68, 'CLIENT_SET_WISHNAME': 70, 'CLIENT_SET_WISHNAME_RESP': 71, 'CLIENT_SET_WISHNAME_CLEAR': 72, 'CLIENT_SET_SECURITY': 73, 'CLIENT_SET_DOID_RANGE': 74, 'CLIENT_GET_AVATARS_RESP2': 75, 'CLIENT_CREATE_AVATAR2': 76, 'CLIENT_SYSTEM_MESSAGE': 78, 'CLIENT_SET_AVTYPE': 80, 'CLIENT_GET_PET_DETAILS': 81, 'CLIENT_GET_PET_DETAILS_RESP': 82, 'CLIENT_ADD_INTEREST': 97, 'CLIENT_REMOVE_INTEREST': 99, 'CLIENT_OBJECT_LOCATION': 102, 'CLIENT_LOGIN_3': 111, 'CLIENT_LOGIN_3_RESP': 110, 'CLIENT_GET_FRIEND_LIST_EXTENDED': 115, 'CLIENT_GET_FRIEND_LIST_EXTENDED_RESP': 116, 'CLIENT_SET_FIELD_SENDABLE': 120, 'CLIENT_SYSTEMMESSAGE_AKNOWLEDGE': 123, 'CLIENT_CHANGE_GENERATE_ORDER': 124, # new toontown specific login message, adds last logged in, and if child account has parent acount 'CLIENT_LOGIN_TOONTOWN': 125, 'CLIENT_LOGIN_TOONTOWN_RESP': 126, 'STATESERVER_OBJECT_GENERATE_WITH_REQUIRED': 2001, 'STATESERVER_OBJECT_GENERATE_WITH_REQUIRED_OTHER': 2003, 'STATESERVER_OBJECT_UPDATE_FIELD': 2004, 'STATESERVER_OBJECT_CREATE_WITH_REQUIRED_CONTEXT': 2050, 'STATESERVER_OBJECT_CREATE_WITH_REQUIR_OTHER_CONTEXT': 2051, 'STATESERVER_BOUNCE_MESSAGE': 2086, } # create id->name table for debugging MsgId2Names = invertDictLossless(MsgName2Id) # put msg names in module scope, assigned to msg value globals().update(MsgName2Id) # These messages are ignored when the client is headed to the quiet zone QUIET_ZONE_IGNORED_LIST = [ # We mustn't ignore updates, because some updates for localToon # are always important. #CLIENT_OBJECT_UPDATE_FIELD, # These are now handled. If it is a create for a class that is in the # uber zone, we should create it. #CLIENT_CREATE_OBJECT_REQUIRED, #CLIENT_CREATE_OBJECT_REQUIRED_OTHER, ] # The following is a different set of numbers from above. # These are the sub-message types for CLIENT_LOGIN_2. 
CLIENT_LOGIN_2_GREEN = 1 # Disney's GoReg subscription token, not used. CLIENT_LOGIN_2_PLAY_TOKEN = 2 # VR Studio PlayToken. CLIENT_LOGIN_2_BLUE = 3 # The international GoReg token. CLIENT_LOGIN_3_DISL_TOKEN = 4 # SSL encoded blob from DISL system.
bsd-3-clause
-4,714,705,701,539,997,000
41.70229
102
0.499821
false
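The MsgTypes module above builds a reverse id-to-name table with invertDictLossless from direct.showbase.PythonUtil. Because several message names share one numeric id (for example CLIENT_OBJECT_UPDATE_FIELD and its _RESP are both 24), a lossless inversion has to collect a list of names per id. The snippet below is a minimal standalone sketch of that behaviour; it approximates the helper for illustration rather than reproducing Panda3D's implementation.

# Minimal sketch approximating direct.showbase.PythonUtil.invertDictLossless;
# the real helper may differ in details.
def invert_dict_lossless(mapping):
    """Invert {name: id} into {id: [names]}, keeping duplicate ids."""
    inverted = {}
    for name, value in mapping.items():
        inverted.setdefault(value, []).append(name)
    return inverted

if __name__ == "__main__":
    msg_name_to_id = {
        "CLIENT_OBJECT_UPDATE_FIELD": 24,       # request and response share an id
        "CLIENT_OBJECT_UPDATE_FIELD_RESP": 24,
        "CLIENT_HEARTBEAT": 52,
    }
    print(invert_dict_lossless(msg_name_to_id))
    # {24: ['CLIENT_OBJECT_UPDATE_FIELD', 'CLIENT_OBJECT_UPDATE_FIELD_RESP'], 52: ['CLIENT_HEARTBEAT']}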
Bulochkin/tensorflow_pack
tensorflow/python/kernel_tests/distributions/multinomial_test.py
48
13141
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import multinomial from tensorflow.python.platform import test class MultinomialTest(test.TestCase): def setUp(self): self._rng = np.random.RandomState(42) def testSimpleShapes(self): with self.test_session(): p = [.1, .3, .6] dist = multinomial.Multinomial(total_count=1., probs=p) self.assertEqual(3, dist.event_shape_tensor().eval()) self.assertAllEqual([], dist.batch_shape_tensor().eval()) self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape) self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape) def testComplexShapes(self): with self.test_session(): p = 0.5 * np.ones([3, 2, 2], dtype=np.float32) n = [[3., 2], [4, 5], [6, 7]] dist = multinomial.Multinomial(total_count=n, probs=p) self.assertEqual(2, dist.event_shape_tensor().eval()) self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval()) self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape) self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape) def testN(self): p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]] n = [[3.], [4]] with self.test_session(): dist = multinomial.Multinomial(total_count=n, probs=p) self.assertEqual((2, 1), dist.total_count.get_shape()) self.assertAllClose(n, dist.total_count.eval()) def testP(self): p = [[0.1, 0.2, 0.7]] with self.test_session(): dist = multinomial.Multinomial(total_count=3., probs=p) self.assertEqual((1, 3), dist.probs.get_shape()) self.assertEqual((1, 3), dist.logits.get_shape()) self.assertAllClose(p, dist.probs.eval()) def testLogits(self): p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32) logits = np.log(p) - 50. with self.test_session(): multinom = multinomial.Multinomial(total_count=3., logits=logits) self.assertEqual((1, 3), multinom.probs.get_shape()) self.assertEqual((1, 3), multinom.logits.get_shape()) self.assertAllClose(p, multinom.probs.eval()) self.assertAllClose(logits, multinom.logits.eval()) def testPmfandCountsAgree(self): p = [[0.1, 0.2, 0.7]] n = [[5.]] with self.test_session(): dist = multinomial.Multinomial(total_count=n, probs=p, validate_args=True) dist.prob([2., 3, 0]).eval() dist.prob([3., 0, 2]).eval() with self.assertRaisesOpError("must be non-negative"): dist.prob([-1., 4, 2]).eval() with self.assertRaisesOpError("counts must sum to `self.total_count`"): dist.prob([3., 3, 0]).eval() def testPmfNonIntegerCounts(self): p = [[0.1, 0.2, 0.7]] n = [[5.]] with self.test_session(): # No errors with integer n. 
multinom = multinomial.Multinomial( total_count=n, probs=p, validate_args=True) multinom.prob([2., 1, 2]).eval() multinom.prob([3., 0, 2]).eval() # Counts don't sum to n. with self.assertRaisesOpError("counts must sum to `self.total_count`"): multinom.prob([2., 3, 2]).eval() # Counts are non-integers. x = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError( "cannot contain fractional components."): multinom.prob(x).eval(feed_dict={x: [1.0, 2.5, 1.5]}) multinom = multinomial.Multinomial( total_count=n, probs=p, validate_args=False) multinom.prob([1., 2., 2.]).eval() # Non-integer arguments work. multinom.prob([1.0, 2.5, 1.5]).eval() def testPmfBothZeroBatches(self): with self.test_session(): # Both zero-batches. No broadcast p = [0.5, 0.5] counts = [1., 0] pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts) self.assertAllClose(0.5, pmf.eval()) self.assertEqual((), pmf.get_shape()) def testPmfBothZeroBatchesNontrivialN(self): with self.test_session(): # Both zero-batches. No broadcast p = [0.1, 0.9] counts = [3., 2] dist = multinomial.Multinomial(total_count=5., probs=p) pmf = dist.prob(counts) # 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000. self.assertAllClose(81. / 10000, pmf.eval()) self.assertEqual((), pmf.get_shape()) def testPmfPStretchedInBroadcastWhenSameRank(self): with self.test_session(): p = [[0.1, 0.9]] counts = [[1., 0], [0, 1]] pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts) self.assertAllClose([0.1, 0.9], pmf.eval()) self.assertEqual((2), pmf.get_shape()) def testPmfPStretchedInBroadcastWhenLowerRank(self): with self.test_session(): p = [0.1, 0.9] counts = [[1., 0], [0, 1]] pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts) self.assertAllClose([0.1, 0.9], pmf.eval()) self.assertEqual((2), pmf.get_shape()) def testPmfCountsStretchedInBroadcastWhenSameRank(self): with self.test_session(): p = [[0.1, 0.9], [0.7, 0.3]] counts = [[1., 0]] pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts) self.assertAllClose(pmf.eval(), [0.1, 0.7]) self.assertEqual((2), pmf.get_shape()) def testPmfCountsStretchedInBroadcastWhenLowerRank(self): with self.test_session(): p = [[0.1, 0.9], [0.7, 0.3]] counts = [1., 0] pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts) self.assertAllClose(pmf.eval(), [0.1, 0.7]) self.assertEqual(pmf.get_shape(), (2)) def testPmfShapeCountsStretchedN(self): with self.test_session(): # [2, 2, 2] p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]] # [2, 2] n = [[3., 3], [3, 3]] # [2] counts = [2., 1] pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts) pmf.eval() self.assertEqual(pmf.get_shape(), (2, 2)) def testPmfShapeCountsPStretchedN(self): with self.test_session(): p = [0.1, 0.9] counts = [3., 2] n = np.full([4, 3], 5., dtype=np.float32) pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts) pmf.eval() self.assertEqual((4, 3), pmf.get_shape()) def testMultinomialMean(self): with self.test_session(): n = 5. p = [0.1, 0.2, 0.7] dist = multinomial.Multinomial(total_count=n, probs=p) expected_means = 5 * np.array(p, dtype=np.float32) self.assertEqual((3,), dist.mean().get_shape()) self.assertAllClose(expected_means, dist.mean().eval()) def testMultinomialCovariance(self): with self.test_session(): n = 5. p = [0.1, 0.2, 0.7] dist = multinomial.Multinomial(total_count=n, probs=p) expected_covariances = [[9. 
/ 20, -1 / 10, -7 / 20], [-1 / 10, 4 / 5, -7 / 10], [-7 / 20, -7 / 10, 21 / 20]] self.assertEqual((3, 3), dist.covariance().get_shape()) self.assertAllClose(expected_covariances, dist.covariance().eval()) def testMultinomialCovarianceBatch(self): with self.test_session(): # Shape [2] n = [5.] * 2 # Shape [4, 1, 2] p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2 dist = multinomial.Multinomial(total_count=n, probs=p) # Shape [2, 2] inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]] # Shape [4, 2, 2, 2] expected_covariances = [[inner_var, inner_var]] * 4 self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape()) self.assertAllClose(expected_covariances, dist.covariance().eval()) def testCovarianceMultidimensional(self): # Shape [3, 5, 4] p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32) # Shape [6, 3, 3] p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32) ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32) ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32) with self.test_session(): dist = multinomial.Multinomial(ns, p) dist2 = multinomial.Multinomial(ns2, p2) covariance = dist.covariance() covariance2 = dist2.covariance() self.assertEqual((3, 5, 4, 4), covariance.get_shape()) self.assertEqual((6, 3, 3, 3), covariance2.get_shape()) def testCovarianceFromSampling(self): # We will test mean, cov, var, stddev on a DirichletMultinomial constructed # via broadcast between alpha, n. theta = np.array([[1., 2, 3], [2.5, 4, 0.01]], dtype=np.float32) theta /= np.sum(theta, 1)[..., array_ops.newaxis] # Ideally we'd be able to test broadcasting but, the multinomial sampler # doesn't support different total counts. n = np.float32(5) with self.test_session() as sess: # batch_shape=[2], event_shape=[3] dist = multinomial.Multinomial(n, theta) x = dist.sample(int(250e3), seed=1) sample_mean = math_ops.reduce_mean(x, 0) x_centered = x - sample_mean[array_ops.newaxis, ...] sample_cov = math_ops.reduce_mean(math_ops.matmul( x_centered[..., array_ops.newaxis], x_centered[..., array_ops.newaxis, :]), 0) sample_var = array_ops.matrix_diag_part(sample_cov) sample_stddev = math_ops.sqrt(sample_var) [ sample_mean_, sample_cov_, sample_var_, sample_stddev_, analytic_mean, analytic_cov, analytic_var, analytic_stddev, ] = sess.run([ sample_mean, sample_cov, sample_var, sample_stddev, dist.mean(), dist.covariance(), dist.variance(), dist.stddev(), ]) self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.01) self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.01) self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.01) self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.01) def testSampleUnbiasedNonScalarBatch(self): with self.test_session() as sess: dist = multinomial.Multinomial( total_count=5., logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32))) n = int(3e3) x = dist.sample(n, seed=0) sample_mean = math_ops.reduce_mean(x, 0) # Cyclically rotate event dims left. 
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0]) sample_covariance = math_ops.matmul( x_centered, x_centered, adjoint_b=True) / n [ sample_mean_, sample_covariance_, actual_mean_, actual_covariance_, ] = sess.run([ sample_mean, sample_covariance, dist.mean(), dist.covariance(), ]) self.assertAllEqual([4, 3, 2], sample_mean.get_shape()) self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07) self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape()) self.assertAllClose( actual_covariance_, sample_covariance_, atol=0., rtol=0.10) def testSampleUnbiasedScalarBatch(self): with self.test_session() as sess: dist = multinomial.Multinomial( total_count=5., logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32))) n = int(5e3) x = dist.sample(n, seed=0) sample_mean = math_ops.reduce_mean(x, 0) x_centered = x - sample_mean # Already transposed to [n, 2]. sample_covariance = math_ops.matmul( x_centered, x_centered, adjoint_a=True) / n [ sample_mean_, sample_covariance_, actual_mean_, actual_covariance_, ] = sess.run([ sample_mean, sample_covariance, dist.mean(), dist.covariance(), ]) self.assertAllEqual([4], sample_mean.get_shape()) self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07) self.assertAllEqual([4, 4], sample_covariance.get_shape()) self.assertAllClose( actual_covariance_, sample_covariance_, atol=0., rtol=0.10) if __name__ == "__main__": test.main()
apache-2.0
-627,194,293,295,146,100
37.311953
80
0.602542
false
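The constants hard-coded in the multinomial tests above follow from the closed-form pmf and covariance of the distribution. The NumPy-only check below reproduces two of them without TensorFlow; it is an illustration of the arithmetic spelled out in the test comments, not part of the test suite.

# Standalone NumPy check of the constants used in the tests above.
import numpy as np
from math import factorial

def multinomial_pmf(counts, n, p):
    counts = np.asarray(counts)
    coeff = factorial(int(n)) / np.prod([factorial(int(c)) for c in counts])
    return coeff * np.prod(np.asarray(p) ** counts)

def multinomial_covariance(n, p):
    # Cov[i, i] = n * p_i * (1 - p_i), Cov[i, j] = -n * p_i * p_j
    p = np.asarray(p)
    return n * (np.diag(p) - np.outer(p, p))

# 5 choose 3 = 10, so pmf([3, 2]) = 10 * 0.1**3 * 0.9**2 = 81 / 10000
print(multinomial_pmf([3.0, 2.0], 5.0, [0.1, 0.9]))      # 0.0081

# Matches expected_covariances = [[9/20, -1/10, -7/20], ...] in the test
print(multinomial_covariance(5.0, [0.1, 0.2, 0.7]))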
sebmarchand/syzygy
third_party/numpy/files/numpy/distutils/fcompiler/gnu.py
85
14383
import re import os import sys import warnings import platform import tempfile from subprocess import Popen, PIPE, STDOUT from numpy.distutils.cpuinfo import cpu from numpy.distutils.fcompiler import FCompiler from numpy.distutils.exec_command import exec_command from numpy.distutils.misc_util import msvc_runtime_library from numpy.distutils.compat import get_exception compilers = ['GnuFCompiler', 'Gnu95FCompiler'] TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)") # XXX: handle cross compilation def is_win64(): return sys.platform == "win32" and platform.architecture()[0] == "64bit" if is_win64(): #_EXTRAFLAGS = ["-fno-leading-underscore"] _EXTRAFLAGS = [] else: _EXTRAFLAGS = [] class GnuFCompiler(FCompiler): compiler_type = 'gnu' compiler_aliases = ('g77',) description = 'GNU Fortran 77 compiler' def gnu_version_match(self, version_string): """Handle the different versions of GNU fortran compilers""" m = re.match(r'GNU Fortran', version_string) if not m: return None m = re.match(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) if m: return ('gfortran', m.group(1)) m = re.match(r'GNU Fortran.*?([0-9-.]+)', version_string) if m: v = m.group(1) if v.startswith('0') or v.startswith('2') or v.startswith('3'): # the '0' is for early g77's return ('g77', v) else: # at some point in the 4.x series, the ' 95' was dropped # from the version string return ('gfortran', v) def version_match(self, version_string): v = self.gnu_version_match(version_string) if not v or v[0] != 'g77': return None return v[1] # 'g77 --version' results # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release) # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian) # GNU Fortran (GCC) 3.3.3 (Debian 20040401) # GNU Fortran 0.5.25 20010319 (prerelease) # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5) # GNU Fortran (GCC) 3.4.2 (mingw-special) possible_executables = ['g77', 'f77'] executables = { 'version_cmd' : [None, "--version"], 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes 'compiler_fix' : None, 'linker_so' : [None, "-g", "-Wall"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-g", "-Wall"] } module_dir_switch = None module_include_switch = None # Cygwin: f771: warning: -fPIC ignored for target (all code is # position independent) if os.name != 'nt' and sys.platform != 'cygwin': pic_flags = ['-fPIC'] # use -mno-cygwin for g77 when Python is not Cygwin-Python if sys.platform == 'win32': for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: executables[key].append('-mno-cygwin') g2c = 'g2c' suggested_f90_compiler = 'gnu95' #def get_linker_so(self): # # win32 linking should be handled by standard linker # # Darwin g77 cannot be used as a linker. # #if re.match(r'(darwin)', sys.platform): # # return # return FCompiler.get_linker_so(self) def get_flags_linker_so(self): opt = self.linker_so[1:] if sys.platform=='darwin': target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value # and leave it alone. But, distutils will complain if the # environment's value is different from the one in the Python # Makefile used to build Python. We let disutils handle this # error checking. 
if not target: # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, # we try to get it first from the Python Makefile and then we # fall back to setting it to 10.3 to maximize the set of # versions we can work with. This is a reasonable default # even when using the official Python dist and those derived # from it. import distutils.sysconfig as sc g = {} filename = sc.get_makefile_filename() sc.parse_makefile(filename, g) target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') os.environ['MACOSX_DEPLOYMENT_TARGET'] = target if target == '10.3': s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' warnings.warn(s) opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: opt.append("-shared") if sys.platform.startswith('sunos'): # SunOS often has dynamically loaded symbols defined in the # static library libg2c.a The linker doesn't like this. To # ignore the problem, use the -mimpure-text flag. It isn't # the safest thing, but seems to work. 'man gcc' says: # ".. Instead of using -mimpure-text, you should compile all # source code with -fpic or -fPIC." opt.append('-mimpure-text') return opt def get_libgcc_dir(self): status, output = exec_command(self.compiler_f77 + ['-print-libgcc-file-name'], use_tee=0) if not status: return os.path.dirname(output) return None def get_library_dirs(self): opt = [] if sys.platform[:5] != 'linux': d = self.get_libgcc_dir() if d: # if windows and not cygwin, libg2c lies in a different folder if sys.platform == 'win32' and not d.startswith('/usr/lib'): d = os.path.normpath(d) if not os.path.exists(os.path.join(d, "lib%s.a" % self.g2c)): d2 = os.path.abspath(os.path.join(d, '../../../../lib')) if os.path.exists(os.path.join(d2, "lib%s.a" % self.g2c)): opt.append(d2) opt.append(d) return opt def get_libraries(self): opt = [] d = self.get_libgcc_dir() if d is not None: g2c = self.g2c + '-pic' f = self.static_lib_format % (g2c, self.static_lib_extension) if not os.path.isfile(os.path.join(d,f)): g2c = self.g2c else: g2c = self.g2c if g2c is not None: opt.append(g2c) c_compiler = self.c_compiler if sys.platform == 'win32' and c_compiler and \ c_compiler.compiler_type=='msvc': # the following code is not needed (read: breaks) when using MinGW # in case want to link F77 compiled code with MSVC opt.append('gcc') runtime_lib = msvc_runtime_library() if runtime_lib: opt.append(runtime_lib) if sys.platform == 'darwin': opt.append('cc_dynamic') return opt def get_flags_debug(self): return ['-g'] def get_flags_opt(self): v = self.get_version() if v and v<='3.3.3': # With this compiler version building Fortran BLAS/LAPACK # with -O3 caused failures in lib.lapack heevr,syevr tests. 
opt = ['-O2'] else: opt = ['-O3'] opt.append('-funroll-loops') return opt def _c_arch_flags(self): """ Return detected arch flags from CFLAGS """ from distutils import sysconfig try: cflags = sysconfig.get_config_vars()['CFLAGS'] except KeyError: return [] arch_re = re.compile(r"-arch\s+(\w+)") arch_flags = [] for arch in arch_re.findall(cflags): arch_flags += ['-arch', arch] return arch_flags def get_flags_arch(self): return [] class Gnu95FCompiler(GnuFCompiler): compiler_type = 'gnu95' compiler_aliases = ('gfortran',) description = 'GNU Fortran 95 compiler' def version_match(self, version_string): v = self.gnu_version_match(version_string) if not v or v[0] != 'gfortran': return None v = v[1] if v>='4.': # gcc-4 series releases do not support -mno-cygwin option pass else: # use -mno-cygwin flag for gfortran when Python is not Cygwin-Python if sys.platform == 'win32': for key in ['version_cmd', 'compiler_f77', 'compiler_f90', 'compiler_fix', 'linker_so', 'linker_exe']: self.executables[key].append('-mno-cygwin') return v # 'gfortran --version' results: # XXX is the below right? # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3)) # GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21) # OS X: GNU Fortran 95 (GCC) 4.1.0 # GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental) # GNU Fortran (GCC) 4.3.0 20070316 (experimental) possible_executables = ['gfortran', 'f95'] executables = { 'version_cmd' : ["<F90>", "--version"], 'compiler_f77' : [None, "-Wall", "-ffixed-form", "-fno-second-underscore"] + _EXTRAFLAGS, 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"] + _EXTRAFLAGS, 'compiler_fix' : [None, "-Wall", "-ffixed-form", "-fno-second-underscore"] + _EXTRAFLAGS, 'linker_so' : ["<F90>", "-Wall"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-Wall"] } module_dir_switch = '-J' module_include_switch = '-I' g2c = 'gfortran' def _universal_flags(self, cmd): """Return a list of -arch flags for every supported architecture.""" if not sys.platform == 'darwin': return [] arch_flags = [] # get arches the C compiler gets. 
c_archs = self._c_arch_flags() if "i386" in c_archs: c_archs[c_archs.index("i386")] = "i686" # check the arches the Fortran compiler supports, and compare with # arch flags from C compiler for arch in ["ppc", "i686", "x86_64", "ppc64"]: if _can_target(cmd, arch) and arch in c_archs: arch_flags.extend(["-arch", arch]) return arch_flags def get_flags(self): flags = GnuFCompiler.get_flags(self) arch_flags = self._universal_flags(self.compiler_f90) if arch_flags: flags[:0] = arch_flags return flags def get_flags_linker_so(self): flags = GnuFCompiler.get_flags_linker_so(self) arch_flags = self._universal_flags(self.linker_so) if arch_flags: flags[:0] = arch_flags return flags def get_library_dirs(self): opt = GnuFCompiler.get_library_dirs(self) if sys.platform == 'win32': c_compiler = self.c_compiler if c_compiler and c_compiler.compiler_type == "msvc": target = self.get_target() if target: d = os.path.normpath(self.get_libgcc_dir()) root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) mingwdir = os.path.normpath(os.path.join(root, target, "lib")) full = os.path.join(mingwdir, "libmingwex.a") if os.path.exists(full): opt.append(mingwdir) return opt def get_libraries(self): opt = GnuFCompiler.get_libraries(self) if sys.platform == 'darwin': opt.remove('cc_dynamic') if sys.platform == 'win32': c_compiler = self.c_compiler if c_compiler and c_compiler.compiler_type == "msvc": if "gcc" in opt: i = opt.index("gcc") opt.insert(i+1, "mingwex") opt.insert(i+1, "mingw32") # XXX: fix this mess, does not work for mingw if is_win64(): c_compiler = self.c_compiler if c_compiler and c_compiler.compiler_type == "msvc": return [] else: raise NotImplementedError("Only MS compiler supported with gfortran on win64") return opt def get_target(self): status, output = exec_command(self.compiler_f77 + ['-v'], use_tee=0) if not status: m = TARGET_R.search(output) if m: return m.group(1) return "" def get_flags_opt(self): if is_win64(): return ['-O0'] else: return GnuFCompiler.get_flags_opt(self) def _can_target(cmd, arch): """Return true is the command supports the -arch flag for the given architecture.""" newcmd = cmd[:] fid, filename = tempfile.mkstemp(suffix=".f") try: d = os.path.dirname(filename) output = os.path.splitext(filename)[0] + ".o" try: newcmd.extend(["-arch", arch, "-c", filename]) p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) p.communicate() return p.returncode == 0 finally: if os.path.exists(output): os.remove(output) finally: os.remove(filename) return False if __name__ == '__main__': from distutils import log log.set_verbosity(2) compiler = GnuFCompiler() compiler.customize() print(compiler.get_version()) raw_input('Press ENTER to continue...') try: compiler = Gnu95FCompiler() compiler.customize() print(compiler.get_version()) except Exception: msg = get_exception() print(msg) raw_input('Press ENTER to continue...')
apache-2.0
-6,499,297,468,068,734,000
36.455729
109
0.538761
false
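gnu_version_match in the file above decides between g77 and gfortran by inspecting the '--version' banner: a 'GNU Fortran 95' prefix or a 4.x-style version means gfortran, while 0.x, 2.x and 3.x versions are treated as g77. The sketch below replays that classification on two of the sample banners quoted in the file's comments; it is a simplified standalone rendering of the same regex logic, not numpy.distutils itself.

# Illustrative re-implementation of the version-string classification above.
import re

def classify_gnu_fortran(version_string):
    if not re.match(r'GNU Fortran', version_string):
        return None
    m = re.match(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
    if m:
        return ('gfortran', m.group(1))
    m = re.match(r'GNU Fortran.*?([0-9-.]+)', version_string)
    if m:
        v = m.group(1)
        # 0.x, 2.x and 3.x releases predate gfortran, so they are g77
        if v.startswith(('0', '2', '3')):
            return ('g77', v)
        return ('gfortran', v)
    return None

print(classify_gnu_fortran('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)'))
# ('g77', '3.3.3')
print(classify_gnu_fortran('GNU Fortran (GCC) 4.3.0 20070316 (experimental)'))
# ('gfortran', '4.3.0')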
KirtoXX/Security_Camera
ssd_mobilenet/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py
13
9042
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inception Resnet v2 Faster R-CNN implementation. See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) as well as "Speed/accuracy trade-offs for modern convolutional object detectors" by Huang et al. (https://arxiv.org/abs/1611.10012) """ import tensorflow as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from nets import inception_resnet_v2 slim = tf.contrib.slim class FasterRCNNInceptionResnetV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. 
with slim.arg_scope([slim.batch_norm], is_training=False): with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights) as scope: rpn_feature_map, _ = ( inception_resnet_v2.inception_resnet_v2_base( preprocessed_inputs, final_endpoint='PreAuxLogits', scope=scope, output_stride=self._first_stage_features_stride, align_feature_maps=True)) return rpn_feature_map def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights): with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=False): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'): with tf.variable_scope('Mixed_7a'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv_1 = slim.conv2d( tower_conv, 384, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): tower_conv1 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d( tower_conv1, 288, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): tower_conv2 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d( tower_conv2_1, 320, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_3'): tower_pool = slim.max_pool2d( proposal_feature_maps, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3') net = tf.concat( [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20) net = inception_resnet_v2.block8(net, activation_fn=None) proposal_classifier_features = slim.conv2d( net, 1536, 1, scope='Conv2d_7b_1x1') return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for InceptionResnetV2 checkpoints. TODO: revisit whether it's possible to force the `Repeat` namescope as created in `_extract_box_classifier_features` to start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can be used. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. 
""" variables_to_restore = {} for variable in tf.global_variables(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') var_name = var_name.replace( second_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore
apache-2.0
-2,878,342,389,594,409,500
42.263158
80
0.641009
false
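preprocess() in the feature extractor above is a plain affine rescale from pixel values in [0, 255] to [-1, 1]. A small NumPy illustration of the same formula, outside any TensorFlow graph:

# Pixel values in [0, 255] are mapped linearly to [-1, 1].
import numpy as np

resized_inputs = np.array([[0.0, 127.5, 255.0]], dtype=np.float32)
preprocessed = (2.0 / 255.0) * resized_inputs - 1.0
print(preprocessed)  # [[-1.  0.  1.]]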
jkyeung/XlsxWriter
xlsxwriter/test/comparison/test_fit_to_pages01.py
1
1354
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#

from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.maxDiff = None

        filename = 'fit_to_pages01.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
                             'xl/worksheets/_rels/sheet1.xml.rels']
        self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
                                'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with fit to print."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.fit_to_pages(1, 1)
        worksheet.set_paper(9)

        worksheet.write('A1', 'Foo')

        workbook.close()

        self.assertExcelEqual()
bsd-2-clause
7,276,973,820,359,425,000
28.434783
91
0.581241
false
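Outside the comparison-test harness, the worksheet calls exercised by test_create_file can be used directly. The sketch below repeats the same calls against a throwaway output file (the file name is arbitrary):

# Minimal standalone usage of the worksheet methods exercised by the test above.
from xlsxwriter.workbook import Workbook

workbook = Workbook('fit_to_pages_demo.xlsx')   # output name is arbitrary
worksheet = workbook.add_worksheet()

worksheet.fit_to_pages(1, 1)   # scale the printout to 1 page wide x 1 page tall
worksheet.set_paper(9)         # paper index 9 = A4
worksheet.write('A1', 'Foo')

workbook.close()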
est31/godot
tools/translations/extract.py
4
3162
#!/bin/python import fnmatch import os import shutil import subprocess import sys line_nb = False for arg in sys.argv[1:]: if (arg == "--with-line-nb"): print("Enabling line numbers in the context locations.") line_nb = True else: os.sys.exit("Non supported argument '" + arg + "'. Aborting.") if (not os.path.exists("tools")): os.sys.exit("ERROR: This script should be started from the root of the git repo.") matches = [] for root, dirnames, filenames in os.walk('.'): for filename in fnmatch.filter(filenames, '*.cpp'): if (filename.find("collada") != -1): continue matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.h'): if (filename.find("collada") != -1): continue matches.append(os.path.join(root, filename)) matches.sort() unique_str = [] unique_loc = {} main_po = """ # LANGUAGE translation of the Godot Engine editor # Copyright (C) 2016 Juan Linietsky, Ariel Manzur and the Godot community # This file is distributed under the same license as the Godot source code. # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: Godot Engine editor\\n" "Content-Type: text/plain; charset=UTF-8\\n" "Content-Transfer-Encoding: 8-bit\\n" """ print("Updating the tools.pot template...") for fname in matches: f = open(fname, "rb") l = f.readline() lc = 1 while (l): patterns = ['RTR(\"', 'TTR(\"'] idx = 0 pos = 0 while (pos >= 0): pos = l.find(patterns[idx], pos) if (pos == -1): if (idx < len(patterns) - 1): idx += 1 pos = 0 continue pos += 5 msg = "" while (pos < len(l) and (l[pos] != '"' or l[pos - 1] == '\\')): msg += l[pos] pos += 1 location = os.path.relpath(fname).replace('\\','/') if (line_nb): location += ":" + str(lc) if (not msg in unique_str): main_po += "\n#: " + location + "\n" main_po += 'msgid "' + msg + '"\n' main_po += 'msgstr ""\n' unique_str.append(msg) unique_loc[msg] = [location] elif (not location in unique_loc[msg]): # Add additional location to previous occurence too msg_pos = main_po.find('\nmsgid "' + msg + '"') if (msg_pos == -1): print("Someone apparently thought writing Python was as easy as GDScript. Ping Akien.") main_po = main_po[:msg_pos] + ' ' + location + main_po[msg_pos:] unique_loc[msg].append(location) l = f.readline() lc += 1 f.close() f = open("tools.pot", "wb") f.write(main_po) f.close() if (os.name == "posix"): os.system("msgmerge -w80 tools.pot tools.pot > tools.pot.wrap") shutil.move("tools.pot.wrap", "tools.pot") shutil.move("tools.pot", "tools/translations/tools.pot") # TODO: Make that in a portable way, if we care; if not, kudos to Unix users if (os.name == "posix"): added = subprocess.check_output("git diff tools/translations/tools.pot | grep \+msgid | wc -l", shell = True) removed = subprocess.check_output("git diff tools/translations/tools.pot | grep \\\-msgid | wc -l", shell = True) print("\n# Template changes compared to the staged status:") print("# Additions: %s msgids.\n# Deletions: %s msgids." % (int(added), int(removed)))
mit
-9,170,374,170,648,537,000
25.571429
114
0.629981
false
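The extraction script above walks every .cpp/.h file character by character looking for RTR("...") and TTR("...") literals. A regex such as the one below captures the same kind of literal from a single line; it is a simplified illustration (the sample line is made up) and ignores some escape corner cases the manual scan handles.

# Simplified sketch of the RTR/TTR string extraction performed above.
import re

TR_PATTERN = re.compile(r'[RT]TR\("((?:[^"\\]|\\.)*)"\)')

line = 'menu->add_item(TTR("Save Scene"), RTR("Quit to Project List"));'  # made-up sample
print(TR_PATTERN.findall(line))
# ['Save Scene', 'Quit to Project List']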
kerrpy/kerrpy
docs/source/conf.py
1
10716
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # kerrpy documentation build configuration file, created by # sphinx-quickstart on Wed Aug 10 14:00:40 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import sphinx_bootstrap_theme import os import sys sys.path.insert(0, os.path.abspath('../../kerrpy')) print(sys.path) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinxcontrib.bibtex', 'breathe'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'kerrpy' copyright = '2017, Pablo Galindo and Alejandro García' author = 'Pablo Galindo and Alejandro García' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # today = '' # # Else, today_fmt is used as the format for a strftime call. # # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'bootstrap' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {'bootswatch_theme': "paper"} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. # # html_title = 'kerrpy v0.1' # A shorter title for the navigation bar. Default is the same as html_title. # # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # # html_additional_pages = {} # If false, no module index is generated. # # html_domain_indices = True # If false, no index is generated. # # html_use_index = True # If true, the index is split into individual pages for each letter. # # html_split_index = False # If true, links to the reST sources are added to the pages. # # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' # # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. 
# 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'kerrpydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). # latex_documents = [(master_doc, 'kerrpy.tex', 'kerrpy Documentation', 'Alejandro García Montoro', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # # latex_use_parts = False # If true, show page references after internal links. # # latex_show_pagerefs = False # If true, show URL addresses after external links. # # latex_show_urls = False # Documents to append as an appendix to all manuals. # # latex_appendices = [] # It false, will not define \strong, \code, itleref, \crossref ... but only # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added # packages. # # latex_keep_old_macro_names = True # If false, no module index is generated. # # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, 'kerrpy', 'kerrpy Documentation', [author], 1)] # If true, show URL addresses after external links. # # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [(master_doc, 'kerrpy', 'kerrpy Documentation', author, 'kerrpy', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # # texinfo_appendices = [] # If false, no module index is generated. # # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # # texinfo_no_detailmenu = False # Number the figures numfig = True # # Taken from http://stackoverflow.com/a/5599712 # def skip(app, what, name, obj, skip, options): # if name == "__init__": # return False # return skip # # # def setup(app): # app.connect("autodoc-skip-member", skip) autodoc_default_flags = ['members', 'special-members'] # -- Doxygen configuration ------------------------------------------------ breathe_projects = {"kerrpy": "../build/doxygen/xml/", } breathe_default_project = "kerrpy" # Sphinx goes nuts with cuda macros, so we need to add them (see http://www.sphinx-doc.org/en/1.5.1/config.html#confval-cpp_id_attributes) cpp_id_attributes = ['__device__', '__global__']
gpl-3.0
4,364,511,312,134,557,000
29.434659
183
0.692803
false
crazcalm/AngelHack_python34
myenv/Lib/site-packages/pip/exceptions.py
398
1086
"""Exceptions used throughout package""" class PipError(Exception): """Base pip exception""" class InstallationError(PipError): """General exception during installation""" class UninstallationError(PipError): """General exception during uninstallation""" class DistributionNotFound(InstallationError): """Raised when a distribution cannot be found to satisfy a requirement""" class BestVersionAlreadyInstalled(PipError): """Raised when the most up-to-date version of a package is already installed. """ class BadCommand(PipError): """Raised when virtualenv or a command is not found""" class CommandError(PipError): """Raised when there is an error in command-line arguments""" class PreviousBuildDirError(PipError): """Raised when there's a previous conflicting build directory""" class HashMismatch(InstallationError): """Distribution file hash values don't match.""" class InvalidWheelFilename(InstallationError): """Invalid wheel filename.""" class UnsupportedWheel(InstallationError): """Unsupported wheel."""
mit
7,565,227,415,835,890,000
22.608696
77
0.739411
false
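Because every exception in the module above derives from PipError, callers can handle the whole family with one except clause while still special-casing specific failures first. A hypothetical caller-side sketch (the install() stub is made up):

# Hypothetical caller-side sketch; install() stands in for real install logic.
from pip.exceptions import DistributionNotFound, InstallationError, PipError

def install(requirement):
    # stand-in that raises one of the exceptions defined above
    raise DistributionNotFound("No matching distribution found for %s" % requirement)

try:
    install("nonexistent-package==1.0")
except DistributionNotFound as exc:
    print("not found:", exc)          # most specific
except InstallationError as exc:
    print("install failed:", exc)     # any other install-time failure
except PipError as exc:
    print("pip error:", exc)          # catch-all for the package's exceptions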
feroda/django
django/contrib/postgres/fields/hstore.py
45
2945
import json from django.contrib.postgres import forms, lookups from django.contrib.postgres.fields.array import ArrayField from django.core import exceptions from django.db.models import Field, TextField, Transform from django.utils import six from django.utils.translation import ugettext_lazy as _ __all__ = ['HStoreField'] class HStoreField(Field): empty_strings_allowed = False description = _('Map of strings to strings') default_error_messages = { 'not_a_string': _('The value of "%(key)s" is not a string.'), } def db_type(self, connection): return 'hstore' def get_transform(self, name): transform = super(HStoreField, self).get_transform(name) if transform: return transform return KeyTransformFactory(name) def validate(self, value, model_instance): super(HStoreField, self).validate(value, model_instance) for key, val in value.items(): if not isinstance(val, six.string_types): raise exceptions.ValidationError( self.error_messages['not_a_string'], code='not_a_string', params={'key': key}, ) def to_python(self, value): if isinstance(value, six.string_types): value = json.loads(value) return value def value_to_string(self, obj): value = self._get_val_from_obj(obj) return json.dumps(value) def formfield(self, **kwargs): defaults = { 'form_class': forms.HStoreField, } defaults.update(kwargs) return super(HStoreField, self).formfield(**defaults) HStoreField.register_lookup(lookups.DataContains) HStoreField.register_lookup(lookups.ContainedBy) @HStoreField.register_lookup class HasKeyLookup(lookups.PostgresSimpleLookup): lookup_name = 'has_key' operator = '?' @HStoreField.register_lookup class HasKeysLookup(lookups.PostgresSimpleLookup): lookup_name = 'has_keys' operator = '?&' class KeyTransform(Transform): output_field = TextField() def __init__(self, key_name, *args, **kwargs): super(KeyTransform, self).__init__(*args, **kwargs) self.key_name = key_name def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return "%s -> '%s'" % (lhs, self.key_name), params class KeyTransformFactory(object): def __init__(self, key_name): self.key_name = key_name def __call__(self, *args, **kwargs): return KeyTransform(self.key_name, *args, **kwargs) @HStoreField.register_lookup class KeysTransform(lookups.FunctionTransform): lookup_name = 'keys' function = 'akeys' output_field = ArrayField(TextField()) @HStoreField.register_lookup class ValuesTransform(lookups.FunctionTransform): lookup_name = 'values' function = 'avals' output_field = ArrayField(TextField())
bsd-3-clause
6,559,330,261,788,073,000
27.317308
69
0.647878
false
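A typical way to use the field above is inside an app's models.py, with the lookups the module registers (has_key, plus the per-key transform that compiles to "data" -> 'key'). The sketch below is hypothetical: the Dog model and its data are made up, it assumes a PostgreSQL database with the hstore extension and django.contrib.postgres installed, and the ORM calls are left commented out because they need a configured project.

# Hypothetical models.py sketch for the HStoreField defined above.
from django.contrib.postgres.fields import HStoreField
from django.db import models

class Dog(models.Model):
    name = models.CharField(max_length=200)
    data = HStoreField()   # values must be strings, per validate() above

# Dog.objects.create(name='Rufus', data={'breed': 'labrador', 'owner': 'Bob'})

# 'has_key' lookup registered above: WHERE "data" ? 'owner'
# Dog.objects.filter(data__has_key='owner')

# KeyTransform: data__breed compiles to "data" -> 'breed'
# Dog.objects.filter(data__breed='labrador')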
Juniper/tempest
tempest/tests/lib/common/test_http.py
1
2801
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common import http from tempest.tests import base class TestClosingHttp(base.TestCase): def setUp(self): super(TestClosingHttp, self).setUp() self.cert_none = "CERT_NONE" self.cert_location = "/etc/ssl/certs/ca-certificates.crt" def test_constructor_invalid_ca_certs_and_timeout(self): connection = http.ClosingHttp( disable_ssl_certificate_validation=False, ca_certs=None, timeout=None) for attr in ('cert_reqs', 'ca_certs', 'timeout'): self.assertNotIn(attr, connection.connection_pool_kw) def test_constructor_valid_ca_certs(self): cert_required = 'CERT_REQUIRED' connection = http.ClosingHttp( disable_ssl_certificate_validation=False, ca_certs=self.cert_location, timeout=None) self.assertEqual(cert_required, connection.connection_pool_kw['cert_reqs']) self.assertEqual(self.cert_location, connection.connection_pool_kw['ca_certs']) self.assertNotIn('timeout', connection.connection_pool_kw) def test_constructor_ssl_cert_validation_disabled(self): connection = http.ClosingHttp( disable_ssl_certificate_validation=True, ca_certs=None, timeout=30) self.assertEqual(self.cert_none, connection.connection_pool_kw['cert_reqs']) self.assertEqual(30, connection.connection_pool_kw['timeout']) self.assertNotIn('ca_certs', connection.connection_pool_kw) def test_constructor_ssl_cert_validation_disabled_and_ca_certs(self): connection = http.ClosingHttp( disable_ssl_certificate_validation=True, ca_certs=self.cert_location, timeout=None) self.assertNotIn('timeout', connection.connection_pool_kw) self.assertEqual(self.cert_none, connection.connection_pool_kw['cert_reqs']) self.assertNotIn('ca_certs', connection.connection_pool_kw)
apache-2.0
4,677,533,993,914,791,000
40.191176
78
0.625134
false
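The four tests above pin down which keyword arguments ClosingHttp forwards to its urllib3 connection pool. The function below is a pure-Python restatement of that behaviour inferred from the assertions; it is not tempest's implementation.

# Behaviour inferred from the test assertions, not copied from tempest.lib.common.http.
def build_pool_kwargs(disable_ssl_certificate_validation, ca_certs, timeout):
    kwargs = {}
    if disable_ssl_certificate_validation:
        kwargs['cert_reqs'] = 'CERT_NONE'      # validation explicitly disabled
    elif ca_certs:
        kwargs['cert_reqs'] = 'CERT_REQUIRED'  # validate against the CA bundle
        kwargs['ca_certs'] = ca_certs
    if timeout:
        kwargs['timeout'] = timeout
    return kwargs

print(build_pool_kwargs(False, None, None))
# {}
print(build_pool_kwargs(False, '/etc/ssl/certs/ca-certificates.crt', None))
# {'cert_reqs': 'CERT_REQUIRED', 'ca_certs': '/etc/ssl/certs/ca-certificates.crt'}
print(build_pool_kwargs(True, '/etc/ssl/certs/ca-certificates.crt', 30))
# {'cert_reqs': 'CERT_NONE', 'timeout': 30}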
pratikmallya/hue
desktop/core/ext-py/Django-1.6.10/django/views/decorators/cache.py
129
2286
from functools import wraps from django.utils.decorators import decorator_from_middleware_with_args, available_attrs from django.utils.cache import patch_cache_control, add_never_cache_headers from django.middleware.cache import CacheMiddleware def cache_page(*args, **kwargs): """ Decorator for views that tries getting the page from the cache and populates the cache if the page isn't in the cache yet. The cache is keyed by the URL and some data from the headers. Additionally there is the key prefix that is used to distinguish different cache areas in a multi-site setup. You could use the sites.get_current_site().domain, for example, as that is unique across a Django project. Additionally, all headers from the response's Vary header will be taken into account on caching -- just like the middleware does. """ # We also add some asserts to give better error messages in case people are # using other ways to call cache_page that no longer work. if len(args) != 1 or callable(args[0]): raise TypeError("cache_page has a single mandatory positional argument: timeout") cache_timeout = args[0] cache_alias = kwargs.pop('cache', None) key_prefix = kwargs.pop('key_prefix', None) if kwargs: raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix") return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix) def cache_control(**kwargs): def _cache_controller(viewfunc): @wraps(viewfunc, assigned=available_attrs(viewfunc)) def _cache_controlled(request, *args, **kw): response = viewfunc(request, *args, **kw) patch_cache_control(response, **kwargs) return response return _cache_controlled return _cache_controller def never_cache(view_func): """ Decorator that adds headers to a response so that it will never be cached. """ @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view_func(request, *args, **kwargs): response = view_func(request, *args, **kwargs) add_never_cache_headers(response) return response return _wrapped_view_func
apache-2.0
-3,524,898,767,110,126,000
40.563636
140
0.704724
false
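The decorators defined above are applied in a project's views module. A hypothetical views.py sketch (the view bodies are placeholders) using the documented signatures: one positional timeout for cache_page plus the optional cache/key_prefix keywords, header patching for cache_control, and never_cache:

# Hypothetical views.py sketch using the three decorators defined above.
from django.http import HttpResponse
from django.views.decorators.cache import cache_page, cache_control, never_cache

@cache_page(60 * 15, key_prefix='site1')   # single positional timeout; cache/key_prefix are keywords
def article_list(request):
    return HttpResponse('cached for 15 minutes')

@cache_control(max_age=3600, public=True)  # patches the Cache-Control header on the response
def article_detail(request):
    return HttpResponse('cacheable by shared caches for an hour')

@never_cache
def account_balance(request):
    return HttpResponse('never cached')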
sf-wind/caffe2
caffe2/python/pipeline_test.py
4
3358
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python.schema import ( Struct, FetchRecord, NewRecord, FeedRecord, InitEmptyRecord) from caffe2.python import core, workspace from caffe2.python.session import LocalSession from caffe2.python.dataset import Dataset from caffe2.python.pipeline import pipe from caffe2.python.queue_util import Queue from caffe2.python.task import TaskGroup from caffe2.python.test_util import TestCase from caffe2.python.net_builder import ops import numpy as np import math class TestPipeline(TestCase): def test_dequeue_many(self): init_net = core.Net('init') N = 17 NUM_DEQUEUE_RECORDS = 3 src_values = Struct( ('uid', np.array(range(N))), ('value', 0.1 * np.array(range(N)))) expected_dst = Struct( ('uid', 2 * np.array(range(N))), ('value', np.array(N * [0.0]))) with core.NameScope('init'): src_blobs = NewRecord(init_net, src_values) dst_blobs = InitEmptyRecord(init_net, src_values.clone_schema()) counter = init_net.Const(0) ONE = init_net.Const(1) def proc1(rec): with core.NameScope('proc1'): out = NewRecord(ops, rec) ops.Add([rec.uid(), rec.uid()], [out.uid()]) out.value.set(blob=rec.value(), unsafe=True) return out def proc2(rec): with core.NameScope('proc2'): out = NewRecord(ops, rec) out.uid.set(blob=rec.uid(), unsafe=True) ops.Sub([rec.value(), rec.value()], [out.value()]) ops.Add([counter, ONE], [counter]) return out src_ds = Dataset(src_blobs) dst_ds = Dataset(dst_blobs) with TaskGroup() as tg: out1 = pipe( src_ds.reader(), output=Queue( capacity=11, num_dequeue_records=NUM_DEQUEUE_RECORDS), processor=proc1) out2 = pipe(out1, processor=proc2) pipe(out2, dst_ds.writer()) ws = workspace.C.Workspace() FeedRecord(src_blobs, src_values, ws) session = LocalSession(ws) session.run(init_net) session.run(tg) output = FetchRecord(dst_blobs, ws=ws) num_dequeues = ws.blobs[str(counter)].fetch() self.assertEquals( num_dequeues, int(math.ceil(float(N) / NUM_DEQUEUE_RECORDS))) for a, b in zip(output.field_blobs(), expected_dst.field_blobs()): np.testing.assert_array_equal(a, b)
apache-2.0
-5,869,858,987,812,251,000
35.5
78
0.603931
false
tody411/ImageViewerFramework
ivf/cmds/save_depth.py
1
1284
# -*- coding: utf-8 -*-
## @package ivf.cmds.save_depth
#
#  ivf.cmds.save_depth utility package.
#  @author      tody
#  @date        2016/02/02

from PyQt4.QtGui import *
from PyQt4.QtCore import *
import os

from ivf.cmds.base_cmds import BaseCommand
from ivf.scene.gl3d.image_plane import ImagePlane
from ivf.io_util.obj_model import saveOBJ


class SaveDepthCommand(BaseCommand):
    def __init__(self, scene, file_path="", parent=None):
        super(SaveDepthCommand, self).__init__(scene, "Save Depth Mesh", parent)
        self._file_path = file_path
        # Compare with "==" rather than "is": identity checks against string
        # literals only work by CPython interning accident.
        self._show_ui = file_path == ""
        self._root_dir = os.path.expanduser('~')

    def _runImp(self):
        if self._show_ui:
            self._file_path = str(QFileDialog.getSaveFileName(None, "Save Depth Mesh",
                                                              self._root_dir, "Obj File (*.obj)"))

        if self._file_path == "":
            return

        RGBA_8U = self._scene.image()
        D_32F = self._scene.depth()

        if D_32F is None:
            return

        model = ImagePlane(RGBA_8U)
        model.setDepth(D_32F)

        vertices = model.mesh().positions()
        index_array = model.mesh().indexArray()
        vertex_colors = model.mesh().vertexColors()

        saveOBJ(self._file_path, vertices, index_array, vertex_colors)
mit
-5,482,225,773,421,896,000
26.934783
124
0.614486
false
David-Amaro/bank-payment
account_payment_blocking/__init__.py
16
1081
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Account Payment Blocking module for Odoo
#    Copyright (C) 2014-2015 ACSONE SA/NV (http://acsone.eu)
#    @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from . import model
from . import tests
agpl-3.0
5,374,418,594,436,135,000
44
78
0.613889
false
kisel/trex-core
scripts/astf/param_mss_err.py
2
1056
from trex_astf_lib.api import *

# IPV6 tunable example
#
# ipv6.src_msb
# ipv6.dst_msb
# ipv6.enable
#


class Prof1():
    def __init__(self):
        pass

    def get_profile(self, **kwargs):
        # ip generator
        ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
        ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
        ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
                           dist_client=ip_gen_c,
                           dist_server=ip_gen_s)

        c_glob_info = ASTFGlobalInfo()
        c_glob_info.tcp.mss = 1

        return ASTFProfile(default_ip_gen=ip_gen,
                           # Defaults affects all files
                           default_c_glob_info=c_glob_info,
                           cap_list=[
                               ASTFCapInfo(file="../avl/delay_10_http_browsing_0.pcap", cps=1)
                           ])


def register():
    return Prof1()
apache-2.0
7,100,825,590,404,855,000
28.333333
100
0.484848
false
vschs007/buck
third-party/py/pex/pex/interpreter.py
52
12996
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). """pex support for interacting with interpreters.""" from __future__ import absolute_import import os import re import subprocess import sys from collections import defaultdict from pkg_resources import Distribution, Requirement, find_distributions from .base import maybe_requirement from .compatibility import string from .tracer import TRACER try: from numbers import Integral except ImportError: Integral = (int, long) # Determine in the most platform-compatible way possible the identity of the interpreter # and its known packages. ID_PY = b""" import sys if hasattr(sys, 'pypy_version_info'): subversion = 'PyPy' elif sys.platform.startswith('java'): subversion = 'Jython' else: subversion = 'CPython' print("%s %s %s %s" % ( subversion, sys.version_info[0], sys.version_info[1], sys.version_info[2])) setuptools_path = None try: import pkg_resources except ImportError: sys.exit(0) requirements = {} for item in sys.path: for dist in pkg_resources.find_distributions(item): requirements[str(dist.as_requirement())] = dist.location for requirement_str, location in requirements.items(): rs = requirement_str.split('==', 2) if len(rs) == 2: print('%s %s %s' % (rs[0], rs[1], location)) """ class PythonIdentity(object): class Error(Exception): pass class InvalidError(Error): pass class UnknownRequirement(Error): pass # TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1 HASHBANGS = { 'CPython': 'python%(major)d.%(minor)d', 'Jython': 'jython', 'PyPy': 'pypy', } @classmethod def get_subversion(cls): if hasattr(sys, 'pypy_version_info'): subversion = 'PyPy' elif sys.platform.startswith('java'): subversion = 'Jython' else: subversion = 'CPython' return subversion @classmethod def get(cls): return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2]) @classmethod def from_id_string(cls, id_string): values = id_string.split() if len(values) != 4: raise cls.InvalidError("Invalid id string: %s" % id_string) return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3])) @classmethod def from_path(cls, dirname): interp, version = dirname.split('-') major, minor, patch = version.split('.') return cls(str(interp), int(major), int(minor), int(patch)) def __init__(self, interpreter, major, minor, patch): for var in (major, minor, patch): assert isinstance(var, Integral) self._interpreter = interpreter self._version = (major, minor, patch) @property def interpreter(self): return self._interpreter @property def version(self): return self._version @property def requirement(self): return self.distribution.as_requirement() @property def distribution(self): return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version))) @classmethod def parse_requirement(cls, requirement, default_interpreter='CPython'): if isinstance(requirement, Requirement): return requirement elif isinstance(requirement, string): try: requirement = Requirement.parse(requirement) except ValueError: try: requirement = Requirement.parse('%s%s' % (default_interpreter, requirement)) except ValueError: raise ValueError('Unknown requirement string: %s' % requirement) return requirement else: raise ValueError('Unknown requirement type: %r' % (requirement,)) def matches(self, requirement): """Given a Requirement, check if this interpreter matches.""" try: requirement = self.parse_requirement(requirement, self._interpreter) except ValueError 
as e: raise self.UnknownRequirement(str(e)) return self.distribution in requirement def hashbang(self): hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % { 'major': self._version[0], 'minor': self._version[1], 'patch': self._version[2], } return '#!/usr/bin/env %s' % hashbang_string @property def python(self): # return the python version in the format of the 'python' key for distributions # specifically, '2.6', '2.7', '3.2', etc. return '%d.%d' % (self.version[0:2]) def __str__(self): return '%s-%s.%s.%s' % (self._interpreter, self._version[0], self._version[1], self._version[2]) def __repr__(self): return 'PythonIdentity(%r, %s, %s, %s)' % ( self._interpreter, self._version[0], self._version[1], self._version[2]) def __eq__(self, other): return all([isinstance(other, PythonIdentity), self.interpreter == other.interpreter, self.version == other.version]) def __hash__(self): return hash((self._interpreter, self._version)) class PythonInterpreter(object): REGEXEN = ( re.compile(r'jython$'), # NB: OSX ships python binaries named Python so we allow for capital-P. re.compile(r'[Pp]ython$'), re.compile(r'python[23].[0-9]$'), re.compile(r'pypy$'), re.compile(r'pypy-1.[0-9]$'), ) CACHE = {} # memoize executable => PythonInterpreter try: # Versions of distribute prior to the setuptools merge would automatically replace # 'setuptools' requirements with 'distribute'. It provided the 'replacement' kwarg # to toggle this, but it was removed post-merge. COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False) except TypeError: COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0') class Error(Exception): pass class IdentificationError(Error): pass class InterpreterNotFound(Error): pass @classmethod def get(cls): return cls.from_binary(sys.executable) @classmethod def all(cls, paths=None): if paths is None: paths = os.getenv('PATH', '').split(':') return cls.filter(cls.find(paths)) @classmethod def _parse_extras(cls, output_lines): def iter_lines(): for line in output_lines: try: dist_name, dist_version, location = line.split() except ValueError: raise cls.IdentificationError('Could not identify requirement: %s' % line) yield ((dist_name, dist_version), location) return dict(iter_lines()) @classmethod def _from_binary_internal(cls, path_extras): def iter_extras(): for item in sys.path + list(path_extras): for dist in find_distributions(item): if dist.version: yield ((dist.key, dist.version), dist.location) return cls(sys.executable, PythonIdentity.get(), dict(iter_extras())) @classmethod def _from_binary_external(cls, binary, path_extras): environ = cls.sanitized_environment() environ['PYTHONPATH'] = ':'.join(path_extras) po = subprocess.Popen( [binary], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=environ) so, _ = po.communicate(ID_PY) output = so.decode('utf8').splitlines() if len(output) == 0: raise cls.IdentificationError('Could not establish identity of %s' % binary) identity, extras = output[0], output[1:] return cls( binary, PythonIdentity.from_id_string(identity), extras=cls._parse_extras(extras)) @classmethod def expand_path(cls, path): if os.path.isfile(path): return [path] elif os.path.isdir(path): return [os.path.join(path, fn) for fn in os.listdir(path)] return [] @classmethod def from_env(cls, hashbang): """Resolve a PythonInterpreter as /usr/bin/env would. :param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH. 
""" paths = os.getenv('PATH', '').split(':') for path in paths: for fn in cls.expand_path(path): basefile = os.path.basename(fn) if hashbang == basefile: try: return cls.from_binary(fn) except Exception as e: TRACER.log('Could not identify %s: %s' % (fn, e)) @classmethod def from_binary(cls, binary, path_extras=None): path_extras = path_extras or () if binary not in cls.CACHE: if binary == sys.executable: cls.CACHE[binary] = cls._from_binary_internal(path_extras) else: cls.CACHE[binary] = cls._from_binary_external(binary, path_extras) return cls.CACHE[binary] @classmethod def find(cls, paths): """ Given a list of files or directories, try to detect python interpreters amongst them. Returns a list of PythonInterpreter objects. """ pythons = [] for path in paths: for fn in cls.expand_path(path): basefile = os.path.basename(fn) if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN): try: pythons.append(cls.from_binary(fn)) except Exception as e: TRACER.log('Could not identify %s: %s' % (fn, e)) continue return pythons @classmethod def filter(cls, pythons): """ Given a map of python interpreters in the format provided by PythonInterpreter.find(), filter out duplicate versions and versions we would prefer not to use. Returns a map in the same format as find. """ good = [] MAJOR, MINOR, SUBMINOR = range(3) def version_filter(version): return (version[MAJOR] == 2 and version[MINOR] >= 6 or version[MAJOR] == 3 and version[MINOR] >= 2) all_versions = set(interpreter.identity.version for interpreter in pythons) good_versions = filter(version_filter, all_versions) for version in good_versions: # For each candidate, use the latest version we find on the filesystem. candidates = defaultdict(list) for interp in pythons: if interp.identity.version == version: candidates[interp.identity.interpreter].append(interp) for interp_class in candidates: candidates[interp_class].sort( key=lambda interp: os.path.getmtime(interp.binary), reverse=True) good.append(candidates[interp_class].pop(0)) return good @classmethod def sanitized_environment(cls): # N.B. This is merely a hack because sysconfig.py on the default OS X # installation of 2.6/2.7 breaks. env_copy = os.environ.copy() env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None) return env_copy @classmethod def replace(cls, requirement): self = cls.get() if self.identity.matches(requirement): return False for pi in cls.all(): if pi.identity.matches(requirement): break else: raise cls.InterpreterNotFound('Could not find interpreter matching filter!') os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment()) def __init__(self, binary, identity, extras=None): """Construct a PythonInterpreter. You should probably PythonInterpreter.from_binary instead. :param binary: The full path of the python binary. :param identity: The :class:`PythonIdentity` of the PythonInterpreter. :param extras: A mapping from (dist.key, dist.version) to dist.location of the extras associated with this interpreter. 
""" self._binary = os.path.realpath(binary) self._extras = extras or {} self._identity = identity def with_extra(self, key, version, location): extras = self._extras.copy() extras[(key, version)] = location return self.__class__(self._binary, self._identity, extras) @property def extras(self): return self._extras.copy() @property def binary(self): return self._binary @property def identity(self): return self._identity @property def python(self): return self._identity.python @property def version(self): return self._identity.version @property def version_string(self): return str(self._identity) def satisfies(self, capability): if not isinstance(capability, list): raise TypeError('Capability must be a list, got %s' % type(capability)) return not any(self.get_location(req) is None for req in capability) def get_location(self, req): req = maybe_requirement(req) for dist, location in self.extras.items(): dist_name, dist_version = dist if req.key == dist_name and dist_version in req: return location def __hash__(self): return hash((self._binary, self._identity)) def __eq__(self, other): if not isinstance(other, PythonInterpreter): return False return (self._binary, self._identity) == (other._binary, other._identity) def __lt__(self, other): if not isinstance(other, PythonInterpreter): return False return self.version < other.version def __repr__(self): return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
apache-2.0
-4,550,507,036,375,673,300
29.578824
99
0.657433
false
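The interpreter record above is easier to follow with a short usage sketch. This is only an illustration based on the methods visible in the file (PythonInterpreter.get(), all(), and PythonIdentity.matches()); the requirement string is an example, not something the original code prescribes, and the import assumes the module is reachable as pex.interpreter per the record's path.

# Illustrative only; not part of the original file.
from pex.interpreter import PythonInterpreter

# Identify the interpreter running this script.
current = PythonInterpreter.get()
print('%s -> %s' % (current.binary, current.version_string))

# Scan $PATH for python/jython/pypy binaries and keep the preferred versions.
for interp in PythonInterpreter.all():
    if interp.identity.matches('CPython>=2.6,<3'):
        print('usable: %s' % interp.version_string)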
AOKP/external_chromium_org
tools/python/google/gethash_timer.py
182
4366
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Issue a series of GetHash requests to the SafeBrowsing servers and measure the response times. Usage: $ ./gethash_timer.py --period=600 --samples=20 --output=resp.csv --period (or -p): The amount of time (in seconds) to wait between GetHash requests. Using a value of more than 300 (5 minutes) to include the effect of DNS. --samples (or -s): The number of requests to issue. If this parameter is not specified, the test will run indefinitely. --output (or -o): The path to a file where the output will be written in CSV format: sample_number,response_code,elapsed_time_ms """ import getopt import httplib import sys import time _GETHASH_HOST = 'safebrowsing.clients.google.com' _GETHASH_REQUEST = ( '/safebrowsing/gethash?client=googleclient&appver=1.0&pver=2.1') # Global logging file handle. g_file_handle = None def IssueGetHash(prefix): '''Issue one GetHash request to the safebrowsing servers. Args: prefix: A 4 byte value to look up on the server. Returns: The HTTP response code for the GetHash request. ''' body = '4:4\n' + prefix h = httplib.HTTPConnection(_GETHASH_HOST) h.putrequest('POST', _GETHASH_REQUEST) h.putheader('content-length', str(len(body))) h.endheaders() h.send(body) response_code = h.getresponse().status h.close() return response_code def TimedGetHash(prefix): '''Measure the amount of time it takes to receive a GetHash response. Args: prefix: A 4 byte value to look up on the the server. Returns: A tuple of HTTP resonse code and the response time (in milliseconds). ''' start = time.time() response_code = IssueGetHash(prefix) return response_code, (time.time() - start) * 1000 def RunTimedGetHash(period, samples=None): '''Runs an experiment to measure the amount of time it takes to receive multiple responses from the GetHash servers. Args: period: A floating point value that indicates (in seconds) the delay between requests. samples: An integer value indicating the number of requests to make. If 'None', the test continues indefinitely. Returns: None. ''' global g_file_handle prefix = '\x50\x61\x75\x6c' sample_count = 1 while True: response_code, elapsed_time = TimedGetHash(prefix) LogResponse(sample_count, response_code, elapsed_time) sample_count += 1 if samples is not None and sample_count == samples: break time.sleep(period) def LogResponse(sample_count, response_code, elapsed_time): '''Output the response for one GetHash query. Args: sample_count: The current sample number. response_code: The HTTP response code for the GetHash request. elapsed_time: The round-trip time (in milliseconds) for the GetHash request. Returns: None. ''' global g_file_handle output_list = (sample_count, response_code, elapsed_time) print 'Request: %d, status: %d, elapsed time: %f ms' % output_list if g_file_handle is not None: g_file_handle.write(('%d,%d,%f' % output_list) + '\n') g_file_handle.flush() def SetupOutputFile(file_name): '''Open a file for logging results. Args: file_name: A path to a file to store the output. Returns: None. 
''' global g_file_handle g_file_handle = open(file_name, 'w') def main(): period = 10 samples = None options, args = getopt.getopt(sys.argv[1:], 's:p:o:', ['samples=', 'period=', 'output=']) for option, value in options: if option == '-s' or option == '--samples': samples = int(value) elif option == '-p' or option == '--period': period = float(value) elif option == '-o' or option == '--output': file_name = value else: print 'Bad option: %s' % option return 1 try: print 'Starting Timed GetHash ----------' SetupOutputFile(file_name) RunTimedGetHash(period, samples) except KeyboardInterrupt: pass print 'Timed GetHash complete ----------' g_file_handle.close() if __name__ == '__main__': sys.exit(main())
bsd-3-clause
-2,610,213,376,318,876,000
28.302013
78
0.653
false
srinathv/vispy
vispy/visuals/isocurve.py
18
7809
# -*- coding: utf-8 -*- # Copyright (c) 2015, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. from __future__ import division import numpy as np from .line import LineVisual from ..color import ColorArray from ..color.colormap import _normalize, get_colormap from ..geometry.isocurve import isocurve from ..testing import has_matplotlib # checking for matplotlib _HAS_MPL = has_matplotlib() if _HAS_MPL: from matplotlib import _cntr as cntr class IsocurveVisual(LineVisual): """Displays an isocurve of a 2D scalar array. Parameters ---------- data : ndarray | None 2D scalar array. levels : ndarray, shape (Nlev,) | None The levels at which the isocurve is constructed from "*data*". color_lev : Color, colormap name, tuple, list or array The color to use when drawing the line. If a list is given, it must be of shape (Nlev), if an array is given, it must be of shape (Nlev, ...). and provide one color per level (rgba, colorname). clim : tuple (min, max) limits to apply when mapping level values through a colormap. **kwargs : dict Keyword arguments to pass to `LineVisual`. Notes ----- """ def __init__(self, data=None, levels=None, color_lev=None, clim=None, **kwargs): self._data = None self._levels = levels self._color_lev = color_lev self._clim = clim self._need_color_update = True self._need_level_update = True self._need_recompute = True self._X = None self._Y = None self._iso = None self._level_min = None self._data_is_uniform = False self._lc = None self._cl = None self._li = None self._connect = None self._verts = None kwargs['method'] = 'gl' kwargs['antialias'] = False LineVisual.__init__(self, **kwargs) if data is not None: self.set_data(data) @property def levels(self): """ The threshold at which the isocurve is constructed from the 2D data. """ return self._levels @levels.setter def levels(self, levels): self._levels = levels self._need_level_update = True self._need_recompute = True self.update() @property def color(self): return self._color_lev @color.setter def color(self, color): self._color_lev = color self._need_level_update = True self._need_color_update = True self.update() def set_data(self, data): """ Set the scalar array data Parameters ---------- data : ndarray A 2D array of scalar values. The isocurve is constructed to show all locations in the scalar field equal to ``self.levels``. """ self._data = data # if using matplotlib isoline algorithm we have to check for meshgrid # and we can setup the tracer object here if _HAS_MPL: if self._X is None or self._X.T.shape != data.shape: self._X, self._Y = np.meshgrid(np.arange(data.shape[0]), np.arange(data.shape[1])) self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float)) if self._clim is None: self._clim = (data.min(), data.max()) # sanity check, # should we raise an error here, since no isolines can be drawn? 
# for now, _prepare_draw returns False if no isoline can be drawn if self._data.min() != self._data.max(): self._data_is_uniform = False else: self._data_is_uniform = True self._need_recompute = True self.update() def _get_verts_and_connect(self, paths): """ retrieve vertices and connects from given paths-list """ verts = np.vstack(paths) gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1 connect = np.ones(gaps[-1], dtype=bool) connect[gaps[:-1]] = False return verts, connect def _compute_iso_line(self): """ compute LineVisual vertices, connects and color-index """ level_index = [] connects = [] verts = [] # calculate which level are within data range # this works for now and the existing examples, but should be tested # thoroughly also with the data-sanity check in set_data-function choice = np.nonzero((self.levels > self._data.min()) & (self._levels < self._data.max())) levels_to_calc = np.array(self.levels)[choice] # save minimum level index self._level_min = choice[0][0] for level in levels_to_calc: # if we use matplotlib isoline algorithm we need to add half a # pixel in both (x,y) dimensions because isolines are aligned to # pixel centers if _HAS_MPL: nlist = self._iso.trace(level, level, 0) paths = nlist[:len(nlist)//2] v, c = self._get_verts_and_connect(paths) v += np.array([0.5, 0.5]) else: paths = isocurve(self._data.astype(float).T, level, extend_to_edge=True, connected=True) v, c = self._get_verts_and_connect(paths) level_index.append(v.shape[0]) connects.append(np.hstack((c, [False]))) verts.append(v) self._li = np.hstack(level_index) self._connect = np.hstack(connects) self._verts = np.vstack(verts) def _compute_iso_color(self): """ compute LineVisual color from level index and corresponding color """ level_color = [] colors = self._lc for i, index in enumerate(self._li): level_color.append(np.zeros((index, 4)) + colors[i+self._level_min]) self._cl = np.vstack(level_color) def _levels_to_colors(self): # computes ColorArrays for given levels # try _color_lev as colormap, except as everything else try: f_color_levs = get_colormap(self._color_lev) except: colors = ColorArray(self._color_lev).rgba else: lev = _normalize(self._levels, self._clim[0], self._clim[1]) # map function expects (Nlev,1)! colors = f_color_levs.map(lev[:, np.newaxis]) # broadcast to (nlev, 4) array if len(colors) == 1: colors = colors * np.ones((len(self._levels), 1)) # detect color_lev/levels mismatch and raise error if (len(colors) != len(self._levels)): raise TypeError("Color/level mismatch. Color must be of shape " "(Nlev, ...) and provide one color per level") self._lc = colors def _prepare_draw(self, view): if (self._data is None or self._levels is None or self._color_lev is None or self._data_is_uniform): return False if self._need_level_update: self._levels_to_colors() self._need_level_update = False if self._need_recompute: self._compute_iso_line() self._compute_iso_color() LineVisual.set_data(self, pos=self._verts, connect=self._connect, color=self._cl) self._need_recompute = False if self._need_color_update: self._compute_iso_color() LineVisual.set_data(self, color=self._cl) self._need_color_update = False return LineVisual._prepare_draw(self, view)
bsd-3-clause
4,437,686,698,291,279,000
33.25
77
0.560891
false
jef-n/QGIS
python/plugins/processing/tests/SagaAlgorithmsTest.py
36
6002
# -*- coding: utf-8 -*- """ *************************************************************************** SagaAlgorithmsTests.py --------------------- Date : September 2017 Copyright : (C) 2017 by Alexander Bruy Email : alexander dot bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Alexander Bruy' __date__ = 'September 2017' __copyright__ = '(C) 2017, Alexander Bruy' import os import nose2 import shutil import tempfile from qgis.core import (QgsProcessingParameterNumber, QgsProcessingParameterDefinition, QgsVectorLayer, QgsApplication, QgsFeature, QgsGeometry, QgsPointXY, QgsProcessingContext, QgsProject, QgsProcessingFeedback, QgsProcessingFeatureSourceDefinition) from qgis.testing import start_app, unittest from processing.algs.saga.SagaParameters import Parameters, SagaImageOutputParam import AlgorithmsTestBase class TestSagaAlgorithms(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest): @classmethod def setUpClass(cls): start_app() from processing.core.Processing import Processing Processing.initialize() cls.cleanup_paths = [] cls.temp_dir = tempfile.mkdtemp() cls.cleanup_paths.append(cls.temp_dir) @classmethod def tearDownClass(cls): from processing.core.Processing import Processing Processing.deinitialize() for path in cls.cleanup_paths: shutil.rmtree(path) def test_definition_file(self): return 'saga_algorithm_tests.yaml' def test_is_parameter_line(self): # Test determining whether a line is a parameter line self.assertFalse(Parameters.is_parameter_line('')) self.assertFalse(Parameters.is_parameter_line('xxxxxxxxx')) self.assertTrue(Parameters.is_parameter_line('QgsProcessingParameterNumber|R_PERCTL_MIN|Percentiles Range for RED max|QgsProcessingParameterNumber.Integer|1|False|1|99')) self.assertTrue(Parameters.is_parameter_line('*QgsProcessingParameterNumber|R_PERCTL_MIN|Percentiles Range for RED max|QgsProcessingParameterNumber.Integer|1|False|1|99')) self.assertTrue(Parameters.is_parameter_line('SagaImageOutput|RGB|Output RGB')) def test_param_line(self): # Test creating a parameter from a description line param = Parameters.create_parameter_from_line('QgsProcessingParameterNumber|R_PERCTL_MIN|Percentiles Range for RED max|QgsProcessingParameterNumber.Integer|1|False|1|99') self.assertIsInstance(param, QgsProcessingParameterNumber) self.assertEqual(param.name(), 'R_PERCTL_MIN') self.assertEqual(param.description(), 'Percentiles Range for RED max') self.assertEqual(param.dataType(), QgsProcessingParameterNumber.Integer) self.assertFalse(param.flags() & QgsProcessingParameterDefinition.FlagOptional) self.assertEqual(param.minimum(), 1) self.assertEqual(param.maximum(), 99) # Test SagaImageOutputParam line param = Parameters.create_parameter_from_line('SagaImageOutput|RGB|Output RGB') self.assertIsInstance(param, SagaImageOutputParam) self.assertEqual(param.name(), 'RGB') self.assertEqual(param.description(), 'Output RGB') self.assertEqual(param.defaultFileExtension(), 'tif') self.assertEqual(param.supportedOutputRasterLayerExtensions(), ['tif']) def test_non_ascii_output(self): # create a memory layer and add to project and context layer = 
QgsVectorLayer("Point?crs=epsg:3857&field=fldtxt:string&field=fldint:integer", "testmem", "memory") self.assertTrue(layer.isValid()) pr = layer.dataProvider() f = QgsFeature() f.setAttributes(["test", 123]) f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200))) f2 = QgsFeature() f2.setAttributes(["test2", 457]) f2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(110, 200))) self.assertTrue(pr.addFeatures([f, f2])) self.assertEqual(layer.featureCount(), 2) QgsProject.instance().addMapLayer(layer) context = QgsProcessingContext() context.setProject(QgsProject.instance()) alg = QgsApplication.processingRegistry().createAlgorithmById('saga:fixeddistancebuffer') self.assertIsNotNone(alg) temp_file = os.path.join(self.temp_dir, 'non_ascii_ñññ.shp') parameters = {'SHAPES': 'testmem', 'DIST_FIELD_DEFAULT': 5, 'NZONES': 1, 'DARC': 5, 'DISSOLVE': False, 'POLY_INNER': False, 'BUFFER': temp_file} feedback = QgsProcessingFeedback() results, ok = alg.run(parameters, context, feedback) self.assertTrue(ok) self.assertTrue(os.path.exists(temp_file)) # make sure that layer has correct features res = QgsVectorLayer(temp_file, 'res') self.assertTrue(res.isValid()) self.assertEqual(res.featureCount(), 2) QgsProject.instance().removeMapLayer(layer) if __name__ == '__main__': nose2.main()
gpl-2.0
-5,906,617,634,566,161,000
42.158273
179
0.601934
false
cbare/Etudes
python/strings.py
1
1543
""" String algorithms """ def balanced_parens(s: str) -> bool: open = 0 for c in s: if c=='(': open += 1 if c==')': if open > 0: open -= 1 else: return False return open==0 assert balanced_parens('') assert balanced_parens('()') assert balanced_parens('((()))') assert balanced_parens('((()()()))') assert balanced_parens('((()()()))()(())(()())') assert not balanced_parens('(()') assert not balanced_parens('((())))') assert not balanced_parens('((()())') assert not balanced_parens('())(()') def longest_valid_parens(s: str) -> int: """ return the length of the longest run of valid nested parens. Given a string containing just the characters '(' and ')', find the length of the longest well-formed substring. """ seeds = [(i,i+1) for i in range(len(s)-1) if s[i:i+2]=='()'] grew = True while grew or merged: grew = 0 merged = 0 # grow for i in range(len(seeds)): a,b = seeds[i] if a>0 and b+1<len(s) and s[a-1]=='(' and s[b+1]==')': grew += 1 seeds[i] = (a-1, b+1) # merge new_seeds = [] s0 = seeds[0] for s1 in seeds[1:]: if s0[1]+1==s1[0]: merged += 1 s0 = (s0[0], s1[1]) else: new_seeds.append(s0) s0 = s1 new_seeds.append(s0) seeds = new_seeds return max(b-a+1 for a,b in seeds)
apache-2.0
6,885,814,736,535,310,000
23.492063
78
0.473104
false
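The longest_valid_parens routine in the record above grows and merges seed pairs until a fixed point. For comparison, here is a minimal sketch of the more common stack-of-indices formulation of the same problem; it is an editorial addition, not part of the original repository.

# Standard stack-based alternative (editorial sketch, not from the original repo).
def longest_valid_parens_stack(s: str) -> int:
    best = 0
    stack = [-1]                 # index just before the current valid run
    for i, c in enumerate(s):
        if c == '(':
            stack.append(i)
        else:
            stack.pop()
            if stack:
                best = max(best, i - stack[-1])
            else:
                stack.append(i)  # unmatched ')', restart the run after it
    return best


assert longest_valid_parens_stack('') == 0
assert longest_valid_parens_stack('(()') == 2
assert longest_valid_parens_stack('()(())') == 6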
lorenzo-desantis/mne-python
mne/preprocessing/eog.py
6
7587
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Denis Engemann <denis.engemann@gmail.com> # Eric Larson <larson.eric.d@gmail.com> # # License: BSD (3-clause) import numpy as np from .peak_finder import peak_finder from .. import pick_types, pick_channels from ..utils import logger, verbose from ..filter import band_pass_filter from ..epochs import Epochs @verbose def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, verbose=None): """Locate EOG artifacts Parameters ---------- raw : instance of Raw The raw data. event_id : int The index to assign to found events. l_freq : float Low cut-off frequency in Hz. h_freq : float High cut-off frequency in Hz. filter_length : str | int | None Number of taps to use for filtering. ch_name: str | None If not None, use specified channel(s) for EOG tstart : float Start detection after tstart seconds. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- eog_events : array Events. """ # Getting EOG Channel eog_inds = _get_eog_channel_index(ch_name, raw) logger.info('EOG channel index for this subject is: %s' % eog_inds) eog, _ = raw[eog_inds, :] eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq, h_freq=h_freq, sampling_rate=raw.info['sfreq'], first_samp=raw.first_samp, filter_length=filter_length, tstart=tstart) return eog_events def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, filter_length='10s', tstart=0.): """Helper function""" logger.info('Filtering the data to remove DC offset to help ' 'distinguish blinks from saccades') # filtering to remove dc offset so that we know which is blink and saccades fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist filteog = np.array([band_pass_filter(x, sampling_rate, 2, fmax, filter_length=filter_length) for x in eog]) temp = np.sqrt(np.sum(filteog ** 2, axis=1)) indexmax = np.argmax(temp) # easier to detect peaks with filtering. 
filteog = band_pass_filter(eog[indexmax], sampling_rate, l_freq, h_freq, filter_length=filter_length) # detecting eog blinks and generating event file logger.info('Now detecting blinks and generating corresponding events') temp = filteog - np.mean(filteog) n_samples_start = int(sampling_rate * tstart) if np.abs(np.max(temp)) > np.abs(np.min(temp)): eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1) else: eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1) eog_events += n_samples_start n_events = len(eog_events) logger.info("Number of EOG events detected : %d" % n_events) eog_events = np.array([eog_events + first_samp, np.zeros(n_events, int), event_id * np.ones(n_events, int)]).T return eog_events def _get_eog_channel_index(ch_name, inst): if isinstance(ch_name, str): # Check if multiple EOG Channels if ',' in ch_name: ch_name = ch_name.split(',') else: ch_name = [ch_name] eog_inds = pick_channels(inst.ch_names, include=ch_name) if len(eog_inds) == 0: raise ValueError('%s not in channel list' % ch_name) else: logger.info('Using channel %s as EOG channel%s' % ( " and ".join(ch_name), '' if len(eog_inds) < 2 else 's')) elif ch_name is None: eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False, eog=True, ecg=False, emg=False, ref_meg=False, exclude='bads') if len(eog_inds) == 0: logger.info('No EOG channels found') logger.info('Trying with EEG 061 and EEG 062') eog_inds = pick_channels(inst.ch_names, include=['EEG 061', 'EEG 062']) if len(eog_inds) != 2: raise RuntimeError('EEG 61 or EEG 62 channel not found !!') else: raise ValueError('Could not find EOG channel.') return eog_inds @verbose def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None, baseline=None, preload=True, verbose=None): """Conveniently generate epochs around EOG artifact events Parameters ---------- raw : instance of Raw The raw data ch_name : str The name of the channel to use for EOG peak detection. The argument is mandatory if the dataset contains no EOG channels. event_id : int The index to assign to found events picks : array-like of int | None (default) Indices of channels to include (if None, all channels are used). tmin : float Start time before event. tmax : float End time after event. l_freq : float Low pass frequency. h_freq : float High pass frequency. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # uV (EEG channels) eog=250e-6 # uV (EOG channels) ) flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values are floats that set the minimum acceptable peak-to-peak amplitude. If flat is None then no rejection is done. baseline : tuple or list of length 2, or None The time interval to apply rescaling / baseline correction. If None do not apply it. If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. If baseline is equal ot (None, None) all the time interval is used. If None, no correction is applied. preload : bool Preload epochs or not. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). 
Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. """ events = find_eog_events(raw, ch_name=ch_name, event_id=event_id, l_freq=l_freq, h_freq=h_freq) # create epochs around EOG events eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload) return eog_epochs
bsd-3-clause
-1,182,136,550,195,087,600
35.475962
79
0.568604
false
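A short, hedged sketch of how the two public helpers in the eog.py record above are typically called; the file name is a placeholder, and the imports assume both functions are re-exported from mne.preprocessing as in the released package.

# Hypothetical usage; 'sample_raw.fif' is a placeholder file name.
import mne
from mne.preprocessing import find_eog_events, create_eog_epochs

raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)

# Locate blink events on the EOG channel(s), then epoch the data around them.
eog_events = find_eog_events(raw, l_freq=1, h_freq=10)
eog_epochs = create_eog_epochs(raw, tmin=-0.5, tmax=0.5, baseline=(None, 0))
print('detected %d EOG events' % len(eog_events))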
ttrifonov/EventBrain
src/eventbrain/bin/launcher.py
1
9958
#!/usr/bin/env python # file: launcher.py import os import sys import logging from optparse import OptionParser, OptionGroup from eventbrain.util.daemon import Daemon FORMAT = '%(asctime)-15s:%(name)s:%(process)d:%(levelname)s === %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stdout) usage = "Usage: %prog [options] start|stop|restart" def set_daemonize(option, opt_str, value, parser): parser.values.daemonize = True parser = OptionParser(usage=usage) parser.add_option("-t", "--type", dest="type", help="type of object to process('actor', 'a' " "or 'decision', 'd')") parser.add_option("-i", "--id", dest="Id", help="Id of the object to process") parser.add_option("-p", "--pid-dir", dest="pid_dir", default="/var/run/eventbrain/", help="Directory to store pid files for daemonized objects. " "Default path is %default") parser.add_option("-l", "--log-file", dest="logfile", default='/dev/null', help="File to write logs. Default is %default") parser.add_option("-d", "--daemonize", dest="daemonize", action="callback", callback=set_daemonize, default=False, help="Start in daemon mode") parser.add_option("-o", "--options", dest="opts", default=None, help="Additional options to send to the class constructor") parser.add_option("-c", "--config", dest="config", default=None, help="Config file with initial settings. " "If a config file is provided, " "other parameters are ignored.") server_opts = OptionGroup(parser, "RabbitMQ options") server_opts.add_option("-s", "--server", dest="host", default='localhost', help="RabbitMQ server. Default is %default") server_opts.add_option("-u", "--user", dest="user", help="RabbitMQ credentials: username") server_opts.add_option("-v", "--vhost", dest="vhost", help="RabbitMQ credentials: virtual host") server_opts.add_option("-w", "--password", dest="password", help="RabbitMQ credentials: password") parser.add_option_group(server_opts) (options, args) = parser.parse_args() commands = ('start', 'stop', 'restart') types = ('actor', 'a', 'decision', 'd') command = args[0] class DaemonRunner(Daemon): def run(self): print "Run" if hasattr(options, "kwargs"): kwargs = options.kwargs else: kwargs = {} if options.opts: for opt in options.opts.split(";"): (k, v) = opt.split("=") kwargs[k] = v print "kwargs", kwargs if options.user and options.password: kwargs['user'] = options.user kwargs['vhost'] = options.vhost kwargs['password'] = options.password if options.host: kwargs['host'] = options.host inst = self.klass(**kwargs) try: inst.connect() except KeyboardInterrupt: inst.disconnect(reason="keyboard interruption") def run_actor(obj_id): print "Starting actor %s" % obj_id klass = _import('actors', obj_id) print "Found actor with exchange %s" % klass.id if options.daemonize: daemon = DaemonRunner(pid_file('a', klass), stdout=options.logfile, stderr=options.logfile) daemon.klass = klass daemon.start() else: kwargs = {} if options.user and options.password: kwargs['user'] = options.user kwargs['vhost'] = options.vhost kwargs['password'] = options.password if options.host: kwargs['host'] = options.host if options.opts: for opt in options.opts.split(";"): (k, v) = opt.split("=") kwargs[k] = v print "kwargs", kwargs inst = klass(**kwargs) try: inst.connect() except KeyboardInterrupt: inst.disconnect(reason="keyboard interruption") print "Done" def stop_actor(obj_id): print "Stopping actor %s" % obj_id klass = _import('actors', obj_id) daemon = DaemonRunner(pid_file('a', klass)) daemon.stop() print "Done" def run_decision(obj_id): print "Starting 
decision %s" % obj_id klass = _import('decisions', obj_id) print "Found decision with exchange %s" % klass.id if options.daemonize: daemon = DaemonRunner(pid_file('d', klass), stdout=options.logfile, stderr=options.logfile) daemon.klass = klass daemon.start() else: kwargs = {} if options.user and options.password: kwargs['user'] = options.user kwargs['vhost'] = options.vhost kwargs['password'] = options.password if options.host: kwargs['host'] = options.host if options.opts: for opt in options.opts.split(";"): (k, v) = opt.split("=") kwargs[k] = v print "kwargs", kwargs inst = klass(**kwargs) try: inst.connect() except KeyboardInterrupt: inst.disconnect(reason="keyboard interruption") print "Done" def stop_decision(obj_id): print "Stopping decision %s" % obj_id klass = _import('decisions', obj_id) daemon = DaemonRunner(pid_file('d', klass)) daemon.stop() print "Done" def pid_file(prefix, klass): pidfile = os.path.join(options.pid_dir, "".join([prefix, '-', klass.id, ".pid"])) pidfile = os.path.abspath(pidfile) print "PID file: %s" % pidfile return pidfile def _import(scope, obj_id): try: (_mod, _klass) = obj_id.split('.') module = __import__('eventbrain.contrib.%s.%s' % (scope, _mod), fromlist=[_klass]) klass = getattr(module, _klass) except Exception, ex: print "Cannot import class %s\n%r" % (obj_id, ex) exit(1) return klass def from_config(): import ConfigParser config = ConfigParser.RawConfigParser() config.readfp(open(options.config)) sections = config.sections() if config.has_section("Main"): if config.has_option("Main", "host"): parser.values.host = config.get("Main", "host") if config.has_option("Main", "user"): parser.values.user = config.get("Main", "user") if config.has_option("Main", "password"): parser.values.password = config.get("Main", "password") if config.has_option("Main", "vhost"): parser.values.vhost = config.get("Main", "vhost") for section in sections: print ">>> Found section ", section if section == "Main": continue else: # Fork to avoid exiting from main thread after daemonizing fpid = os.fork() if fpid != 0: process_section(config, section) exit(0) else: continue return True def process_section(config, section): if config.has_option(section, "type"): _type = config.get(section, "type") if _type not in types: print "Unrecognized type: %s" % _type return False kwargs = {} for item in config.items(section): if item[0] == "daemonize": parser.values.daemonize = config.getboolean(section, "daemonize") elif item[0] == "pid_dir": parser.values.pid_dir = item[1] elif item[0] == "log_file": parser.values.logfile = item[1] else: kwargs[item[0]] = item[1] print "kwargs", kwargs parser.values.kwargs = kwargs if _type in ('actor', 'a'): if command == "start": run_actor(section) elif command == "stop": stop_actor(section) elif command == "restart": stop_actor(section) run_actor(section) elif _type in ('decision', 'd'): if command == "start": run_decision(section) elif command == "stop": stop_decision(section) elif command == "restart": stop_decision(section) run_decision(section) if __name__ == "__main__": if options.config: if from_config(): exit(0) else: exit(1) if not options.type: print "Type not specified" exit(1) if options.type not in types: print "Unrecognized type: %s" % options.type exit(1) if not options.Id: print "Id not specified" exit(1) if not args or args[0] not in commands: print "Unknown command %s" % ",".join(args) exit(1) if options.type in ('actor', 'a'): # Actor if command == "start": run_actor(options.Id) elif command == "stop": stop_actor(options.Id) elif 
command == "restart": stop_actor(options.Id) run_actor(options.Id) if options.type in ('decision', 'd'): # Decision if command == "start": run_decision(options.Id) elif command == "stop": stop_decision(options.Id) elif command == "restart": stop_decision(options.Id) run_decision(options.Id)
apache-2.0
-816,246,304,667,039,600
31.864686
78
0.527214
false
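The launcher record above can read all of its settings from an INI file passed via --config (see from_config() and process_section()). Below is a hypothetical config illustrating the keys the parser looks for; the section name, credentials, and the extra "interval" kwarg are invented for the example, and the file would be used as: launcher.py -c eventbrain.ini start

; hypothetical eventbrain.ini
[Main]
host = localhost
user = guest
password = guest
vhost = /

[monitor.CPULoad]
type = actor
daemonize = true
pid_dir = /var/run/eventbrain/
log_file = /var/log/eventbrain/cpu_load.log
interval = 5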
dhalleine/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/exponential_test.py
3
4546
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for initializers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from scipy import stats import tensorflow as tf class ExponentialTest(tf.test.TestCase): def testExponentialLogPDF(self): with tf.Session(): batch_size = 6 lam = tf.constant([2.0] * batch_size) lam_v = 2.0 x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32) exponential = tf.contrib.distributions.Exponential(lam=lam) expected_log_pdf = stats.expon.logpdf(x, scale=1 / lam_v) log_pdf = exponential.log_pdf(x) self.assertEqual(log_pdf.get_shape(), (6,)) self.assertAllClose(log_pdf.eval(), expected_log_pdf) pdf = exponential.pdf(x) self.assertEqual(pdf.get_shape(), (6,)) self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf)) def testExponentialCDF(self): with tf.Session(): batch_size = 6 lam = tf.constant([2.0] * batch_size) lam_v = 2.0 x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32) exponential = tf.contrib.distributions.Exponential(lam=lam) expected_cdf = stats.expon.cdf(x, scale=1 / lam_v) cdf = exponential.cdf(x) self.assertEqual(cdf.get_shape(), (6,)) self.assertAllClose(cdf.eval(), expected_cdf) def testExponentialMean(self): with tf.Session(): lam_v = np.array([1.0, 4.0, 2.5]) expected_mean = stats.expon.mean(scale=1 / lam_v) exponential = tf.contrib.distributions.Exponential(lam=lam_v) self.assertEqual(exponential.mean().get_shape(), (3,)) self.assertAllClose(exponential.mean().eval(), expected_mean) def testExponentialVariance(self): with tf.Session(): lam_v = np.array([1.0, 4.0, 2.5]) expected_variance = stats.expon.var(scale=1 / lam_v) exponential = tf.contrib.distributions.Exponential(lam=lam_v) self.assertEqual(exponential.variance().get_shape(), (3,)) self.assertAllClose(exponential.variance().eval(), expected_variance) def testExponentialEntropy(self): with tf.Session(): lam_v = np.array([1.0, 4.0, 2.5]) expected_entropy = stats.expon.entropy(scale=1 / lam_v) exponential = tf.contrib.distributions.Exponential(lam=lam_v) self.assertEqual(exponential.entropy().get_shape(), (3,)) self.assertAllClose(exponential.entropy().eval(), expected_entropy) def testExponentialSample(self): with self.test_session(): lam = tf.constant([3.0, 4.0]) lam_v = [3.0, 4.0] n = tf.constant(100000) exponential = tf.contrib.distributions.Exponential(lam=lam) samples = exponential.sample(n, seed=137) sample_values = samples.eval() self.assertEqual(sample_values.shape, (100000, 2)) self.assertFalse(np.any(sample_values < 0.0)) for i in range(2): self.assertLess( stats.kstest( sample_values[:, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0], 0.01) def testExponentialSampleMultiDimensional(self): with self.test_session(): batch_size = 2 lam_v = [3.0, 22.0] lam = tf.constant([lam_v] * batch_size) exponential = tf.contrib.distributions.Exponential(lam=lam) n = 
100000 samples = exponential.sample(n, seed=138) self.assertEqual(samples.get_shape(), (n, batch_size, 2)) sample_values = samples.eval() self.assertFalse(np.any(sample_values < 0.0)) for i in range(2): self.assertLess( stats.kstest( sample_values[:, 0, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0], 0.01) self.assertLess( stats.kstest( sample_values[:, 1, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0], 0.01) if __name__ == '__main__': tf.test.main()
apache-2.0
-7,137,812,298,313,653,000
34.795276
80
0.630444
false
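The exponential test above checks TensorFlow's results against scipy, which parameterizes the distribution by scale = 1/lam. The following small standalone check of the identities the test relies on (mean = 1/lam, variance = 1/lam**2, entropy = 1 - log(lam)) is an editorial addition that mirrors the values used in the test.

# Standalone sanity check, independent of TensorFlow.
import numpy as np
from scipy import stats

lam = np.array([1.0, 4.0, 2.5])
np.testing.assert_allclose(stats.expon.mean(scale=1 / lam), 1 / lam)
np.testing.assert_allclose(stats.expon.var(scale=1 / lam), 1 / lam ** 2)
np.testing.assert_allclose(stats.expon.entropy(scale=1 / lam), 1 - np.log(lam))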
BraveSirRobbo/steam-scraper
test.py
1
4225
import unittest from bs4 import BeautifulSoup import re def justOne(ls): assert(len(ls) == 1) return ls[0] def scrapePage(html_doc): soup = BeautifulSoup(html_doc, 'html.parser') ratings = [s.get_text() for s in soup.find_all("span",attrs={ "class": re.compile(r"game_review_summary .*")})] assert(len(ratings) != 1) reviewCounts = [x.attrs["content"] for x in soup.find_all("meta",attrs={"itemprop":"reviewCount"})] aList = [t.get_text() for t in soup.find_all("div",class_="game_area_details_specs")] def tagChecker(*things): for thing in things: if thing in aList: return True return False return { "title": justOne(soup.find_all("div",class_="apphub_AppName")).get_text() , "overall_rating" : ratings[1] if len(ratings) > 0 else None , "num_reviews" : reviewCounts[0] if len(reviewCounts) > 0 else None , "release_year" : justOne(soup.find_all("span",class_="date")).get_text()[-4:] , "user_tags" : [x.get_text().strip() for x in justOne(soup.find_all("div",class_="glance_tags popular_tags")).find_all("a")] , "multiplayer" : tagChecker("Multi-player") , "co-op" : tagChecker("Co-op") , "local_multiplayer" : tagChecker("Shared/Split Screen") , "steam_cloud" : tagChecker("Steam Cloud") , "controller_supported" : tagChecker("Full controller support", "Partial Controller Support") } class ScraperTests(unittest.TestCase): def assertKeyValue(self, d, key, value): self.assertIn(key, d) self.assertEqual(d[key], value) def test_example_page(self): with open("examples/Age of Wonders III on Steam.html", "r") as f: page_text = "".join(f.readlines()) res = scrapePage(page_text) self.assertKeyValue(res, "title", "Age of Wonders III") self.assertKeyValue(res, "overall_rating", "Very Positive") self.assertKeyValue(res, "num_reviews", "3504") self.assertKeyValue(res, "release_year","2014") #from class "release_date" self.assertKeyValue(res, "user_tags", ['Strategy', 'Turn-Based Strategy', 'Fantasy', 'RPG', '4X', 'Turn-Based', 'Multiplayer', 'Singleplayer', 'Tactical', 'Co-op', 'Adventure', 'Hex Grid', 'Great Soundtrack', 'Grand Strategy', 'Classic', 'Atmospheric', 'Moddable', 'Action', 'Female Protagonist', 'Indie']) #from class "glance_tags popular_tags" self.assertKeyValue(res, "multiplayer", True) #"Multi-Player" from class "game_area_details_specs" self.assertKeyValue(res, "co-op", True) #"Co-op" from class "game_area_details_specs" self.assertKeyValue(res, "local_multiplayer", True) #"Shared/Split Screen" from class "game_area_details_specs" self.assertKeyValue(res, "steam_cloud", True) #Cross-Platform Multiplayer from class "game_area_details_specs" self.assertKeyValue(res, "controller_supported", False) #Full OR Partial Controller Support from class "game_area_details_specs" def test_no_recent_reviews(self): with open("examples/No Recent Reviews.html") as f: page_text = "".join(f.readlines()) res = scrapePage(page_text) self.assertKeyValue(res,"overall_rating", "Very Positive") def test_no_reviews(self): with open("examples/No Reviews.html") as f: page_text = "".join(f.readlines()) res = scrapePage(page_text) self.assertKeyValue(res,"overall_rating", None) self.assertKeyValue(res,"num_reviews", None) # TODO: Real implementation def filterGames(ls,q): return [ls[0]] # TODO: This is just a silly example class FilterTests(unittest.TestCase): def test_basic_filter(self): examples = [ {"title" : "blah", "overall_rating" : "Very Positive"} , {"title" : "bad", "overall_rating" : "Very Negative"} ] # TODO: Will - Do you have ideas about the form of the query input to the filter q = "overall_rating > Ok" 
self.assertEqual(filterGames(examples, q), [ {"title" : "blah", "overall_rating" : "Very Positive"} ]) if __name__ =="__main__": unittest.main()
apache-2.0
8,001,148,401,895,838,000
35.73913
351
0.624615
false
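filterGames in the record above is still a stub that returns [ls[0]]. One possible shape for it, inferred from the test's query string "overall_rating > Ok", is sketched below; the query grammar and the ordering of Steam review summaries are assumptions, not taken from the original project.

# Hypothetical implementation sketch; RATING_ORDER and the query grammar are assumptions.
RATING_ORDER = [
    "Overwhelmingly Negative", "Very Negative", "Negative", "Mostly Negative",
    "Mixed", "Ok", "Mostly Positive", "Positive", "Very Positive",
    "Overwhelmingly Positive",
]


def filterGames(ls, q):
    field, op, value = q.split(None, 2)
    assert field == "overall_rating" and op == ">"
    threshold = RATING_ORDER.index(value)
    return [g for g in ls
            if g.get(field) in RATING_ORDER
            and RATING_ORDER.index(g[field]) > threshold]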
felipeAraujo/Bet-Calculator
test_bet.py
1
13680
import unittest
from bet_calculator.bet_calculator import Bet_Calculator
from decimal import *


class Bet_Test_Case(unittest.TestCase):
    """Test the Bet_Calculator class"""

    def setUp(self):
        self.bet_calculator = Bet_Calculator()

    def test_if_is_calculating_that_odds_will_profit(self):
        """ Just remember that for the pair of bets to be profitable, the
        quotations in the houses must satisfy the equation:
        d1 => decimal 1 in one bet house, d2 => decimal 2 in the other bet house

            1
        ________    <  (d2-1)
        (d1 - 1)

        """
        # Testing edge cases
        # Just remember that 1.33333...4 is not the repeating decimal
        # 1.33333... and is bigger than it, so it's an edge case
        self.bet_calculator.decimal_team1_house1 = '1.333333333333333334'
        self.bet_calculator.decimal_team2_house2 = '4.0'
        # put the value right at the edge to be more confident that the
        # calculation is correct
        self.bet_calculator.decimal_team1_house2 = '3.0000000001'
        self.bet_calculator.decimal_team2_house1 = '1.5'

        self.assertTrue(self.bet_calculator.can_bet_team1_house1)
        self.assertTrue(self.bet_calculator.can_bet_team1_house2)

        # Testing normal cases
        self.bet_calculator.decimal_team1_house1 = '1.5'
        self.bet_calculator.decimal_team2_house2 = '4.0'
        self.bet_calculator.decimal_team1_house2 = '3.0'
        self.bet_calculator.decimal_team2_house1 = '1.8'

        self.assertTrue(self.bet_calculator.can_bet_team1_house1)
        self.assertTrue(self.bet_calculator.can_bet_team1_house2)

    def test_if_is_calculating_that_odds_will_not_profit(self):
        """ Just remember that for the pair of bets not to be profitable, the
        quotations in the houses must satisfy the equation:
        d1 => decimal 1 in one bet house, d2 => decimal 2 in the other bet house

            1
        ________    >=  (d2-1)
        (d1 - 1)

        """
        # Testing Edge Cases
        # Just remember that 1.333333333333333333 is not the repeating decimal
        # 1.33333... and is smaller than it, so it's an edge case
        self.bet_calculator.decimal_team1_house1 = '1.333333333333333333'
        self.bet_calculator.decimal_team2_house2 = '4.0'
        # put the value right at the edge to be more confident that the
        # calculation is correct
        self.bet_calculator.decimal_team1_house2 = '2.999999999999999999'
        self.bet_calculator.decimal_team2_house1 = '1.5'

        self.assertFalse(self.bet_calculator.can_bet_team1_house1)
        self.assertFalse(self.bet_calculator.can_bet_team1_house2)

        # Testing Normal Cases
        self.bet_calculator.decimal_team1_house1 = '1.3'
        self.bet_calculator.decimal_team2_house2 = '1.5'
        # put the value right at the edge to be more confident that the
        # calculation is correct
        self.bet_calculator.decimal_team1_house2 = '1.01'
        self.bet_calculator.decimal_team2_house1 = '3'

        self.assertFalse(self.bet_calculator.can_bet_team1_house1)
        self.assertFalse(self.bet_calculator.can_bet_team1_house2)

    def test_cash_made_when_you_profit(self):
        """ Some odds will be inserted, and they don't need to be profitable.
        The profit equation is simple:
        d1 => decimal in team1, d2 => decimal in team2,
        tm => total money spent in your gamble
        m1 => (money spent on team1) the part of tm you spent on team1
        (It's obvious you spent tm-m1 on team2)
        m2 => (money spent on team2) the part of tm you spent on team2 = tm-m1
        cashMade => ?
        you will win the following cash if team1 wins:
        cashMade = m1 x d1 - tm
        and if team2 wins the cashMade will be:
        cashMade = m2 x d2 - tm
        """
        self.bet_calculator.cash_to_bet = '200' # tm in the equation
        self.bet_calculator.decimal_team1_house1 = '1.2'
        self.bet_calculator.decimal_team2_house2 = '6.5'
        self.bet_calculator.decimal_team1_house2 = '1.4'
        self.bet_calculator.decimal_team2_house1 = '4.5'

        # 180.43 x 1.2 = 216.516 => 216.516 - 200 = 16.516
        self.assertEqual(self.bet_calculator.profit_if_team1_wins('180.43', bet_team1_house1 = True)
                         , Decimal('16.516'))
        # 150.75 x 1.4 = 211.05 => 211.05 - 200 = 11.05
        self.assertEqual(self.bet_calculator.profit_if_team1_wins('150.75', bet_team1_house1 = False)
                         , Decimal('11.05'))
        # 35 x 6.5 = 227.5 => 227.5 - 200 = 27.5
        # If you bet team1 in house 1 you bet team2 in house 2
        self.assertEqual(self.bet_calculator.profit_if_team2_wins('35', bet_team1_house1 = True)
                         , Decimal('27.5'))
        # 64.41 x 4.5 = 289.845 => 289.845 - 200 = 89.845
        # If you don't bet team1 in house 1 you bet team2 in house 1
        self.assertEqual(self.bet_calculator.profit_if_team2_wins('64.41', bet_team1_house1 = False)
                         , Decimal('89.845'))

    def test_cash_made_when_you_lose(self):
        """ Some odds will be inserted, and they don't need to be profitable.
        The profit equation is simple:
        d1 => decimal in team1, d2 => decimal in team2,
        tm => total money spent in your gamble
        m1 => (money spent on team1) the part of tm you spent on team1
        (It's obvious you spent tm-m1 on team2)
        m2 => (money spent on team2) the part of tm you spent on team2 = tm-m1
        cashMade => ?
        you will win the following cash if team1 wins:
        cashMade = m1 x d1 - tm
        and if team2 wins the cashMade will be:
        cashMade = m2 x d2 - tm

        So, if tm is bigger than m1 x d1 or m2 x d2, you will lose money
        when team1 or team2 wins, and the result will be negative.
        """
        self.bet_calculator.cash_to_bet = '200' # tm in the equation
        self.bet_calculator.decimal_team1_house1 = '1.5'
        self.bet_calculator.decimal_team2_house2 = '3.2'
        self.bet_calculator.decimal_team1_house2 = '1.7'
        self.bet_calculator.decimal_team2_house1 = '2.2'

        # 50.54 x 1.5 = 75.81 => 75.81 - 200 = -124.19
        self.assertEqual(self.bet_calculator.profit_if_team1_wins('50.54', bet_team1_house1 = True)
                         , Decimal('-124.19'))
        # 81.56 x 1.7 = 138.652 => 138.652 - 200 = -61.348
        self.assertEqual(self.bet_calculator.profit_if_team1_wins('81.56', bet_team1_house1 = False)
                         , Decimal('-61.348'))
        # 55.17 x 3.2 = 176.544 => 176.544 - 200 = -23.456 (it was a coincidence :) )
        # If you bet team1 in house 1 you bet team2 in house 2
        self.assertEqual(self.bet_calculator.profit_if_team2_wins('55.17', bet_team1_house1 = True)
                         , Decimal('-23.456'))
        # 85.42 x 2.2 = 187.924 => 187.924 - 200 = -12.076
        # If you don't bet team1 in house 1 you bet team2 in house 1
        self.assertEqual(self.bet_calculator.profit_if_team2_wins('85.42', bet_team1_house1 = False)
                         , Decimal('-12.076'))

    def test_cash_made_when_you_bet_more_than_total_cash(self):
        """ We don't have to look at the whole equation here.
        If you bet on one team more than the total cash you declared,
        an exception is supposed to be raised.
        """
        self.bet_calculator.cash_to_bet = '200' # tm in the equation
        # this information doesn't matter ---------------
        self.bet_calculator.decimal_team1_house1 = '1.5'
        self.bet_calculator.decimal_team2_house2 = '3.2'
        self.bet_calculator.decimal_team1_house2 = '1.7'
        self.bet_calculator.decimal_team2_house1 = '2.2'
        # ----------------------------------------------

        # Edge Case
        self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins, '200.000000000000001', bet_team1_house1 = True)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins, '200.000000000000001', bet_team1_house1 = False)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins, '200.000000000000001', bet_team1_house1 = True)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins, '200.000000000000001', bet_team1_house1 = False)

        # Normal Case
        self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins, '205', bet_team1_house1 = True)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team1_wins, '301', bet_team1_house1 = False)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins, '400', bet_team1_house1 = True)
        self.assertRaises(Exception, self.bet_calculator.profit_if_team2_wins, '405', bet_team1_house1 = False)

    def test_calc_of_least_guaranteed_profit_when_team1_wins(self):
        """ Test that if team 1 wins I will get the least possible profit
        (it will be 0, because here we only want to avoid any losses).
        The least possible stake ensures no losses if team 1 wins: if that
        happens you keep the same money you bet at the beginning.
        Ps: Always remember that if the result of the match is a draw you
        will lose everything.

        Keep in mind that you have to bet at least enough to guarantee the
        return of your total money.
        The following equation must be followed to get your money back
        without losses:
        d1 = decimal in team1
        tm = total money invested (in team1 and team2)
        m1 = how much I should spend on team1 without losing the tm I invested => ?

                                        tm
        if d1 x m1 = tm    =>    m1 = _________
                                        d1
        """
        self.bet_calculator.cash_to_bet = '200' # tm in the equation
        self.bet_calculator.decimal_team1_house1 = '1.5'
        self.bet_calculator.decimal_team2_house2 = '3.2'
        self.bet_calculator.decimal_team1_house2 = '1.7'
        self.bet_calculator.decimal_team2_house1 = '2.5'

        # Because the real result is 133.3333333..., we quantize to 5 decimal
        # places (it doesn't have to be exactly 5) so the comparison is exact
        self.assertEqual(
            self.bet_calculator.least_possible_value_team1(bet_team1_house1 = True).quantize(Decimal('0.00001')),
            Decimal('133.33333')
        )
        # Because the real result is 117.6470588235294..., we quantize to 5 decimal
        # places (it doesn't have to be exactly 5) so the comparison is exact
        self.assertEqual(
            self.bet_calculator.least_possible_value_team1(bet_team1_house1 = False).quantize(Decimal('0.00001')),
            Decimal('117.64706')
        )

    def test_calc_of_least_guaranteed_profit_when_team1_wins_when_the_decimals_dont_profit(self):
        """ In this situation, when the decimals don't allow a profit, an
        Exception will be raised, because there is no guaranteed pair of bets.
        Ps: If the result of the match is a draw you will lose everything.
        """
        self.bet_calculator.cash_to_bet = '200' # tm in the equation
        self.bet_calculator.decimal_team1_house1 = '1.5'
        self.bet_calculator.decimal_team2_house2 = '1.8'
        self.bet_calculator.decimal_team1_house2 = '1.01'
        self.bet_calculator.decimal_team2_house1 = '10.999999999'

        self.assertRaises(Exception, self.bet_calculator.least_possible_value_team1, bet_team1_house1 = True)
        self.assertRaises(Exception, self.bet_calculator.least_possible_value_team1, bet_team1_house1 = False)

    def test_calc_of_biggest_guaranteed_profit_if_team1_wins(self):
        """ Test that if team1 wins I will get the most profitable bet in this
        situation, without losing anything if team2 wins.
        Ps: Always remember that if the result of the match is a draw you
        will lose everything.

        With this situation in mind, the equation is...
        d2 = decimal in team2
        tm = total money invested (in team1 and team2)
        m2 = how much I should spend on team2 without losing the tm I invested => ?

              tm
        m2 = __________  (the same as in the previous test with d1 and m1)
              d2

        m1 = how much I should spend on team1, keeping in mind that if team1
        wins I will have the most profitable bet => ?

        Once I have ensured that there is no loss if team2 wins, we use the
        maximum money I could spend on team1, so this bet ensures the most
        profitable return if team1 wins.
        so:
                   tm
        m1 = tm - _________   OR   tm - m2
                   d2
        """
        self.bet_calculator.cash_to_bet = '200' # tm in the equation
        self.bet_calculator.decimal_team1_house1 = '1.3'
        self.bet_calculator.decimal_team2_house2 = '4.5'
        self.bet_calculator.decimal_team1_house2 = '1.9'
        self.bet_calculator.decimal_team2_house1 = '2.6'

        # Because the real result is 155.55555..., we quantize to 5 decimal
        # places (it doesn't have to be exactly 5) so the comparison is exact
        self.assertEqual(
            self.bet_calculator.biggest_possible_value_team1(bet_team1_house1 = True).quantize(Decimal('0.00001')),
            Decimal('155.55556')
        )
        # Because the real result is 123.0769230769..., we quantize to 5 decimal
        # places (it doesn't have to be exactly 5) so the comparison is exact
        self.assertEqual(
            self.bet_calculator.biggest_possible_value_team1(bet_team1_house1 = False).quantize(Decimal('0.00001')),
            Decimal('123.07692')
        )


if __name__ == '__main__':
    unittest.main()
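# --- Illustrative sketch (editor's addition, not part of the original test suite) ---
# A minimal restatement of the arithmetic the docstrings above describe, so the
# formulas can be read in one place. It assumes nothing about the Bet_Calculator
# API: the helper names below are hypothetical and exist only for illustration.
from decimal import Decimal


def _pair_is_profitable(d1, d2):
    # A pair of decimal odds taken at two different houses can guarantee a
    # profit only when 1 / (d1 - 1) < (d2 - 1).
    d1, d2 = Decimal(d1), Decimal(d2)
    return Decimal(1) / (d1 - 1) < (d2 - 1)


def _profit(stake, odds, total_money):
    # cashMade = m x d - tm, as in the docstrings above.
    return Decimal(stake) * Decimal(odds) - Decimal(total_money)


def _least_stake_without_loss(total_money, odds):
    # m = tm / d: the smallest stake on an outcome that returns the whole
    # bankroll if that outcome wins.
    return Decimal(total_money) / Decimal(odds)


# The same numbers used in test_cash_made_when_you_profit above.
assert _pair_is_profitable('1.2', '6.5')
assert _profit('180.43', '1.2', '200') == Decimal('16.516')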
mit
-3,532,013,752,297,662,000
40.707317
127
0.619591
false
zoyanhui/flask
tests/test_templating.py
148
13057
# -*- coding: utf-8 -*- """ tests.templating ~~~~~~~~~~~~~~~~ Template functionality :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import pytest import flask import logging from jinja2 import TemplateNotFound def test_context_processing(): app = flask.Flask(__name__) @app.context_processor def context_processor(): return {'injected_value': 42} @app.route('/') def index(): return flask.render_template('context_template.html', value=23) rv = app.test_client().get('/') assert rv.data == b'<p>23|42' def test_original_win(): app = flask.Flask(__name__) @app.route('/') def index(): return flask.render_template_string('{{ config }}', config=42) rv = app.test_client().get('/') assert rv.data == b'42' def test_request_less_rendering(): app = flask.Flask(__name__) app.config['WORLD_NAME'] = 'Special World' @app.context_processor def context_processor(): return dict(foo=42) with app.app_context(): rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} ' '{{ foo }}') assert rv == 'Hello Special World 42' def test_standard_context(): app = flask.Flask(__name__) app.secret_key = 'development key' @app.route('/') def index(): flask.g.foo = 23 flask.session['test'] = 'aha' return flask.render_template_string(''' {{ request.args.foo }} {{ g.foo }} {{ config.DEBUG }} {{ session.test }} ''') rv = app.test_client().get('/?foo=42') assert rv.data.split() == [b'42', b'23', b'False', b'aha'] def test_escaping(): text = '<p>Hello World!' app = flask.Flask(__name__) @app.route('/') def index(): return flask.render_template('escaping_template.html', text=text, html=flask.Markup(text)) lines = app.test_client().get('/').data.splitlines() assert lines == [ b'&lt;p&gt;Hello World!', b'<p>Hello World!', b'<p>Hello World!', b'<p>Hello World!', b'&lt;p&gt;Hello World!', b'<p>Hello World!' ] def test_no_escaping(): text = '<p>Hello World!' app = flask.Flask(__name__) @app.route('/') def index(): return flask.render_template('non_escaping_template.txt', text=text, html=flask.Markup(text)) lines = app.test_client().get('/').data.splitlines() assert lines == [ b'<p>Hello World!', b'<p>Hello World!', b'<p>Hello World!', b'<p>Hello World!', b'&lt;p&gt;Hello World!', b'<p>Hello World!', b'<p>Hello World!', b'<p>Hello World!' ] def test_escaping_without_template_filename(): app = flask.Flask(__name__) with app.test_request_context(): assert flask.render_template_string( '{{ foo }}', foo='<test>') == '&lt;test&gt;' assert flask.render_template('mail.txt', foo='<test>') == \ '<test> Mail' def test_macros(): app = flask.Flask(__name__) with app.test_request_context(): macro = flask.get_template_attribute('_macro.html', 'hello') assert macro('World') == 'Hello World!' 
def test_template_filter(): app = flask.Flask(__name__) @app.template_filter() def my_reverse(s): return s[::-1] assert 'my_reverse' in app.jinja_env.filters.keys() assert app.jinja_env.filters['my_reverse'] == my_reverse assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba' def test_add_template_filter(): app = flask.Flask(__name__) def my_reverse(s): return s[::-1] app.add_template_filter(my_reverse) assert 'my_reverse' in app.jinja_env.filters.keys() assert app.jinja_env.filters['my_reverse'] == my_reverse assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba' def test_template_filter_with_name(): app = flask.Flask(__name__) @app.template_filter('strrev') def my_reverse(s): return s[::-1] assert 'strrev' in app.jinja_env.filters.keys() assert app.jinja_env.filters['strrev'] == my_reverse assert app.jinja_env.filters['strrev']('abcd') == 'dcba' def test_add_template_filter_with_name(): app = flask.Flask(__name__) def my_reverse(s): return s[::-1] app.add_template_filter(my_reverse, 'strrev') assert 'strrev' in app.jinja_env.filters.keys() assert app.jinja_env.filters['strrev'] == my_reverse assert app.jinja_env.filters['strrev']('abcd') == 'dcba' def test_template_filter_with_template(): app = flask.Flask(__name__) @app.template_filter() def super_reverse(s): return s[::-1] @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_add_template_filter_with_template(): app = flask.Flask(__name__) def super_reverse(s): return s[::-1] app.add_template_filter(super_reverse) @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_template_filter_with_name_and_template(): app = flask.Flask(__name__) @app.template_filter('super_reverse') def my_reverse(s): return s[::-1] @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_add_template_filter_with_name_and_template(): app = flask.Flask(__name__) def my_reverse(s): return s[::-1] app.add_template_filter(my_reverse, 'super_reverse') @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_template_test(): app = flask.Flask(__name__) @app.template_test() def boolean(value): return isinstance(value, bool) assert 'boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['boolean'] == boolean assert app.jinja_env.tests['boolean'](False) def test_add_template_test(): app = flask.Flask(__name__) def boolean(value): return isinstance(value, bool) app.add_template_test(boolean) assert 'boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['boolean'] == boolean assert app.jinja_env.tests['boolean'](False) def test_template_test_with_name(): app = flask.Flask(__name__) @app.template_test('boolean') def is_boolean(value): return isinstance(value, bool) assert 'boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['boolean'] == is_boolean assert app.jinja_env.tests['boolean'](False) def test_add_template_test_with_name(): app = flask.Flask(__name__) def is_boolean(value): return isinstance(value, bool) app.add_template_test(is_boolean, 'boolean') assert 'boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['boolean'] == is_boolean assert app.jinja_env.tests['boolean'](False) def 
test_template_test_with_template(): app = flask.Flask(__name__) @app.template_test() def boolean(value): return isinstance(value, bool) @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data def test_add_template_test_with_template(): app = flask.Flask(__name__) def boolean(value): return isinstance(value, bool) app.add_template_test(boolean) @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data def test_template_test_with_name_and_template(): app = flask.Flask(__name__) @app.template_test('boolean') def is_boolean(value): return isinstance(value, bool) @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data def test_add_template_test_with_name_and_template(): app = flask.Flask(__name__) def is_boolean(value): return isinstance(value, bool) app.add_template_test(is_boolean, 'boolean') @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data def test_add_template_global(): app = flask.Flask(__name__) @app.template_global() def get_stuff(): return 42 assert 'get_stuff' in app.jinja_env.globals.keys() assert app.jinja_env.globals['get_stuff'] == get_stuff assert app.jinja_env.globals['get_stuff'](), 42 with app.app_context(): rv = flask.render_template_string('{{ get_stuff() }}') assert rv == '42' def test_custom_template_loader(): class MyFlask(flask.Flask): def create_global_jinja_loader(self): from jinja2 import DictLoader return DictLoader({'index.html': 'Hello Custom World!'}) app = MyFlask(__name__) @app.route('/') def index(): return flask.render_template('index.html') c = app.test_client() rv = c.get('/') assert rv.data == b'Hello Custom World!' 
def test_iterable_loader(): app = flask.Flask(__name__) @app.context_processor def context_processor(): return {'whiskey': 'Jameson'} @app.route('/') def index(): return flask.render_template( ['no_template.xml', # should skip this one 'simple_template.html', # should render this 'context_template.html'], value=23) rv = app.test_client().get('/') assert rv.data == b'<h1>Jameson</h1>' def test_templates_auto_reload(): # debug is False, config option is None app = flask.Flask(__name__) assert app.debug is False assert app.config['TEMPLATES_AUTO_RELOAD'] is None assert app.jinja_env.auto_reload is False # debug is False, config option is False app = flask.Flask(__name__) app.config['TEMPLATES_AUTO_RELOAD'] = False assert app.debug is False assert app.jinja_env.auto_reload is False # debug is False, config option is True app = flask.Flask(__name__) app.config['TEMPLATES_AUTO_RELOAD'] = True assert app.debug is False assert app.jinja_env.auto_reload is True # debug is True, config option is None app = flask.Flask(__name__) app.config['DEBUG'] = True assert app.config['TEMPLATES_AUTO_RELOAD'] is None assert app.jinja_env.auto_reload is True # debug is True, config option is False app = flask.Flask(__name__) app.config['DEBUG'] = True app.config['TEMPLATES_AUTO_RELOAD'] = False assert app.jinja_env.auto_reload is False # debug is True, config option is True app = flask.Flask(__name__) app.config['DEBUG'] = True app.config['TEMPLATES_AUTO_RELOAD'] = True assert app.jinja_env.auto_reload is True def test_template_loader_debugging(test_apps): from blueprintapp import app called = [] class _TestHandler(logging.Handler): def handle(x, record): called.append(True) text = str(record.msg) assert '1: trying loader of application "blueprintapp"' in text assert ('2: trying loader of blueprint "admin" ' '(blueprintapp.apps.admin)') in text assert ('trying loader of blueprint "frontend" ' '(blueprintapp.apps.frontend)') in text assert 'Error: the template could not be found' in text assert ('looked up from an endpoint that belongs to ' 'the blueprint "frontend"') in text assert 'See http://flask.pocoo.org/docs/blueprints/#templates' in text with app.test_client() as c: try: old_load_setting = app.config['EXPLAIN_TEMPLATE_LOADING'] old_handlers = app.logger.handlers[:] app.logger.handlers = [_TestHandler()] app.config['EXPLAIN_TEMPLATE_LOADING'] = True with pytest.raises(TemplateNotFound) as excinfo: c.get('/missing') assert 'missing_template.html' in str(excinfo.value) finally: app.logger.handlers[:] = old_handlers app.config['EXPLAIN_TEMPLATE_LOADING'] = old_load_setting assert len(called) == 1 def test_custom_jinja_env(): class CustomEnvironment(flask.templating.Environment): pass class CustomFlask(flask.Flask): jinja_environment = CustomEnvironment app = CustomFlask(__name__) assert isinstance(app.jinja_env, CustomEnvironment)
bsd-3-clause
-3,852,446,670,625,639,400
32.308673
82
0.603967
false
eljost/pysisyphus
deprecated/tests/test_dynamics/test_dynamics.py
1
2531
from matplotlib.patches import Circle import matplotlib.pyplot as plt import numpy as np import pytest from pysisyphus.calculators.AnaPot import AnaPot from pysisyphus.dynamics.velocity_verlet import md def test_velocity_verlet(): geom = AnaPot.get_geom((0.52, 1.80, 0)) x0 = geom.coords.copy() v0 = .1 * np.random.rand(*geom.coords.shape) t = 3 dts = (.005, .01, .02, .04, .08) all_xs = list() for dt in dts: geom.coords = x0.copy() md_kwargs = { "v0": v0.copy(), "t": t, "dt": dt, } md_result = md(geom, **md_kwargs) all_xs.append(md_result.coords) calc = geom.calculator calc.plot() ax = calc.ax for dt, xs in zip(dts, all_xs): ax.plot(*xs.T[:2], "o-", label=f"dt={dt:.3f}") # ax.plot(*xs.T[:2], "-", label=f"dt={dt:.3f}") ax.legend() plt.show() def ase_md_playground(): geom = AnaPot.get_geom((0.52, 1.80, 0), atoms=("H", )) atoms = geom.as_ase_atoms() # ase_calc = FakeASE(geom.calculator) # from ase.optimize import BFGS # dyn = BFGS(atoms) # dyn.run(fmax=0.05) import ase from ase import units from ase.io.trajectory import Trajectory from ase.md.velocitydistribution import MaxwellBoltzmannDistribution from ase.md.verlet import VelocityVerlet MaxwellBoltzmannDistribution(atoms, 300 * units.kB) momenta = atoms.get_momenta() momenta[0, 2] = 0. # Zero 3rd dimension atoms.set_momenta(momenta) dyn = VelocityVerlet(atoms, .005 * units.fs) # 5 fs time step. def printenergy(a): """Function to print the potential, kinetic and total energy""" epot = a.get_potential_energy() / len(a) ekin = a.get_kinetic_energy() / len(a) print('Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) ' 'Etot = %.3feV' % (epot, ekin, ekin / (1.5 * units.kB), epot + ekin)) # Now run the dynamics printenergy(atoms) traj_fn = 'asemd.traj' traj = Trajectory(traj_fn, 'w', atoms) dyn.attach(traj.write, interval=5) # dyn.attach(bumms().bimms, interval=1) dyn.run(10000) printenergy(atoms) traj.close() traj = ase.io.read(traj_fn+"@:")#, "r") pos = [a.get_positions() for a in traj] from pysisyphus.constants import BOHR2ANG pos = np.array(pos) / BOHR2ANG calc = geom.calculator calc.plot() ax = calc.ax ax.plot(*pos[:,0,:2].T) plt.show() if __name__ == "__main__": ase_md_playground()
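# --- Illustrative sketch (editor's addition, not part of pysisyphus) ---
# The update rule behind the velocity Verlet integrator exercised above,
# written as a generic NumPy step. The force callable, the unit mass and the
# harmonic-oscillator demo below are assumptions for illustration only;
# pysisyphus' own md() implementation is not reproduced here.
def velocity_verlet_step(x, v, force, dt, mass=1.0):
    """One step: advance positions and velocities from t to t + dt."""
    a = force(x) / mass                     # a(t)
    x_new = x + v * dt + 0.5 * a * dt**2    # x(t + dt)
    a_new = force(x_new) / mass             # a(t + dt)
    v_new = v + 0.5 * (a + a_new) * dt      # v(t + dt)
    return x_new, v_new


def _velocity_verlet_demo():
    # 1D harmonic oscillator, F = -k x with k = 1; the trajectory stays bounded.
    x, v = np.array([1.0]), np.array([0.0])
    for _ in range(1000):
        x, v = velocity_verlet_step(x, v, lambda pos: -pos, dt=0.01)
    return x, v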
gpl-3.0
-5,884,419,591,824,381,000
26.51087
83
0.591861
false
yashsharan/sympy
sympy/calculus/euler.py
16
3263
""" This module implements a method to find Euler-Lagrange Equations for given Lagrangian. """ from itertools import combinations_with_replacement from sympy import Function, sympify, diff, Eq, S, Symbol, Derivative from sympy.core.compatibility import (iterable, range) def euler_equations(L, funcs=(), vars=()): r""" Find the Euler-Lagrange equations [1]_ for a given Lagrangian. Parameters ========== L : Expr The Lagrangian that should be a function of the functions listed in the second argument and their derivatives. For example, in the case of two functions `f(x,y)`, `g(x,y)` and two independent variables `x`, `y` the Lagrangian would have the form: .. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x}, \frac{\partial f(x,y)}{\partial y}, \frac{\partial g(x,y)}{\partial x}, \frac{\partial g(x,y)}{\partial y},x,y\right) In many cases it is not necessary to provide anything, except the Lagrangian, it will be auto-detected (and an error raised if this couldn't be done). funcs : Function or an iterable of Functions The functions that the Lagrangian depends on. The Euler equations are differential equations for each of these functions. vars : Symbol or an iterable of Symbols The Symbols that are the independent variables of the functions. Returns ======= eqns : list of Eq The list of differential equations, one for each function. Examples ======== >>> from sympy import Symbol, Function >>> from sympy.calculus.euler import euler_equations >>> x = Function('x') >>> t = Symbol('t') >>> L = (x(t).diff(t))**2/2 - x(t)**2/2 >>> euler_equations(L, x(t), t) [Eq(-x(t) - Derivative(x(t), t, t), 0)] >>> u = Function('u') >>> x = Symbol('x') >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2 >>> euler_equations(L, u(t, x), [t, x]) [Eq(-Derivative(u(t, x), t, t) + Derivative(u(t, x), x, x), 0)] References ========== .. [1] http://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation """ funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs: if not vars == f.args: raise ValueError("Variables %s don't match args: %s" % (vars, f)) order = max(len(d.variables) for d in L.atoms(Derivative) if d.expr in funcs) eqns = [] for f in funcs: eq = diff(L, f) for i in range(1, order + 1): for p in combinations_with_replacement(vars, i): eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p) eqns.append(Eq(eq)) return eqns
bsd-3-clause
1,317,070,981,513,880,300
30.679612
78
0.579835
false
Bachaco-ve/odoo
addons/account_test/report/account_test_report.py
194
3819
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import datetime import time from openerp.osv import osv from openerp.tools.translate import _ from openerp.report import report_sxw from openerp.tools.safe_eval import safe_eval as eval # # Use period and Journal for selection or resources # class report_assert_account(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(report_assert_account, self).__init__(cr, uid, name, context=context) self.localcontext.update( { 'time': time, 'datetime': datetime, 'execute_code': self.execute_code, }) def execute_code(self, code_exec): def reconciled_inv(): """ returns the list of invoices that are set as reconciled = True """ return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)]) def order_columns(item, cols=None): """ This function is used to display a dictionary as a string, with its columns in the order chosen. :param item: dict :param cols: list of field names :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the returned values are following the order given by cols :rtype: [(key, value)] """ if cols is None: cols = item.keys() return [(col, item.get(col)) for col in cols if col in item.keys()] localdict = { 'cr': self.cr, 'uid': self.uid, 'reconciled_inv': reconciled_inv, #specific function used in different tests 'result': None, #used to store the result of the test 'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict) } eval(code_exec, localdict, mode="exec", nocopy=True) result = localdict['result'] column_order = localdict.get('column_order', None) if not isinstance(result, (tuple, list, set)): result = [result] if not result: result = [_('The test was passed successfully')] else: def _format(item): if isinstance(item, dict): return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)]) else: return item result = [_(_format(rec)) for rec in result] return result class report_accounttest(osv.AbstractModel): _name = 'report.account_test.report_accounttest' _inherit = 'report.abstract_report' _template = 'account_test.report_accounttest' _wrapped_report_class = report_assert_account # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
3,707,707,582,298,795,500
38.371134
121
0.59204
false
adit-chandra/tensorflow
tensorflow/python/keras/applications/imagenet_utils.py
5
1439
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for ImageNet data preprocessing & prediction decoding. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import imagenet_utils from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.imagenet_utils.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return imagenet_utils.decode_predictions(*args, **kwargs) @keras_export('keras.applications.imagenet_utils.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return imagenet_utils.preprocess_input(*args, **kwargs)
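# --- Illustrative sketch (editor's addition, not part of TensorFlow) ---
# The two functions above are thin wrappers around keras_applications. A small
# usage sketch, assuming a random dummy batch instead of a real image and no
# trained model: preprocess_input maps raw RGB pixel values into the input
# distribution the ImageNet-trained applications expect.
if __name__ == '__main__':
  import numpy as np

  batch = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
  ready = preprocess_input(batch)  # same shape, values centred per ImageNet statistics
  print(ready.shape)               # (1, 224, 224, 3)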
apache-2.0
5,496,285,182,079,640,000
38.972222
80
0.737318
false
fibbo/DIRAC
Interfaces/scripts/dirac-wms-job-attributes.py
8
1197
#!/usr/bin/env python ######################################################################## # $HeadURL$ # File : dirac-wms-job-attributes # Author : Stuart Paterson ######################################################################## """ Retrieve attributes associated with the given DIRAC job """ __RCSID__ = "$Id$" import DIRAC from DIRAC.Core.Base import Script Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1], 'Usage:', ' %s [option|cfgfile] ... JobID ...' % Script.scriptName, 'Arguments:', ' JobID: DIRAC Job ID' ] ) ) Script.parseCommandLine( ignoreErrors = True ) args = Script.getPositionalArgs() if len( args ) < 1: Script.showHelp() from DIRAC.Interfaces.API.Dirac import Dirac dirac = Dirac() exitCode = 0 errorList = [] for job in args: result = dirac.attributes( int(job), printOutput = True ) if not result['OK']: errorList.append( ( job, result['Message'] ) ) exitCode = 2 for error in errorList: print "ERROR %s: %s" % error DIRAC.exit( exitCode )
gpl-3.0
6,073,621,364,408,975,000
28.925
95
0.482038
false
nikhil93uf/Qemu
scripts/tracetool/backend/simple.py
97
2669
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Simple built-in backend. """ __author__ = "Lluís Vilanova <vilanova@ac.upc.edu>" __copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>" __license__ = "GPL version 2 or (at your option) any later version" __maintainer__ = "Stefan Hajnoczi" __email__ = "stefanha@linux.vnet.ibm.com" from tracetool import out PUBLIC = True def is_string(arg): strtype = ('const char*', 'char*', 'const char *', 'char *') if arg.lstrip().startswith(strtype): return True else: return False def generate_h_begin(events): for event in events: out('void _simple_%(api)s(%(args)s);', api=event.api(), args=event.args) out('') def generate_h(event): out(' _simple_%(api)s(%(args)s);', api=event.api(), args=", ".join(event.args.names())) def generate_c_begin(events): out('#include "trace.h"', '#include "trace/control.h"', '#include "trace/simple.h"', '') def generate_c(event): out('void _simple_%(api)s(%(args)s)', '{', ' TraceBufferRecord rec;', api=event.api(), args=event.args) sizes = [] for type_, name in event.args: if is_string(type_): out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;', name=name) strsizeinfo = "4 + arg%s_len" % name sizes.append(strsizeinfo) else: sizes.append("8") sizestr = " + ".join(sizes) if len(event.args) == 0: sizestr = '0' out('', ' if (!trace_event_get_state(%(event_id)s)) {', ' return;', ' }', '', ' if (trace_record_start(&rec, %(event_id)s, %(size_str)s)) {', ' return; /* Trace Buffer Full, Event Dropped ! */', ' }', event_id='TRACE_' + event.name.upper(), size_str=sizestr) if len(event.args) > 0: for type_, name in event.args: # string if is_string(type_): out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);', name=name) # pointer var (not string) elif type_.endswith('*'): out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);', name=name) # primitive data type else: out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);', name=name) out(' trace_record_finish(&rec);', '}', '')
gpl-2.0
-3,565,729,492,435,361,000
25.939394
103
0.493063
false
tod31/pyload
module/plugins/hoster/RPNetBiz.py
5
3545
# -*- coding: utf-8 -*- import re from module.plugins.internal.MultiHoster import MultiHoster from module.plugins.internal.misc import json class RPNetBiz(MultiHoster): __name__ = "RPNetBiz" __type__ = "hoster" __version__ = "0.20" __status__ = "testing" __pattern__ = r'https?://.+rpnet\.biz' __config__ = [("activated" , "bool", "Activated" , True ), ("use_premium" , "bool", "Use premium account if available" , True ), ("fallback" , "bool", "Fallback to free download if premium fails" , False), ("chk_filesize", "bool", "Check file size" , True ), ("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 ), ("revertfailed", "bool", "Revert to standard download if fails" , True )] __description__ = """RPNet.biz multi-hoster plugin""" __license__ = "GPLv3" __authors__ = [("Dman", "dmanugm@gmail.com")] def setup(self): self.chunk_limit = -1 def handle_premium(self, pyfile): user, info = self.account.select() res = self.load("https://premium.rpnet.biz/client_api.php", get={'username': user, 'password': info['login']['password'], 'action' : "generate", 'links' : pyfile.url}) self.log_debug("JSON data: %s" % res) link_status = json.loads(res)['links'][0] #: Get the first link... since we only queried one #: Check if we only have an id as a HDD link if 'id' in link_status: self.log_debug("Need to wait at least 30 seconds before requery") self.wait(30) #: Wait for 30 seconds #: Lets query the server again asking for the status on the link, #: We need to keep doing this until we reach 100 attemps = 30 my_try = 0 while (my_try <= attemps): self.log_debug("Try: %d ; Max Tries: %d" % (my_try, attemps)) res = self.load("https://premium.rpnet.biz/client_api.php", get={'username': user, 'password': info['login']['password'], 'action' : "downloadInformation", 'id' : link_status['id']}) self.log_debug("JSON data hdd query: %s" % res) download_status = json.loads(res)['download'] if download_status['status'] == "100": link_status['generated'] = download_status['rpnet_link'] self.log_debug("Successfully downloaded to rpnet HDD: %s" % link_status['generated']) break else: self.log_debug("At %s%% for the file download" % download_status['status']) self.wait(30) my_try += 1 if my_try > attemps: #: We went over the limit! self.fail(_("Waited for about 15 minutes for download to finish but failed")) if 'generated' in link_status: self.link = link_status['generated'] return elif 'error' in link_status: self.fail(link_status['error']) else: self.fail(_("Something went wrong, not supposed to enter here"))
gpl-3.0
5,885,631,327,325,925,000
42.765432
105
0.490268
false
jeffery-do/Vizdoombot
doom/lib/python3.5/site-packages/theano/tensor/tests/test_io.py
7
2053
import unittest
import theano
from theano import tensor, function, Variable, Generic
import numpy
import os


class T_load_tensor(unittest.TestCase):
    def setUp(self):
        self.data = numpy.arange(5, dtype=numpy.int32)
        self.filename = os.path.join(
            theano.config.compiledir,
            "_test.npy")
        numpy.save(self.filename, self.data)

    def test0(self):
        path = Variable(Generic())
        # Not specifying mmap_mode defaults to None, and the data is
        # copied into main memory
        x = tensor.load(path, 'int32', (False,))
        y = x * 2
        fn = function([path], y)
        assert (fn(self.filename) == (self.data * 2)).all()

    def test_invalid_modes(self):
        # Modes 'r+', 'r', and 'w+' cannot work with Theano, because
        # the output array may be modified inplace, and that should not
        # modify the original file.
        path = Variable(Generic())
        for mmap_mode in ('r+', 'r', 'w+', 'toto'):
            self.assertRaises(ValueError, tensor.load, path, 'int32',
                              (False,), mmap_mode)

    def test1(self):
        path = Variable(Generic())
        # 'c' means "copy-on-write", which allows the array to be overwritten
        # by an inplace Op in the graph, without modifying the underlying
        # file.
        x = tensor.load(path, 'int32', (False,), 'c')
        # x ** 2 has been chosen because it will work inplace.
        y = (x ** 2).sum()
        fn = function([path], y)
        # Call fn() twice, to check that inplace ops do not cause trouble
        assert (fn(self.filename) == (self.data ** 2).sum()).all()
        assert (fn(self.filename) == (self.data ** 2).sum()).all()

    def test_memmap(self):
        path = Variable(Generic())
        x = tensor.load(path, 'int32', (False,), mmap_mode='c')
        fn = function([path], x)
        assert type(fn(self.filename)) == numpy.core.memmap

    def tearDown(self):
        os.remove(os.path.join(
            theano.config.compiledir,
            "_test.npy"))
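# --- Illustrative sketch (editor's addition, not part of Theano) ---
# What the 'c' (copy-on-write) mmap mode used above means in plain NumPy terms:
# the mapped array can be written to in memory, but the file on disk stays
# untouched. The temporary path below is made up for illustration.
def _copy_on_write_demo():
    import tempfile
    tmp = os.path.join(tempfile.mkdtemp(), "cow_demo.npy")
    numpy.save(tmp, numpy.arange(5, dtype=numpy.int32))

    arr = numpy.load(tmp, mmap_mode='c')   # copy-on-write view of the file
    arr[0] = 99                            # changes only the in-memory copy
    assert numpy.load(tmp)[0] == 0         # the file itself is unchanged
    return arr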
mit
-4,291,947,874,992,496,600
35.660714
76
0.574769
false
Hellowlol/HTPC-Manager
libs/mako/_ast_util.py
60
25652
# mako/_ast_util.py # Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file> # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ ast ~~~ The `ast` module helps Python applications to process trees of the Python abstract syntax grammar. The abstract syntax itself might change with each Python release; this module helps to find out programmatically what the current grammar looks like and allows modifications of it. An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as a flag to the `compile()` builtin function or by using the `parse()` function from this module. The result will be a tree of objects whose classes all inherit from `ast.AST`. A modified abstract syntax tree can be compiled into a Python code object using the built-in `compile()` function. Additionally various helper functions are provided that make working with the trees simpler. The main intention of the helper functions and this module in general is to provide an easy to use interface for libraries that work tightly with the python syntax (template engines for example). :copyright: Copyright 2008 by Armin Ronacher. :license: Python License. """ from _ast import * from mako.compat import arg_stringname BOOLOP_SYMBOLS = { And: 'and', Or: 'or' } BINOP_SYMBOLS = { Add: '+', Sub: '-', Mult: '*', Div: '/', FloorDiv: '//', Mod: '%', LShift: '<<', RShift: '>>', BitOr: '|', BitAnd: '&', BitXor: '^' } CMPOP_SYMBOLS = { Eq: '==', Gt: '>', GtE: '>=', In: 'in', Is: 'is', IsNot: 'is not', Lt: '<', LtE: '<=', NotEq: '!=', NotIn: 'not in' } UNARYOP_SYMBOLS = { Invert: '~', Not: 'not', UAdd: '+', USub: '-' } ALL_SYMBOLS = {} ALL_SYMBOLS.update(BOOLOP_SYMBOLS) ALL_SYMBOLS.update(BINOP_SYMBOLS) ALL_SYMBOLS.update(CMPOP_SYMBOLS) ALL_SYMBOLS.update(UNARYOP_SYMBOLS) def parse(expr, filename='<unknown>', mode='exec'): """Parse an expression into an AST node.""" return compile(expr, filename, mode, PyCF_ONLY_AST) def to_source(node, indent_with=' ' * 4): """ This function can convert a node tree back into python sourcecode. This is useful for debugging purposes, especially if you're dealing with custom asts not generated by python itself. It could be that the sourcecode is evaluable when the AST itself is not compilable / evaluable. The reason for this is that the AST contains some more data than regular sourcecode does, which is dropped during conversion. Each level of indentation is replaced with `indent_with`. Per default this parameter is equal to four spaces as suggested by PEP 8, but it might be adjusted to match the application's styleguide. """ generator = SourceGenerator(indent_with) generator.visit(node) return ''.join(generator.result) def dump(node): """ A very verbose representation of the node passed. This is useful for debugging purposes. """ def _format(node): if isinstance(node, AST): return '%s(%s)' % (node.__class__.__name__, ', '.join('%s=%s' % (a, _format(b)) for a, b in iter_fields(node))) elif isinstance(node, list): return '[%s]' % ', '.join(_format(x) for x in node) return repr(node) if not isinstance(node, AST): raise TypeError('expected AST, got %r' % node.__class__.__name__) return _format(node) def copy_location(new_node, old_node): """ Copy the source location hint (`lineno` and `col_offset`) from the old to the new node if possible and return the new one. 
""" for attr in 'lineno', 'col_offset': if attr in old_node._attributes and attr in new_node._attributes \ and hasattr(old_node, attr): setattr(new_node, attr, getattr(old_node, attr)) return new_node def fix_missing_locations(node): """ Some nodes require a line number and the column offset. Without that information the compiler will abort the compilation. Because it can be a dull task to add appropriate line numbers and column offsets when adding new nodes this function can help. It copies the line number and column offset of the parent node to the child nodes without this information. Unlike `copy_location` this works recursive and won't touch nodes that already have a location information. """ def _fix(node, lineno, col_offset): if 'lineno' in node._attributes: if not hasattr(node, 'lineno'): node.lineno = lineno else: lineno = node.lineno if 'col_offset' in node._attributes: if not hasattr(node, 'col_offset'): node.col_offset = col_offset else: col_offset = node.col_offset for child in iter_child_nodes(node): _fix(child, lineno, col_offset) _fix(node, 1, 0) return node def increment_lineno(node, n=1): """ Increment the line numbers of all nodes by `n` if they have line number attributes. This is useful to "move code" to a different location in a file. """ for node in zip((node,), walk(node)): if 'lineno' in node._attributes: node.lineno = getattr(node, 'lineno', 0) + n def iter_fields(node): """Iterate over all fields of a node, only yielding existing fields.""" # CPython 2.5 compat if not hasattr(node, '_fields') or not node._fields: return for field in node._fields: try: yield field, getattr(node, field) except AttributeError: pass def get_fields(node): """Like `iter_fiels` but returns a dict.""" return dict(iter_fields(node)) def iter_child_nodes(node): """Iterate over all child nodes or a node.""" for name, field in iter_fields(node): if isinstance(field, AST): yield field elif isinstance(field, list): for item in field: if isinstance(item, AST): yield item def get_child_nodes(node): """Like `iter_child_nodes` but returns a list.""" return list(iter_child_nodes(node)) def get_compile_mode(node): """ Get the mode for `compile` of a given node. If the node is not a `mod` node (`Expression`, `Module` etc.) a `TypeError` is thrown. """ if not isinstance(node, mod): raise TypeError('expected mod node, got %r' % node.__class__.__name__) return { Expression: 'eval', Interactive: 'single' }.get(node.__class__, 'expr') def get_docstring(node): """ Return the docstring for the given node or `None` if no docstring can be found. If the node provided does not accept docstrings a `TypeError` will be raised. """ if not isinstance(node, (FunctionDef, ClassDef, Module)): raise TypeError("%r can't have docstrings" % node.__class__.__name__) if node.body and isinstance(node.body[0], Str): return node.body[0].s def walk(node): """ Iterate over all nodes. This is useful if you only want to modify nodes in place and don't care about the context or the order the nodes are returned. """ from collections import deque todo = deque([node]) while todo: node = todo.popleft() todo.extend(iter_child_nodes(node)) yield node class NodeVisitor(object): """ Walks the abstract syntax tree and call visitor functions for every node found. The visitor functions may return values which will be forwarded by the `visit` method. Per default the visitor functions for the nodes are ``'visit_'`` + class name of the node. So a `TryFinally` node visit function would be `visit_TryFinally`. 
This behavior can be changed by overriding the `get_visitor` function. If no visitor function exists for a node (return value `None`) the `generic_visit` visitor is used instead. Don't use the `NodeVisitor` if you want to apply changes to nodes during traversing. For this a special visitor exists (`NodeTransformer`) that allows modifications. """ def get_visitor(self, node): """ Return the visitor function for this node or `None` if no visitor exists for this node. In that case the generic visit function is used instead. """ method = 'visit_' + node.__class__.__name__ return getattr(self, method, None) def visit(self, node): """Visit a node.""" f = self.get_visitor(node) if f is not None: return f(node) return self.generic_visit(node) def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for field, value in iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, AST): self.visit(item) elif isinstance(value, AST): self.visit(value) class NodeTransformer(NodeVisitor): """ Walks the abstract syntax tree and allows modifications of nodes. The `NodeTransformer` will walk the AST and use the return value of the visitor functions to replace or remove the old node. If the return value of the visitor function is `None` the node will be removed from the previous location otherwise it's replaced with the return value. The return value may be the original node in which case no replacement takes place. Here an example transformer that rewrites all `foo` to `data['foo']`:: class RewriteName(NodeTransformer): def visit_Name(self, node): return copy_location(Subscript( value=Name(id='data', ctx=Load()), slice=Index(value=Str(s=node.id)), ctx=node.ctx ), node) Keep in mind that if the node you're operating on has child nodes you must either transform the child nodes yourself or call the generic visit function for the node first. Nodes that were part of a collection of statements (that applies to all statement nodes) may also return a list of nodes rather than just a single node. Usually you use the transformer like this:: node = YourTransformer().visit(node) """ def generic_visit(self, node): for field, old_value in iter_fields(node): old_value = getattr(node, field, None) if isinstance(old_value, list): new_values = [] for value in old_value: if isinstance(value, AST): value = self.visit(value) if value is None: continue elif not isinstance(value, AST): new_values.extend(value) continue new_values.append(value) old_value[:] = new_values elif isinstance(old_value, AST): new_node = self.visit(old_value) if new_node is None: delattr(node, field) else: setattr(node, field, new_node) return node class SourceGenerator(NodeVisitor): """ This visitor is able to transform a well formed syntax tree into python sourcecode. For more details have a look at the docstring of the `node_to_source` function. 
""" def __init__(self, indent_with): self.result = [] self.indent_with = indent_with self.indentation = 0 self.new_lines = 0 def write(self, x): if self.new_lines: if self.result: self.result.append('\n' * self.new_lines) self.result.append(self.indent_with * self.indentation) self.new_lines = 0 self.result.append(x) def newline(self, n=1): self.new_lines = max(self.new_lines, n) def body(self, statements): self.new_line = True self.indentation += 1 for stmt in statements: self.visit(stmt) self.indentation -= 1 def body_or_else(self, node): self.body(node.body) if node.orelse: self.newline() self.write('else:') self.body(node.orelse) def signature(self, node): want_comma = [] def write_comma(): if want_comma: self.write(', ') else: want_comma.append(True) padding = [None] * (len(node.args) - len(node.defaults)) for arg, default in zip(node.args, padding + node.defaults): write_comma() self.visit(arg) if default is not None: self.write('=') self.visit(default) if node.vararg is not None: write_comma() self.write('*' + arg_stringname(node.vararg)) if node.kwarg is not None: write_comma() self.write('**' + arg_stringname(node.kwarg)) def decorators(self, node): for decorator in node.decorator_list: self.newline() self.write('@') self.visit(decorator) # Statements def visit_Assign(self, node): self.newline() for idx, target in enumerate(node.targets): if idx: self.write(', ') self.visit(target) self.write(' = ') self.visit(node.value) def visit_AugAssign(self, node): self.newline() self.visit(node.target) self.write(BINOP_SYMBOLS[type(node.op)] + '=') self.visit(node.value) def visit_ImportFrom(self, node): self.newline() self.write('from %s%s import ' % ('.' * node.level, node.module)) for idx, item in enumerate(node.names): if idx: self.write(', ') self.write(item) def visit_Import(self, node): self.newline() for item in node.names: self.write('import ') self.visit(item) def visit_Expr(self, node): self.newline() self.generic_visit(node) def visit_FunctionDef(self, node): self.newline(n=2) self.decorators(node) self.newline() self.write('def %s(' % node.name) self.signature(node.args) self.write('):') self.body(node.body) def visit_ClassDef(self, node): have_args = [] def paren_or_comma(): if have_args: self.write(', ') else: have_args.append(True) self.write('(') self.newline(n=3) self.decorators(node) self.newline() self.write('class %s' % node.name) for base in node.bases: paren_or_comma() self.visit(base) # XXX: the if here is used to keep this module compatible # with python 2.6. 
if hasattr(node, 'keywords'): for keyword in node.keywords: paren_or_comma() self.write(keyword.arg + '=') self.visit(keyword.value) if node.starargs is not None: paren_or_comma() self.write('*') self.visit(node.starargs) if node.kwargs is not None: paren_or_comma() self.write('**') self.visit(node.kwargs) self.write(have_args and '):' or ':') self.body(node.body) def visit_If(self, node): self.newline() self.write('if ') self.visit(node.test) self.write(':') self.body(node.body) while True: else_ = node.orelse if len(else_) == 1 and isinstance(else_[0], If): node = else_[0] self.newline() self.write('elif ') self.visit(node.test) self.write(':') self.body(node.body) else: self.newline() self.write('else:') self.body(else_) break def visit_For(self, node): self.newline() self.write('for ') self.visit(node.target) self.write(' in ') self.visit(node.iter) self.write(':') self.body_or_else(node) def visit_While(self, node): self.newline() self.write('while ') self.visit(node.test) self.write(':') self.body_or_else(node) def visit_With(self, node): self.newline() self.write('with ') self.visit(node.context_expr) if node.optional_vars is not None: self.write(' as ') self.visit(node.optional_vars) self.write(':') self.body(node.body) def visit_Pass(self, node): self.newline() self.write('pass') def visit_Print(self, node): # XXX: python 2.6 only self.newline() self.write('print ') want_comma = False if node.dest is not None: self.write(' >> ') self.visit(node.dest) want_comma = True for value in node.values: if want_comma: self.write(', ') self.visit(value) want_comma = True if not node.nl: self.write(',') def visit_Delete(self, node): self.newline() self.write('del ') for idx, target in enumerate(node): if idx: self.write(', ') self.visit(target) def visit_TryExcept(self, node): self.newline() self.write('try:') self.body(node.body) for handler in node.handlers: self.visit(handler) def visit_TryFinally(self, node): self.newline() self.write('try:') self.body(node.body) self.newline() self.write('finally:') self.body(node.finalbody) def visit_Global(self, node): self.newline() self.write('global ' + ', '.join(node.names)) def visit_Nonlocal(self, node): self.newline() self.write('nonlocal ' + ', '.join(node.names)) def visit_Return(self, node): self.newline() self.write('return ') self.visit(node.value) def visit_Break(self, node): self.newline() self.write('break') def visit_Continue(self, node): self.newline() self.write('continue') def visit_Raise(self, node): # XXX: Python 2.6 / 3.0 compatibility self.newline() self.write('raise') if hasattr(node, 'exc') and node.exc is not None: self.write(' ') self.visit(node.exc) if node.cause is not None: self.write(' from ') self.visit(node.cause) elif hasattr(node, 'type') and node.type is not None: self.visit(node.type) if node.inst is not None: self.write(', ') self.visit(node.inst) if node.tback is not None: self.write(', ') self.visit(node.tback) # Expressions def visit_Attribute(self, node): self.visit(node.value) self.write('.' 
+ node.attr) def visit_Call(self, node): want_comma = [] def write_comma(): if want_comma: self.write(', ') else: want_comma.append(True) self.visit(node.func) self.write('(') for arg in node.args: write_comma() self.visit(arg) for keyword in node.keywords: write_comma() self.write(keyword.arg + '=') self.visit(keyword.value) if node.starargs is not None: write_comma() self.write('*') self.visit(node.starargs) if node.kwargs is not None: write_comma() self.write('**') self.visit(node.kwargs) self.write(')') def visit_Name(self, node): self.write(node.id) def visit_NameConstant(self, node): self.write(str(node.value)) def visit_arg(self, node): self.write(node.arg) def visit_Str(self, node): self.write(repr(node.s)) def visit_Bytes(self, node): self.write(repr(node.s)) def visit_Num(self, node): self.write(repr(node.n)) def visit_Tuple(self, node): self.write('(') idx = -1 for idx, item in enumerate(node.elts): if idx: self.write(', ') self.visit(item) self.write(idx and ')' or ',)') def sequence_visit(left, right): def visit(self, node): self.write(left) for idx, item in enumerate(node.elts): if idx: self.write(', ') self.visit(item) self.write(right) return visit visit_List = sequence_visit('[', ']') visit_Set = sequence_visit('{', '}') del sequence_visit def visit_Dict(self, node): self.write('{') for idx, (key, value) in enumerate(zip(node.keys, node.values)): if idx: self.write(', ') self.visit(key) self.write(': ') self.visit(value) self.write('}') def visit_BinOp(self, node): self.write('(') self.visit(node.left) self.write(' %s ' % BINOP_SYMBOLS[type(node.op)]) self.visit(node.right) self.write(')') def visit_BoolOp(self, node): self.write('(') for idx, value in enumerate(node.values): if idx: self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)]) self.visit(value) self.write(')') def visit_Compare(self, node): self.write('(') self.visit(node.left) for op, right in zip(node.ops, node.comparators): self.write(' %s ' % CMPOP_SYMBOLS[type(op)]) self.visit(right) self.write(')') def visit_UnaryOp(self, node): self.write('(') op = UNARYOP_SYMBOLS[type(node.op)] self.write(op) if op == 'not': self.write(' ') self.visit(node.operand) self.write(')') def visit_Subscript(self, node): self.visit(node.value) self.write('[') self.visit(node.slice) self.write(']') def visit_Slice(self, node): if node.lower is not None: self.visit(node.lower) self.write(':') if node.upper is not None: self.visit(node.upper) if node.step is not None: self.write(':') if not (isinstance(node.step, Name) and node.step.id == 'None'): self.visit(node.step) def visit_ExtSlice(self, node): for idx, item in node.dims: if idx: self.write(', ') self.visit(item) def visit_Yield(self, node): self.write('yield ') self.visit(node.value) def visit_Lambda(self, node): self.write('lambda ') self.signature(node.args) self.write(': ') self.visit(node.body) def visit_Ellipsis(self, node): self.write('Ellipsis') def generator_visit(left, right): def visit(self, node): self.write(left) self.visit(node.elt) for comprehension in node.generators: self.visit(comprehension) self.write(right) return visit visit_ListComp = generator_visit('[', ']') visit_GeneratorExp = generator_visit('(', ')') visit_SetComp = generator_visit('{', '}') del generator_visit def visit_DictComp(self, node): self.write('{') self.visit(node.key) self.write(': ') self.visit(node.value) for comprehension in node.generators: self.visit(comprehension) self.write('}') def visit_IfExp(self, node): self.visit(node.body) self.write(' if ') self.visit(node.test) self.write(' 
else ') self.visit(node.orelse) def visit_Starred(self, node): self.write('*') self.visit(node.value) def visit_Repr(self, node): # XXX: python 2.6 only self.write('`') self.visit(node.value) self.write('`') # Helper Nodes def visit_alias(self, node): self.write(node.name) if node.asname is not None: self.write(' as ' + node.asname) def visit_comprehension(self, node): self.write(' for ') self.visit(node.target) self.write(' in ') self.visit(node.iter) if node.ifs: for if_ in node.ifs: self.write(' if ') self.visit(if_) def visit_excepthandler(self, node): self.newline() self.write('except') if node.type is not None: self.write(' ') self.visit(node.type) if node.name is not None: self.write(' as ') self.visit(node.name) self.write(':') self.body(node.body)
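# --- Illustrative sketch (editor's addition, not part of Mako) ---
# How the helpers defined above fit together: parse() compiles source with
# PyCF_ONLY_AST, dump() and walk() inspect the resulting tree, and
# SourceGenerator (via to_source()) regenerates source text. Note that this
# module predates ast.Constant, so to_source() is only reliable for the older
# node classes it was written against; dump() and walk() work on any tree.
if __name__ == '__main__':
    tree = parse("answer = 1 + 2")
    print(dump(tree))                     # verbose structural view of the AST
    print(sum(1 for _ in walk(tree)))     # number of nodes in the tree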
mit
607,937,448,600,968,600
29.357396
79
0.555668
false
40223117cda/cda
static/Brython3.1.1-20150328-091302/Lib/test/test_int.py
765
12587
import sys import unittest from test.support import run_unittest L = [ ('0', 0), ('1', 1), ('9', 9), ('10', 10), ('99', 99), ('100', 100), ('314', 314), (' 314', 314), ('314 ', 314), (' \t\t 314 \t\t ', 314), (repr(sys.maxsize), sys.maxsize), (' 1x', ValueError), (' 1 ', 1), (' 1\02 ', ValueError), ('', ValueError), (' ', ValueError), (' \t\t ', ValueError), ("\u0200", ValueError) ] class IntTestCases(unittest.TestCase): def test_basic(self): self.assertEqual(int(314), 314) self.assertEqual(int(3.14), 3) # Check that conversion from float truncates towards zero self.assertEqual(int(-3.14), -3) self.assertEqual(int(3.9), 3) self.assertEqual(int(-3.9), -3) self.assertEqual(int(3.5), 3) self.assertEqual(int(-3.5), -3) self.assertEqual(int("-3"), -3) self.assertEqual(int(" -3 "), -3) self.assertEqual(int("\N{EM SPACE}-3\N{EN SPACE}"), -3) # Different base: self.assertEqual(int("10",16), 16) # Test conversion from strings and various anomalies for s, v in L: for sign in "", "+", "-": for prefix in "", " ", "\t", " \t\t ": ss = prefix + sign + s vv = v if sign == "-" and v is not ValueError: vv = -v try: self.assertEqual(int(ss), vv) except ValueError: pass s = repr(-1-sys.maxsize) x = int(s) self.assertEqual(x+1, -sys.maxsize) self.assertIsInstance(x, int) # should return int self.assertEqual(int(s[1:]), sys.maxsize+1) # should return int x = int(1e100) self.assertIsInstance(x, int) x = int(-1e100) self.assertIsInstance(x, int) # SF bug 434186: 0x80000000/2 != 0x80000000>>1. # Worked by accident in Windows release build, but failed in debug build. # Failed in all Linux builds. x = -1-sys.maxsize self.assertEqual(x >> 1, x//2) self.assertRaises(ValueError, int, '123\0') self.assertRaises(ValueError, int, '53', 40) # SF bug 1545497: embedded NULs were not detected with # explicit base self.assertRaises(ValueError, int, '123\0', 10) self.assertRaises(ValueError, int, '123\x00 245', 20) x = int('1' * 600) self.assertIsInstance(x, int) self.assertRaises(TypeError, int, 1, 12) self.assertEqual(int('0o123', 0), 83) self.assertEqual(int('0x123', 16), 291) # Bug 1679: "0x" is not a valid hex literal self.assertRaises(ValueError, int, "0x", 16) self.assertRaises(ValueError, int, "0x", 0) self.assertRaises(ValueError, int, "0o", 8) self.assertRaises(ValueError, int, "0o", 0) self.assertRaises(ValueError, int, "0b", 2) self.assertRaises(ValueError, int, "0b", 0) # Bug #3236: Return small longs from PyLong_FromString self.assertTrue(int("10") is 10) self.assertTrue(int("-1") is -1) # SF bug 1334662: int(string, base) wrong answers # Various representations of 2**32 evaluated to 0 # rather than 2**32 in previous versions self.assertEqual(int('100000000000000000000000000000000', 2), 4294967296) self.assertEqual(int('102002022201221111211', 3), 4294967296) self.assertEqual(int('10000000000000000', 4), 4294967296) self.assertEqual(int('32244002423141', 5), 4294967296) self.assertEqual(int('1550104015504', 6), 4294967296) self.assertEqual(int('211301422354', 7), 4294967296) self.assertEqual(int('40000000000', 8), 4294967296) self.assertEqual(int('12068657454', 9), 4294967296) self.assertEqual(int('4294967296', 10), 4294967296) self.assertEqual(int('1904440554', 11), 4294967296) self.assertEqual(int('9ba461594', 12), 4294967296) self.assertEqual(int('535a79889', 13), 4294967296) self.assertEqual(int('2ca5b7464', 14), 4294967296) self.assertEqual(int('1a20dcd81', 15), 4294967296) self.assertEqual(int('100000000', 16), 4294967296) self.assertEqual(int('a7ffda91', 17), 4294967296) self.assertEqual(int('704he7g4', 
18), 4294967296) self.assertEqual(int('4f5aff66', 19), 4294967296) self.assertEqual(int('3723ai4g', 20), 4294967296) self.assertEqual(int('281d55i4', 21), 4294967296) self.assertEqual(int('1fj8b184', 22), 4294967296) self.assertEqual(int('1606k7ic', 23), 4294967296) self.assertEqual(int('mb994ag', 24), 4294967296) self.assertEqual(int('hek2mgl', 25), 4294967296) self.assertEqual(int('dnchbnm', 26), 4294967296) self.assertEqual(int('b28jpdm', 27), 4294967296) self.assertEqual(int('8pfgih4', 28), 4294967296) self.assertEqual(int('76beigg', 29), 4294967296) self.assertEqual(int('5qmcpqg', 30), 4294967296) self.assertEqual(int('4q0jto4', 31), 4294967296) self.assertEqual(int('4000000', 32), 4294967296) self.assertEqual(int('3aokq94', 33), 4294967296) self.assertEqual(int('2qhxjli', 34), 4294967296) self.assertEqual(int('2br45qb', 35), 4294967296) self.assertEqual(int('1z141z4', 36), 4294967296) # tests with base 0 # this fails on 3.0, but in 2.x the old octal syntax is allowed self.assertEqual(int(' 0o123 ', 0), 83) self.assertEqual(int(' 0o123 ', 0), 83) self.assertEqual(int('000', 0), 0) self.assertEqual(int('0o123', 0), 83) self.assertEqual(int('0x123', 0), 291) self.assertEqual(int('0b100', 0), 4) self.assertEqual(int(' 0O123 ', 0), 83) self.assertEqual(int(' 0X123 ', 0), 291) self.assertEqual(int(' 0B100 ', 0), 4) # without base still base 10 self.assertEqual(int('0123'), 123) self.assertEqual(int('0123', 10), 123) # tests with prefix and base != 0 self.assertEqual(int('0x123', 16), 291) self.assertEqual(int('0o123', 8), 83) self.assertEqual(int('0b100', 2), 4) self.assertEqual(int('0X123', 16), 291) self.assertEqual(int('0O123', 8), 83) self.assertEqual(int('0B100', 2), 4) # the code has special checks for the first character after the # type prefix self.assertRaises(ValueError, int, '0b2', 2) self.assertRaises(ValueError, int, '0b02', 2) self.assertRaises(ValueError, int, '0B2', 2) self.assertRaises(ValueError, int, '0B02', 2) self.assertRaises(ValueError, int, '0o8', 8) self.assertRaises(ValueError, int, '0o08', 8) self.assertRaises(ValueError, int, '0O8', 8) self.assertRaises(ValueError, int, '0O08', 8) self.assertRaises(ValueError, int, '0xg', 16) self.assertRaises(ValueError, int, '0x0g', 16) self.assertRaises(ValueError, int, '0Xg', 16) self.assertRaises(ValueError, int, '0X0g', 16) # SF bug 1334662: int(string, base) wrong answers # Checks for proper evaluation of 2**32 + 1 self.assertEqual(int('100000000000000000000000000000001', 2), 4294967297) self.assertEqual(int('102002022201221111212', 3), 4294967297) self.assertEqual(int('10000000000000001', 4), 4294967297) self.assertEqual(int('32244002423142', 5), 4294967297) self.assertEqual(int('1550104015505', 6), 4294967297) self.assertEqual(int('211301422355', 7), 4294967297) self.assertEqual(int('40000000001', 8), 4294967297) self.assertEqual(int('12068657455', 9), 4294967297) self.assertEqual(int('4294967297', 10), 4294967297) self.assertEqual(int('1904440555', 11), 4294967297) self.assertEqual(int('9ba461595', 12), 4294967297) self.assertEqual(int('535a7988a', 13), 4294967297) self.assertEqual(int('2ca5b7465', 14), 4294967297) self.assertEqual(int('1a20dcd82', 15), 4294967297) self.assertEqual(int('100000001', 16), 4294967297) self.assertEqual(int('a7ffda92', 17), 4294967297) self.assertEqual(int('704he7g5', 18), 4294967297) self.assertEqual(int('4f5aff67', 19), 4294967297) self.assertEqual(int('3723ai4h', 20), 4294967297) self.assertEqual(int('281d55i5', 21), 4294967297) self.assertEqual(int('1fj8b185', 22), 4294967297) 
self.assertEqual(int('1606k7id', 23), 4294967297) self.assertEqual(int('mb994ah', 24), 4294967297) self.assertEqual(int('hek2mgm', 25), 4294967297) self.assertEqual(int('dnchbnn', 26), 4294967297) self.assertEqual(int('b28jpdn', 27), 4294967297) self.assertEqual(int('8pfgih5', 28), 4294967297) self.assertEqual(int('76beigh', 29), 4294967297) self.assertEqual(int('5qmcpqh', 30), 4294967297) self.assertEqual(int('4q0jto5', 31), 4294967297) self.assertEqual(int('4000001', 32), 4294967297) self.assertEqual(int('3aokq95', 33), 4294967297) self.assertEqual(int('2qhxjlj', 34), 4294967297) self.assertEqual(int('2br45qc', 35), 4294967297) self.assertEqual(int('1z141z5', 36), 4294967297) def test_intconversion(self): # Test __int__() class ClassicMissingMethods: pass self.assertRaises(TypeError, int, ClassicMissingMethods()) class MissingMethods(object): pass self.assertRaises(TypeError, int, MissingMethods()) class Foo0: def __int__(self): return 42 class Foo1(object): def __int__(self): return 42 class Foo2(int): def __int__(self): return 42 class Foo3(int): def __int__(self): return self class Foo4(int): def __int__(self): return 42 class Foo5(int): def __int__(self): return 42. self.assertEqual(int(Foo0()), 42) self.assertEqual(int(Foo1()), 42) self.assertEqual(int(Foo2()), 42) self.assertEqual(int(Foo3()), 0) self.assertEqual(int(Foo4()), 42) self.assertRaises(TypeError, int, Foo5()) class Classic: pass for base in (object, Classic): class IntOverridesTrunc(base): def __int__(self): return 42 def __trunc__(self): return -12 self.assertEqual(int(IntOverridesTrunc()), 42) class JustTrunc(base): def __trunc__(self): return 42 self.assertEqual(int(JustTrunc()), 42) for trunc_result_base in (object, Classic): class Integral(trunc_result_base): def __int__(self): return 42 class TruncReturnsNonInt(base): def __trunc__(self): return Integral() self.assertEqual(int(TruncReturnsNonInt()), 42) class NonIntegral(trunc_result_base): def __trunc__(self): # Check that we avoid infinite recursion. return NonIntegral() class TruncReturnsNonIntegral(base): def __trunc__(self): return NonIntegral() try: int(TruncReturnsNonIntegral()) except TypeError as e: self.assertEqual(str(e), "__trunc__ returned non-Integral" " (type NonIntegral)") else: self.fail("Failed to raise TypeError with %s" % ((base, trunc_result_base),)) def test_error_message(self): testlist = ('\xbd', '123\xbd', ' 123 456 ') for s in testlist: try: int(s) except ValueError as e: self.assertIn(s.strip(), e.args[0]) else: self.fail("Expected int(%r) to raise a ValueError", s) def test_main(): run_unittest(IntTestCases) if __name__ == "__main__": test_main()
gpl-3.0
-2,974,264,190,853,167,600
38.090062
81
0.561373
false
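A minimal illustration of the base handling exercised by the conversion tests above; the values are taken directly from the assertions in that file and rely only on builtin int() behaviour:

# Base 0 infers the radix from the string prefix; explicit bases accept a matching prefix.
assert int('0x123', 0) == 291              # hex prefix, base inferred
assert int('0o123', 0) == 83               # octal prefix, base inferred
assert int('0b100', 0) == 4                # binary prefix, base inferred
assert int('0123') == 123                  # no base argument: plain decimal
assert int('4000000', 32) == 4294967296    # 2**32 written in base 32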
mzdaniel/oh-mainline
vendor/packages/python-otp/otp/hash.py
16
2386
import struct

class BaseHash:
    def __init__(self, name):
        self.name = name

    def prepare(self, hash):
        if type(hash) is long:
            newhash = ''
            for i in range(56, -8, -8):
                newhash += chr((hash>>i)%256)
            hash = newhash
        return hash

class HashMD5(BaseHash):
    def __init__(self, name):
        BaseHash.__init__(self, name)
        import md5
        self._hash = md5.md5
        self.name = name

    def hash(self, hash):
        hash = self.prepare(hash)
        return self.fold(self._hash(hash).digest())

    def fold(self, hash):
        result = map(ord, hash)
        n = 0L
        for i in range(8):
            result[i] ^= result[i+8]
            n <<= 8
            n |= result[i]
        return n

class HashSHA1(BaseHash):
    def __init__(self, name):
        BaseHash.__init__(self, name)
        import sha
        self._hash = sha.sha
        self.name = name

    def hash(self, hash):
        hash = self.prepare(hash)
        return self.fold(self._hash(hash).digest())

    def fold(self, hash):
        hash = map(ord, hash)
        n = 0L
        n |= hash[3]^hash[11]^hash[19]
        n <<= 8
        n |= hash[2]^hash[10]^hash[18]
        n <<= 8
        n |= hash[1]^hash[9]^hash[17]
        n <<= 8
        n |= hash[0]^hash[8]^hash[16]
        n <<= 8
        n |= hash[7]^hash[15]
        n <<= 8
        n |= hash[6]^hash[14]
        n <<= 8
        n |= hash[5]^hash[13]
        n <<= 8
        n |= hash[4]^hash[12]
        return n

class HashMD4MHASH(HashMD5):
    def __init__(self, name):
        BaseHash.__init__(self, name)
        import mhash
        self._hash = mhash.MHASH
        self._md4 = mhash.MHASH_MD4
        self.name = name

    def hash(self, hash):
        hash = self.prepare(hash)
        return self.fold(self._hash(self._md4, hash).digest())

    def test():
        try:
            import mhash
        except ImportError:
            return 0
        return 1
    test = staticmethod(test)

class HashMD4Crypto(HashMD5):
    def __init__(self, name):
        BaseHash.__init__(self, name)
        from Crypto.Hash import MD4
        self._hash = MD4.new
        self.name = name

    def test():
        try:
            from Crypto.Hash import MD4
        except ImportError:
            return 0
        return 1
    test = staticmethod(test)
agpl-3.0
-1,892,038,729,153,453,000
23.10101
62
0.489103
false
rdio/sentry
tests/sentry/udp/tests.py
4
1167
# -*- coding: utf-8 -*-

from __future__ import absolute_import

from sentry.models import Project, User
from sentry.services.udp import SentryUDPServer
from sentry.testutils import TestCase, get_auth_header


class SentryUDPTest(TestCase):
    def setUp(self):
        self.address = (('0.0.0.0', 0))
        self.server = SentryUDPServer(*self.address)
        self.user = User.objects.create(username='coreapi')
        self.project = Project.objects.create(owner=self.user, name='Foo', slug='bar')
        self.pm = self.project.team.member_set.get_or_create(user=self.user)[0]
        self.pk = self.project.key_set.get_or_create(user=self.user)[0]

    def test_failure(self):
        self.assertNotEquals(None, self.server.handle('deadbeef', self.address))

    def test_success(self):
        data = {'message': 'hello', 'server_name': 'not_dcramer.local',
                'level': 40, 'site': 'not_a_real_site'}
        message = self._makeMessage(data)
        header = get_auth_header('udpTest', api_key=self.pk.public_key,
                                 secret_key=self.pk.secret_key)
        packet = header + '\n\n' + message
        self.assertEquals(None, self.server.handle(packet, self.address))
bsd-3-clause
408,418,204,307,300,900
42.222222
111
0.667524
false
hoonkim/Lesser
parsers/tests/test_http_parser.py
1
1060
from unittest import TestCase
from parsers.http_parser import *

__author__ = 'kimothy'


class TestHttpParser(TestCase):
    def test_parse_url(self):
        testList = parse_url("//foo/bar//")
        self.assertEqual(testList[0], "foo")
        self.assertEqual(testList[1], "bar")
        self.assertEqual(len(testList), 2)

    def test_parse_body(self):
        jsonExample = '{\
            "error": {\
                "message": "(#803) Cannot query users by their username (kimothykr)",\
                "type": "OAuthException",\
                "code": 803,\
                "fbtrace_id": "DtEEUjyuC6h"\
            }\
        }'

        with self.assertRaises(Exception):
            testDictionary = parse_body('{error:te')

        testDictionary = parse_body(jsonExample)
        self.assertEqual(testDictionary["error"]["code"], 803)
        self.assertEqual(testDictionary["error"]["fbtrace_id"], "DtEEUjyuC6h")
        self.assertEqual(len(testDictionary), 1)
        self.assertEqual(len(testDictionary["error"]), 4)
mit
-4,969,871,266,550,382,000
30.176471
88
0.570755
false
kashev/kashev.rocks
manage.py
1
1238
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# kashev.rocks
# Kashev Dalmia - kashev.dalmia@gmail.com

from flask.ext.script import Manager
from flask.ext.assets import ManageAssets

from src.kashevrocks import app
from src.assets import register_assets

manager = Manager(app)
assets_env = register_assets(app)
manager.add_command("assets", ManageAssets(assets_env))


@manager.command
def liveserver(debug=True):
    """ Runs a live reloading server which watches non-python code as well. """
    import livereload
    app.debug = debug
    assets_env.debug = debug
    server = livereload.Server(app.wsgi_app)
    server.watch('src/')
    server.serve()


@manager.command
def clean():
    """ Cleans up all generated and cache files from the project. """
    import shutil
    import os

    paths_to_clean = ['src/static/.webassets-cache',
                      'src/static/generated',
                      'debug.log']

    for path in paths_to_clean:
        try:
            shutil.rmtree(path)
        except NotADirectoryError:
            os.remove(path)  # It's a file, not a directory
        except FileNotFoundError:
            pass  # They're not there, that's fine.


if __name__ == "__main__":
    manager.run()
mit
-2,425,773,170,606,357,000
22.807692
79
0.642973
false
brayden2544/Mystuff-final
account/cached_templates/templates/password_reset.html.py
1
1886
# -*- coding:ascii -*- from mako import runtime, filters, cache UNDEFINED = runtime.UNDEFINED __M_dict_builtin = dict __M_locals_builtin = locals _magic_number = 9 _modified_time = 1397087641.387625 _enable_loop = True _template_filename = 'C:\\app\\account\\templates/password_reset.html' _template_uri = 'password_reset.html' _source_encoding = 'ascii' import os, os.path, re _exports = ['content'] def _mako_get_namespace(context, name): try: return context.namespaces[(__name__, name)] except KeyError: _mako_generate_namespaces(context) return context.namespaces[(__name__, name)] def _mako_generate_namespaces(context): pass def _mako_inherit(template, context): _mako_generate_namespaces(context) return runtime._inherit_from(context, 'base_template.htm', _template_uri) def render_body(context,**pageargs): __M_caller = context.caller_stack._push_frame() try: __M_locals = __M_dict_builtin(pageargs=pageargs) def content(): return render_content(context._locals(__M_locals)) __M_writer = context.writer() # SOURCE LINE 2 __M_writer('\n\n\n') if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'): context['self'].content(**pageargs) # SOURCE LINE 12 __M_writer(' \n\n\n') return '' finally: context.caller_stack._pop_frame() def render_content(context,**pageargs): __M_caller = context.caller_stack._push_frame() try: def content(): return render_content(context) __M_writer = context.writer() # SOURCE LINE 5 __M_writer('\n \n<h3>Password Reset</h3>\n\n<p>Your password has been reset. Please log in again with your new password.</p>\n\t\n\n') return '' finally: context.caller_stack._pop_frame()
apache-2.0
1,488,263,980,460,247,600
30.966102
143
0.626723
false
nathanial/lettuce
tests/integration/lib/Django-1.3/tests/modeltests/model_inheritance/models.py
118
3888
""" XX. Model inheritance Model inheritance exists in two varieties: - abstract base classes which are a way of specifying common information inherited by the subclasses. They don't exist as a separate model. - non-abstract base classes (the default), which are models in their own right with their own database tables and everything. Their subclasses have references back to them, created automatically. Both styles are demonstrated here. """ from django.db import models # # Abstract base classes # class CommonInfo(models.Model): name = models.CharField(max_length=50) age = models.PositiveIntegerField() class Meta: abstract = True ordering = ['name'] def __unicode__(self): return u'%s %s' % (self.__class__.__name__, self.name) class Worker(CommonInfo): job = models.CharField(max_length=50) class Student(CommonInfo): school_class = models.CharField(max_length=10) class Meta: pass class StudentWorker(Student, Worker): pass # # Abstract base classes with related models # class Post(models.Model): title = models.CharField(max_length=50) class Attachment(models.Model): post = models.ForeignKey(Post, related_name='attached_%(class)s_set') content = models.TextField() class Meta: abstract = True def __unicode__(self): return self.content class Comment(Attachment): is_spam = models.BooleanField() class Link(Attachment): url = models.URLField() # # Multi-table inheritance # class Chef(models.Model): name = models.CharField(max_length=50) def __unicode__(self): return u"%s the chef" % self.name class Place(models.Model): name = models.CharField(max_length=50) address = models.CharField(max_length=80) def __unicode__(self): return u"%s the place" % self.name class Rating(models.Model): rating = models.IntegerField(null=True, blank=True) class Meta: abstract = True ordering = ['-rating'] class Restaurant(Place, Rating): serves_hot_dogs = models.BooleanField() serves_pizza = models.BooleanField() chef = models.ForeignKey(Chef, null=True, blank=True) class Meta(Rating.Meta): db_table = 'my_restaurant' def __unicode__(self): return u"%s the restaurant" % self.name class ItalianRestaurant(Restaurant): serves_gnocchi = models.BooleanField() def __unicode__(self): return u"%s the italian restaurant" % self.name class Supplier(Place): customers = models.ManyToManyField(Restaurant, related_name='provider') def __unicode__(self): return u"%s the supplier" % self.name class ParkingLot(Place): # An explicit link to the parent (we can control the attribute name). parent = models.OneToOneField(Place, primary_key=True, parent_link=True) main_site = models.ForeignKey(Place, related_name='lot') def __unicode__(self): return u"%s the parking lot" % self.name # # Abstract base classes with related models where the sub-class has the # same name in a different app and inherits from the same abstract base # class. # NOTE: The actual API tests for the following classes are in # model_inheritance_same_model_name/models.py - They are defined # here in order to have the name conflict between apps # class Title(models.Model): title = models.CharField(max_length=50) class NamedURL(models.Model): title = models.ForeignKey(Title, related_name='attached_%(app_label)s_%(class)s_set') url = models.URLField() class Meta: abstract = True class Copy(NamedURL): content = models.TextField() def __unicode__(self): return self.content class Mixin(object): def __init__(self): self.other_attr = 1 super(Mixin, self).__init__() class MixinModel(models.Model, Mixin): pass
gpl-3.0
-5,259,749,329,917,326,000
24.411765
89
0.675154
false
AtsushiSakai/PythonRobotics
PathPlanning/Eta3SplinePath/eta3_spline_path.py
1
13649
""" eta^3 polynomials planner author: Joe Dinius, Ph.D (https://jwdinius.github.io) Atsushi Sakai (@Atsushi_twi) Ref: - [eta^3-Splines for the Smooth Path Generation of Wheeled Mobile Robots] (https://ieeexplore.ieee.org/document/4339545/) """ import numpy as np import matplotlib.pyplot as plt from scipy.integrate import quad # NOTE: *_pose is a 3-array: # 0 - x coord, 1 - y coord, 2 - orientation angle \theta show_animation = True class Eta3Path(object): """ Eta3Path input segments: a list of `Eta3PathSegment` instances defining a continuous path """ def __init__(self, segments): # ensure input has the correct form assert(isinstance(segments, list) and isinstance( segments[0], Eta3PathSegment)) # ensure that each segment begins from the previous segment's end (continuity) for r, s in zip(segments[:-1], segments[1:]): assert(np.array_equal(r.end_pose, s.start_pose)) self.segments = segments def calc_path_point(self, u): """ Eta3Path::calc_path_point input normalized interpolation point along path object, 0 <= u <= len(self.segments) returns 2d (x,y) position vector """ assert(0 <= u <= len(self.segments)) if np.isclose(u, len(self.segments)): segment_idx = len(self.segments) - 1 u = 1. else: segment_idx = int(np.floor(u)) u -= segment_idx return self.segments[segment_idx].calc_point(u) class Eta3PathSegment(object): """ Eta3PathSegment - constructs an eta^3 path segment based on desired shaping, eta, and curvature vector, kappa. If either, or both, of eta and kappa are not set during initialization, they will default to zeros. input start_pose - starting pose array (x, y, \theta) end_pose - ending pose array (x, y, \theta) eta - shaping parameters, default=None kappa - curvature parameters, default=None """ def __init__(self, start_pose, end_pose, eta=None, kappa=None): # make sure inputs are of the correct size assert(len(start_pose) == 3 and len(start_pose) == len(end_pose)) self.start_pose = start_pose self.end_pose = end_pose # if no eta is passed, initialize it to array of zeros if not eta: eta = np.zeros((6,)) else: # make sure that eta has correct size assert(len(eta) == 6) # if no kappa is passed, initialize to array of zeros if not kappa: kappa = np.zeros((4,)) else: assert(len(kappa) == 4) # set up angle cosines and sines for simpler computations below ca = np.cos(start_pose[2]) sa = np.sin(start_pose[2]) cb = np.cos(end_pose[2]) sb = np.sin(end_pose[2]) # 2 dimensions (x,y) x 8 coefficients per dimension self.coeffs = np.empty((2, 8)) # constant terms (u^0) self.coeffs[0, 0] = start_pose[0] self.coeffs[1, 0] = start_pose[1] # linear (u^1) self.coeffs[0, 1] = eta[0] * ca self.coeffs[1, 1] = eta[0] * sa # quadratic (u^2) self.coeffs[0, 2] = 1. / 2 * eta[2] * \ ca - 1. / 2 * eta[0]**2 * kappa[0] * sa self.coeffs[1, 2] = 1. / 2 * eta[2] * \ sa + 1. / 2 * eta[0]**2 * kappa[0] * ca # cubic (u^3) self.coeffs[0, 3] = 1. / 6 * eta[4] * ca - 1. / 6 * \ (eta[0]**3 * kappa[1] + 3. * eta[0] * eta[2] * kappa[0]) * sa self.coeffs[1, 3] = 1. / 6 * eta[4] * sa + 1. / 6 * \ (eta[0]**3 * kappa[1] + 3. * eta[0] * eta[2] * kappa[0]) * ca # quartic (u^4) tmp1 = 35. * (end_pose[0] - start_pose[0]) tmp2 = (20. * eta[0] + 5 * eta[2] + 2. / 3 * eta[4]) * ca tmp3 = (5. * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 * kappa[1] + 2. * eta[0] * eta[2] * kappa[0]) * sa tmp4 = (15. * eta[1] - 5. / 2 * eta[3] + 1. / 6 * eta[5]) * cb tmp5 = (5. / 2 * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 * kappa[3] - 1. 
/ 2 * eta[1] * eta[3] * kappa[2]) * sb self.coeffs[0, 4] = tmp1 - tmp2 + tmp3 - tmp4 - tmp5 tmp1 = 35. * (end_pose[1] - start_pose[1]) tmp2 = (20. * eta[0] + 5. * eta[2] + 2. / 3 * eta[4]) * sa tmp3 = (5. * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 * kappa[1] + 2. * eta[0] * eta[2] * kappa[0]) * ca tmp4 = (15. * eta[1] - 5. / 2 * eta[3] + 1. / 6 * eta[5]) * sb tmp5 = (5. / 2 * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 * kappa[3] - 1. / 2 * eta[1] * eta[3] * kappa[2]) * cb self.coeffs[1, 4] = tmp1 - tmp2 - tmp3 - tmp4 + tmp5 # quintic (u^5) tmp1 = -84. * (end_pose[0] - start_pose[0]) tmp2 = (45. * eta[0] + 10. * eta[2] + eta[4]) * ca tmp3 = (10. * eta[0] ** 2 * kappa[0] + eta[0] ** 3 * kappa[1] + 3. * eta[0] * eta[2] * kappa[0]) * sa tmp4 = (39. * eta[1] - 7. * eta[3] + 1. / 2 * eta[5]) * cb tmp5 = + (7. * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 * kappa[3] - 3. / 2 * eta[1] * eta[3] * kappa[2]) * sb self.coeffs[0, 5] = tmp1 + tmp2 - tmp3 + tmp4 + tmp5 tmp1 = -84. * (end_pose[1] - start_pose[1]) tmp2 = (45. * eta[0] + 10. * eta[2] + eta[4]) * sa tmp3 = (10. * eta[0] ** 2 * kappa[0] + eta[0] ** 3 * kappa[1] + 3. * eta[0] * eta[2] * kappa[0]) * ca tmp4 = (39. * eta[1] - 7. * eta[3] + 1. / 2 * eta[5]) * sb tmp5 = - (7. * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 * kappa[3] - 3. / 2 * eta[1] * eta[3] * kappa[2]) * cb self.coeffs[1, 5] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 # sextic (u^6) tmp1 = 70. * (end_pose[0] - start_pose[0]) tmp2 = (36. * eta[0] + 15. / 2 * eta[2] + 2. / 3 * eta[4]) * ca tmp3 = + (15. / 2 * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 * kappa[1] + 2. * eta[0] * eta[2] * kappa[0]) * sa tmp4 = (34. * eta[1] - 13. / 2 * eta[3] + 1. / 2 * eta[5]) * cb tmp5 = - (13. / 2 * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 * kappa[3] - 3. / 2 * eta[1] * eta[3] * kappa[2]) * sb self.coeffs[0, 6] = tmp1 - tmp2 + tmp3 - tmp4 + tmp5 tmp1 = 70. * (end_pose[1] - start_pose[1]) tmp2 = - (36. * eta[0] + 15. / 2 * eta[2] + 2. / 3 * eta[4]) * sa tmp3 = - (15. / 2 * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 * kappa[1] + 2. * eta[0] * eta[2] * kappa[0]) * ca tmp4 = - (34. * eta[1] - 13. / 2 * eta[3] + 1. / 2 * eta[5]) * sb tmp5 = + (13. / 2 * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 * kappa[3] - 3. / 2 * eta[1] * eta[3] * kappa[2]) * cb self.coeffs[1, 6] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 # septic (u^7) tmp1 = -20. * (end_pose[0] - start_pose[0]) tmp2 = (10. * eta[0] + 2. * eta[2] + 1. / 6 * eta[4]) * ca tmp3 = - (2. * eta[0] ** 2 * kappa[0] + 1. / 6 * eta[0] ** 3 * kappa[1] + 1. / 2 * eta[0] * eta[2] * kappa[0]) * sa tmp4 = (10. * eta[1] - 2. * eta[3] + 1. / 6 * eta[5]) * cb tmp5 = (2. * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 * kappa[3] - 1. / 2 * eta[1] * eta[3] * kappa[2]) * sb self.coeffs[0, 7] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 tmp1 = -20. * (end_pose[1] - start_pose[1]) tmp2 = (10. * eta[0] + 2. * eta[2] + 1. / 6 * eta[4]) * sa tmp3 = (2. * eta[0] ** 2 * kappa[0] + 1. / 6 * eta[0] ** 3 * kappa[1] + 1. / 2 * eta[0] * eta[2] * kappa[0]) * ca tmp4 = (10. * eta[1] - 2. * eta[3] + 1. / 6 * eta[5]) * sb tmp5 = - (2. * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 * kappa[3] - 1. / 2 * eta[1] * eta[3] * kappa[2]) * cb self.coeffs[1, 7] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 self.s_dot = lambda u: max(np.linalg.norm( self.coeffs[:, 1:].dot(np.array( [1, 2. * u, 3. * u**2, 4. * u**3, 5. * u**4, 6. * u**5, 7. 
* u**6]))), 1e-6) self.f_length = lambda ue: quad(lambda u: self.s_dot(u), 0, ue) self.segment_length = self.f_length(1)[0] def calc_point(self, u): """ Eta3PathSegment::calc_point input u - parametric representation of a point along the segment, 0 <= u <= 1 returns (x,y) of point along the segment """ assert(0 <= u <= 1) return self.coeffs.dot(np.array([1, u, u**2, u**3, u**4, u**5, u**6, u**7])) def calc_deriv(self, u, order=1): """ Eta3PathSegment::calc_deriv input u - parametric representation of a point along the segment, 0 <= u <= 1 returns (d^nx/du^n,d^ny/du^n) of point along the segment, for 0 < n <= 2 """ assert(0 <= u <= 1) assert(0 < order <= 2) if order == 1: return self.coeffs[:, 1:].dot(np.array([1, 2. * u, 3. * u**2, 4. * u**3, 5. * u**4, 6. * u**5, 7. * u**6])) return self.coeffs[:, 2:].dot(np.array([2, 6. * u, 12. * u**2, 20. * u**3, 30. * u**4, 42. * u**5])) def test1(): for i in range(10): path_segments = [] # segment 1: lane-change curve start_pose = [0, 0, 0] end_pose = [4, 3.0, 0] # NOTE: The ordering on kappa is [kappa_A, kappad_A, kappa_B, kappad_B], with kappad_* being the curvature derivative kappa = [0, 0, 0, 0] eta = [i, i, 0, 0, 0, 0] path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) path = Eta3Path(path_segments) # interpolate at several points along the path ui = np.linspace(0, len(path_segments), 1001) pos = np.empty((2, ui.size)) for j, u in enumerate(ui): pos[:, j] = path.calc_path_point(u) if show_animation: # plot the path plt.plot(pos[0, :], pos[1, :]) # for stopping simulation with the esc key. plt.gcf().canvas.mpl_connect( 'key_release_event', lambda event: [exit(0) if event.key == 'escape' else None]) plt.pause(1.0) if show_animation: plt.close("all") def test2(): for i in range(10): path_segments = [] # segment 1: lane-change curve start_pose = [0, 0, 0] end_pose = [4, 3.0, 0] # NOTE: The ordering on kappa is [kappa_A, kappad_A, kappa_B, kappad_B], with kappad_* being the curvature derivative kappa = [0, 0, 0, 0] eta = [0, 0, (i - 5) * 20, (5 - i) * 20, 0, 0] path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) path = Eta3Path(path_segments) # interpolate at several points along the path ui = np.linspace(0, len(path_segments), 1001) pos = np.empty((2, ui.size)) for j, u in enumerate(ui): pos[:, j] = path.calc_path_point(u) if show_animation: # plot the path plt.plot(pos[0, :], pos[1, :]) plt.pause(1.0) if show_animation: plt.close("all") def test3(): path_segments = [] # segment 1: lane-change curve start_pose = [0, 0, 0] end_pose = [4, 1.5, 0] # NOTE: The ordering on kappa is [kappa_A, kappad_A, kappa_B, kappad_B], with kappad_* being the curvature derivative kappa = [0, 0, 0, 0] eta = [4.27, 4.27, 0, 0, 0, 0] path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) # segment 2: line segment start_pose = [4, 1.5, 0] end_pose = [5.5, 1.5, 0] kappa = [0, 0, 0, 0] eta = [0, 0, 0, 0, 0, 0] path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) # segment 3: cubic spiral start_pose = [5.5, 1.5, 0] end_pose = [7.4377, 1.8235, 0.6667] kappa = [0, 0, 1, 1] eta = [1.88, 1.88, 0, 0, 0, 0] path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) # segment 4: generic twirl arc start_pose = [7.4377, 1.8235, 0.6667] end_pose = [7.8, 4.3, 1.8] kappa = [1, 1, 0.5, 0] eta = [7, 10, 10, -10, 4, 4] 
path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) # segment 5: circular arc start_pose = [7.8, 4.3, 1.8] end_pose = [5.4581, 5.8064, 3.3416] kappa = [0.5, 0, 0.5, 0] eta = [2.98, 2.98, 0, 0, 0, 0] path_segments.append(Eta3PathSegment( start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa)) # construct the whole path path = Eta3Path(path_segments) # interpolate at several points along the path ui = np.linspace(0, len(path_segments), 1001) pos = np.empty((2, ui.size)) for i, u in enumerate(ui): pos[:, i] = path.calc_path_point(u) # plot the path if show_animation: plt.figure('Path from Reference') plt.plot(pos[0, :], pos[1, :]) plt.xlabel('x') plt.ylabel('y') plt.title('Path') plt.pause(1.0) plt.show() def main(): """ recreate path from reference (see Table 1) """ test1() test2() test3() if __name__ == '__main__': main()
mit
5,245,574,693,435,425,000
36.808864
125
0.486776
false
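A minimal usage sketch of the classes in the file above, following the pattern of its own test1(); the poses and eta values here are illustrative placeholders, not values from the reference paper:

import numpy as np

# One lane-change-like segment from the origin to (4, 3), both headings zero.
segment = Eta3PathSegment(start_pose=[0, 0, 0], end_pose=[4, 3.0, 0],
                          eta=[5, 5, 0, 0, 0, 0], kappa=[0, 0, 0, 0])
path = Eta3Path([segment])

# Sample the path at equally spaced parameter values (0 <= u <= number of segments).
for u in np.linspace(0, 1, 5):
    x, y = path.calc_path_point(u)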
steenzout/python-barcode
docs/conf.py
1
11075
# -*- coding: utf-8 -*- # # Copyright 2010-2013, Thorsten Weimann; 2014, Alexander Shorin; 2016 Pedro Salgado # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Sphinx documentation build configuration file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. """ import sys import os sys.path.insert(0, os.path.abspath('..')) import semantic_version from recommonmark.parser import CommonMarkParser _package = 'steenzout.barcode' _version = semantic_version.Version('1.0.0-beta3') # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = '.rst' source_parsers = { '.md': CommonMarkParser } source_suffix = [ '.md', '.rst' ] # The encoding of source files. # # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = _package author = 'Thorsten Weimann, Alexander Shorin, Pedro Salgado' copyright = '2010-2013, Thorsten Weimann; 2014, Alexander Shorin; 2016 Pedro Salgado' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'%d.%d' % (_version.major, _version.minor) # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # today = '' # # Else, today_fmt is used as the format for a strftime call. # # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', '_templates', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. # # default_role = None # If true, '()' will be appended to :func: etc. 
cross-reference text. # # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. # # html_title = u'' # A shorter title for the navigation bar. Default is the same as html_title. # # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # # html_additional_pages = {} # If false, no module index is generated. # # html_domain_indices = True # If false, no index is generated. # # html_use_index = True # If true, the index is split into individual pages for each letter. # # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
# # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' # # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = u'py_%s' % _package.replace('.', '_') # -- Options for LaTeX output --------------------------------------------- # latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # # Additional stuff for the LaTeX preamble. # # 'preamble': '', # # Latex figure (float) alignment # # 'figure_align': 'htbp', # } latex_elements = {} # Grouping the document tree into LaTeX files. # List of tuples ( # source start file, # target name, # title, # author, # documentclass [howto, manual, or own class] # ). latex_documents = [( master_doc, u'py_%s.tex' % _package.replace('.', '_'), u'%s documentation' % _package, 'Thorsten Weimann, Alexander Shorin, Pedro Salgado', u'manual' )] # The name of an image file (relative to this directory) to place at the top of # the title page. # # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # # latex_use_parts = False # If true, show page references after internal links. # # latex_show_pagerefs = False # If true, show URL addresses after external links. # # latex_show_urls = False # Documents to append as an appendix to all manuals. # # latex_appendices = [] # It false, will not define \strong, \code, itleref, \crossref ... but only # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added # sphinxs. # # latex_keep_old_macro_names = True # If false, no module index is generated. # # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. # List of tuples ( # source start file, # name, # description, # authors, # manual section # ). man_pages = [( master_doc, 'py_%s' % _package.replace('.', '_'), '%s documentation' % _package, ['Thorsten Weimann, Alexander Shorin, Pedro Salgado'], 1 )] # If true, show URL addresses after external links. # # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. # List of tuples ( # source start file, # target name, # title, # author, # dir menu entry, # description, # category # ). texinfo_documents = [( master_doc, u'py_%s' % _package.replace('.', '_'), u'%s documentation' % _package, 'Thorsten Weimann, Alexander Shorin, Pedro Salgado', u'', u'%s documentation.' % _package, u'Miscellaneous' )] # Documents to append as an appendix to all manuals. # # texinfo_appendices = [] # If false, no module index is generated. # # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
# # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # # texinfo_no_detailmenu = False # Generate API doc from sphinx import apidoc apidoc.main(['-f', '-T', '--separate', '-o', 'apidoc', '../steenzout'])
mit
4,455,014,411,438,905,000
26.413366
85
0.68614
false
SpredfastLegacy/JsBeautify
jsbeautifier/unpackers/packer.py
76
3302
#
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <einar@jsbeautifier.org>
#
# written by Stefano Sanfilippo <a.little.coder@gmail.com>
#
# usage:
#
# if detect(some_string):
#     unpacked = unpack(some_string)
#

"""Unpacker for Dean Edward's p.a.c.k.e.r"""

import re
import string
from jsbeautifier.unpackers import UnpackingError

PRIORITY = 1

def detect(source):
    """Detects whether `source` is P.A.C.K.E.R. coded."""
    return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,r')

def unpack(source):
    """Unpacks P.A.C.K.E.R. packed js code."""
    payload, symtab, radix, count = _filterargs(source)

    if count != len(symtab):
        raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')

    try:
        unbase = Unbaser(radix)
    except TypeError:
        raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')

    def lookup(match):
        """Look up symbols in the synthetic symtab."""
        word = match.group(0)
        return symtab[unbase(word)] or word

    source = re.sub(r'\b\w+\b', lookup, payload)
    return _replacestrings(source)

def _filterargs(source):
    """Juice from a source file the four args needed by decoder."""
    argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
                 r"split\('\|'\), *(\d+), *(.*)\)\)")
    args = re.search(argsregex, source, re.DOTALL).groups()

    try:
        return args[0], args[3].split('|'), int(args[1]), int(args[2])
    except ValueError:
        raise UnpackingError('Corrupted p.a.c.k.e.r. data.')

def _replacestrings(source):
    """Strip string lookup table (list) and replace values in source."""
    match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)

    if match:
        varname, strings = match.groups()
        startpoint = len(match.group(0))
        lookup = strings.split('","')
        variable = '%s[%%d]' % varname
        for index, value in enumerate(lookup):
            source = source.replace(variable % index, '"%s"' % value)
        return source[startpoint:]
    return source

class Unbaser(object):
    """Functor for a given base. Will efficiently convert
    strings to natural numbers."""
    ALPHABET = {
        62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
        95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
              '[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
    }

    def __init__(self, base):
        self.base = base

        # If base can be handled by int() builtin, let it do it for us
        if 2 <= base <= 36:
            self.unbase = lambda string: int(string, base)
        else:
            # Build conversion dictionary cache
            try:
                self.dictionary = dict((cipher, index) for
                    index, cipher in enumerate(self.ALPHABET[base]))
            except KeyError:
                raise TypeError('Unsupported base encoding.')
            self.unbase = self._dictunbaser

    def __call__(self, string):
        return self.unbase(string)

    def _dictunbaser(self, string):
        """Decodes a value to an integer."""
        ret = 0
        for index, cipher in enumerate(string[::-1]):
            ret += (self.base ** index) * self.dictionary[cipher]
        return ret
bsd-3-clause
-3,147,354,450,630,264,000
30.75
78
0.586917
false
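The header comment of packer.py already sketches the intended call sequence; written out as a runnable fragment (the packed string below is a placeholder, so unpack() only succeeds on a real p.a.c.k.e.r. payload):

from jsbeautifier.unpackers import packer

packed_js = "eval(function(p,a,c,k,e,r){/* ... */})"  # placeholder payload
if packer.detect(packed_js):
    unpacked = packer.unpack(packed_js)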
midma101/AndIWasJustGoingToBed
.venv/lib/python2.7/site-packages/markupsafe/__init__.py
371
8205
# -*- coding: utf-8 -*- """ markupsafe ~~~~~~~~~~ Implements a Markup string. :copyright: (c) 2010 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import re from markupsafe._compat import text_type, string_types, int_types, \ unichr, PY2 __all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent'] _striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)') _entity_re = re.compile(r'&([^;]+);') class Markup(text_type): r"""Marks a string as being safe for inclusion in HTML/XML output without needing to be escaped. This implements the `__html__` interface a couple of frameworks and web applications use. :class:`Markup` is a direct subclass of `unicode` and provides all the methods of `unicode` just that it escapes arguments passed and always returns `Markup`. The `escape` function returns markup objects so that double escaping can't happen. The constructor of the :class:`Markup` class can be used for three different things: When passed an unicode object it's assumed to be safe, when passed an object with an HTML representation (has an `__html__` method) that representation is used, otherwise the object passed is converted into a unicode string and then assumed to be safe: >>> Markup("Hello <em>World</em>!") Markup(u'Hello <em>World</em>!') >>> class Foo(object): ... def __html__(self): ... return '<a href="#">foo</a>' ... >>> Markup(Foo()) Markup(u'<a href="#">foo</a>') If you want object passed being always treated as unsafe you can use the :meth:`escape` classmethod to create a :class:`Markup` object: >>> Markup.escape("Hello <em>World</em>!") Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!') Operations on a markup string are markup aware which means that all arguments are passed through the :func:`escape` function: >>> em = Markup("<em>%s</em>") >>> em % "foo & bar" Markup(u'<em>foo &amp; bar</em>') >>> strong = Markup("<strong>%(text)s</strong>") >>> strong % {'text': '<blink>hacker here</blink>'} Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>') >>> Markup("<em>Hello</em> ") + "<foo>" Markup(u'<em>Hello</em> &lt;foo&gt;') """ __slots__ = () def __new__(cls, base=u'', encoding=None, errors='strict'): if hasattr(base, '__html__'): base = base.__html__() if encoding is None: return text_type.__new__(cls, base) return text_type.__new__(cls, base, encoding, errors) def __html__(self): return self def __add__(self, other): if isinstance(other, string_types) or hasattr(other, '__html__'): return self.__class__(super(Markup, self).__add__(self.escape(other))) return NotImplemented def __radd__(self, other): if hasattr(other, '__html__') or isinstance(other, string_types): return self.escape(other).__add__(self) return NotImplemented def __mul__(self, num): if isinstance(num, int_types): return self.__class__(text_type.__mul__(self, num)) return NotImplemented __rmul__ = __mul__ def __mod__(self, arg): if isinstance(arg, tuple): arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg) else: arg = _MarkupEscapeHelper(arg, self.escape) return self.__class__(text_type.__mod__(self, arg)) def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, text_type.__repr__(self) ) def join(self, seq): return self.__class__(text_type.join(self, map(self.escape, seq))) join.__doc__ = text_type.join.__doc__ def split(self, *args, **kwargs): return list(map(self.__class__, text_type.split(self, *args, **kwargs))) split.__doc__ = text_type.split.__doc__ def rsplit(self, *args, **kwargs): return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs))) rsplit.__doc__ 
= text_type.rsplit.__doc__ def splitlines(self, *args, **kwargs): return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs))) splitlines.__doc__ = text_type.splitlines.__doc__ def unescape(self): r"""Unescape markup again into an text_type string. This also resolves known HTML4 and XHTML entities: >>> Markup("Main &raquo; <em>About</em>").unescape() u'Main \xbb <em>About</em>' """ from markupsafe._constants import HTML_ENTITIES def handle_match(m): name = m.group(1) if name in HTML_ENTITIES: return unichr(HTML_ENTITIES[name]) try: if name[:2] in ('#x', '#X'): return unichr(int(name[2:], 16)) elif name.startswith('#'): return unichr(int(name[1:])) except ValueError: pass return u'' return _entity_re.sub(handle_match, text_type(self)) def striptags(self): r"""Unescape markup into an text_type string and strip all tags. This also resolves known HTML4 and XHTML entities. Whitespace is normalized to one: >>> Markup("Main &raquo; <em>About</em>").striptags() u'Main \xbb About' """ stripped = u' '.join(_striptags_re.sub('', self).split()) return Markup(stripped).unescape() @classmethod def escape(cls, s): """Escape the string. Works like :func:`escape` with the difference that for subclasses of :class:`Markup` this function would return the correct subclass. """ rv = escape(s) if rv.__class__ is not cls: return cls(rv) return rv def make_wrapper(name): orig = getattr(text_type, name) def func(self, *args, **kwargs): args = _escape_argspec(list(args), enumerate(args), self.escape) #_escape_argspec(kwargs, kwargs.iteritems(), None) return self.__class__(orig(self, *args, **kwargs)) func.__name__ = orig.__name__ func.__doc__ = orig.__doc__ return func for method in '__getitem__', 'capitalize', \ 'title', 'lower', 'upper', 'replace', 'ljust', \ 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \ 'translate', 'expandtabs', 'swapcase', 'zfill': locals()[method] = make_wrapper(method) # new in python 2.5 if hasattr(text_type, 'partition'): def partition(self, sep): return tuple(map(self.__class__, text_type.partition(self, self.escape(sep)))) def rpartition(self, sep): return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep)))) # new in python 2.6 if hasattr(text_type, 'format'): format = make_wrapper('format') # not in python 3 if hasattr(text_type, '__getslice__'): __getslice__ = make_wrapper('__getslice__') del method, make_wrapper def _escape_argspec(obj, iterable, escape): """Helper for various string-wrapped functions.""" for key, value in iterable: if hasattr(value, '__html__') or isinstance(value, string_types): obj[key] = escape(value) return obj class _MarkupEscapeHelper(object): """Helper for Markup.__mod__""" def __init__(self, obj, escape): self.obj = obj self.escape = escape __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape) __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj)) __repr__ = lambda s: str(s.escape(repr(s.obj))) __int__ = lambda s: int(s.obj) __float__ = lambda s: float(s.obj) # we have to import it down here as the speedups and native # modules imports the markup type which is define above. try: from markupsafe._speedups import escape, escape_silent, soft_unicode except ImportError: from markupsafe._native import escape, escape_silent, soft_unicode if not PY2: soft_str = soft_unicode __all__.append('soft_str')
mit
1,277,062,579,156,340,500
34.064103
85
0.577331
false
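For reference, the behaviour described in the Markup docstring above reduces to the following calls, mirroring the doctest examples embedded in that docstring:

from markupsafe import Markup, escape

escape("<script>")                     # -> Markup(u'&lt;script&gt;')
Markup("<em>%s</em>") % "foo & bar"    # -> Markup(u'<em>foo &amp; bar</em>')
Markup("<em>Hello</em> ") + "<foo>"    # -> Markup(u'<em>Hello</em> &lt;foo&gt;')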
denfromufa/pythonnet
demo/helloform.py
8
1769
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import clr

SWF = clr.AddReference("System.Windows.Forms")
print (SWF.Location)

import System.Windows.Forms as WinForms
from System.Drawing import Size, Point


class HelloApp(WinForms.Form):
    """A simple hello world app that demonstrates the essentials of
    winforms programming and event-based programming in Python."""

    def __init__(self):
        self.Text = "Hello World From Python"
        self.AutoScaleBaseSize = Size(5, 13)
        self.ClientSize = Size(392, 117)
        h = WinForms.SystemInformation.CaptionHeight
        self.MinimumSize = Size(392, (117 + h))

        # Create the button
        self.button = WinForms.Button()
        self.button.Location = Point(160, 64)
        self.button.Size = Size(820, 20)
        self.button.TabIndex = 2
        self.button.Text = "Click Me!"

        # Register the event handler
        self.button.Click += self.button_Click

        # Create the text box
        self.textbox = WinForms.TextBox()
        self.textbox.Text = "Hello World"
        self.textbox.TabIndex = 1
        self.textbox.Size = Size(1260, 40)
        self.textbox.Location = Point(160, 24)

        # Add the controls to the form
        self.AcceptButton = self.button
        self.Controls.Add(self.button)
        self.Controls.Add(self.textbox)

    def button_Click(self, sender, args):
        """Button click event handler"""
        print ("Click")
        WinForms.MessageBox.Show("Please do not press this button again.")

    def run(self):
        WinForms.Application.Run(self)


def main():
    form = HelloApp()
    print ("form created")
    app = WinForms.Application
    print ("app referenced")
    app.Run(form)


if __name__ == '__main__':
    main()
mit
-985,810,494,902,420,700
27.079365
74
0.626343
false
SpOOnman/claws
tools/vcard2xml.py
11
8810
#!/usr/bin/env python # -*- coding: latin-1 -*- """ Copyright © 2003 Bogdan Sumanariu <zarrok@yahoo.com> This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. script name : evolutionvcard2claws.py script purpose : convert an evolution addressbook VCARD file into a Claws Mail addressbook tested with evolution 1.2.x, and 1.4.x """ import string import sys import time import os import StringIO keywds = ('x-evolution-file-as','fn', 'n','email;internet','nickname', 'url', 'org') def normalizeLongLines(file): """ Skip line breaks after 72 chars """ buf = '' line = file.readline() while line: if line[0] == ' ': buf = buf.rstrip('\n') line = line.lstrip(); buf += line else: buf += line line = file.readline() return buf def getEmailAddress(vcard): """ Get email address. Supported formats: - email;something - email;type=something something := (internet,work,home, other) """ for key in vcard: items = key.split(';') if len(items) == 2: if items[0].lower() == 'email': list = vcard[key] return list[0] else: if key.lower() == 'email': list = vcard[key] return list[0] return "" def findName(vcard): """ Find a version 3.0 name """ for key in vcard: items = key.split(';') if len(items) == 2: if items[0].lower() == 'n': return vcard[key] else: if key.lower() == 'n': return vcard[key] return None ################################################################################ ## reads a vcard and stores as hash pairs key/value where value is a list ## ################################################################################ def readVCARD (buffer) : """ skips fom <file> until a 'begin' tag from VCARD is encountered. 
from this point starts constructing a map (key, [values] ) VCARD entry format -> tag:value key <- tag [values] <- list with the values of <tag> if there are more tags with the same name """ r=' ' bgn,end = -1, -1; d = dict() while r and bgn < 0 : r = buffer.readline() if len (r) == 0 : return dict() if string.find('begin',string.lower(string.strip(r))) : bgn = 1 while r and end < 0 : r = buffer.readline() s = string.split(string.lower(string.strip(r)),':') if s[0] <> '' : if d.has_key(s[0]) : d[s[0]].append(s[1]) elif len(s) > 1: d[s[0]] = [s[1]] else : d[s[0]] = [''] if s[0] == 'end' : end = 1 return d ################################################################################## ############################################################################################### ## writes on a given file an xml representation for claws-mail addressbook received as a hash ## ############################################################################################### def writeXMLREPR (vcard,file,uid) : """ based on <vcard> and <uid> writes only recognized tags (the ones defined in <keywds> list) NOTE: <url> and <org> tag will be written as attributes (there are such tags in claws-mail's XML schema) """ if len (vcard.keys()) == 0 : return item = vcard.get(keywds[2]); if item: name = string.split(item[0],';') else: """ version 3.0 n ?""" name = findName(vcard) if not name: return fn, ln, nick, cn, a = '', '', '', '', '' if len(name) >= 2 : fn = name[0] ln = name[1] elif len(name) ==1 : fn = name[0] if vcard.has_key(keywds[4]) : nick = vcard.get(keywds[4])[0] if len(vcard.get(keywds[1])[0]) : cn = vcard.get(keywds[1])[0] else : cn = vcard.get(keywds[0])[0]; a += str('\n<person uid=\"' + str(uid[0]) + '\" first-name=\"' + fn + '\" last-name=\"' + ln + '\" nick-name=\"' + nick + '\" cn=\"' + cn + '\" >\n') a += '\t<address-list>\n' if vcard.get(keywds[3]) : for c in vcard.get(keywds[3]) : uid[0] = uid[0] + 1 a += '\t\t<address uid=\"' + str(uid[0]) + '\" alias=\"' + nick + '\" email=\"' + c + '\" remarks=\"\" />\n' else : email = getEmailAddress(vcard) uid[0] = uid[0]+1 a += '\t\t<address uid=\"' + str(uid[0]) + '\" alias=\"' + nick + '\" email=\"' + email + '\" remarks=\"\" />\n' a += '\t</address-list>\n' a += '\t<attribute-list>\n' for key in keywds[5:] : if vcard.get(key) : for c in vcard.get(key) : uid[0] = uid[0] + 1 a += '\t\t<attribute uid=\"' + str(uid[0]) + '\" name=\"' + key +'\">'+c+'</attribute>\n' a += '\t</attribute-list>\n' a += '</person>\n' file.write(a) file.flush() ################################################################################################### def convert (in_f, o_f, name='INBOX') : d = {'d':1} uid = [int(time.time())] try : print 'proccessing...\n' o_f.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n<address-book name="'+name+'" >\n'); buf = normalizeLongLines(in_f) buffer = StringIO.StringIO(buf) while len(d.keys()) > 0 : d = readVCARD(buffer) writeXMLREPR (d, o_f, uid) uid[0] = uid [0]+1 o_f.write('\n</address-book>') print 'finished processing...\n' except IOError, err : print 'Caught an IOError : ',err,'\t ABORTING!!!' 
raise err ################################################################################################# def execute () : if len(sys.argv) <> 3 and len(sys.argv) <> 2 : print str("\nUsage: vcard2xml.py source_file [destination_file]\n\n" + '\tWhen only <source_file> is specified will overwrite the existing addressbook.\n'+ '\tWhen both arguments are suplied will create a new additional addressbook named \n\tas the destination file.'+'\n\tNOTE: in both cases the Claws Mail must be closed and ran at least once.\n\n') sys.exit(1) in_file = None out_file = None path_to_out = os.environ['HOME']+'/.claws-mail/' adr_idx = 'addrbook--index.xml' adr_idx_file = None tmp_adr_idx_file= None got_ex = 0 try : in_file = open(sys.argv[1]) except IOError, e: print 'Could not open input file <',sys.argv[1],'> ABORTING' sys.exit(1) if len(sys.argv) == 2 : try : dlist = os.listdir(path_to_out); flist=[] for l in dlist : if l.find('addrbook') == 0 and l.find("addrbook--index.xml") < 0 and l.find('bak') < 0 : flist.append(l) flist.sort() out_file = flist.pop() os.rename(path_to_out+out_file, path_to_out+out_file+'.tmp') out_file = open(path_to_out+out_file,'w') convert(in_file, out_file) except Exception, e: got_ex = 1 print 'got exception: ', e else : try : os.rename(path_to_out+adr_idx, path_to_out+adr_idx+'.tmp') tmp_adr_idx_file = open(path_to_out+adr_idx+'.tmp') adr_idx_file = open(path_to_out+adr_idx,'w') except Exception, e : print 'Could not open <', path_to_out+adr_idx,'> file. Make sure you started Claws Mail at least once.' sys.exit(1) try : out_file = open(path_to_out+sys.argv[2],'w') convert(in_file, out_file, sys.argv[2].split('.xml')[0]) l = tmp_adr_idx_file.readline() while l : if l.strip() == '</book_list>' : adr_idx_file.write('\t<book name="'+sys.argv[2].split('.xml')[0] +'" file="'+sys.argv[2]+'" />\n') adr_idx_file.write(l) else : adr_idx_file.write(l) l = tmp_adr_idx_file.readline() except Exception, e: got_ex = 1 print 'got exception: ', e if got_ex : #clean up the mess print 'got exception, cleaning up the mess... changed files will be restored...\n' if adr_idx_file : adr_idx_file.close() if out_file : out_file.close() if len(sys.argv) == 2 : os.rename(out_file.name+'.tmp', out_file.name) else : os.remove(out_file.name) os.rename(path_to_out+adr_idx+'.tmp', path_to_out+adr_idx) if tmp_adr_idx_file : tmp_adr_idx_file.close() else : #closing all and moving temporary data into place print 'closing open files...\n' in_file.close() out_file.close() if len(sys.argv) == 3 : os.rename(path_to_out+adr_idx+'.tmp',path_to_out+adr_idx+'.bak' ) if len(sys.argv) == 2 : os.rename(out_file.name+'.tmp', out_file.name+'.bak') if adr_idx_file : adr_idx_file.close() if tmp_adr_idx_file : tmp_adr_idx_file.close() print 'done!' if __name__ == '__main__': execute ()
gpl-3.0
1,002,031,994,638,320,500
27.419355
197
0.56084
false
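The readVCARD routine above accumulates a tag-to-values map by lower-casing each line of a vCard entry and splitting it on ':'. Below is a minimal, self-contained sketch of that parsing idea in modern Python; the function name and the sample entry are illustrative only, and the original script itself targets Python 2 and reads from a file-like buffer.

def parse_vcard_entry(lines):
    # Collect a {tag: [values]} map from the 'TAG:value' lines of one entry,
    # mirroring how readVCARD keeps a list of values per tag and stops at 'end'.
    entry = {}
    for line in lines:
        tag, _, value = line.strip().lower().partition(':')
        if tag:
            entry.setdefault(tag, []).append(value)
        if tag == 'end':
            break
    return entry

sample = ['BEGIN:VCARD', 'FN:Jane Doe', 'EMAIL:jane@example.org', 'END:VCARD']
print(parse_vcard_entry(sample))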
overtherain/scriptfile
software/googleAppEngine/lib/django_1_4/django/contrib/localflavor/za/forms.py
89
1940
""" South Africa-specific Form helpers """ from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import CharField, RegexField from django.utils.checksums import luhn from django.utils.translation import gettext as _ import re from datetime import date id_re = re.compile(r'^(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<end>\d{3})') class ZAIDField(CharField): """A form field for South African ID numbers -- the checksum is validated using the Luhn checksum, and uses a simlistic (read: not entirely accurate) check for the birthdate """ default_error_messages = { 'invalid': _(u'Enter a valid South African ID number'), } def clean(self, value): super(ZAIDField, self).clean(value) if value in EMPTY_VALUES: return u'' # strip spaces and dashes value = value.strip().replace(' ', '').replace('-', '') match = re.match(id_re, value) if not match: raise ValidationError(self.error_messages['invalid']) g = match.groupdict() try: # The year 2000 is conveniently a leapyear. # This algorithm will break in xx00 years which aren't leap years # There is no way to guess the century of a ZA ID number d = date(int(g['yy']) + 2000, int(g['mm']), int(g['dd'])) except ValueError: raise ValidationError(self.error_messages['invalid']) if not luhn(value): raise ValidationError(self.error_messages['invalid']) return value class ZAPostCodeField(RegexField): default_error_messages = { 'invalid': _(u'Enter a valid South African postal code'), } def __init__(self, max_length=None, min_length=None, *args, **kwargs): super(ZAPostCodeField, self).__init__(r'^\d{4}$', max_length, min_length, *args, **kwargs)
mit
3,290,177,538,023,332,400
31.333333
88
0.624742
false
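ZAIDField.clean above strips spaces and dashes, checks the embedded birth date, and then verifies the Luhn checksum. A hedged usage sketch follows; it assumes a configured Django environment with this localflavor module importable, and the ID value is a made-up placeholder that may well be rejected.

from django.forms import ValidationError
from django.contrib.localflavor.za.forms import ZAIDField  # module path as in this record

field = ZAIDField()
try:
    cleaned = field.clean('8001015009087')  # placeholder digits, not a real person's ID
    print('accepted:', cleaned)
except ValidationError as exc:
    print('rejected:', exc.messages)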
linearregression/socorro
socorro/app/fetch_transform_save_app.py
7
11493
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """this is the basis for any app that follows the fetch/transform/save model * the configman versions of the crash mover and the processor apps will derive from this class The form of fetch/transform/save, of course, in three parts 1) fetch - some iterating or streaming function or object fetches packets of from data a source 2) transform - some function transforms each packet of data into a new form 3) save - some function or class saves or streams the packet to some data sink. For the crash mover, the fetch phase is reading new crashes from the collector's file system datastore. The transform phase is the degenerate case of identity: no transformation. The save phase is just sending the crashes to HBase. For the processor, the fetch phase is reading from the new crash queue. In, 2012, that's the union of reading a postgres jobs/crash_id table and fetching the crash from HBase. The transform phase is the running of minidump stackwalk and production of the processed crash data. The save phase is the union of sending new crash records to Postgres; sending the processed crash to HBase; the the submission of the crash_id to Elastic Search.""" import signal from functools import partial from configman import Namespace from configman.converters import class_converter from socorro.lib.task_manager import respond_to_SIGTERM from socorro.app.generic_app import App, main # main not used here, but # is imported from generic_app # into this scope to offer to # apps that derive from the # class defined here. #============================================================================== class FetchTransformSaveApp(App): """base class for apps that follow the fetch/transform/save model""" app_name = 'generic_fetch_transform_save_app' app_version = '0.1' app_description = __doc__ required_config = Namespace() # the required config is broken into two parts: the source and the # destination. Each of those gets assigned a crasnstorage class. required_config.source = Namespace() # For source, the storage class should be one that defines a method # of fetching new crashes through the three storage api methods: the # iterator 'new_crashes' and the accessors 'get_raw_crash' and # 'get_raw_dumps' required_config.source.add_option( 'crashstorage_class', doc='the source storage class', default=None, from_string_converter=class_converter ) required_config.destination = Namespace() # For destination, the storage class should implement the 'save_raw_crash' # method. Of course, a subclass may redefine either the source_iterator # or transform methods and therefore completely redefine what api calls # are relevant. required_config.destination.add_option( 'crashstorage_class', doc='the destination storage class', default=None, from_string_converter=class_converter ) required_config.producer_consumer = Namespace() required_config.producer_consumer.add_option( 'producer_consumer_class', doc='the class implements a threaded producer consumer queue', default='socorro.lib.threaded_task_manager.ThreadedTaskManager', from_string_converter=class_converter ) ########################################################################### ### TODO: add a feature where clients of this class may register a waiting ### function. The MainThread will run all the registered waiting ### functions at their configured interval. 
A first application of this ### feature will be to allow periodic reloading of config data from a ### database. Specifically, the skip list rules could be reloaded without ### having to restart the processor. ########################################################################### #-------------------------------------------------------------------------- @staticmethod def get_application_defaults(): """this method allows an app to inject defaults into the configuration that can override defaults not under the direct control of the app. For example, if an app were to use a class that had a config default of X and that was not appropriate as a default for this app, then this method could be used to override that default. This is a technique of getting defaults into an app that replaces an older method of going to the configman option and using the 'set_default' method with 'force=True'""" return { 'source.crashstorage_class': 'socorro.external.fs.crashstorage.FSPermanentStorage', 'destination.crashstorage_class': 'socorro.external.fs.crashstorage.FSPermanentStorage', } #-------------------------------------------------------------------------- def __init__(self, config): super(FetchTransformSaveApp, self).__init__(config) self.waiting_func = None #-------------------------------------------------------------------------- def source_iterator(self): """this iterator yields individual crash_ids from the source crashstorage class's 'new_crashes' method.""" while(True): # loop forever and never raise StopIteration for x in self.source.new_crashes(): if x is None: yield None elif isinstance(x, tuple): yield x # already in (args, kwargs) form else: yield ((x,), {}) # (args, kwargs) else: yield None # if the inner iterator yielded nothing at all, # yield None to give the caller the chance to sleep #-------------------------------------------------------------------------- def transform( self, crash_id, finished_func=(lambda: None), ): try: self._transform(crash_id) finally: # no matter what causes this method to end, we need to make sure # that the finished_func gets called. If the new crash source is # RabbitMQ, this is what removes the job from the queue. try: finished_func() except Exception, x: # when run in a thread, a failure here is not a problem, but if # we're running all in the same thread, a failure here could # derail the the whole processor. Best just log the problem # so that we can continue. self.config.logger.error( 'Error completing job %s: %s', crash_id, x, exc_info=True ) #-------------------------------------------------------------------------- def _transform(self, crash_id): """this default transform function only transfers raw data from the source to the destination without changing the data. 
While this may be good enough for the raw crashmover, the processor would override this method to create and save processed crashes""" try: raw_crash = self.source.get_raw_crash(crash_id) except Exception as x: self.config.logger.error( "reading raw_crash: %s", str(x), exc_info=True ) raw_crash = {} try: dumps = self.source.get_raw_dumps(crash_id) except Exception as x: self.config.logger.error( "reading dump: %s", str(x), exc_info=True ) dumps = {} try: self.destination.save_raw_crash(raw_crash, dumps, crash_id) self.config.logger.info('saved - %s', crash_id) except Exception as x: self.config.logger.error( "writing raw: %s", str(x), exc_info=True ) else: try: self.source.remove(crash_id) except Exception as x: self.config.logger.error( "removing raw: %s", str(x), exc_info=True ) #-------------------------------------------------------------------------- def quit_check(self): self.task_manager.quit_check() #-------------------------------------------------------------------------- def signal_quit(self): self.task_manager.stop() #-------------------------------------------------------------------------- def _setup_source_and_destination(self): """instantiate the classes that implement the source and destination crash storage systems.""" try: self.source = self.config.source.crashstorage_class( self.config.source, quit_check_callback=self.quit_check ) except Exception: self.config.logger.critical( 'Error in creating crash source', exc_info=True ) raise try: self.destination = self.config.destination.crashstorage_class( self.config.destination, quit_check_callback=self.quit_check ) except Exception: self.config.logger.critical( 'Error in creating crash destination', exc_info=True ) raise #-------------------------------------------------------------------------- def _setup_task_manager(self): """instantiate the threaded task manager to run the producer/consumer queue that is the heart of the processor.""" self.config.logger.info('installing signal handers') respond_to_SIGTERM_with_logging = partial( respond_to_SIGTERM, logger=self.config.logger ) signal.signal(signal.SIGTERM, respond_to_SIGTERM_with_logging) self.task_manager = \ self.config.producer_consumer.producer_consumer_class( self.config.producer_consumer, job_source_iterator=self.source_iterator, task_func=self.transform ) self.config.executor_identity = self.task_manager.executor_identity #-------------------------------------------------------------------------- def _cleanup(self): pass #-------------------------------------------------------------------------- def main(self): """this main routine sets up the signal handlers, the source and destination crashstorage systems at the theaded task manager. That starts a flock of threads that are ready to shepherd crashes from the source to the destination.""" self._setup_task_manager() self._setup_source_and_destination() self.task_manager.blocking_start(waiting_func=self.waiting_func) self._cleanup()
mpl-2.0
-2,816,078,811,531,067,000
41.409594
79
0.553641
false
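FetchTransformSaveApp above is designed to be subclassed; its own docstring notes that a processor overrides _transform while keeping the fetch and save plumbing. The toy subclass below is only an illustration of that hook, and the derived field it adds is hypothetical.

class AnnotatingCrashMoverApp(FetchTransformSaveApp):
    """Illustrative subclass: copies each crash and adds one derived field."""
    app_name = 'annotating_crash_mover'
    app_version = '0.1'
    app_description = 'example only, not part of Socorro'

    def _transform(self, crash_id):
        raw_crash = self.source.get_raw_crash(crash_id)
        dumps = self.source.get_raw_dumps(crash_id)
        # hypothetical annotation added before the crash is written out
        raw_crash['annotation'] = 'seen-by-example-app'
        self.destination.save_raw_crash(raw_crash, dumps, crash_id)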
Weihonghao/ECM
Vpy34/lib/python3.5/site-packages/numpy/f2py/__init__.py
63
2038
#!/usr/bin/env python """Fortran to Python Interface Generator. """ from __future__ import division, absolute_import, print_function __all__ = ['run_main', 'compile', 'f2py_testing'] import sys from . import f2py2e from . import f2py_testing from . import diagnose run_main = f2py2e.run_main main = f2py2e.main def compile(source, modulename='untitled', extra_args='', verbose=True, source_fn=None, extension='.f' ): """ Build extension module from processing source with f2py. Parameters ---------- source : str Fortran source of module / subroutine to compile modulename : str, optional The name of the compiled python module extra_args : str, optional Additional parameters passed to f2py verbose : bool, optional Print f2py output to screen source_fn : str, optional Name of the file where the fortran source is written. The default is to use a temporary file with the extension provided by the `extension` parameter extension : {'.f', '.f90'}, optional Filename extension if `source_fn` is not provided. The extension tells which fortran standard is used. The default is `.f`, which implies F77 standard. .. versionadded:: 1.11.0 """ from numpy.distutils.exec_command import exec_command import tempfile if source_fn is None: f = tempfile.NamedTemporaryFile(suffix=extension) else: f = open(source_fn, 'w') try: f.write(source) f.flush() args = ' -c -m {} {} {}'.format(modulename, f.name, extra_args) c = '{} -c "import numpy.f2py as f2py2e;f2py2e.main()" {}' c = c.format(sys.executable, args) status, output = exec_command(c) if verbose: print(output) finally: f.close() return status from numpy.testing.nosetester import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench
agpl-3.0
-8,136,486,999,501,733,000
26.540541
71
0.61629
false
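The compile helper above shells out to f2py to build an extension module from Fortran source. A small usage sketch based on its documented signature follows; the Fortran snippet and module name are invented, and a working Fortran compiler is assumed.

import numpy.f2py as f2py

fortran_source = """
      subroutine addone(x, y)
      double precision x, y
cf2py intent(in) x
cf2py intent(out) y
      y = x + 1d0
      end
"""
status = f2py.compile(fortran_source, modulename='demo_addone', verbose=False)
if status == 0:
    import demo_addone
    print(demo_addone.addone(41.0))  # expected to print 42.0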
gomiero/PTVS
Python/Product/Pyvot/Pyvot/xl/__init__.py
18
4494
# Copyright (c) Microsoft Corporation. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the LICENSE.txt file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. """Pyvot - Pythonic interface for data exploration in Excel The user-level API for the `xl` package follows. For interactive use, consider running the :ref:`interactive shell <interactive>`:: python -m xl.shell **Managing Excel workbooks**: - :class:`xl.Workbook() <xl.sheet.Workbook>` opens a new workbook - xl.Workbook("filename") attaches to an existing workbook, or opens it - :func:`xl.workbooks() <xl.tools.workbooks>` returns a Workbook for each that is currently open **Excel Ranges**: - :class:`xl.Range <xl.range.Range>` is the base type for a contiguous range of Excel cells. - :func:`xl.get() <xl.tools.get>` / :meth:`Workbook.get <xl.sheet.Workbook.get>` / etc. return Ranges; namely, subclasses such as :class:`xl.RowVector <xl.range.RowVector>`, :class:`xl.ColumnVector <xl.range.ColumnVector>`, :class:`xl.Matrix <xl.range.Matrix>`, or :class:`xl.Scalar <xl.range.Scalar>` - :meth:`xl.Range.get` / :meth:`xl.Range.set` allow reading from / writing to Excel **Tools**: - :func:`xl.map <xl.tools.map>` / :func:`xl.apply <xl.tools.apply>` / :func:`xl.filter <xl.tools.filter>` operate like their Python counterparts, but read and write from an Excel workbook ``from xl import *`` imports :func:`xlmap`, etc. instead, to avoid overriding builtins. - :func:`xl.join() <xl.tools.join>` allows joining two Excel tables by a pair of key columns - :func:`xl.get() <xl.tools.get>` fetches a Range for a table column (by column name), named Excel range, or for an Excel address (ex. A1:B1). It attempts to guess the active Workbook, and begins looking in the active sheet. See also :meth:`Workbook.get <xl.sheet.Workbook.get>` - :func:`xl.view() <xl.tools.view>` splats a list of Python values to an empty column in Excel - :func:`xl.selected_range() <xl.tools.selected_range>` / :func:`xl.selected_value() <xl.tools.selected_value>` provide the active sheet's selection""" try: __import__('win32com') except ImportError as e: import ctypes import sys is_64bit = ctypes.sizeof(ctypes.c_voidp) > 4 arch_str = "64-bit" if is_64bit else "32-bit" ver = "%d.%d" % (sys.version_info.major, sys.version_info.minor) raise Exception("pywin32 does not appear to be installed. Visit http://sourceforge.net/projects/pywin32/ and download " "build 216 or above for Python %s (%s)" % (ver, arch_str), e) from .version import __version__ # Conventions: # - prefix excel COM objectss with "xl". Apply to field and method names. # Design conventions: # - Very low activation energy for users. # Layer between "precise (dumb)" operations (which are often not useful) and "guess user intent (smart)" operations # (which can be much more useful). # Users start with "smart" general operations and work towards the precise ones. # - Global functions user "current" workbook, which iterates all sheets. 
from .range import Range, Vector, Scalar, RowVector, ColumnVector, Matrix, ExcelRangeError from .cache import CacheManager, enable_caching, cache_result from .tools import get, view, join, map, apply, filter, selected_range, selected_value, workbooks from .sheet import Workbook # We want to allow 'from xl import *' without clobbering builtin map / apply / filter. # We define these aliases, and exclude map / apply / filter from __all__. # This way xl.map works, but 'from xl import *' imports xlmap instead xlmap, xlapply, xlfilter = map, apply, filter __all__ = ['Range', 'Vector', 'Scalar', 'RowVector', 'ColumnVector', 'Matrix', 'ExcelRangeError', 'CacheManager', 'enable_caching', 'cache_result', 'get', 'view', 'join', 'selected_range', 'selected_value', 'workbooks', 'xlmap', 'xlapply', 'xlfilter', # We omit map / apply / filter from __all__ but include these. See above 'Workbook']
apache-2.0
-5,316,928,215,186,286,000
56.38961
133
0.683133
false
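The package docstring above describes the user-level calls (Workbook, get, view, map). The sketch below strings a few of them together; it assumes Excel plus pywin32 on Windows, the column name 'Price' is hypothetical, and the argument order of xl.map is taken from the builtin map it is said to mirror.

import xl

wb = xl.Workbook()                         # open a new workbook in Excel
prices = wb.get('Price')                   # Range for a hypothetical table column
doubled = xl.map(lambda v: v * 2, prices)  # read, transform, write back to Excel
xl.view([1, 2, 3])                         # splat a Python list into an empty column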
odootr/odoo
addons/account/wizard/account_tax_chart.py
385
3247
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class account_tax_chart(osv.osv_memory): """ For Chart of taxes """ _name = "account.tax.chart" _description = "Account tax chart" _columns = { 'period_id': fields.many2one('account.period', \ 'Period', \ ), 'target_move': fields.selection([('posted', 'All Posted Entries'), ('all', 'All Entries'), ], 'Target Moves', required=True), } def _get_period(self, cr, uid, context=None): """Return default period value""" period_ids = self.pool.get('account.period').find(cr, uid, context=context) return period_ids and period_ids[0] or False def account_tax_chart_open_window(self, cr, uid, ids, context=None): """ Opens chart of Accounts @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of account chart’s IDs @return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries """ mod_obj = self.pool.get('ir.model.data') act_obj = self.pool.get('ir.actions.act_window') if context is None: context = {} data = self.browse(cr, uid, ids, context=context)[0] result = mod_obj.get_object_reference(cr, uid, 'account', 'action_tax_code_tree') id = result and result[1] or False result = act_obj.read(cr, uid, [id], context=context)[0] if data.period_id: result['context'] = str({'period_id': data.period_id.id, \ 'fiscalyear_id': data.period_id.fiscalyear_id.id, \ 'state': data.target_move}) period_code = data.period_id.code result['name'] += period_code and (':' + period_code) or '' else: result['context'] = str({'state': data.target_move}) return result _defaults = { 'period_id': _get_period, 'target_move': 'posted' } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
1,979,491,283,107,187,200
41.116883
110
0.55905
false
blitzmann/Pyfa
eos/saveddata/fighterAbility.py
1
5887
# =============================================================================== # Copyright (C) 2010 Diego Duclos # # This file is part of eos. # # eos is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # eos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with eos. If not, see <http://www.gnu.org/licenses/>. # =============================================================================== from logbook import Logger from sqlalchemy.orm import reconstructor from eos.utils.stats import DmgTypes pyfalog = Logger(__name__) class FighterAbility(object): # We aren't able to get data on the charges that can be stored with fighters. So we hardcode that data here, keyed # with the fighter squadron role NUM_SHOTS_MAPPING = { 1: 0, # Superiority fighter / Attack 2: 12, # Light fighter / Attack 4: 6, # Heavy fighter / Heavy attack 5: 3, # Heavy fighter / Long range attack } # Same as above REARM_TIME_MAPPING = { 1: 0, # Superiority fighter / Attack 2: 4000, # Light fighter / Attack 4: 6000, # Heavy fighter / Heavy attack 5: 20000, # Heavy fighter / Long range attack } def __init__(self, effect): """Initialize from the program""" self.__effect = effect self.effectID = effect.ID if effect is not None else None self.active = False self.build() @reconstructor def init(self): """Initialize from the database""" self.__effect = None if self.effectID: self.__effect = next((x for x in self.fighter.item.effects.values() if x.ID == self.effectID), None) if self.__effect is None: pyfalog.error("Effect (id: {0}) does not exist", self.effectID) return self.build() def build(self): pass @property def effect(self): return self.__effect @property def name(self): return self.__effect.getattr('displayName') or self.__effect.name @property def attrPrefix(self): return self.__effect.getattr('prefix') @property def dealsDamage(self): attr = "{}DamageMultiplier".format(self.attrPrefix) return attr in self.fighter.itemModifiedAttributes or self.fighter.charge is not None @property def grouped(self): # is the ability applied per fighter (webs, returns False), or as a group (MWD, returned True) return self.__effect.getattr('grouped') @property def hasCharges(self): return self.__effect.getattr('hasCharges') @property def reloadTime(self): rearm_time = (self.REARM_TIME_MAPPING[self.fighter.getModifiedItemAttr("fighterSquadronRole")] or 0 if self.hasCharges else 0) return self.fighter.getModifiedItemAttr("fighterRefuelingTime") + rearm_time * self.numShots @property def numShots(self): return self.NUM_SHOTS_MAPPING[self.fighter.getModifiedItemAttr("fighterSquadronRole")] or 0 if self.hasCharges else 0 @property def cycleTime(self): speed = self.fighter.getModifiedItemAttr("{}Duration".format(self.attrPrefix)) # Factor in reload ''' reload = self.reloadTime if self.fighter.owner.factorReload: numShots = self.numShots # Speed here already takes into consideration reactivation time speed = (speed * numShots + reload) / numShots if numShots > 0 else speed ''' return speed def getVolley(self, targetResists=None): if not self.dealsDamage or not self.active: return DmgTypes(0, 0, 
0, 0) if self.attrPrefix == "fighterAbilityLaunchBomb": em = self.fighter.getModifiedChargeAttr("emDamage", 0) therm = self.fighter.getModifiedChargeAttr("thermalDamage", 0) kin = self.fighter.getModifiedChargeAttr("kineticDamage", 0) exp = self.fighter.getModifiedChargeAttr("explosiveDamage", 0) else: em = self.fighter.getModifiedItemAttr("{}DamageEM".format(self.attrPrefix), 0) therm = self.fighter.getModifiedItemAttr("{}DamageTherm".format(self.attrPrefix), 0) kin = self.fighter.getModifiedItemAttr("{}DamageKin".format(self.attrPrefix), 0) exp = self.fighter.getModifiedItemAttr("{}DamageExp".format(self.attrPrefix), 0) dmgMult = self.fighter.amountActive * self.fighter.getModifiedItemAttr("{}DamageMultiplier".format(self.attrPrefix), 1) volley = DmgTypes( em=em * dmgMult * (1 - getattr(targetResists, "emAmount", 0)), thermal=therm * dmgMult * (1 - getattr(targetResists, "thermalAmount", 0)), kinetic=kin * dmgMult * (1 - getattr(targetResists, "kineticAmount", 0)), explosive=exp * dmgMult * (1 - getattr(targetResists, "explosiveAmount", 0))) return volley def getDps(self, targetResists=None): volley = self.getVolley(targetResists=targetResists) if not volley: return DmgTypes(0, 0, 0, 0) dpsFactor = 1 / (self.cycleTime / 1000) dps = DmgTypes( em=volley.em * dpsFactor, thermal=volley.thermal * dpsFactor, kinetic=volley.kinetic * dpsFactor, explosive=volley.explosive * dpsFactor) return dps def clear(self): self.__dps = None self.__volley = None
gpl-3.0
-2,150,383,331,423,504,100
36.737179
134
0.625786
false
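getVolley and getDps above scale per-shot damage by a damage multiplier and then by cycle time (milliseconds converted to seconds). The arithmetic sketch below uses invented numbers purely to show that relationship.

# invented figures, not real game data
em_per_shot = 50.0
damage_multiplier = 9          # e.g. squadron size times the ability multiplier
cycle_time_ms = 6000.0

volley_em = em_per_shot * damage_multiplier
dps_em = volley_em * (1 / (cycle_time_ms / 1000))   # the same factor getDps applies
print(volley_em, dps_em)       # 450.0 75.0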
davidwaroquiers/pymatgen
pymatgen/io/abinit/tests/test_inputs.py
5
10923
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import os import tempfile import unittest import numpy as np from pymatgen.core.structure import Structure from pymatgen.io.abinit.inputs import ( BasicAbinitInput, BasicMultiDataset, ShiftMode, calc_shiftk, ebands_input, gs_input, ion_ioncell_relax_input, num_valence_electrons, ) from pymatgen.util.testing import PymatgenTest _test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "abinit") def abiref_file(filename): """Return absolute path to filename in ~pymatgen/test_files/abinit""" return os.path.join(_test_dir, filename) def abiref_files(*filenames): """Return list of absolute paths to filenames in ~pymatgen/test_files/abinit""" return [os.path.join(_test_dir, f) for f in filenames] class AbinitInputTestCase(PymatgenTest): """Unit tests for BasicAbinitInput.""" def test_api(self): """Testing BasicAbinitInput API.""" # Build simple input with structure and pseudos unit_cell = { "acell": 3 * [10.217], "rprim": [[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]], "ntypat": 1, "znucl": [14], "natom": 2, "typat": [1, 1], "xred": [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]], } inp = BasicAbinitInput(structure=unit_cell, pseudos=abiref_file("14si.pspnc")) shiftk = [[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]] self.assertArrayEqual(calc_shiftk(inp.structure), shiftk) assert num_valence_electrons(inp.structure, inp.pseudos) == 8 repr(inp), str(inp) assert len(inp) == 0 and not inp assert inp.get("foo", "bar") == "bar" and inp.pop("foo", "bar") == "bar" assert inp.comment is None inp.set_comment("This is a comment") assert inp.comment == "This is a comment" assert inp.isnc and not inp.ispaw inp["ecut"] = 1 assert inp.get("ecut") == 1 and len(inp) == 1 and "ecut" in inp.keys() and "foo" not in inp # Test to_string assert inp.to_string(with_structure=True, with_pseudos=True) assert inp.to_string(with_structure=False, with_pseudos=False) inp.set_vars(ecut=5, toldfe=1e-6) assert inp["ecut"] == 5 inp.set_vars_ifnotin(ecut=-10) assert inp["ecut"] == 5 _, tmpname = tempfile.mkstemp(text=True) inp.write(filepath=tmpname) # Cannot change structure variables directly. with self.assertRaises(inp.Error): inp.set_vars(unit_cell) with self.assertRaises(TypeError): inp.add_abiobjects({}) with self.assertRaises(KeyError): inp.remove_vars("foo", strict=True) assert not inp.remove_vars("foo", strict=False) # Test deepcopy and remove_vars. inp["bdgw"] = [1, 2] inp_copy = inp.deepcopy() inp_copy["bdgw"][1] = 3 assert inp["bdgw"] == [1, 2] assert inp.remove_vars("bdgw") and "bdgw" not in inp removed = inp.pop_tolerances() assert len(removed) == 1 and removed["toldfe"] == 1e-6 # Test set_spin_mode old_vars = inp.set_spin_mode("polarized") assert "nsppol" in inp and inp["nspden"] == 2 and inp["nspinor"] == 1 inp.set_vars(old_vars) # Test set_structure new_structure = inp.structure.copy() new_structure.perturb(distance=0.1) inp.set_structure(new_structure) assert inp.structure == new_structure # Compatible with Pickle and MSONable? self.serialize_with_pickle(inp, test_eq=False) def test_input_errors(self): """Testing typical BasicAbinitInput Error""" si_structure = Structure.from_file(abiref_file("si.cif")) # Ambiguous list of pseudos. with self.assertRaises(BasicAbinitInput.Error): BasicAbinitInput(si_structure, pseudos=abiref_files("14si.pspnc", "14si.4.hgh")) # Pseudos do not match structure. 
with self.assertRaises(BasicAbinitInput.Error): BasicAbinitInput(si_structure, pseudos=abiref_file("H-wdr.oncvpsp")) si1_negative_volume = dict( ntypat=1, natom=1, typat=[1], znucl=14, acell=3 * [7.60], rprim=[[0.0, 0.5, 0.5], [-0.5, -0.0, -0.5], [0.5, 0.5, 0.0]], xred=[[0.0, 0.0, 0.0]], ) # Negative triple product. with self.assertRaises(BasicAbinitInput.Error): BasicAbinitInput(si1_negative_volume, pseudos=abiref_files("14si.pspnc")) def test_helper_functions(self): """Testing BasicAbinitInput helper functions.""" inp = BasicAbinitInput(structure=abiref_file("si.cif"), pseudos="14si.pspnc", pseudo_dir=_test_dir) inp.set_kmesh(ngkpt=(1, 2, 3), shiftk=(1, 2, 3, 4, 5, 6)) assert inp["kptopt"] == 1 and inp["nshiftk"] == 2 inp.set_gamma_sampling() assert inp["kptopt"] == 1 and inp["nshiftk"] == 1 assert np.all(inp["shiftk"] == 0) inp.set_kpath(ndivsm=3, kptbounds=None) assert inp["ndivsm"] == 3 and inp["iscf"] == -2 and len(inp["kptbounds"]) == 12 class TestMultiDataset(PymatgenTest): """Unit tests for BasicMultiDataset.""" def test_api(self): """Testing BasicMultiDataset API.""" structure = Structure.from_file(abiref_file("si.cif")) pseudo = abiref_file("14si.pspnc") pseudo_dir = os.path.dirname(pseudo) multi = BasicMultiDataset(structure=structure, pseudos=pseudo) with self.assertRaises(ValueError): BasicMultiDataset(structure=structure, pseudos=pseudo, ndtset=-1) multi = BasicMultiDataset(structure=structure, pseudos=pseudo, pseudo_dir=pseudo_dir) assert len(multi) == 1 and multi.ndtset == 1 assert multi.isnc for i, inp in enumerate(multi): assert list(inp.keys()) == list(multi[i].keys()) multi.addnew_from(0) assert multi.ndtset == 2 and multi[0] is not multi[1] assert multi[0].structure == multi[1].structure assert multi[0].structure is not multi[1].structure multi.set_vars(ecut=2) assert all(inp["ecut"] == 2 for inp in multi) self.assertEqual(multi.get("ecut"), [2, 2]) multi[1].set_vars(ecut=1) assert multi[0]["ecut"] == 2 and multi[1]["ecut"] == 1 self.assertEqual(multi.get("ecut"), [2, 1]) self.assertEqual(multi.get("foo", "default"), ["default", "default"]) multi[1].set_vars(paral_kgb=1) assert "paral_kgb" not in multi[0] self.assertEqual(multi.get("paral_kgb"), [None, 1]) pert_structure = structure.copy() pert_structure.perturb(distance=0.1) assert structure != pert_structure assert multi.set_structure(structure) == multi.ndtset * [structure] assert all(s == structure for s in multi.structure) assert multi.has_same_structures multi[1].set_structure(pert_structure) assert multi[0].structure != multi[1].structure and multi[1].structure == pert_structure assert not multi.has_same_structures split = multi.split_datasets() assert len(split) == 2 and all(split[i] == multi[i] for i in range(multi.ndtset)) repr(multi) str(multi) assert multi.to_string(with_pseudos=False) tmpdir = tempfile.mkdtemp() filepath = os.path.join(tmpdir, "run.abi") inp.write(filepath=filepath) multi.write(filepath=filepath) new_multi = BasicMultiDataset.from_inputs([inp for inp in multi]) assert new_multi.ndtset == multi.ndtset assert new_multi.structure == multi.structure for old_inp, new_inp in zip(multi, new_multi): assert old_inp is not new_inp self.assertDictEqual(old_inp.as_dict(), new_inp.as_dict()) ref_input = multi[0] new_multi = BasicMultiDataset.replicate_input(input=ref_input, ndtset=4) assert new_multi.ndtset == 4 for inp in new_multi: assert ref_input is not inp self.assertDictEqual(ref_input.as_dict(), inp.as_dict()) # Compatible with Pickle and MSONable? 
self.serialize_with_pickle(multi, test_eq=False) class ShiftModeTest(PymatgenTest): def test_shiftmode(self): """Testing shiftmode""" gamma = ShiftMode.GammaCentered assert ShiftMode.from_object("G") == gamma assert ShiftMode.from_object(gamma) == gamma with self.assertRaises(TypeError): ShiftMode.from_object({}) class FactoryTest(PymatgenTest): def setUp(self): # Si ebands self.si_structure = Structure.from_file(abiref_file("si.cif")) self.si_pseudo = abiref_file("14si.pspnc") def test_gs_input(self): """Testing gs_input factory.""" inp = gs_input(self.si_structure, self.si_pseudo, kppa=10, ecut=10, spin_mode="polarized") str(inp) assert inp["nsppol"] == 2 assert inp["nband"] == 14 self.assertArrayEqual(inp["ngkpt"], [2, 2, 2]) def test_ebands_input(self): """Testing ebands_input factory.""" multi = ebands_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2) str(multi) scf_inp, nscf_inp = multi.split_datasets() # Test dos_kppa and other options. multi_dos = ebands_input( self.si_structure, self.si_pseudo, nscf_nband=10, kppa=10, ecut=2, spin_mode="unpolarized", smearing=None, charge=2.0, dos_kppa=50, ) assert len(multi_dos) == 3 assert all(i["charge"] == 2 for i in multi_dos) self.assertEqual(multi_dos.get("nsppol"), [1, 1, 1]) self.assertEqual(multi_dos.get("iscf"), [None, -2, -2]) multi_dos = ebands_input( self.si_structure, self.si_pseudo, nscf_nband=10, kppa=10, ecut=2, spin_mode="unpolarized", smearing=None, charge=2.0, dos_kppa=[50, 100], ) assert len(multi_dos) == 4 self.assertEqual(multi_dos.get("iscf"), [None, -2, -2, -2]) str(multi_dos) def test_ion_ioncell_relax_input(self): """Testing ion_ioncell_relax_input factory.""" multi = ion_ioncell_relax_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2) str(multi) ion_inp, ioncell_inp = multi.split_datasets() assert ion_inp["chksymbreak"] == 0 assert ion_inp["ionmov"] == 3 and ion_inp["optcell"] == 0 assert ioncell_inp["ionmov"] == 3 and ioncell_inp["optcell"] == 2
mit
-6,020,666,609,939,037,000
34.349515
107
0.590955
false
yasoob/youtube-dl-GUI
youtube_dl/extractor/glide.py
64
1543
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class GlideIE(InfoExtractor): IE_DESC = 'Glide mobile video messages (glide.me)' _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)' _TEST = { 'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==', 'md5': '4466372687352851af2d131cfaa8a4c7', 'info_dict': { 'id': 'UZF8zlmuQbe4mr+7dCiQ0w==', 'ext': 'mp4', 'title': "Damon's Glide message", 'thumbnail': r're:^https?://.*?\.cloudfront\.net/.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<title>(.+?)</title>', webpage, 'title', default=None) or self._og_search_title(webpage) video_url = self._proto_relative_url(self._search_regex( r'<source[^>]+src=(["\'])(?P<url>.+?)\1', webpage, 'video URL', default=None, group='url')) or self._og_search_video_url(webpage) thumbnail = self._proto_relative_url(self._search_regex( r'<img[^>]+id=["\']video-thumbnail["\'][^>]+src=(["\'])(?P<url>.+?)\1', webpage, 'thumbnail url', default=None, group='url')) or self._og_search_thumbnail(webpage) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, }
mit
8,006,231,734,576,173,000
34.883721
83
0.52884
false
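GlideIE above is a youtube-dl extractor, and its _TEST block records the expected fields. A hedged way to exercise it through youtube-dl's public API is sketched below; it needs network access and reuses the test URL from the record.

import youtube_dl

options = {'skip_download': True}
with youtube_dl.YoutubeDL(options) as ydl:
    info = ydl.extract_info('http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==', download=False)
    print(info.get('id'), info.get('title'))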
acshi/osf.io
website/util/rubeus.py
7
11153
# -*- coding: utf-8 -*- """Contains helper functions for generating correctly formatted hgrid list/folders. """ import logging import hurry.filesize from django.utils import timezone from framework import sentry from framework.auth.decorators import Auth from django.apps import apps from website import settings from website.util import paths from website.util import sanitize from website.settings import DISK_SAVING_MODE logger = logging.getLogger(__name__) FOLDER = 'folder' FILE = 'file' KIND = 'kind' # TODO: Validate the JSON schema, esp. for addons DEFAULT_PERMISSIONS = { 'view': True, 'edit': False, } def format_filesize(size): return hurry.filesize.size(size, system=hurry.filesize.alternative) def default_urls(node_api, short_name): return { 'fetch': u'{node_api}{addonshort}/hgrid/'.format(node_api=node_api, addonshort=short_name), 'upload': u'{node_api}{addonshort}/'.format(node_api=node_api, addonshort=short_name), } def to_hgrid(node, auth, **data): """Converts a node into a rubeus grid format :param Node node: the node to be parsed :param Auth auth: the user authorization object :returns: rubeus-formatted dict """ return NodeFileCollector(node, auth, **data).to_hgrid() def build_addon_root(node_settings, name, permissions=None, urls=None, extra=None, buttons=None, user=None, private_key=None, **kwargs): """Builds the root or "dummy" folder for an addon. :param addonNodeSettingsBase node_settings: Addon settings :param String name: Additional information for the folder title eg. Repo name for Github or bucket name for S3 :param dict or Auth permissions: Dictionary of permissions for the addon's content or Auth for use in node.can_X methods :param dict urls: Hgrid related urls :param String extra: Html to be appened to the addon folder name eg. Branch switcher for github :param list of dicts buttons: List of buttons to appear in HGrid row. 
Each dict must have 'text', a string that will appear on the button, and 'action', the name of a function in :param bool private_key: Used to check if information should be stripped from anonymous links :param dict kwargs: Any additional information to add to the root folder :return dict: Hgrid formatted dictionary for the addon root folder """ from website.util import check_private_key_for_anonymized_link permissions = permissions or DEFAULT_PERMISSIONS if name and not check_private_key_for_anonymized_link(private_key): name = u'{0}: {1}'.format(node_settings.config.full_name, name) else: name = node_settings.config.full_name if hasattr(node_settings.config, 'urls') and node_settings.config.urls: urls = node_settings.config.urls if urls is None: urls = default_urls(node_settings.owner.api_url, node_settings.config.short_name) forbid_edit = DISK_SAVING_MODE if node_settings.config.short_name == 'osfstorage' else False if isinstance(permissions, Auth): auth = permissions permissions = { 'view': node_settings.owner.can_view(auth), 'edit': (node_settings.owner.can_edit(auth) and not node_settings.owner.is_registration and not forbid_edit), } max_size = node_settings.config.max_file_size if user and 'high_upload_limit' in user.system_tags: max_size = node_settings.config.high_max_file_size ret = { 'provider': node_settings.config.short_name, 'addonFullname': node_settings.config.full_name, 'name': name, 'iconUrl': node_settings.config.icon_url, KIND: FOLDER, 'extra': extra, 'buttons': buttons, 'isAddonRoot': True, 'permissions': permissions, 'accept': { 'maxSize': max_size, 'acceptedFiles': node_settings.config.accept_extensions, }, 'urls': urls, 'isPointer': False, 'nodeId': node_settings.owner._id, 'nodeUrl': node_settings.owner.url, 'nodeApiUrl': node_settings.owner.api_url, } ret.update(kwargs) return ret def build_addon_button(text, action, title=''): """Builds am action button to be rendered in HGrid :param str text: A string or html to appear on the button itself :param str action: The name of the HGrid action for the button to call. 
The callback for the HGrid action must be defined as a member of HGrid.Actions :return dict: Hgrid formatted dictionary for custom buttons """ button = { 'text': text, 'action': action, } if title: button['attributes'] = 'title="{title}" data-toggle="tooltip" data-placement="right" '.format(title=title) return button def sort_by_name(hgrid_data): return_value = hgrid_data if hgrid_data is not None: return_value = sorted(hgrid_data, key=lambda item: item['name'].lower()) return return_value class NodeFileCollector(object): """A utility class for creating rubeus formatted node data""" def __init__(self, node, auth, **kwargs): NodeRelation = apps.get_model('osf.NodeRelation') self.node = node.child if isinstance(node, NodeRelation) else node self.auth = auth self.extra = kwargs self.can_view = self.node.can_view(auth) self.can_edit = self.node.can_edit(auth) and not self.node.is_registration def to_hgrid(self): """Return the Rubeus.JS representation of the node's file data, including addons and components """ root = self._serialize_node(self.node) return [root] def _collect_components(self, node, visited): rv = [] if not node.can_view(self.auth): return rv for child in node.get_nodes(is_deleted=False): if not child.can_view(self.auth): if child.primary: for desc in child.find_readable_descendants(self.auth): visited.append(desc.resolve()._id) rv.append(self._serialize_node(desc, visited=visited, parent=node)) elif child.resolve()._id not in visited: visited.append(child.resolve()._id) rv.append(self._serialize_node(child, visited=visited, parent=node)) return rv def _get_node_name(self, node): """Input node object, return the project name to be display. """ NodeRelation = apps.get_model('osf.NodeRelation') is_node_relation = isinstance(node, NodeRelation) node = node.child if is_node_relation else node can_view = node.can_view(auth=self.auth) if can_view: node_name = sanitize.unescape_entities(node.title) elif node.is_registration: node_name = u'Private Registration' elif node.is_fork: node_name = u'Private Fork' elif is_node_relation: node_name = u'Private Link' else: node_name = u'Private Component' return node_name def _serialize_node(self, node, visited=None, parent=None): """Returns the rubeus representation of a node folder. """ visited = visited or [] visited.append(node.resolve()._id) can_view = node.can_view(auth=self.auth) if can_view: children = self._collect_addons(node) + self._collect_components(node, visited) else: children = [] is_pointer = parent and parent.has_node_link_to(node) return { # TODO: Remove safe_unescape_html when mako html safe comes in 'name': self._get_node_name(node), 'category': node.category, 'kind': FOLDER, 'permissions': { 'edit': node.can_edit(self.auth) and not node.is_registration, 'view': can_view, }, 'urls': { 'upload': None, 'fetch': None, }, 'children': children, 'isPointer': is_pointer, 'isSmartFolder': False, 'nodeType': node.project_or_component, 'nodeID': node.resolve()._id, } def _collect_addons(self, node): rv = [] for addon in node.get_addons(): if addon.config.has_hgrid_files: # WARNING: get_hgrid_data can return None if the addon is added but has no credentials. 
try: temp = addon.config.get_hgrid_data(addon, self.auth, **self.extra) except Exception as e: logger.warn( getattr( e, 'data', 'Unexpected error when fetching file contents for {0}.'.format(addon.config.full_name) ) ) sentry.log_exception() rv.append({ KIND: FOLDER, 'unavailable': True, 'iconUrl': addon.config.icon_url, 'provider': addon.config.short_name, 'addonFullname': addon.config.full_name, 'permissions': {'view': False, 'edit': False}, 'name': '{} is currently unavailable'.format(addon.config.full_name), }) continue rv.extend(sort_by_name(temp) or []) return rv # TODO: these might belong in addons module def collect_addon_assets(node): """Return a dictionary containing lists of JS and CSS assets for a node's addons. :rtype: {'tree_js': <list of JS scripts>, 'tree_css': <list of CSS files>} """ return { 'tree_js': list(collect_addon_js(node)), 'tree_css': list(collect_addon_css(node)), } # TODO: Abstract static collectors def collect_addon_js(node, visited=None, filename='files.js', config_entry='files'): """Collect JavaScript includes for all add-ons implementing HGrid views. :return list: List of JavaScript include paths """ js = [] for addon_config in settings.ADDONS_AVAILABLE_DICT.values(): # JS modules configured in each addon's __init__ file js.extend(addon_config.include_js.get(config_entry, [])) # Webpack bundle js_path = paths.resolve_addon_path(addon_config, filename) if js_path: js.append(js_path) return js def collect_addon_css(node, visited=None): """Collect CSS includes for all addons-ons implementing Hgrid views. :return: List of CSS include paths :rtype: list """ css = [] for addon_config in settings.ADDONS_AVAILABLE_DICT.values(): # CSS modules configured in each addon's __init__ file css.extend(addon_config.include_css.get('files', [])) return css def delta_date(d): diff = d - timezone.now() s = diff.total_seconds() return s
apache-2.0
5,438,896,815,541,774,000
34.406349
124
0.60477
false
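build_addon_button above only packages a label, an HGrid action name and an optional tooltip into a dict. For illustration (the action name here is hypothetical):

button = build_addon_button('Download all', 'downloadZip', title='Download this folder as a zip')
# -> {'text': 'Download all',
#     'action': 'downloadZip',
#     'attributes': 'title="Download this folder as a zip" data-toggle="tooltip" data-placement="right" '}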
daafgo/Server_LRS
lrs/util/retrieve_statement.py
1
11615
import bencode import hashlib import json from datetime import datetime from itertools import chain from django.core.cache import cache from django.conf import settings from django.core.paginator import Paginator from django.db.models import Q from util import convert_to_utc, convert_to_dict from ..models import Statement, Agent from ..objects.AgentManager import AgentManager from ..exceptions import NotFound, IDNotFoundError MORE_ENDPOINT = '/xapi/statements/more/' def complex_get(param_dict, limit, language, format, attachments): # Tests if value is True or "true" voidQ = Q(voided=False) # keep track if a filter other than time or sequence is used reffilter = False sinceQ = Q() if 'since' in param_dict: sinceQ = Q(stored__gt=convert_to_utc(param_dict['since'])) untilQ = Q() if 'until' in param_dict: untilQ = Q(stored__lte=convert_to_utc(param_dict['until'])) # For statements/read/mine oauth scope authQ = Q() if 'auth' in param_dict and (param_dict['auth'] and 'statements_mine_only' in param_dict['auth']): q_auth = param_dict['auth']['authority'] # If oauth - set authority to look for as the user if q_auth.oauth_identifier: authQ = Q(authority=q_auth) | Q(authority=q_auth.get_user_from_oauth_group()) # Chain all of user's oauth clients as well else: oauth_clients = Agent.objects.filter(member__in=[q_auth]) authQ = Q(authority=q_auth) for client in oauth_clients: authQ = authQ | Q(authority=client.get_user_from_oauth_group()) agentQ = Q() if 'agent' in param_dict: reffilter = True agent = None data = param_dict['agent'] related = 'related_agents' in param_dict and param_dict['related_agents'] if not type(data) is dict: data = convert_to_dict(data) try: agent = AgentManager(data).Agent if agent.objectType == "Group": groups = [] else: groups = agent.member.all() agentQ = Q(actor=agent) for g in groups: agentQ = agentQ | Q(actor=g) if related: me = chain([agent], groups) for a in me: agentQ = agentQ | Q(object_agent=a) | Q(authority=a) \ | Q(context_instructor=a) | Q(context_team=a) \ | Q(object_substatement__actor=a) \ | Q(object_substatement__object_agent=a) \ | Q(object_substatement__context_instructor=a) \ | Q(object_substatement__context_team=a) except IDNotFoundError: return[] verbQ = Q() if 'verb' in param_dict: reffilter = True verbQ = Q(verb__verb_id=param_dict['verb']) # activity activityQ = Q() if 'activity' in param_dict: reffilter = True activityQ = Q(object_activity__activity_id=param_dict['activity']) if 'related_activities' in param_dict and param_dict['related_activities']: activityQ = activityQ | Q(statementcontextactivity__context_activity__activity_id=param_dict['activity']) \ | Q(object_substatement__object_activity__activity_id=param_dict['activity']) \ | Q(object_substatement__substatementcontextactivity__context_activity__activity_id=param_dict['activity']) registrationQ = Q() if 'registration' in param_dict: reffilter = True registrationQ = Q(context_registration=param_dict['registration']) # If want ordered by ascending stored_param = '-stored' if 'ascending' in param_dict and param_dict['ascending']: stored_param = 'stored' stmtset = Statement.objects.filter(voidQ & untilQ & sinceQ & authQ & agentQ & verbQ & activityQ & registrationQ) # only find references when a filter other than # since, until, or limit was used if reffilter: stmtset = findstmtrefs(stmtset.distinct(), sinceQ, untilQ) # Calculate limit of stmts to return return_limit = set_limit(limit) # If there are more stmts than the limit, need to break it up and return more id if stmtset.count() > 
return_limit: return initial_cache_return(stmtset, stored_param, return_limit, language, format, attachments) else: return create_stmt_result(stmtset, stored_param, language, format) def create_stmt_result(stmt_set, stored, language, format): stmt_result = {} # blows up if the idlist is empty... so i gotta check for that idlist = stmt_set.values_list('id', flat=True) if idlist > 0: if format == 'exact': stmt_result = '{"statements": [%s], "more": ""}' % ",".join([json.dumps(stmt.full_statement) for stmt in \ Statement.objects.filter(id__in=idlist).order_by(stored)]) else: stmt_result['statements'] = [stmt.to_dict(language, format) for stmt in \ Statement.objects.filter(id__in=idlist).order_by(stored)] stmt_result['more'] = "" else: stmt_result['statements'] = [] stmt_result['more'] = "" return stmt_result def findstmtrefs(stmtset, sinceQ, untilQ): if stmtset.count() == 0: return stmtset q = Q() for s in stmtset: q = q | Q(object_statementref__ref_id=s.statement_id) if sinceQ and untilQ: q = q & Q(sinceQ, untilQ) elif sinceQ: q = q & sinceQ elif untilQ: q = q & untilQ # finally weed out voided statements in this lookup q = q & Q(voided=False) return findstmtrefs(Statement.objects.filter(q).distinct(), sinceQ, untilQ) | stmtset def create_cache_key(stmt_list): # Create unique hash data to use for the cache key hash_data = [] hash_data.append(str(datetime.now())) hash_data.append(str(stmt_list)) # Create cache key from hashed data (always 32 digits) key = hashlib.md5(bencode.bencode(hash_data)).hexdigest() return key def initial_cache_return(stmt_list, stored, limit, language, format, attachments): # First time someone queries POST/GET result = {} cache_list = [] cache_list.append([s for s in stmt_list.order_by(stored).values_list('id', flat=True)]) stmt_pager = Paginator(cache_list[0], limit) # Always start on first page current_page = 1 total_pages = stmt_pager.num_pages # Create cache key from hashed data (always 32 digits) cache_key = create_cache_key(cache_list[0]) # Add data to cache cache_list.append(current_page) cache_list.append(total_pages) cache_list.append(limit) cache_list.append(attachments) cache_list.append(language) cache_list.append(format) cache_list.append(stored) # Encode data encoded_info = json.dumps(cache_list) # Save encoded_dict in cache cache.set(cache_key,encoded_info) # Return first page of results if format == 'exact': result = '{"statements": [%s], "more": "%s"}' % (",".join([json.dumps(stmt.full_statement) for stmt in \ Statement.objects.filter(id__in=stmt_pager.page(1).object_list).order_by(stored)]), MORE_ENDPOINT + cache_key) else: result['statements'] = [stmt.to_dict(language, format) for stmt in \ Statement.objects.filter(id__in=stmt_pager.page(1).object_list).order_by(stored)] result['more'] = MORE_ENDPOINT + cache_key return result def set_limit(req_limit): if not req_limit or req_limit > settings.SERVER_STMT_LIMIT: req_limit = settings.SERVER_STMT_LIMIT return req_limit def get_more_statement_request(req_id): # Retrieve encoded info for statements encoded_info = cache.get(req_id) # Could have expired or never existed if not encoded_info: raise NotFound("List does not exist - may have expired after 24 hours") # Decode info decoded_info = json.loads(encoded_info) # Info is always cached as [stmt_list, start_page, total_pages, limit, attachments, language, format] stmt_list = decoded_info[0] start_page = decoded_info[1] total_pages = decoded_info[2] limit = decoded_info[3] attachments = decoded_info[4] language = decoded_info[5] format = decoded_info[6] 
stored = decoded_info[7] # Build statementResult stmt_result = build_statement_result(stmt_list, start_page, total_pages, limit, attachments, language, format, stored, req_id) return stmt_result, attachments # Gets called from req_process after complex_get with list of django objects and also gets called from get_more_statement_request when # more_id is used so list will be serialized def build_statement_result(stmt_list, start_page, total_pages, limit, attachments, language, format, stored, more_id): result = {} current_page = start_page + 1 # If that was the last page to display then just return the remaining stmts if current_page == total_pages: stmt_pager = Paginator(stmt_list, limit) # Return first page of results if format == 'exact': result = '{"statements": [%s], "more": ""}' % ",".join([json.dumps(stmt.to_dict(language, format)) for stmt in \ Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)]) else: result['statements'] = [stmt.to_dict(language, format) for stmt in \ Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)] result['more'] = "" # Set current page back for when someone hits the URL again current_page -= 1 # Retrieve list stored in cache encoded_list = cache.get(more_id) # Decode info to set the current page back then encode again decoded_list = json.loads(encoded_list) decoded_list[1] = current_page encoded_list = json.dumps(decoded_list) cache.set(more_id, encoded_list) # There are more pages to display else: stmt_pager = Paginator(stmt_list, limit) # Create cache key from hashed data (always 32 digits) cache_key = create_cache_key(stmt_list) # Return first page of results if format == 'exact': result = '{"statements": [%s], "more": "%s"}' % (",".join([json.dumps(stmt.to_dict(language, format)) for stmt in \ Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)]), MORE_ENDPOINT + cache_key) else: # Set result to have selected page of stmts and more endpoint result['statements'] = [stmt.to_dict(language, format) for stmt in \ Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)] result['more'] = MORE_ENDPOINT + cache_key more_cache_list = [] # Increment next page start_page = current_page more_cache_list.append(stmt_list) more_cache_list.append(start_page) more_cache_list.append(total_pages) more_cache_list.append(limit) more_cache_list.append(attachments) more_cache_list.append(language) more_cache_list.append(format) more_cache_list.append(stored) # Encode info encoded_list = json.dumps(more_cache_list) cache.set(cache_key, encoded_list) return result
apache-2.0
5,895,940,552,125,944,000
39.190311
137
0.624193
false
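set_limit above substitutes the server-wide cap when no limit is requested and trims any request above it. A standalone restatement with a hypothetical cap shows the three cases:

SERVER_STMT_LIMIT = 100   # stand-in for settings.SERVER_STMT_LIMIT

def set_limit(req_limit):
    if not req_limit or req_limit > SERVER_STMT_LIMIT:
        req_limit = SERVER_STMT_LIMIT
    return req_limit

print(set_limit(None))   # 100 - falls back to the server default
print(set_limit(500))    # 100 - capped
print(set_limit(25))     # 25  - honoured as requested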
RAtechntukan/CouchPotatoServer
libs/subliminal/services/tvsubtitles.py
106
6240
# -*- coding: utf-8 -*- # Copyright 2012 Nicolas Wack <wackou@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..cache import cachedmethod from ..language import language_set, Language from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import get_keywords from ..videos import Episode from bs4 import BeautifulSoup import logging import re logger = logging.getLogger(__name__) def match(pattern, string): try: return re.search(pattern, string).group(1) except AttributeError: logger.debug(u'Could not match %r on %r' % (pattern, string)) return None class TvSubtitles(ServiceBase): server_url = 'http://www.tvsubtitles.net' api_based = False languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk', 'zh', 'pt-br']) #TODO: Find more exceptions language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'), 'cn': Language('chi')} videos = [Episode] require_video = False required_features = ['permissive'] @cachedmethod def get_likely_series_id(self, name): r = self.session.post('%s/search.php' % self.server_url, data={'q': name}) soup = BeautifulSoup(r.content, self.required_features) maindiv = soup.find('div', 'left') results = [] for elem in maindiv.find_all('li'): sid = int(match('tvshow-([0-9]+)\.html', elem.a['href'])) show_name = match('(.*) \(', elem.a.text) results.append((show_name, sid)) #TODO: pick up the best one in a smart way result = results[0] return result[1] @cachedmethod def get_episode_id(self, series_id, season, number): """Get the TvSubtitles id for the given episode. 
Raises KeyError if none could be found.""" # download the page of the season, contains ids for all episodes episode_id = None r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season)) soup = BeautifulSoup(r.content, self.required_features) table = soup.find('table', id='table5') for row in table.find_all('tr'): cells = row.find_all('td') if not cells: continue episode_number = match('x([0-9]+)', cells[0].text) if not episode_number: continue episode_number = int(episode_number) episode_id = int(match('episode-([0-9]+)', cells[1].a['href'])) # we could just return the id of the queried episode, but as we # already downloaded the whole page we might as well fill in the # information for all the episodes of the season self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id) # raises KeyError if not found return self.cached_value(self.get_episode_id, args=(series_id, season, number)) # Do not cache this method in order to always check for the most recent # subtitles def get_sub_ids(self, episode_id): subids = [] r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id)) epsoup = BeautifulSoup(r.content, self.required_features) for subdiv in epsoup.find_all('a'): if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'): continue subid = int(match('([0-9]+)', subdiv['href'])) lang = self.get_language(match('flags/(.*).gif', subdiv.img['src'])) result = {'subid': subid, 'language': lang} for p in subdiv.find_all('p'): if 'alt' in p.attrs and p['alt'] == 'rip': result['rip'] = p.text.strip() if 'alt' in p.attrs and p['alt'] == 'release': result['release'] = p.text.strip() subids.append(result) return subids def list_checked(self, video, languages): return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode) def query(self, filepath, languages, keywords, series, season, episode): logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) self.init_cache() sid = self.get_likely_series_id(series.lower()) try: ep_id = self.get_episode_id(sid, season, episode) except KeyError: logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode)) return [] subids = self.get_sub_ids(ep_id) # filter the subtitles with our queried languages subtitles = [] for subid in subids: language = subid['language'] if language not in languages: continue path = get_subtitle_path(filepath, language, self.config.multi) subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']), keywords=[subid['rip'], subid['release']]) subtitles.append(subtitle) return subtitles def download(self, subtitle): self.download_zip_file(subtitle.link, subtitle.path) return subtitle Service = TvSubtitles
gpl-3.0
3,814,871,655,794,799,000
42.943662
145
0.604487
false
ds-hwang/chromium-crosswalk
remoting/tools/zip2msi.py
89
8691
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generates .msi from a .zip archive or an unpacked directory. The structure of the input archive or directory should look like this: +- archive.zip +- archive +- parameters.json The name of the archive and the top level directory in the archive must match. When an unpacked directory is used as the input "archive.zip/archive" should be passed via the command line. 'parameters.json' specifies the parameters to be passed to candle/light and must have the following structure: { "defines": { "name": "value" }, "extensions": [ "WixFirewallExtension.dll" ], "switches": [ '-nologo' ], "source": "chromoting.wxs", "bind_path": "files", "sign": [ ... ], "candle": { ... }, "light": { ... } } "source" specifies the name of the input .wxs relative to "archive.zip/archive". "bind_path" specifies the path where to look for binary files referenced by .wxs relative to "archive.zip/archive". This script is used for both building Chromoting Host installation during Chromuim build and for signing Chromoting Host installation later. There are two copies of this script because of that: - one in Chromium tree at src/remoting/tools/zip2msi.py. - another one next to the signing scripts. The copies of the script can be out of sync so make sure that a newer version is compatible with the older ones when updating the script. """ import copy import json from optparse import OptionParser import os import re import subprocess import sys import zipfile def UnpackZip(target, source): """Unpacks |source| archive to |target| directory.""" target = os.path.normpath(target) archive = zipfile.ZipFile(source, 'r') for f in archive.namelist(): target_file = os.path.normpath(os.path.join(target, f)) # Sanity check to make sure .zip uses relative paths. if os.path.commonprefix([target_file, target]) != target: print "Failed to unpack '%s': '%s' is not under '%s'" % ( source, target_file, target) return 1 # Create intermediate directories. target_dir = os.path.dirname(target_file) if not os.path.exists(target_dir): os.makedirs(target_dir) archive.extract(f, target) return 0 def Merge(left, right): """Merges two values. Raises: TypeError: |left| and |right| cannot be merged. Returns: - if both |left| and |right| are dictionaries, they are merged recursively. - if both |left| and |right| are lists, the result is a list containing elements from both lists. - if both |left| and |right| are simple value, |right| is returned. - |TypeError| exception is raised if a dictionary or a list are merged with a non-dictionary or non-list correspondingly. """ if isinstance(left, dict): if isinstance(right, dict): retval = copy.copy(left) for key, value in right.iteritems(): if key in retval: retval[key] = Merge(retval[key], value) else: retval[key] = value return retval else: raise TypeError('Error: merging a dictionary and non-dictionary value') elif isinstance(left, list): if isinstance(right, list): return left + right else: raise TypeError('Error: merging a list and non-list value') else: if isinstance(right, dict): raise TypeError('Error: merging a dictionary and non-dictionary value') elif isinstance(right, list): raise TypeError('Error: merging a dictionary and non-dictionary value') else: return right quote_matcher_regex = re.compile(r'\s|"') quote_replacer_regex = re.compile(r'(\\*)"') def QuoteArgument(arg): """Escapes a Windows command-line argument. 
So that the Win32 CommandLineToArgv function will turn the escaped result back into the original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx ("Parsing C++ Command-Line Arguments") to understand why we have to do this. Args: arg: the string to be escaped. Returns: the escaped string. """ def _Replace(match): # For a literal quote, CommandLineToArgv requires an odd number of # backslashes preceding it, and it produces half as many literal backslashes # (rounded down). So we need to produce 2n+1 backslashes. return 2 * match.group(1) + '\\"' if re.search(quote_matcher_regex, arg): # Escape all quotes so that they are interpreted literally. arg = quote_replacer_regex.sub(_Replace, arg) # Now add unescaped quotes so that any whitespace is interpreted literally. return '"' + arg + '"' else: return arg def GenerateCommandLine(tool, source, dest, parameters): """Generates the command line for |tool|.""" # Merge/apply tool-specific parameters params = copy.copy(parameters) if tool in parameters: params = Merge(params, params[tool]) wix_path = os.path.normpath(params.get('wix_path', '')) switches = [os.path.join(wix_path, tool), '-nologo'] # Append the list of defines and extensions to the command line switches. for name, value in params.get('defines', {}).iteritems(): switches.append('-d%s=%s' % (name, value)) for ext in params.get('extensions', []): switches += ('-ext', os.path.join(wix_path, ext)) # Append raw switches switches += params.get('switches', []) # Append the input and output files switches += ('-out', dest, source) # Generate the actual command line #return ' '.join(map(QuoteArgument, switches)) return switches def Run(args): """Runs a command interpreting the passed |args| as a command line.""" command = ' '.join(map(QuoteArgument, args)) popen = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() if popen.returncode: print command for line in out.splitlines(): print line print '%s returned %d' % (args[0], popen.returncode) return popen.returncode def GenerateMsi(target, source, parameters): """Generates .msi from the installation files prepared by Chromium build.""" parameters['basename'] = os.path.splitext(os.path.basename(source))[0] # The script can handle both forms of input a directory with unpacked files or # a ZIP archive with the same files. In the latter case the archive should be # unpacked to the intermediate directory. source_dir = None if os.path.isdir(source): # Just use unpacked files from the supplied directory. source_dir = source else: # Unpack .zip rc = UnpackZip(parameters['intermediate_dir'], source) if rc != 0: return rc source_dir = '%(intermediate_dir)s\\%(basename)s' % parameters # Read parameters from 'parameters.json'. f = open(os.path.join(source_dir, 'parameters.json')) parameters = Merge(json.load(f), parameters) f.close() if 'source' not in parameters: print 'The source .wxs is not specified' return 1 if 'bind_path' not in parameters: print 'The binding path is not specified' return 1 wxs = os.path.join(source_dir, parameters['source']) # Add the binding path to the light-specific parameters. bind_path = os.path.join(source_dir, parameters['bind_path']) parameters = Merge(parameters, {'light': {'switches': ['-b', bind_path]}}) target_arch = parameters['target_arch'] if target_arch == 'ia32': arch_param = 'x86' elif target_arch == 'x64': arch_param = 'x64' else: print 'Invalid target_arch parameter value' return 1 # Add the architecture to candle-specific parameters. 
parameters = Merge( parameters, {'candle': {'switches': ['-arch', arch_param]}}) # Run candle and light to generate the installation. wixobj = '%(intermediate_dir)s\\%(basename)s.wixobj' % parameters args = GenerateCommandLine('candle', wxs, wixobj, parameters) rc = Run(args) if rc: return rc args = GenerateCommandLine('light', wixobj, target, parameters) rc = Run(args) if rc: return rc return 0 def main(): usage = 'Usage: zip2msi [options] <input.zip> <output.msi>' parser = OptionParser(usage=usage) parser.add_option('--intermediate_dir', dest='intermediate_dir', default='.') parser.add_option('--wix_path', dest='wix_path', default='.') parser.add_option('--target_arch', dest='target_arch', default='x86') options, args = parser.parse_args() if len(args) != 2: parser.error('two positional arguments expected') return GenerateMsi(args[1], args[0], dict(options.__dict__)) if __name__ == '__main__': sys.exit(main())
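To make the parameters.json contract described in the module docstring above concrete, here is a hypothetical instance expressed as the dict json.load() would return, together with the candle command line GenerateCommandLine would derive from it. All values are illustrative; only the key names come from the docstring.

# Hypothetical parameters.json content (illustrative values only).
parameters = {
    'wix_path': r'c:\wix',
    'defines': {'Version': '1.2.3.4'},
    'extensions': ['WixFirewallExtension.dll'],
    'switches': ['-nologo'],
    'source': 'chromoting.wxs',
    'bind_path': 'files',
    'candle': {'switches': ['-arch', 'x86']},
    'light': {'switches': ['-cultures:en-us']},
}

# GenerateCommandLine('candle', 'chromoting.wxs', 'out.wixobj', parameters) then yields roughly:
#   [r'c:\wix\candle', '-nologo', '-dVersion=1.2.3.4',
#    '-ext', r'c:\wix\WixFirewallExtension.dll',
#    '-nologo', '-arch', 'x86', '-out', 'out.wixobj', 'chromoting.wxs']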
bsd-3-clause
7,209,941,797,004,164,000
30.952206
80
0.682085
false
rolobio/DictORM
dictorm/pg.py
1
10974
""" This module is used to build queries for Postgresql. You shouldn't really need to import anything from this file because they can all be built using dictorm.Table. Sqlite queries are slightly different, but use these methods as their base. """ from copy import copy from typing import Union global sort_keys sort_keys = False __all__ = [ 'And', 'Column', 'Comparison', 'Delete', 'Insert', 'Null', 'Operator', 'Or', 'Select', 'set_sort_keys', 'Update', ] def set_sort_keys(val): """Used only for testing""" global sort_keys sort_keys = val class Select(object): query = 'SELECT * FROM "{table}"' def __init__(self, table, operators_or_comp=None, returning=None): self.table = table self.operators_or_comp = operators_or_comp or [] self.returning = returning self._order_by = None self._limit = None self._offset = None def __repr__(self): # pragma: no cover return 'Select({0}, {1}, ret:{2}, order:{3}, limit:{4}, offset:{5}'.format( self.table, repr(self.operators_or_comp), self.returning, self._order_by, self._limit, self._offset) def _copy(self): try: ooc = self.operators_or_comp[:] except TypeError: ooc = self.operators_or_comp._copy() new = type(self)(self.table, ooc, copy(self.returning)) new._order_by = copy(self._order_by) new._limit = copy(self._limit) new._offset = copy(self._offset) return new def __str__(self): parts = [] formats = {'table': self.table, } ooc = self.operators_or_comp if (isinstance(ooc, Operator) and ooc.operators_or_comp) or ( isinstance(ooc, Comparison) ): parts.append(' WHERE {comp}') formats['comp'] = str(ooc) if self._order_by: parts.append(' ORDER BY {0}'.format(str(self._order_by))) if self.returning == '*': parts.append(' RETURNING *') elif self.returning: parts.append(' RETURNING "{0}"'.format(str(self.returning))) if self._limit: parts.append(' LIMIT {0}'.format(str(self._limit))) if self._offset: parts.append(' OFFSET {0}'.format(str(self._offset))) sql = self.query + ''.join(parts) return sql.format(**formats) def values(self): return list(self.operators_or_comp or []) def build(self): return (str(self), self.values()) def order_by(self, order_by): self._order_by = order_by return self def limit(self, limit): self._limit = limit return self def offset(self, offset): self._offset = offset return self def __add__(self, item): self.operators_or_comp += item return self class Insert(object): query = 'INSERT INTO "{table}" {cvp}' cvp = '({0}) VALUES ({1})' interpolation_str = '%s' append_returning = None last_row = 'SELECT {0} FROM "{1}" WHERE "rowid" = last_insert_rowid()' def __init__(self, table, **values): self.table = table self._values = values self._returning = None self._ordered_keys = values.keys() if sort_keys: self._ordered_keys = sorted(self._ordered_keys) def _build_cvp(self): return (', '.join(['"{}"'.format(i) for i in self._ordered_keys]), ', '.join([self.interpolation_str, ] * len(self._values))) def __str__(self): sql = self.query if self._returning == '*': sql += ' RETURNING *' elif self._returning: sql += ' RETURNING "{0}"'.format(self._returning) if not self._values: return sql.format(table=self.table, cvp='DEFAULT VALUES') return sql.format(table=self.table, cvp=self.cvp.format(*self._build_cvp())) def values(self): return [self._values[k] for k in self._ordered_keys] def build(self): sql, values = str(self), self.values() if self.append_returning: ret = [(sql, values), ] ret.append((self.last_row.format( self.append_returning, self.table), [])) return ret return (sql, values) def returning(self, returning): self._returning = returning return 
self class Update(Insert): query = 'UPDATE "{table}" SET {cvp}' interpolation_str = '%s' def __init__(self, table, **values): self.operators_or_comp = None super(Update, self).__init__(table, **values) def _build_cvp(self): return ', '.join(('"{0}"={1}'.format(k, self.interpolation_str) \ for k in self._ordered_keys)) def __str__(self): parts = [] formats = {'table': self.table, 'cvp': self._build_cvp()} if self.operators_or_comp: parts.append(' WHERE {comps}') formats['comps'] = str(self.operators_or_comp) if self._returning == '*': parts.append(' RETURNING *') elif self._returning: parts.append(' RETURNING "{0}"'.format(self._returning)) sql = self.query + ''.join(parts) return sql.format(**formats) def values(self): values = super(Update, self).values() if self.operators_or_comp: values.extend(list(self.operators_or_comp)) return values def where(self, operators_or_comp): self.operators_or_comp = operators_or_comp return self class Delete(Update): query = 'DELETE FROM "{table}"' QueryHint = Union[Select, Insert, Update, Delete] class Comparison(object): interpolation_str = '%s' many = False def __init__(self, column1, column2, kind): self.column1 = column1 self.column2 = column2 self.kind = kind self._substratum = None self._aggregate = False self._array_exp = False def __repr__(self): # pragma: no cover if isinstance(self.column2, Null): ret = 'Comparison({0}{1})'.format(self.column1, self.kind) ret = 'Comparison{0}({1}{2}{3})'.format('Many' if self.many else '', self.column1, str(self.kind), self.column2) if self._substratum: ret += '.substratum({0})'.format(self._substratum) return ret def __str__(self): c1 = self.column1.column if self._null_kind(): return '"{0}"{1}'.format(c1, self.kind) # Surround the expression with parentheses if self._array_exp: return '"{0}"{1}({2})'.format(c1, self.kind, self.interpolation_str) return '"{0}"{1}{2}'.format(c1, self.kind, self.interpolation_str) def _copy(self): new = type(self)(self.column1, self.column2, self.kind) new._substratum = self._substratum new._aggregate = self._aggregate return new def value(self): return self.column2 def __iter__(self): if self._null_kind(): return iter([]) return iter([self.column2, ]) def substratum(self, column): comp = Comparison(self.column1, self.column2, self.kind) comp._substratum = column comp.many = self.many return comp def aggregate(self, column): comp = self.substratum(column) comp._aggregate = True return comp def _null_kind(self): return isinstance(self.column2, Null) def Or(self, comp2): return Or(self, comp2) def And(self, comp2): return And(self, comp2) class Null(): pass class Column(object): comparison = Comparison def __init__(self, table, column): self.table = table self.column = column def __repr__(self): # pragma: no cover return '{0}.{1}'.format(self.table, self.column) def many(self, column): c = self.comparison(self, column, '=') c.many = True return c def __eq__(self, column): return self.comparison(self, column, '=') def __gt__(self, column): return self.comparison(self, column, '>') def __ge__(self, column): return self.comparison(self, column, '>=') def __lt__(self, column): return self.comparison(self, column, '<') def __le__(self, column): return self.comparison(self, column, '<=') def __ne__(self, column): return self.comparison(self, column, '!=') def Is(self, column): return self.comparison(self, column, ' IS ') def IsNot(self, column): return self.comparison(self, column, ' IS NOT ') def IsDistinct(self, column): return self.comparison(self, column, ' IS DISTINCT FROM ') def 
IsNotDistinct(self, column): return self.comparison(self, column, ' IS NOT DISTINCT FROM ') def IsNull(self): return self.comparison(self, Null(), ' IS NULL') def IsNotNull(self): return self.comparison(self, Null(), ' IS NOT NULL') def In(self, tup): if isinstance(tup, list): tup = tuple(tup) return self.comparison(self, tup, ' IN ') def Like(self, column): return self.comparison(self, column, ' LIKE ') def Ilike(self, column): return self.comparison(self, column, ' ILIKE ') def Any(self, column): comp = self.comparison(self, column, ' = ANY ') comp._array_exp = True return comp def wrap_ooc(ooc): if isinstance(ooc, Comparison): return '%s' % str(ooc) return '(%s)' % str(ooc) class Operator(object): def __init__(self, kind, operators_or_comp): self.kind = kind self.operators_or_comp = operators_or_comp def __repr__(self): # pragma: no cover return '{0}{1}'.format(self.kind, repr(self.operators_or_comp)) def __str__(self): kind = ' {0} '.format(self.kind) return kind.join(map(wrap_ooc, self.operators_or_comp)) def __iter__(self): i = [] for comp in self.operators_or_comp: if isinstance(comp, Operator): i.extend(comp) elif isinstance(comp, Comparison) and not comp._null_kind(): i.append(comp.value()) return iter(i) def __add__(self, i): if isinstance(i, tuple): self.operators_or_comp += i else: self.operators_or_comp += (i,) return self def _copy(self): new = type(self)() new.operators_or_comp = tuple(i._copy() for i in self.operators_or_comp) return new class Or(Operator): def __init__(self, *operators_or_comp): super(Or, self).__init__('OR', operators_or_comp) class And(Operator): def __init__(self, *operators_or_comp): super(And, self).__init__('AND', operators_or_comp)
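A short usage sketch of the builders defined above (normally reached through dictorm.Table rather than instantiated directly; the table and column names are made up):

name = Column('person', 'name')

query, params = Select('person', name == 'Bob').order_by('id ASC').build()
# query  -> 'SELECT * FROM "person" WHERE "name"=%s ORDER BY id ASC'
# params -> ['Bob']

insert = Insert('person', name='Dave').returning('id')
# str(insert)     -> 'INSERT INTO "person" ("name") VALUES (%s) RETURNING "id"'
# insert.values() -> ['Dave']

update = Update('person', name='Bob').where(name == 'Dave')
# str(update)     -> 'UPDATE "person" SET "name"=%s WHERE "name"=%s'
# update.values() -> ['Bob', 'Dave']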
apache-2.0
-8,272,285,528,467,006,000
27.80315
91
0.558958
false
megraf/asuswrt-merlin
release/src/router/libxml2/python/tests/xpath.py
87
1188
#!/usr/bin/python -u
#
# this test exercise the XPath basic engine, parser, etc, and
# allows to detect memory leaks
#
import sys
import libxml2

# Memory debug specific
libxml2.debugMemory(1)

doc = libxml2.parseFile("tst.xml")
if doc.name != "tst.xml":
    print "doc.name error"
    sys.exit(1);

ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
    print "xpath query: wrong node set size"
    sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
    print "xpath query: wrong node set value"
    sys.exit(1)
ctxt.setContextNode(res[0])
res = ctxt.xpathEval("foo")
if len(res) != 1:
    print "xpath query: wrong node set size"
    sys.exit(1)
if res[0].name != "foo":
    print "xpath query: wrong node set value"
    sys.exit(1)
doc.freeDoc()
ctxt.xpathFreeContext()

i = 1000
while i > 0:
    doc = libxml2.parseFile("tst.xml")
    ctxt = doc.xpathNewContext()
    res = ctxt.xpathEval("//*")
    doc.freeDoc()
    ctxt.xpathFreeContext()
    i = i -1
del ctxt

# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print "OK"
else:
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
gpl-2.0
-4,336,757,916,907,250,000
22.294118
61
0.654882
false
mmatyas/skia
tools/misc_utils.py
68
7711
# Copyright 2014 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Module to host the VerboseSubprocess, ChangeDir, and ReSearch classes. """ import os import re import subprocess def print_subprocess_args(prefix, *args, **kwargs): """Print out args in a human-readable manner.""" def quote_and_escape(string): """Quote and escape a string if necessary.""" if ' ' in string or '\n' in string: string = '"%s"' % string.replace('"', '\\"') return string if 'cwd' in kwargs: print '%scd %s' % (prefix, kwargs['cwd']) print prefix + ' '.join(quote_and_escape(arg) for arg in args[0]) if 'cwd' in kwargs: print '%scd -' % prefix class VerboseSubprocess(object): """Call subprocess methods, but print out command before executing. Attributes: verbose: (boolean) should we print out the command or not. If not, this is the same as calling the subprocess method quiet: (boolean) suppress stdout on check_call and call. prefix: (string) When verbose, what to print before each command. """ def __init__(self, verbose): self.verbose = verbose self.quiet = not verbose self.prefix = '~~$ ' def check_call(self, *args, **kwargs): """Wrapper for subprocess.check_call(). Args: *args: to be passed to subprocess.check_call() **kwargs: to be passed to subprocess.check_call() Returns: Whatever subprocess.check_call() returns. Raises: OSError or subprocess.CalledProcessError: raised by check_call. """ if self.verbose: print_subprocess_args(self.prefix, *args, **kwargs) if self.quiet: with open(os.devnull, 'w') as devnull: return subprocess.check_call(*args, stdout=devnull, **kwargs) else: return subprocess.check_call(*args, **kwargs) def call(self, *args, **kwargs): """Wrapper for subprocess.check(). Args: *args: to be passed to subprocess.check_call() **kwargs: to be passed to subprocess.check_call() Returns: Whatever subprocess.call() returns. Raises: OSError or subprocess.CalledProcessError: raised by call. """ if self.verbose: print_subprocess_args(self.prefix, *args, **kwargs) if self.quiet: with open(os.devnull, 'w') as devnull: return subprocess.call(*args, stdout=devnull, **kwargs) else: return subprocess.call(*args, **kwargs) def check_output(self, *args, **kwargs): """Wrapper for subprocess.check_output(). Args: *args: to be passed to subprocess.check_output() **kwargs: to be passed to subprocess.check_output() Returns: Whatever subprocess.check_output() returns. Raises: OSError or subprocess.CalledProcessError: raised by check_output. """ if self.verbose: print_subprocess_args(self.prefix, *args, **kwargs) return subprocess.check_output(*args, **kwargs) def strip_output(self, *args, **kwargs): """Wrap subprocess.check_output and str.strip(). Pass the given arguments into subprocess.check_output() and return the results, after stripping any excess whitespace. Args: *args: to be passed to subprocess.check_output() **kwargs: to be passed to subprocess.check_output() Returns: The output of the process as a string without leading or trailing whitespace. Raises: OSError or subprocess.CalledProcessError: raised by check_output. """ if self.verbose: print_subprocess_args(self.prefix, *args, **kwargs) return str(subprocess.check_output(*args, **kwargs)).strip() def popen(self, *args, **kwargs): """Wrapper for subprocess.Popen(). Args: *args: to be passed to subprocess.Popen() **kwargs: to be passed to subprocess.Popen() Returns: The output of subprocess.Popen() Raises: OSError or subprocess.CalledProcessError: raised by Popen. 
""" if self.verbose: print_subprocess_args(self.prefix, *args, **kwargs) return subprocess.Popen(*args, **kwargs) class ChangeDir(object): """Use with a with-statement to temporarily change directories.""" # pylint: disable=I0011,R0903 def __init__(self, directory, verbose=False): self._directory = directory self._verbose = verbose def __enter__(self): if self._directory != os.curdir: if self._verbose: print '~~$ cd %s' % self._directory cwd = os.getcwd() os.chdir(self._directory) self._directory = cwd def __exit__(self, etype, value, traceback): if self._directory != os.curdir: if self._verbose: print '~~$ cd %s' % self._directory os.chdir(self._directory) class ReSearch(object): """A collection of static methods for regexing things.""" @staticmethod def search_within_stream(input_stream, pattern, default=None): """Search for regular expression in a file-like object. Opens a file for reading and searches line by line for a match to the regex and returns the parenthesized group named return for the first match. Does not search across newlines. For example: pattern = '^root(:[^:]*){4}:(?P<return>[^:]*)' with open('/etc/passwd', 'r') as stream: return search_within_file(stream, pattern) should return root's home directory (/root on my system). Args: input_stream: file-like object to be read pattern: (string) to be passed to re.compile default: what to return if no match Returns: A string or whatever default is """ pattern_object = re.compile(pattern) for line in input_stream: match = pattern_object.search(line) if match: return match.group('return') return default @staticmethod def search_within_string(input_string, pattern, default=None): """Search for regular expression in a string. Args: input_string: (string) to be searched pattern: (string) to be passed to re.compile default: what to return if no match Returns: A string or whatever default is """ match = re.search(pattern, input_string) return match.group('return') if match else default @staticmethod def search_within_output(verbose, pattern, default, *args, **kwargs): """Search for regular expression in a process output. Does not search across newlines. Args: verbose: (boolean) shoule we call print_subprocess_args? pattern: (string) to be passed to re.compile default: what to return if no match *args: to be passed to subprocess.Popen() **kwargs: to be passed to subprocess.Popen() Returns: A string or whatever default is """ if verbose: print_subprocess_args('~~$ ', *args, **kwargs) proc = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs) return ReSearch.search_within_stream(proc.stdout, pattern, default)
bsd-3-clause
-5,381,130,281,967,364,000
33.424107
77
0.59253
false
mbauskar/helpdesk-frappe
frappe/patches/v5_2/change_checks_to_not_null.py
58
1267
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.model import default_fields

def execute():
    for table in frappe.db.get_tables():
        doctype = table[3:]
        if frappe.db.exists("DocType", doctype):
            fieldnames = [df["fieldname"] for df in frappe.get_all("DocField",
                fields=["fieldname"], filters={"parent": doctype})]
            custom_fieldnames = [df["fieldname"] for df in frappe.get_all("Custom Field",
                fields=["fieldname"], filters={"dt": doctype})]
        else:
            fieldnames = custom_fieldnames = []

        for column in frappe.db.sql("""desc `{0}`""".format(table), as_dict=True):
            if column["Type"]=="int(1)":
                fieldname = column["Field"]

                # only change for defined fields, ignore old fields that don't exist in meta
                if not (fieldname in default_fields
                    or fieldname in fieldnames
                    or fieldname in custom_fieldnames):
                    continue

                # set 0
                frappe.db.sql("""update `{table}` set `{column}`=0 where `{column}` is null"""\
                    .format(table=table, column=fieldname))
                frappe.db.commit()

                # change definition
                frappe.db.sql_ddl("""alter table `{table}` modify `{column}` int(1) not null default {default}"""\
                    .format(table=table, column=fieldname, default=cint(column["Default"])))
mit
7,124,992,491,837,945,000
36.264706
102
0.671665
false
wiltonlazary/arangodb
3rdParty/V8/V8-5.0.71.39/tools/swarming_client/tests/lru_test.py
6
6265
#!/usr/bin/env python # Copyright 2013 The Swarming Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 that # can be found in the LICENSE file. import json import logging import os import sys import tempfile import unittest ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, ROOT_DIR) from utils import lru class LRUDictTest(unittest.TestCase): @staticmethod def prepare_lru_dict(keys): """Returns new LRUDict with given |keys| added one by one.""" lru_dict = lru.LRUDict() for key in keys: lru_dict.add(key, None) return lru_dict def assert_order(self, lru_dict, expected_keys): """Asserts order of keys in |lru_dict| is |expected_keys|. expected_keys[0] is supposedly oldest key, expected_keys[-1] is newest. Destroys |lru_dict| state in the process. """ # Check keys iteration works. self.assertEqual(lru_dict.keys_set(), set(expected_keys)) # Check pop_oldest returns keys in expected order. actual_keys = [] while lru_dict: oldest_key, _ = lru_dict.pop_oldest() actual_keys.append(oldest_key) self.assertEqual(actual_keys, expected_keys) def assert_same_data(self, lru_dict, regular_dict): """Asserts that given |lru_dict| contains same data as |regular_dict|.""" self.assertEqual(lru_dict.keys_set(), set(regular_dict.keys())) self.assertEqual(set(lru_dict.itervalues()), set(regular_dict.values())) for k, v in regular_dict.items(): self.assertEqual(lru_dict.get(k), v) def test_basic_dict_funcs(self): lru_dict = lru.LRUDict() # Add a bunch. data = {1: 'one', 2: 'two', 3: 'three'} for k, v in data.items(): lru_dict.add(k, v) # Check its there. self.assert_same_data(lru_dict, data) # Replace value. lru_dict.add(1, 'one!!!') data[1] = 'one!!!' self.assert_same_data(lru_dict, data) # Check pop works. self.assertEqual(lru_dict.pop(2), 'two') data.pop(2) self.assert_same_data(lru_dict, data) # Pop missing key. with self.assertRaises(KeyError): lru_dict.pop(2) # Touch has no effect on set of keys and values. lru_dict.touch(1) self.assert_same_data(lru_dict, data) # Touch fails on missing key. with self.assertRaises(KeyError): lru_dict.touch(22) def test_magic_methods(self): # Check __nonzero__, __len__ and __contains__ for empty dict. lru_dict = lru.LRUDict() self.assertFalse(lru_dict) self.assertEqual(len(lru_dict), 0) self.assertFalse(1 in lru_dict) # Dict with one item. lru_dict.add(1, 'one') self.assertTrue(lru_dict) self.assertEqual(len(lru_dict), 1) self.assertTrue(1 in lru_dict) self.assertFalse(2 in lru_dict) def test_order(self): data = [1, 2, 3] # Edge cases. self.assert_order(self.prepare_lru_dict([]), []) self.assert_order(self.prepare_lru_dict([1]), [1]) # No touches. self.assert_order(self.prepare_lru_dict(data), data) # Touching newest item is noop. lru_dict = self.prepare_lru_dict(data) lru_dict.touch(3) self.assert_order(lru_dict, data) # Touch to move to newest. lru_dict = self.prepare_lru_dict(data) lru_dict.touch(2) self.assert_order(lru_dict, [1, 3, 2]) # Pop newest. lru_dict = self.prepare_lru_dict(data) lru_dict.pop(1) self.assert_order(lru_dict, [2, 3]) # Pop in the middle. lru_dict = self.prepare_lru_dict(data) lru_dict.pop(2) self.assert_order(lru_dict, [1, 3]) # Pop oldest. lru_dict = self.prepare_lru_dict(data) lru_dict.pop(3) self.assert_order(lru_dict, [1, 2]) # Add oldest. lru_dict = self.prepare_lru_dict(data) lru_dict.batch_insert_oldest([(4, 4), (5, 5)]) self.assert_order(lru_dict, [4, 5] + data) # Add newest. 
lru_dict = self.prepare_lru_dict(data) lru_dict.add(4, 4) self.assert_order(lru_dict, data + [4]) def test_load_save(self): def save_and_load(lru_dict): handle, tmp_name = tempfile.mkstemp(prefix=u'lru_test') os.close(handle) try: lru_dict.save(tmp_name) return lru.LRUDict.load(tmp_name) finally: try: os.unlink(tmp_name) except OSError: pass data = [1, 2, 3] # Edge case. empty = save_and_load(lru.LRUDict()) self.assertFalse(empty) # Normal flow. lru_dict = save_and_load(self.prepare_lru_dict(data)) self.assert_order(lru_dict, data) # After touches. lru_dict = self.prepare_lru_dict(data) lru_dict.touch(2) lru_dict = save_and_load(lru_dict) self.assert_order(lru_dict, [1, 3, 2]) # After pop. lru_dict = self.prepare_lru_dict(data) lru_dict.pop(2) lru_dict = save_and_load(lru_dict) self.assert_order(lru_dict, [1, 3]) # After add. lru_dict = self.prepare_lru_dict(data) lru_dict.add(4, 4) lru_dict.batch_insert_oldest([(5, 5), (6, 6)]) lru_dict = save_and_load(lru_dict) self.assert_order(lru_dict, [5, 6] + data + [4]) def test_corrupted_state_file(self): def load_from_state(state_text): handle, tmp_name = tempfile.mkstemp(prefix=u'lru_test') os.close(handle) try: with open(tmp_name, 'w') as f: f.write(state_text) return lru.LRUDict.load(tmp_name) finally: os.unlink(tmp_name) # Loads correct state just fine. self.assertIsNotNone(load_from_state(json.dumps([ ['key1', 'value1'], ['key2', 'value2'], ]))) # Not a json. with self.assertRaises(ValueError): load_from_state('garbage, not a state') # Not a list. with self.assertRaises(ValueError): load_from_state('{}') # Not a list of pairs. with self.assertRaises(ValueError): load_from_state(json.dumps([ ['key', 'value', 'and whats this?'], ])) # Duplicate keys. with self.assertRaises(ValueError): load_from_state(json.dumps([ ['key', 'value'], ['key', 'another_value'], ])) if __name__ == '__main__': VERBOSE = '-v' in sys.argv logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR) unittest.main()
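The LRUDict behaviour these tests pin down, condensed into a few lines (illustrative only, assuming utils is on sys.path as in the test setup):

from utils import lru

d = lru.LRUDict()
for key in (1, 2, 3):
    d.add(key, None)            # order (oldest..newest): 1, 2, 3
d.touch(2)                      # order becomes: 1, 3, 2
oldest_key, _ = d.pop_oldest()  # -> (1, None)
d.save('lru_state.json')        # later restored with lru.LRUDict.load('lru_state.json')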
apache-2.0
-7,147,157,685,968,510,000
26.47807
80
0.622666
false
kkampardi/Plinth
plinth/modules/matrixsynapse/views.py
2
2670
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

"""
Views for the Matrix Synapse module.
"""

from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.views.generic import FormView

from plinth import actions
from plinth import views
from plinth.modules import matrixsynapse
from plinth.forms import DomainSelectionForm
from plinth.utils import get_domain_names


class SetupView(FormView):
    """Show matrix-synapse setup page."""
    template_name = 'matrix-synapse-pre-setup.html'
    form_class = DomainSelectionForm
    success_url = reverse_lazy('matrixsynapse:index')

    def form_valid(self, form):
        """Handle valid form submission."""
        domain_name = form.cleaned_data['domain_name']
        actions.superuser_run('matrixsynapse',
                              ['setup', '--domain-name', domain_name])

        return super().form_valid(form)

    def get_context_data(self, *args, **kwargs):
        """Provide context data to the template."""
        context = super().get_context_data(**kwargs)
        context['title'] = matrixsynapse.name
        context['description'] = matrixsynapse.description
        context['domain_names'] = get_domain_names()

        return context


class ServiceView(views.ServiceView):
    """Show matrix-synapse service page."""
    service_id = matrixsynapse.managed_services[0]
    template_name = 'matrix-synapse.html'
    description = matrixsynapse.description
    diagnostics_module_name = 'matrixsynapse'

    def dispatch(self, request, *args, **kwargs):
        """Redirect to setup page if setup is not done yet."""
        if not matrixsynapse.is_setup():
            return redirect('matrixsynapse:setup')

        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, *args, **kwargs):
        """Add additional context data for template."""
        context = super().get_context_data(**kwargs)
        context['domain_name'] = matrixsynapse.get_configured_domain_name()
        return context
agpl-3.0
345,140,781,094,044,350
34.131579
75
0.695506
false
ZihengJiang/mxnet
python/mxnet/profiler.py
31
2161
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""Profiler setting methods."""
from __future__ import absolute_import

import ctypes
from .base import _LIB, check_call, c_str


def profiler_set_config(mode='symbolic', filename='profile.json'):
    """Set up the configure of profiler.

    Parameters
    ----------
    mode : string, optional
        Indicates whether to enable the profiler, can
        be 'symbolic', or 'all'. Defaults to `symbolic`.
    filename : string, optional
        The name of output trace file. Defaults to 'profile.json'.
    """
    mode2int = {'symbolic': 0, 'all': 1}
    check_call(_LIB.MXSetProfilerConfig(
        ctypes.c_int(mode2int[mode]),
        c_str(filename)))


def profiler_set_state(state='stop'):
    """Set up the profiler state to record operator.

    Parameters
    ----------
    state : string, optional
        Indicates whether to run the profiler, can
        be 'stop' or 'run'. Default is `stop`.
    """
    state2int = {'stop': 0, 'run': 1}
    check_call(_LIB.MXSetProfilerState(ctypes.c_int(state2int[state])))


def dump_profile():
    """Dump profile and stop profiler. Use this to save profile
    in advance in case your program cannot exit normally."""
    check_call(_LIB.MXDumpProfile())
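The three functions above are typically called in sequence; a sketch of that flow, assuming the module is importable as mxnet.profiler:

from mxnet import profiler

profiler.profiler_set_config(mode='all', filename='profile.json')
profiler.profiler_set_state('run')
# ... execute the operators to be profiled ...
profiler.profiler_set_state('stop')
profiler.dump_profile()   # writes profile.json even if the program cannot exit normally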
apache-2.0
7,532,587,623,368,357,000
36.258621
90
0.698751
false
critiqjo/key-mon
src/keymon/options.py
15
14492
#!/usr/bin/python # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Options Class for save, restoring and getting parameters from the command line. This provides a class which handles both saving options to disk and gathering options from the command line. It behaves a little like optparse in that you can get or set the attributes by name. It uses ConfigParser to save the variables to disk in ini format. """ __author__ = 'Scott Kirkwood (scott+keymon@forusers.com)' import ConfigParser import gettext import logging import optparse import os import sys LOG = logging.getLogger('options') gettext.install('key-mon', 'locale') class OptionException(Exception): pass class OptionItem(object): """Handles on option. It know both about optparse options and ConfigParser options. By setting opt_short, opt_long to None you won't create an optparse option. By setting ini_group, ini_name to None you won't create a ConfigParser option. """ def __init__(self, dest, _type, default, name, help, opt_group=None, opt_short=None, opt_long=None, ini_group=None, ini_name=None): """Create an option Args: dest: a unique name for this variable, used internally. _type: The data type. default: The default value if none given. name: the translated name. _help: Help text to show. opt_group: Optional option group opt_short: the short name of the option opt_long: the long name for the option ini_group: optional name of the group in the ini file. ini_name: optional name of the name in the ini file """ self._dirty = False self._value = None self._temp_value = None self._dest = dest self._type = _type self._default = default self._name = name self._help = help self._opt_group = opt_group self._opt_short = opt_short if self._opt_short and not self._opt_short.startswith('-'): raise OptionException('Invalid short option %s' % self._opt_short) self._opt_long = opt_long if self._opt_long and not self._opt_long.startswith('--'): raise OptionException('Invalid long option %r' % self._opt_long) self._ini_group = ini_group self._ini_name = ini_name if self._type not in ('int', 'float', 'bool', 'str'): raise OptionException('Unsupported type: %s' % self._type) self._set_value(default) def add_to_parser(self, parser): if not self._opt_short and not self._opt_long: return if self._type == 'bool': self._add_bool_to_parser(parser) return args = [] if self._opt_short: args.append(self._opt_short) if self._opt_long: args.append(self._opt_long) parser.add_option(dest=self._dest, type=self._type, default=self._default, help=self._help, *args) def _add_bool_to_parser(self, parser): """Booleans need special handling.""" args = [] if self._opt_short: args.append(self._opt_short) if self._opt_long: args.append(self._opt_long) parser.add_option(action='store_true', default=self._default, dest=self._dest, help=self._help, *args) if self._ini_group: # Only need the --no version if it could be saved to ini file. 
parser.add_option('--no' + self._opt_long.lstrip('-'), action='store_false', dest=self._dest, help=_('Opposite of %s') % self._opt_long) def set_from_optparse(self, opts, args): """Try and set an option from optparse. Args: opts: options as returned from parse_args() args: arguments as returned bys sys.args. """ if not self._opt_short and not self._opt_long: return # Was this option actually passed on the command line? found = False if args: for arg in args: if self._type == 'bool' and arg.startswith('--no'): arg = '--' + arg[4:] # Remove the --x=123, if any arg = arg.split('=')[0] if arg == self._opt_short or arg == self._opt_long: found = True break if hasattr(opts, self._dest): opt_val = getattr(opts, self._dest) if not self._ini_name: # For commands like --version which don't have stored values self._set_value(opt_val) if found: self._set_temp_value(opt_val) def reset_to_default(self): """Reset to the default value.""" self._set_value(self._default) self._set_temp_value(None) def get_value(self): """Return the value.""" if self._temp_value is not None: return self._temp_value return self._value def _set_attr_value(self, attr, val): """Set the value via attribute name. Args: attr: attribute name ('_value', or '_temp_value') val: value to set """ old_val = getattr(self, attr) if val is None: setattr(self, attr, val) elif self._type == 'int': setattr(self, attr, int(val)) elif self._type == 'float': setattr(self, attr, float(val)) elif self._type == 'bool': if isinstance(val, basestring): if val.lower() in ('false', 'off', 'no', '0'): setattr(self, attr, False) elif val.lower() in ('true', 'on', 'yes', '1'): setattr(self, attr, True) else: raise OptionException('Unable to convert %s to bool' % val) else: setattr(self, attr, bool(val)) else: setattr(self, attr, val) self._dirty = old_val != getattr(self, attr) def _set_value(self, val): self._set_attr_value('_value', val) self._set_attr_value('_temp_value', None) def _set_temp_value(self, val): self._set_attr_value('_temp_value', val) value = property(get_value, _set_value, doc="Value") @property def dest(self): """Destination variable name.""" return self._dest @property def name(self): """Localized name of the option.""" return self._name @property def help(self): """Long description of the option.""" return self._help @property def type(self): """String name of the type.""" return self._type @property def opt_group(self): """Option group, if any.""" return self._opt_group @property def opt_short(self): """Short option property (ex. '-v').""" return self._opt_short @property def opt_long(self): """Long option property (ex. 
'--verbose').""" return self._opt_long @property def ini_group(self): """Name of the ini group or None.""" return self._ini_group @property def ini_name(self): """Name in the ini, or None.""" return self._ini_name @property def ini_value(self): """Value to store in the ini, always a string.""" if self._value is None: return None if self._type == 'bool': if self._value is True: return '1' else: return '0' else: return str(self._value) class Options(object): """Store the options in memory, also saves to dist and creates opt_parser.""" def __init__(self): self._options = {} self._ini_filename = None self._opt_group = None self._opt_group_desc = {} self._options_order = [] def __getattr__(self, name): if name not in self.__dict__['_options']: raise AttributeError('Invalid attribute name: %r' % name) return self._options[name].value def __setattr__(self, name, value): if name == '_options' or name not in self.__dict__['_options']: object.__setattr__(self, name, value) else: LOG.info('Setting %r = %r', name, value) self.__dict__['_options'][name].value = value def add_option_group(self, group, desc): self._opt_group = group self._opt_group_desc[group] = desc def add_option(self, dest, type='str', default=None, name=None, help=None, opt_short=None, opt_long=None, ini_group=None, ini_name=None): """Create an option Args: dest: a unique name for this variable, used internally. type: The data type. default: The default value if none given. name: the translated name. help: Help text to show. opt_group: the name of the option group or None opt_short: the short name of the option opt_long: the long name for the option ini_group: the name of the group in the ini file. ini_name: the name of the name in the ini file """ if dest in self._options: raise OptionException('Options %s already added' % dest) self._options_order.append(dest) self._options[dest] = OptionItem(dest, type, default, name, help, opt_group=self._opt_group, opt_short=opt_short, opt_long=opt_long, ini_group=ini_group, ini_name=ini_name) def parse_args(self, desc, args=None): """Add the options to the optparse instance and parse command line Args: desc: Description to use for the program. 
args: Args for testing or sys.args[1:] otherwise """ parser = optparse.OptionParser(desc) for dest in self._options_order: opt = self._options[dest] opt.add_to_parser(parser) self._opt_ret, self._other_args = parser.parse_args(args) for opt in self._options.values(): opt.set_from_optparse(self._opt_ret, args) def parse_ini(self, fp): """Parser an ini file from fp, which is file-like class.""" config = ConfigParser.SafeConfigParser() config.readfp(fp) checker = {} for opt in self._options.values(): if opt.ini_group: checker[opt.ini_group + '-' + opt.ini_name] = True if (config.has_section(opt.ini_group) and config.has_option(opt.ini_group, opt.ini_name)): opt.value = config.get(opt.ini_group, opt.ini_name) LOG.info('From ini getting %s.%s = %s', opt.ini_group, opt.ini_name, opt.value) for section in config.sections(): for name, value in config.items(section): combined_name = section + '-' + name if not combined_name in checker: LOG.info('Unknown option %r in section [%s]', name, section) # we no longer throw an error to be backward compatible def write_ini(self, fp): """Parser an ini file from fp, which is file-like class.""" config = ConfigParser.SafeConfigParser() for opt in self._options.values(): if not opt.ini_group: continue if not config.has_section(opt.ini_group): config.add_section(opt.ini_group) if opt.ini_value is not None: config.set(opt.ini_group, opt.ini_name, opt.ini_value) config.write(fp) def read_ini_file(self, fname): self._ini_filename = os.path.expanduser(fname) LOG.info('Reading from %r', self._ini_filename) if os.path.exists(self._ini_filename) and os.path.isfile(self._ini_filename): fi = open(self._ini_filename) self.parse_ini(fi) fi.close() else: LOG.info('%r does not exist', self._ini_filename) def save(self): self._write_ini_file(self._ini_filename) def _make_dirs(self, fname): if not os.path.exists(fname): dirname = os.path.dirname(fname) if not os.path.exists(dirname): LOG.info('Creating directory %r', dirname) os.makedirs(dirname) def _write_ini_file(self, fname): self._make_dirs(fname) LOG.info('Writing config file %r', fname) fo = open(fname, 'w') self.write_ini(fo) fo.close() def reset_to_defaults(self): """Reset ini file to defaults.""" for opt in self._options.values(): if not opt.ini_group: continue opt.reset_to_default() if __name__ == '__main__': o = Options() o.add_option(opt_short='-l', opt_long='--larger', dest='larger', default=False, type='bool', help='Make the dialog 25% larger than normal.') o.add_option(opt_short='-m', opt_long='--meta', dest='meta', type='bool', ini_group='buttons', ini_name='meta', default=False, help='Show the meta (windows) key.') o.add_option(opt_long='--scale', dest='scale', type='float', default=1.0, ini_group='ui', ini_name='scale', help='Scale the dialog. ex. 2.0 is 2 times larger, 0.5 is ' 'half the size. Defaults to %default') o.add_option(opt_long='--kbdfile', dest='kbd_file', ini_group='devices', ini_name='map', default='us.kbd', help='Use this kbd filename instead running xmodmap.') o.add_option(opt_short='-v', opt_long='--version', dest='version', type='bool', help='Show version information and exit.') o.add_option(opt_short='-t', opt_long='--theme', dest='theme', type='str', ini_group='ui', ini_name='theme', help='The theme to use when drawing status images (ex. 
"-t apple").') o.add_option(opt_long='--list-themes', dest='list_themes', type='bool', help='List available themes') o.add_option(opt_long='--old-keys', dest='old_keys', type='int', ini_group='buttons', ini_name='old-keys', help='How many historical keypresses to show (defaults to %default)', default=0) o.add_option(opt_short=None, opt_long=None, type='int', dest='x_pos', default=-1, help='Last X Position', ini_group='position', ini_name='x') o.add_option_group('Developer Options', 'Don\'t use') o.add_option(opt_short='-d', opt_long='--debug', dest='debug', type='bool', help='Output debugging information.') lines = [] lines.append('[ui]') lines.append('scale = 1.0') lines.append('theme = classic') lines.append('[buttons]') lines.append('meta = 0') lines.append('old-keys = 0') lines.append('[devices]') lines.append('map = us.kbd') lines.append('[position]') lines.append('x = -1') import StringIO io = StringIO.StringIO('\n'.join(lines)) o.parse_ini(io) o.parse_args('%prog [options]', sys.argv) io = StringIO.StringIO() o.write_ini(io) print io.getvalue()
apache-2.0
-6,279,942,363,672,504,000
32.162471
84
0.619031
false
maciekcc/tensorflow
tensorflow/python/ops/linalg_grad.py
23
9040
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in linalg_ops.py. Useful reference for derivative formulas is An extended collection of matrix derivative results for forward and reverse mode algorithmic differentiation by Mike Giles: http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf A detailed derivation of formulas for backpropagating through spectral layers (SVD and Eig) by Ionescu, Vantzos & Sminchisescu: https://arxiv.org/pdf/1509.07838v4.pdf """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops @ops.RegisterGradient("MatrixInverse") def _MatrixInverseGrad(op, grad): """Gradient for MatrixInverse.""" ainv = op.outputs[0] return -math_ops.matmul( ainv, math_ops.matmul( grad, ainv, adjoint_b=True), adjoint_a=True) @ops.RegisterGradient("MatrixDeterminant") def _MatrixDeterminantGrad(op, grad): """Gradient for MatrixDeterminant.""" a = op.inputs[0] c = op.outputs[0] a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True) multipliers = array_ops.reshape( grad * c, array_ops.concat([array_ops.shape(c), [1, 1]], 0)) return multipliers * a_adj_inv @ops.RegisterGradient("Cholesky") def _CholeskyGrad(op, grad): """Gradient for Cholesky.""" # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1} l = op.outputs[0] num_rows = array_ops.shape(l)[-1] batch_shape = array_ops.shape(l)[:-2] l_inverse = linalg_ops.matrix_triangular_solve( l, linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=l.dtype)) middle = math_ops.matmul(l, grad, adjoint_a=True) middle = array_ops.matrix_set_diag(middle, 0.5 * array_ops.matrix_diag_part(middle)) middle = array_ops.matrix_band_part(middle, -1, 0) grad_a = math_ops.matmul( math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse) grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a)) return grad_a * 0.5 @ops.RegisterGradient("MatrixSolve") def _MatrixSolveGrad(op, grad): """Gradient for MatrixSolve.""" a = op.inputs[0] adjoint_a = op.get_attr("adjoint") c = op.outputs[0] grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a) if adjoint_a: grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True) else: grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True) return (grad_a, grad_b) @ops.RegisterGradient("MatrixSolveLs") def _MatrixSolveLsGrad(op, grad): """Gradients for MatrixSolveLs.""" # TODO(rmlarsen): The implementation could be more efficient: # a) Output the Cholesky factorization from forward op instead of # recomputing it here. # b) Implement a symmetric rank-k update op instead of computing # x*z + transpose(x*z). This pattern occurs other places in TensorFlow. 
def _overdetermined(op, grad): """Gradients for the overdetermined case of MatrixSolveLs. This is the backprop for the solution to the normal equations of the first kind: X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B which solve the least squares problem min ||A * X - B||_F^2 + lambda ||X||_F^2. """ a = op.inputs[0] b = op.inputs[1] l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype) x = op.outputs[0] a_shape = array_ops.shape(a) batch_shape = a_shape[:-2] n = a_shape[-1] identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype) gramian = math_ops.matmul(a, a, adjoint_a=True) + l2_regularizer * identity chol = linalg_ops.cholesky(gramian) # Temporary z = (A^T * A + lambda * I)^{-1} * grad. z = linalg_ops.cholesky_solve(chol, grad) xzt = math_ops.matmul(x, z, adjoint_b=True) zx_sym = xzt + array_ops.matrix_transpose(xzt) grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True) grad_b = math_ops.matmul(a, z) return (grad_a, grad_b, None) def _underdetermined(op, grad): """Gradients for the underdetermined case of MatrixSolveLs. This is the backprop for the solution to the normal equations of the second kind: X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B that (for lambda=0) solve the least squares problem min ||X||_F subject to A*X = B. """ a = op.inputs[0] b = op.inputs[1] l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype) a_shape = array_ops.shape(a) batch_shape = a_shape[:-2] m = a_shape[-2] identity = linalg_ops.eye(m, batch_shape=batch_shape, dtype=a.dtype) gramian = math_ops.matmul(a, a, adjoint_b=True) + l2_regularizer * identity chol = linalg_ops.cholesky(gramian) grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad)) # Temporary tmp = (A * A^T + lambda * I)^{-1} * B. tmp = linalg_ops.cholesky_solve(chol, b) a1 = math_ops.matmul(tmp, a, adjoint_a=True) a1 = -math_ops.matmul(grad_b, a1) a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True) a2 = math_ops.matmul(tmp, a2, adjoint_b=True) grad_a = a1 + a2 return (grad_a, grad_b, None) fast = op.get_attr("fast") if fast is False: raise ValueError("Gradient not defined for fast=False") matrix_shape = op.inputs[0].get_shape()[-2:] if matrix_shape.is_fully_defined(): if matrix_shape[-2] >= matrix_shape[-1]: return _overdetermined(op, grad) else: return _underdetermined(op, grad) else: # We have to defer determining the shape to runtime and use # conditional execution of the appropriate graph. 
matrix_shape = array_ops.shape(op.inputs[0])[-2:] return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1], lambda: _overdetermined(op, grad), lambda: _underdetermined(op, grad)) @ops.RegisterGradient("MatrixTriangularSolve") def _MatrixTriangularSolveGrad(op, grad): """Gradient for MatrixTriangularSolve.""" a = op.inputs[0] adjoint_a = op.get_attr("adjoint") lower_a = op.get_attr("lower") c = op.outputs[0] grad_b = linalg_ops.matrix_triangular_solve( a, grad, lower=lower_a, adjoint=not adjoint_a) if adjoint_a: grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True) else: grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True) if lower_a: grad_a = array_ops.matrix_band_part(grad_a, -1, 0) else: grad_a = array_ops.matrix_band_part(grad_a, 0, -1) return (grad_a, grad_b) @ops.RegisterGradient("SelfAdjointEigV2") def _SelfAdjointEigV2Grad(op, grad_e, grad_v): """Gradient for SelfAdjointEigV2.""" e = op.outputs[0] v = op.outputs[1] # a = op.inputs[0], which satisfies # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i] with ops.control_dependencies([grad_e.op, grad_v.op]): if grad_v is not None: # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0). # Notice that because of the term involving f, the gradient becomes # infinite (or NaN in practice) when eigenvalues are not unique. # Mathematically this should not be surprising, since for (k-fold) # degenerate eigenvalues, the corresponding eigenvectors are only defined # up to arbitrary rotation in a (k-dimensional) subspace. f = array_ops.matrix_set_diag( math_ops.reciprocal( array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)), array_ops.zeros_like(e)) grad_a = math_ops.matmul( v, math_ops.matmul( array_ops.matrix_diag(grad_e) + f * math_ops.matmul( v, grad_v, adjoint_a=True), v, adjoint_b=True)) else: grad_a = math_ops.matmul( v, math_ops.matmul( array_ops.matrix_diag(grad_e), v, adjoint_b=True)) # The forward op only depends on the lower triangular part of a, so here we # symmetrize and take the lower triangle grad_a = array_ops.matrix_band_part( grad_a + math_ops.conj(array_ops.matrix_transpose(grad_a)), -1, 0) grad_a = array_ops.matrix_set_diag(grad_a, 0.5 * array_ops.matrix_diag_part(grad_a)) return grad_a
apache-2.0
-1,518,084,748,407,574,500
37.468085
80
0.645796
false
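Note: the registered gradients in the linalg_grad.py content above implement the matrix-calculus identities from the Giles reference (for C = A^{-1}, grad_A = -A^{-H} grad_C A^{-H}; for c = det(A), grad_A = grad_c * c * A^{-T}). The following NumPy-only sketch is not part of the repository; the helper names, test matrix, and tolerances are illustrative choices. It checks those two backprop formulas against central finite differences.

import numpy as np

def matrix_inverse_grad(ainv, grad_c):
  # Backprop for C = A^{-1} (real case): grad_A = -A^{-T} @ grad_C @ A^{-T},
  # the same product _MatrixInverseGrad builds with adjoint matmuls.
  return -ainv.T.dot(grad_c).dot(ainv.T)

def matrix_determinant_grad(a, grad_c):
  # Backprop for c = det(A): grad_A = grad_c * det(A) * A^{-T}.
  return grad_c * np.linalg.det(a) * np.linalg.inv(a).T

def finite_difference_grad(f, a, grad_out, eps=1e-6):
  # Central differences of the scalar objective sum(grad_out * f(A)).
  g = np.zeros_like(a)
  for idx in np.ndindex(*a.shape):
    da = np.zeros_like(a)
    da[idx] = eps
    g[idx] = np.sum(grad_out * (f(a + da) - f(a - da))) / (2.0 * eps)
  return g

rng = np.random.RandomState(0)
a = rng.randn(4, 4) + 4.0 * np.eye(4)  # well-conditioned test matrix
grad_c = rng.randn(4, 4)

assert np.allclose(matrix_inverse_grad(np.linalg.inv(a), grad_c),
                   finite_difference_grad(np.linalg.inv, a, grad_c),
                   atol=1e-5)
assert np.allclose(matrix_determinant_grad(a, 1.3),
                   finite_difference_grad(np.linalg.det, a, 1.3),
                   atol=1e-4)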
kirkbroadhurst/sudokupy
test_moves.py
1
3391
from moves import find_possible_moves, find_impossible_moves, resolve_moves def test_find_possible_moves_1(): # the first item, with value 0, is 'missing'. find_possible_moves will tell us it should be a 9. state = [((i,i), i) for i in range(9)] moves = find_possible_moves(state) assert len(moves) == 1 assert moves[0][0] == (0,0) assert moves[0][1] == 9 def test_find_possible_moves_2(): # the first and last items have value 0 state = [((i,i), i if i < 8 else 0) for i in range(9)] moves = find_possible_moves(state) moves = sorted(moves) assert len(moves) == 4 assert moves[0] == ((0,0), 8) assert moves[1] == ((0,0), 9) assert moves[2] == ((8,8), 8) assert moves[3] == ((8,8), 9) def test_resolve_moves_0(): possible_moves = [[((0,0), 1)], [((0,0), 1)]] moves = resolve_moves(possible_moves) assert len(moves) == 1 assert moves[0] == ((0,0), 1) def test_resolve_moves_1(): possible_moves = [[((0,0), 1), ((0,0), 2)], [((0,0), 1), ((0,0), 3)]] moves = resolve_moves(possible_moves) assert len(moves) == 1 assert moves[0] == ((0,0), 1) def test_resolve_moves_2(): possible_moves = [[((0,0), 1), ((0,0), 2)], [((0,0), 1), ((0,0), 2), ((0,0), 3)]] moves = resolve_moves(possible_moves) assert len(moves) == 0 def test_resolve_moves_confounding_0(): possible_moves = [[((0,0), 1), ((0,0), 2), ((1,1), 1), ((1,1), 2)], [((0,0), 1), ((0,0), 2), ((0,0), 3), ((1,1), 1), ((1,1), 2)]] moves = resolve_moves(possible_moves) assert len(moves) == 0 def test_find_impossible_moves_1(): # the first item, with value 0, is 'missing'. find_impossible_moves will tell us it cannot be [1,2,3,4,5,6,7,8]. state = [((i,i), i) for i in range(9)] moves = find_impossible_moves(state) assert len(moves) == 1 assert moves[0][0] == (0,0) assert moves[0][1] == [1,2,3,4,5,6,7,8] def test_find_impossible_moves_2(): state = [((i,i), 0 if i%2==0 else i) for i in range(9)] moves = find_impossible_moves(state) assert len(moves) == 5 for i, m in enumerate(moves): assert m[0] == (2*i,2*i) assert m[1] == [1,3, 5, 7] def test_find_impossible_moves_3(): state = [((i,i), 0) for i in range(9)] moves = find_impossible_moves(state) assert len(moves) == 9 for i, m in enumerate(moves): assert m[0] == (i, i) # there are no impossible moves in this case assert m[1] == [] def test_resolve_impossible_moves_1(): possible_moves = [[((0,0), 1), ((0,0), 2)], [((0,0), 1), ((0,0), 2), ((0,0), 3)]] impossible_moves = [((0,0), [2, 3])] moves = resolve_moves(possible_moves, impossible_moves) assert len(moves) == 1 def test_resolve_impossible_moves_2(): possible_moves = [[((0,0), 1), ((0,0), 2), ((1,1), 1), ((1,1), 2)], [((0,0), 1), ((0,0), 2), ((0,0), 3), ((1,1), 1), ((1,1), 2)]] impossible_moves = [((0,0), [1, 5, 6, 7, 8])] moves = resolve_moves(possible_moves, impossible_moves) assert len(moves) == 1 def test_resolve_impossible_moves_3(): possible_moves = [[((0,0), 1), ((0,0), 2), ((1,1), 1), ((1,1), 2)], [((0,0), 1), ((0,0), 2), ((0,0), 3), ((1,1), 1), ((1,1), 2)]] impossible_moves = [((0,0), [1, 5, 6, 7, 8]), ((1,1), [1, 3, 5, 6, 7, 8, 9])] moves = resolve_moves(possible_moves, impossible_moves) assert len(moves) == 2
mit
3,486,412,079,220,668,000
36.274725
133
0.539664
false
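Note: the test_moves.py content above pins down the expected behaviour of resolve_moves without showing moves.py itself. The sketch below is a hypothetical reconstruction consistent with those assertions, not the repository's actual implementation: intersect each cell's candidates across the per-group lists in possible_moves, subtract any values ruled out by impossible_moves, and keep only cells with exactly one surviving candidate.

from collections import defaultdict

def resolve_moves(possible_moves, impossible_moves=None):
    # possible_moves: one list of ((row, col), value) candidates per
    # constraint group; impossible_moves: ((row, col), [excluded values]).
    candidates = {}
    for group in possible_moves:
        per_cell = defaultdict(set)
        for cell, value in group:
            per_cell[cell].add(value)
        for cell, values in per_cell.items():
            if cell in candidates:
                candidates[cell] = candidates[cell] & values
            else:
                candidates[cell] = values
    for cell, excluded in (impossible_moves or []):
        if cell in candidates:
            candidates[cell] -= set(excluded)
    # A move is only certain when exactly one candidate survives for a cell.
    return [(cell, min(values))
            for cell, values in sorted(candidates.items())
            if len(values) == 1]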
Fireblend/chromium-crosswalk
tools/cygprofile/cyglog_to_orderfile.py
6
8358
#!/usr/bin/python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Symbolizes a log file produced by cyprofile instrumentation. Given a log file and the binary being profiled, creates an orderfile. """ import logging import multiprocessing import optparse import os import tempfile import string import sys import cygprofile_utils import symbol_extractor def _ParseLogLines(log_file_lines): """Parses a merged cyglog produced by mergetraces.py. Args: log_file_lines: array of lines in log file produced by profiled run lib_name: library or executable containing symbols Below is an example of a small log file: 5086e000-52e92000 r-xp 00000000 b3:02 51276 libchromeview.so secs usecs pid:threadid func START 1314897086 795828 3587:1074648168 0x509e105c 1314897086 795874 3587:1074648168 0x509e0eb4 1314897086 796326 3587:1074648168 0x509e0e3c 1314897086 796552 3587:1074648168 0x509e07bc END Returns: An ordered list of callee offsets. """ call_lines = [] vm_start = 0 line = log_file_lines[0] assert 'r-xp' in line end_index = line.find('-') vm_start = int(line[:end_index], 16) for line in log_file_lines[3:]: fields = line.split() if len(fields) == 4: call_lines.append(fields) else: assert fields[0] == 'END' # Convert strings to int in fields. call_info = [] for call_line in call_lines: addr = int(call_line[3], 16) if vm_start < addr: addr -= vm_start call_info.append(addr) return call_info def _GroupLibrarySymbolInfosByOffset(lib_filename): """Returns a dict {offset: [SymbolInfo]} from a library.""" symbol_infos = symbol_extractor.SymbolInfosFromBinary(lib_filename) return symbol_extractor.GroupSymbolInfosByOffset(symbol_infos) class SymbolNotFoundException(Exception): def __init__(self, value): super(SymbolNotFoundException, self).__init__(value) self.value = value def __str__(self): return repr(self.value) def _FindSymbolInfosAtOffset(offset_to_symbol_infos, offset): """Finds all SymbolInfo at a given offset. Args: offset_to_symbol_infos: {offset: [SymbolInfo]} offset: offset to look the symbols at Returns: The list of SymbolInfo at the given offset Raises: SymbolNotFoundException if the offset doesn't match any symbol. """ if offset in offset_to_symbol_infos: return offset_to_symbol_infos[offset] elif offset % 2 and (offset - 1) in offset_to_symbol_infos: # On ARM, odd addresses are used to signal thumb instruction. They are # generated by setting the LSB to 1 (see # http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0471e/Babfjhia.html). # TODO(lizeb): Make sure this hack doesn't propagate to other archs. return offset_to_symbol_infos[offset - 1] else: raise SymbolNotFoundException(offset) def _GetObjectFileNames(obj_dir): """Returns the list of object files in a directory.""" obj_files = [] for (dirpath, _, filenames) in os.walk(obj_dir): for file_name in filenames: if file_name.endswith('.o'): obj_files.append(os.path.join(dirpath, file_name)) return obj_files def _AllSymbolInfos(object_filenames): """Returns a list of SymbolInfo from an iterable of filenames.""" pool = multiprocessing.Pool() # Hopefully the object files are in the page cache at this step, so IO should # not be a problem (hence no concurrency limit on the pool). 
symbol_infos_nested = pool.map( symbol_extractor.SymbolInfosFromBinary, object_filenames) result = [] for symbol_infos in symbol_infos_nested: result += symbol_infos return result def _GetSymbolToSectionMapFromObjectFiles(obj_dir): """ Creates a mapping from symbol to linker section name by scanning all the object files. """ object_files = _GetObjectFileNames(obj_dir) symbol_to_section_map = {} symbol_warnings = cygprofile_utils.WarningCollector(300) symbol_infos = _AllSymbolInfos(object_files) for symbol_info in symbol_infos: symbol = symbol_info.name if symbol.startswith('.LTHUNK'): continue section = symbol_info.section if ((symbol in symbol_to_section_map) and (symbol_to_section_map[symbol] != symbol_info.section)): symbol_warnings.Write('Symbol ' + symbol + ' in conflicting sections ' + section + ' and ' + symbol_to_section_map[symbol]) elif not section.startswith('.text'): symbol_warnings.Write('Symbol ' + symbol + ' in incorrect section ' + section) else: symbol_to_section_map[symbol] = section symbol_warnings.WriteEnd('bad sections') return symbol_to_section_map def _WarnAboutDuplicates(offsets): """Warns about duplicate offsets. Args: offsets: list of offsets to check for duplicates Returns: True if there are no duplicates, False otherwise. """ seen_offsets = set() ok = True for offset in offsets: if offset not in seen_offsets: seen_offsets.add(offset) else: ok = False logging.warning('Duplicate offset: ' + hex(offset)) return ok def _OutputOrderfile(offsets, offset_to_symbol_infos, symbol_to_section_map, output_file): """Outputs the orderfile to output_file. Args: offsets: Iterable of offsets to match to section names offset_to_symbol_infos: {offset: [SymbolInfo]} symbol_to_section_map: {name: section} output_file: file-like object to write the results to """ success = True unknown_symbol_warnings = cygprofile_utils.WarningCollector(300) symbol_not_found_warnings = cygprofile_utils.WarningCollector(300) output_sections = set() for offset in offsets: try: symbol_infos = _FindSymbolInfosAtOffset(offset_to_symbol_infos, offset) for symbol_info in symbol_infos: if symbol_info.name in symbol_to_section_map: section = symbol_to_section_map[symbol_info.name] if not section in output_sections: output_file.write(section + '\n') output_sections.add(section) else: unknown_symbol_warnings.Write( 'No known section for symbol ' + symbol_info.name) except SymbolNotFoundException: symbol_not_found_warnings.Write( 'Did not find function in binary. 
offset: ' + hex(offset)) success = False unknown_symbol_warnings.WriteEnd('no known section for symbol.') symbol_not_found_warnings.WriteEnd('symbol not found in the binary.') return success def main(): parser = optparse.OptionParser(usage= 'usage: %prog [options] <merged_cyglog> <library> <output_filename>') parser.add_option('--target-arch', action='store', dest='arch', choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'], help='The target architecture for libchrome.so') options, argv = parser.parse_args(sys.argv) if not options.arch: options.arch = cygprofile_utils.DetectArchitecture() if len(argv) != 4: parser.print_help() return 1 (log_filename, lib_filename, output_filename) = argv[1:] symbol_extractor.SetArchitecture(options.arch) obj_dir = os.path.abspath(os.path.join( os.path.dirname(lib_filename), '../obj')) log_file_lines = map(string.rstrip, open(log_filename).readlines()) offsets = _ParseLogLines(log_file_lines) _WarnAboutDuplicates(offsets) offset_to_symbol_infos = _GroupLibrarySymbolInfosByOffset(lib_filename) symbol_to_section_map = _GetSymbolToSectionMapFromObjectFiles(obj_dir) success = False temp_filename = None output_file = None try: (fd, temp_filename) = tempfile.mkstemp(dir=os.path.dirname(output_filename)) output_file = os.fdopen(fd, 'w') ok = _OutputOrderfile( offsets, offset_to_symbol_infos, symbol_to_section_map, output_file) output_file.close() os.rename(temp_filename, output_filename) temp_filename = None success = ok finally: if output_file: output_file.close() if temp_filename: os.remove(temp_filename) return 0 if success else 1 if __name__ == '__main__': logging.basicConfig(level=logging.INFO) sys.exit(main())
bsd-3-clause
-5,208,371,290,180,285,000
31.395349
90
0.684015
false
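Note: two details of the cyglog_to_orderfile.py content above are worth illustrating in isolation: the ARM Thumb-bit fallback used by _FindSymbolInfosAtOffset, and the first-occurrence ordering that _WarnAboutDuplicates guards. The sketch below is standalone and uses made-up offsets and plain lists in place of symbol_extractor's SymbolInfo objects.

def find_symbols_at_offset(offset_to_symbols, offset):
  # Exact match first; on ARM a set LSB only marks a Thumb function, so
  # fall back to the even offset before declaring the symbol unknown.
  if offset in offset_to_symbols:
    return offset_to_symbols[offset]
  if offset % 2 and (offset - 1) in offset_to_symbols:
    return offset_to_symbols[offset - 1]
  raise KeyError('No symbol found at offset 0x%x' % offset)

def unique_offsets_in_order(offsets):
  # Keep only the first occurrence of each offset; the orderfile cares
  # about the order in which functions are first reached.
  seen = set()
  ordered = []
  for offset in offsets:
    if offset not in seen:
      seen.add(offset)
      ordered.append(offset)
  return ordered

# Made-up example: the second function is logged with its Thumb bit set.
offset_to_symbols = {0x1000: ['_start'], 0x1040: ['Foo::Bar()']}
logged_offsets = [0x1041, 0x1000, 0x1041]
for offset in unique_offsets_in_order(logged_offsets):
  print(find_symbols_at_offset(offset_to_symbols, offset))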