repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
var_hash
int64
-9,223,186,179,200,150,000
9,223,291,175B
doc_hash
int64
-9,223,304,365,658,930,000
9,223,309,051B
line_mean
float64
3.5
99.8
line_max
int64
13
999
alpha_frac
float64
0.25
0.97
autogenerated
bool
1 class
Turpial/Turpial
turpial/ui/gtk/tray.py
3
2115
# -*- coding: utf-8 -*- # GTK3 tray icon for Turpial from gi.repository import Gtk from turpial import DESC from turpial.ui.lang import i18n class TrayIcon(Gtk.StatusIcon): def __init__(self, base): Gtk.StatusIcon.__init__(self) self.base = base self.set_from_pixbuf(self.base.load_image('turpial-tray.png', True)) self.set_tooltip_text(DESC) self.menu = Gtk.Menu() def __build_common_menu(self): accounts = Gtk.MenuItem(i18n.get('accounts')) preferences = Gtk.MenuItem(i18n.get('preferences')) sounds = Gtk.CheckMenuItem(i18n.get('enable_sounds')) #sound_.set_active(not self.sound._disable) exit_ = Gtk.MenuItem(i18n.get('exit')) self.menu.append(accounts) self.menu.append(preferences) self.menu.append(sounds) self.menu.append(Gtk.SeparatorMenuItem()) self.menu.append(exit_) accounts.connect('activate', self.base.show_accounts_dialog) preferences.connect('activate', self.base.show_preferences_dialog) sounds.connect('toggled', self.base.disable_sound) exit_.connect('activate', self.base.main_quit) def empty(self): self.menu = Gtk.Menu() self.__build_common_menu() def normal(self): self.menu = Gtk.Menu() tweet = Gtk.MenuItem(i18n.get('new_tweet')) tweet.connect('activate', self.base.show_update_box) direct = Gtk.MenuItem(i18n.get('direct_message')) direct.connect('activate', self.base.show_update_box, True) self.menu.append(tweet) self.menu.append(direct) self.__build_common_menu() def popup(self, button, activate_time): self.menu.show_all() self.menu.popup(None, None, None, None, button, activate_time) return True # Change the tray icon image to indicate updates def notify(self): self.set_from_pixbuf(self.base.load_image('turpial-tray-update.png', True)) # Clear the tray icon image def clear(self): self.set_from_pixbuf(self.base.load_image('turpial-tray.png', True))
gpl-3.0
-263,349,868,984,470,940
-2,815,470,347,074,492,000
31.045455
83
0.636407
false
sidmitra/django_nonrel_testapp
django/forms/util.py
311
1983
from django.utils.html import conditional_escape from django.utils.encoding import StrAndUnicode, force_unicode from django.utils.safestring import mark_safe # Import ValidationError so that it can be imported from this # module to maintain backwards compatibility. from django.core.exceptions import ValidationError def flatatt(attrs): """ Convert a dictionary of attributes to a single string. The returned string will contain a leading space followed by key="value", XML-style pairs. It is assumed that the keys do not need to be XML-escaped. If the passed dictionary is empty, then return an empty string. """ return u''.join([u' %s="%s"' % (k, conditional_escape(v)) for k, v in attrs.items()]) class ErrorDict(dict, StrAndUnicode): """ A collection of errors that knows how to display itself in various formats. The dictionary keys are the field names, and the values are the errors. """ def __unicode__(self): return self.as_ul() def as_ul(self): if not self: return u'' return mark_safe(u'<ul class="errorlist">%s</ul>' % ''.join([u'<li>%s%s</li>' % (k, force_unicode(v)) for k, v in self.items()])) def as_text(self): return u'\n'.join([u'* %s\n%s' % (k, u'\n'.join([u' * %s' % force_unicode(i) for i in v])) for k, v in self.items()]) class ErrorList(list, StrAndUnicode): """ A collection of errors that knows how to display itself in various formats. """ def __unicode__(self): return self.as_ul() def as_ul(self): if not self: return u'' return mark_safe(u'<ul class="errorlist">%s</ul>' % ''.join([u'<li>%s</li>' % conditional_escape(force_unicode(e)) for e in self])) def as_text(self): if not self: return u'' return u'\n'.join([u'* %s' % force_unicode(e) for e in self]) def __repr__(self): return repr([force_unicode(e) for e in self])
bsd-3-clause
3,147,318,030,483,691,500
-8,722,550,970,596,099,000
35.722222
126
0.626324
false
robclark/chromium
third_party/tlslite/tlslite/TLSConnection.py
6
71226
""" MAIN CLASS FOR TLS LITE (START HERE!). """ from __future__ import generators import socket from utils.compat import formatExceptionTrace from TLSRecordLayer import TLSRecordLayer from Session import Session from constants import * from utils.cryptomath import getRandomBytes from errors import * from messages import * from mathtls import * from HandshakeSettings import HandshakeSettings class TLSConnection(TLSRecordLayer): """ This class wraps a socket and provides TLS handshaking and data transfer. To use this class, create a new instance, passing a connected socket into the constructor. Then call some handshake function. If the handshake completes without raising an exception, then a TLS connection has been negotiated. You can transfer data over this connection as if it were a socket. This class provides both synchronous and asynchronous versions of its key functions. The synchronous versions should be used when writing single-or multi-threaded code using blocking sockets. The asynchronous versions should be used when performing asynchronous, event-based I/O with non-blocking sockets. Asynchronous I/O is a complicated subject; typically, you should not use the asynchronous functions directly, but should use some framework like asyncore or Twisted which TLS Lite integrates with (see L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}). """ def __init__(self, sock): """Create a new TLSConnection instance. @param sock: The socket data will be transmitted on. The socket should already be connected. It may be in blocking or non-blocking mode. @type sock: L{socket.socket} """ TLSRecordLayer.__init__(self, sock) def handshakeClientSRP(self, username, password, session=None, settings=None, checker=None, async=False): """Perform an SRP handshake in the role of client. This function performs a TLS/SRP handshake. SRP mutually authenticates both parties to each other using only a username and password. This function may also perform a combined SRP and server-certificate handshake, if the server chooses to authenticate itself with a certificate chain in addition to doing SRP. TLS/SRP is non-standard. Most TLS implementations don't support it. See U{http://www.ietf.org/html.charters/tls-charter.html} or U{http://trevp.net/tlssrp/} for the latest information on TLS/SRP. Like any handshake function, this can be called on a closed TLS connection, or on a TLS connection that is already open. If called on an open connection it performs a re-handshake. If the function completes without raising an exception, the TLS connection will be open and available for data transfer. If an exception is raised, the connection will have been automatically closed (if it was ever open). @type username: str @param username: The SRP username. @type password: str @param password: The SRP password. @type session: L{tlslite.Session.Session} @param session: A TLS session to attempt to resume. This session must be an SRP session performed with the same username and password as were passed in. If the resumption does not succeed, a full SRP handshake will be performed. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. @type checker: L{tlslite.Checker.Checker} @param checker: A Checker instance. 
This instance will be invoked to examine the other party's authentication credentials, if the handshake completes succesfully. @type async: bool @param async: If False, this function will block until the handshake is completed. If True, this function will return a generator. Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or will raise StopIteration if the handshake operation is completed. @rtype: None or an iterable @return: If 'async' is True, a generator object will be returned. @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. @raise tlslite.errors.TLSAuthenticationError: If the checker doesn't like the other party's authentication credentials. """ handshaker = self._handshakeClientAsync(srpParams=(username, password), session=session, settings=settings, checker=checker) if async: return handshaker for result in handshaker: pass def handshakeClientCert(self, certChain=None, privateKey=None, session=None, settings=None, checker=None, async=False): """Perform a certificate-based handshake in the role of client. This function performs an SSL or TLS handshake. The server will authenticate itself using an X.509 or cryptoID certificate chain. If the handshake succeeds, the server's certificate chain will be stored in the session's serverCertChain attribute. Unless a checker object is passed in, this function does no validation or checking of the server's certificate chain. If the server requests client authentication, the client will send the passed-in certificate chain, and use the passed-in private key to authenticate itself. If no certificate chain and private key were passed in, the client will attempt to proceed without client authentication. The server may or may not allow this. Like any handshake function, this can be called on a closed TLS connection, or on a TLS connection that is already open. If called on an open connection it performs a re-handshake. If the function completes without raising an exception, the TLS connection will be open and available for data transfer. If an exception is raised, the connection will have been automatically closed (if it was ever open). @type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: The certificate chain to be used if the server requests client authentication. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: The private key to be used if the server requests client authentication. @type session: L{tlslite.Session.Session} @param session: A TLS session to attempt to resume. If the resumption does not succeed, a full handshake will be performed. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. @type checker: L{tlslite.Checker.Checker} @param checker: A Checker instance. This instance will be invoked to examine the other party's authentication credentials, if the handshake completes succesfully. @type async: bool @param async: If False, this function will block until the handshake is completed. If True, this function will return a generator. 
Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or will raise StopIteration if the handshake operation is completed. @rtype: None or an iterable @return: If 'async' is True, a generator object will be returned. @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. @raise tlslite.errors.TLSAuthenticationError: If the checker doesn't like the other party's authentication credentials. """ handshaker = self._handshakeClientAsync(certParams=(certChain, privateKey), session=session, settings=settings, checker=checker) if async: return handshaker for result in handshaker: pass def handshakeClientUnknown(self, srpCallback=None, certCallback=None, session=None, settings=None, checker=None, async=False): """Perform a to-be-determined type of handshake in the role of client. This function performs an SSL or TLS handshake. If the server requests client certificate authentication, the certCallback will be invoked and should return a (certChain, privateKey) pair. If the callback returns None, the library will attempt to proceed without client authentication. The server may or may not allow this. If the server requests SRP authentication, the srpCallback will be invoked and should return a (username, password) pair. If the callback returns None, the local implementation will signal a user_canceled error alert. After the handshake completes, the client can inspect the connection's session attribute to determine what type of authentication was performed. Like any handshake function, this can be called on a closed TLS connection, or on a TLS connection that is already open. If called on an open connection it performs a re-handshake. If the function completes without raising an exception, the TLS connection will be open and available for data transfer. If an exception is raised, the connection will have been automatically closed (if it was ever open). @type srpCallback: callable @param srpCallback: The callback to be used if the server requests SRP authentication. If None, the client will not offer support for SRP ciphersuites. @type certCallback: callable @param certCallback: The callback to be used if the server requests client certificate authentication. @type session: L{tlslite.Session.Session} @param session: A TLS session to attempt to resume. If the resumption does not succeed, a full handshake will be performed. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. @type checker: L{tlslite.Checker.Checker} @param checker: A Checker instance. This instance will be invoked to examine the other party's authentication credentials, if the handshake completes succesfully. @type async: bool @param async: If False, this function will block until the handshake is completed. If True, this function will return a generator. Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or will raise StopIteration if the handshake operation is completed. @rtype: None or an iterable @return: If 'async' is True, a generator object will be returned. @raise socket.error: If a socket error occurs. 
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. @raise tlslite.errors.TLSAuthenticationError: If the checker doesn't like the other party's authentication credentials. """ handshaker = self._handshakeClientAsync(unknownParams=(srpCallback, certCallback), session=session, settings=settings, checker=checker) if async: return handshaker for result in handshaker: pass def handshakeClientSharedKey(self, username, sharedKey, settings=None, checker=None, async=False): """Perform a shared-key handshake in the role of client. This function performs a shared-key handshake. Using shared symmetric keys of high entropy (128 bits or greater) mutually authenticates both parties to each other. TLS with shared-keys is non-standard. Most TLS implementations don't support it. See U{http://www.ietf.org/html.charters/tls-charter.html} for the latest information on TLS with shared-keys. If the shared-keys Internet-Draft changes or is superceded, TLS Lite will track those changes, so the shared-key support in later versions of TLS Lite may become incompatible with this version. Like any handshake function, this can be called on a closed TLS connection, or on a TLS connection that is already open. If called on an open connection it performs a re-handshake. If the function completes without raising an exception, the TLS connection will be open and available for data transfer. If an exception is raised, the connection will have been automatically closed (if it was ever open). @type username: str @param username: The shared-key username. @type sharedKey: str @param sharedKey: The shared key. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. @type checker: L{tlslite.Checker.Checker} @param checker: A Checker instance. This instance will be invoked to examine the other party's authentication credentials, if the handshake completes succesfully. @type async: bool @param async: If False, this function will block until the handshake is completed. If True, this function will return a generator. Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or will raise StopIteration if the handshake operation is completed. @rtype: None or an iterable @return: If 'async' is True, a generator object will be returned. @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. @raise tlslite.errors.TLSAuthenticationError: If the checker doesn't like the other party's authentication credentials. 
""" handshaker = self._handshakeClientAsync(sharedKeyParams=(username, sharedKey), settings=settings, checker=checker) if async: return handshaker for result in handshaker: pass def _handshakeClientAsync(self, srpParams=(), certParams=(), unknownParams=(), sharedKeyParams=(), session=None, settings=None, checker=None, recursive=False): handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams, certParams=certParams, unknownParams=unknownParams, sharedKeyParams=sharedKeyParams, session=session, settings=settings, recursive=recursive) for result in self._handshakeWrapperAsync(handshaker, checker): yield result def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams, sharedKeyParams, session, settings, recursive): if not recursive: self._handshakeStart(client=True) #Unpack parameters srpUsername = None # srpParams password = None # srpParams clientCertChain = None # certParams privateKey = None # certParams srpCallback = None # unknownParams certCallback = None # unknownParams #session # sharedKeyParams (or session) #settings # settings if srpParams: srpUsername, password = srpParams elif certParams: clientCertChain, privateKey = certParams elif unknownParams: srpCallback, certCallback = unknownParams elif sharedKeyParams: session = Session()._createSharedKey(*sharedKeyParams) if not settings: settings = HandshakeSettings() settings = settings._filter() #Validate parameters if srpUsername and not password: raise ValueError("Caller passed a username but no password") if password and not srpUsername: raise ValueError("Caller passed a password but no username") if clientCertChain and not privateKey: raise ValueError("Caller passed a certChain but no privateKey") if privateKey and not clientCertChain: raise ValueError("Caller passed a privateKey but no certChain") if clientCertChain: foundType = False try: import cryptoIDlib.CertChain if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain): if "cryptoID" not in settings.certificateTypes: raise ValueError("Client certificate doesn't "\ "match Handshake Settings") settings.certificateTypes = ["cryptoID"] foundType = True except ImportError: pass if not foundType and isinstance(clientCertChain, X509CertChain): if "x509" not in settings.certificateTypes: raise ValueError("Client certificate doesn't match "\ "Handshake Settings") settings.certificateTypes = ["x509"] foundType = True if not foundType: raise ValueError("Unrecognized certificate type") if session: if not session.valid(): session = None #ignore non-resumable sessions... 
elif session.resumable and \ (session.srpUsername != srpUsername): raise ValueError("Session username doesn't match") #Add Faults to parameters if srpUsername and self.fault == Fault.badUsername: srpUsername += "GARBAGE" if password and self.fault == Fault.badPassword: password += "GARBAGE" if sharedKeyParams: identifier = sharedKeyParams[0] sharedKey = sharedKeyParams[1] if self.fault == Fault.badIdentifier: identifier += "GARBAGE" session = Session()._createSharedKey(identifier, sharedKey) elif self.fault == Fault.badSharedKey: sharedKey += "GARBAGE" session = Session()._createSharedKey(identifier, sharedKey) #Initialize locals serverCertChain = None cipherSuite = 0 certificateType = CertificateType.x509 premasterSecret = None #Get client nonce clientRandom = getRandomBytes(32) #Initialize acceptable ciphersuites cipherSuites = [] if srpParams: cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames) cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) elif certParams: cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) elif unknownParams: if srpCallback: cipherSuites += \ CipherSuite.getSrpRsaSuites(settings.cipherNames) cipherSuites += \ CipherSuite.getSrpSuites(settings.cipherNames) cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) elif sharedKeyParams: cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) else: cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) #Initialize acceptable certificate types certificateTypes = settings._getCertificateTypes() #Tentatively set the version to the client's minimum version. #We'll use this for the ClientHello, and if an error occurs #parsing the Server Hello, we'll use this version for the response self.version = settings.maxVersion #Either send ClientHello (with a resumable session)... if session: #If it's a resumable (i.e. not a shared-key session), then its #ciphersuite must be one of the acceptable ciphersuites if (not sharedKeyParams) and \ session.cipherSuite not in cipherSuites: raise ValueError("Session's cipher suite not consistent "\ "with parameters") else: clientHello = ClientHello() clientHello.create(settings.maxVersion, clientRandom, session.sessionID, cipherSuites, certificateTypes, session.srpUsername) #Or send ClientHello (without) else: clientHello = ClientHello() clientHello.create(settings.maxVersion, clientRandom, createByteArraySequence([]), cipherSuites, certificateTypes, srpUsername) for result in self._sendMsg(clientHello): yield result #Get ServerHello (or missing_srp_username) for result in self._getMsg((ContentType.handshake, ContentType.alert), HandshakeType.server_hello): if result in (0,1): yield result else: break msg = result if isinstance(msg, ServerHello): serverHello = msg elif isinstance(msg, Alert): alert = msg #If it's not a missing_srp_username, re-raise if alert.description != AlertDescription.missing_srp_username: self._shutdown(False) raise TLSRemoteAlert(alert) #If we're not in SRP callback mode, we won't have offered SRP #without a username, so we shouldn't get this alert if not srpCallback: for result in self._sendError(\ AlertDescription.unexpected_message): yield result srpParams = srpCallback() #If the callback returns None, cancel the handshake if srpParams == None: for result in self._sendError(AlertDescription.user_canceled): yield result #Recursively perform handshake for result in self._handshakeClientAsyncHelper(srpParams, None, None, None, None, settings, True): yield result return #Get the server version. 
Do this before anything else, so any #error alerts will use the server's version self.version = serverHello.server_version #Future responses from server must use this version self._versionCheck = True #Check ServerHello if serverHello.server_version < settings.minVersion: for result in self._sendError(\ AlertDescription.protocol_version, "Too old version: %s" % str(serverHello.server_version)): yield result if serverHello.server_version > settings.maxVersion: for result in self._sendError(\ AlertDescription.protocol_version, "Too new version: %s" % str(serverHello.server_version)): yield result if serverHello.cipher_suite not in cipherSuites: for result in self._sendError(\ AlertDescription.illegal_parameter, "Server responded with incorrect ciphersuite"): yield result if serverHello.certificate_type not in certificateTypes: for result in self._sendError(\ AlertDescription.illegal_parameter, "Server responded with incorrect certificate type"): yield result if serverHello.compression_method != 0: for result in self._sendError(\ AlertDescription.illegal_parameter, "Server responded with incorrect compression method"): yield result #Get the server nonce serverRandom = serverHello.random #If the server agrees to resume if session and session.sessionID and \ serverHello.session_id == session.sessionID: #If a shared-key, we're flexible about suites; otherwise the #server-chosen suite has to match the session's suite if sharedKeyParams: session.cipherSuite = serverHello.cipher_suite elif serverHello.cipher_suite != session.cipherSuite: for result in self._sendError(\ AlertDescription.illegal_parameter,\ "Server's ciphersuite doesn't match session"): yield result #Set the session for this connection self.session = session #Calculate pending connection states self._calcPendingStates(clientRandom, serverRandom, settings.cipherImplementations) #Exchange ChangeCipherSpec and Finished messages for result in self._getFinished(): yield result for result in self._sendFinished(): yield result #Mark the connection as open self._handshakeDone(resumed=True) #If server DOES NOT agree to resume else: if sharedKeyParams: for result in self._sendError(\ AlertDescription.user_canceled, "Was expecting a shared-key resumption"): yield result #We've already validated these cipherSuite = serverHello.cipher_suite certificateType = serverHello.certificate_type #If the server chose an SRP suite... if cipherSuite in CipherSuite.srpSuites: #Get ServerKeyExchange, ServerHelloDone for result in self._getMsg(ContentType.handshake, HandshakeType.server_key_exchange, cipherSuite): if result in (0,1): yield result else: break serverKeyExchange = result for result in self._getMsg(ContentType.handshake, HandshakeType.server_hello_done): if result in (0,1): yield result else: break serverHelloDone = result #If the server chose an SRP+RSA suite... elif cipherSuite in CipherSuite.srpRsaSuites: #Get Certificate, ServerKeyExchange, ServerHelloDone for result in self._getMsg(ContentType.handshake, HandshakeType.certificate, certificateType): if result in (0,1): yield result else: break serverCertificate = result for result in self._getMsg(ContentType.handshake, HandshakeType.server_key_exchange, cipherSuite): if result in (0,1): yield result else: break serverKeyExchange = result for result in self._getMsg(ContentType.handshake, HandshakeType.server_hello_done): if result in (0,1): yield result else: break serverHelloDone = result #If the server chose an RSA suite... 
elif cipherSuite in CipherSuite.rsaSuites: #Get Certificate[, CertificateRequest], ServerHelloDone for result in self._getMsg(ContentType.handshake, HandshakeType.certificate, certificateType): if result in (0,1): yield result else: break serverCertificate = result for result in self._getMsg(ContentType.handshake, (HandshakeType.server_hello_done, HandshakeType.certificate_request)): if result in (0,1): yield result else: break msg = result certificateRequest = None if isinstance(msg, CertificateRequest): certificateRequest = msg for result in self._getMsg(ContentType.handshake, HandshakeType.server_hello_done): if result in (0,1): yield result else: break serverHelloDone = result elif isinstance(msg, ServerHelloDone): serverHelloDone = msg else: raise AssertionError() #Calculate SRP premaster secret, if server chose an SRP or #SRP+RSA suite if cipherSuite in CipherSuite.srpSuites + \ CipherSuite.srpRsaSuites: #Get and check the server's group parameters and B value N = serverKeyExchange.srp_N g = serverKeyExchange.srp_g s = serverKeyExchange.srp_s B = serverKeyExchange.srp_B if (g,N) not in goodGroupParameters: for result in self._sendError(\ AlertDescription.untrusted_srp_parameters, "Unknown group parameters"): yield result if numBits(N) < settings.minKeySize: for result in self._sendError(\ AlertDescription.untrusted_srp_parameters, "N value is too small: %d" % numBits(N)): yield result if numBits(N) > settings.maxKeySize: for result in self._sendError(\ AlertDescription.untrusted_srp_parameters, "N value is too large: %d" % numBits(N)): yield result if B % N == 0: for result in self._sendError(\ AlertDescription.illegal_parameter, "Suspicious B value"): yield result #Check the server's signature, if server chose an #SRP+RSA suite if cipherSuite in CipherSuite.srpRsaSuites: #Hash ServerKeyExchange/ServerSRPParams hashBytes = serverKeyExchange.hash(clientRandom, serverRandom) #Extract signature bytes from ServerKeyExchange sigBytes = serverKeyExchange.signature if len(sigBytes) == 0: for result in self._sendError(\ AlertDescription.illegal_parameter, "Server sent an SRP ServerKeyExchange "\ "message without a signature"): yield result #Get server's public key from the Certificate message for result in self._getKeyFromChain(serverCertificate, settings): if result in (0,1): yield result else: break publicKey, serverCertChain = result #Verify signature if not publicKey.verify(sigBytes, hashBytes): for result in self._sendError(\ AlertDescription.decrypt_error, "Signature failed to verify"): yield result #Calculate client's ephemeral DH values (a, A) a = bytesToNumber(getRandomBytes(32)) A = powMod(g, a, N) #Calculate client's static DH values (x, v) x = makeX(bytesToString(s), srpUsername, password) v = powMod(g, x, N) #Calculate u u = makeU(N, A, B) #Calculate premaster secret k = makeK(N, g) S = powMod((B - (k*v)) % N, a+(u*x), N) if self.fault == Fault.badA: A = N S = 0 premasterSecret = numberToBytes(S) #Send ClientKeyExchange for result in self._sendMsg(\ ClientKeyExchange(cipherSuite).createSRP(A)): yield result #Calculate RSA premaster secret, if server chose an RSA suite elif cipherSuite in CipherSuite.rsaSuites: #Handle the presence of a CertificateRequest if certificateRequest: if unknownParams and certCallback: certParamsNew = certCallback() if certParamsNew: clientCertChain, privateKey = certParamsNew #Get server's public key from the Certificate message for result in self._getKeyFromChain(serverCertificate, settings): if result in (0,1): yield result else: break publicKey, 
serverCertChain = result #Calculate premaster secret premasterSecret = getRandomBytes(48) premasterSecret[0] = settings.maxVersion[0] premasterSecret[1] = settings.maxVersion[1] if self.fault == Fault.badPremasterPadding: premasterSecret[0] = 5 if self.fault == Fault.shortPremasterSecret: premasterSecret = premasterSecret[:-1] #Encrypt premaster secret to server's public key encryptedPreMasterSecret = publicKey.encrypt(premasterSecret) #If client authentication was requested, send Certificate #message, either with certificates or empty if certificateRequest: clientCertificate = Certificate(certificateType) if clientCertChain: #Check to make sure we have the same type of #certificates the server requested wrongType = False if certificateType == CertificateType.x509: if not isinstance(clientCertChain, X509CertChain): wrongType = True elif certificateType == CertificateType.cryptoID: if not isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain): wrongType = True if wrongType: for result in self._sendError(\ AlertDescription.handshake_failure, "Client certificate is of wrong type"): yield result clientCertificate.create(clientCertChain) for result in self._sendMsg(clientCertificate): yield result else: #The server didn't request client auth, so we #zeroize these so the clientCertChain won't be #stored in the session. privateKey = None clientCertChain = None #Send ClientKeyExchange clientKeyExchange = ClientKeyExchange(cipherSuite, self.version) clientKeyExchange.createRSA(encryptedPreMasterSecret) for result in self._sendMsg(clientKeyExchange): yield result #If client authentication was requested and we have a #private key, send CertificateVerify if certificateRequest and privateKey: if self.version == (3,0): #Create a temporary session object, just for the #purpose of creating the CertificateVerify session = Session() session._calcMasterSecret(self.version, premasterSecret, clientRandom, serverRandom) verifyBytes = self._calcSSLHandshakeHash(\ session.masterSecret, "") elif self.version in ((3,1), (3,2)): verifyBytes = stringToBytes(\ self._handshake_md5.digest() + \ self._handshake_sha.digest()) if self.fault == Fault.badVerifyMessage: verifyBytes[0] = ((verifyBytes[0]+1) % 256) signedBytes = privateKey.sign(verifyBytes) certificateVerify = CertificateVerify() certificateVerify.create(signedBytes) for result in self._sendMsg(certificateVerify): yield result #Create the session object self.session = Session() self.session._calcMasterSecret(self.version, premasterSecret, clientRandom, serverRandom) self.session.sessionID = serverHello.session_id self.session.cipherSuite = cipherSuite self.session.srpUsername = srpUsername self.session.clientCertChain = clientCertChain self.session.serverCertChain = serverCertChain #Calculate pending connection states self._calcPendingStates(clientRandom, serverRandom, settings.cipherImplementations) #Exchange ChangeCipherSpec and Finished messages for result in self._sendFinished(): yield result for result in self._getFinished(): yield result #Mark the connection as open self.session._setResumable(True) self._handshakeDone(resumed=False) def handshakeServer(self, sharedKeyDB=None, verifierDB=None, certChain=None, privateKey=None, reqCert=False, sessionCache=None, settings=None, checker=None, reqCAs=None, tlsIntolerant=False): """Perform a handshake in the role of server. This function performs an SSL or TLS handshake. Depending on the arguments and the behavior of the client, this function can perform a shared-key, SRP, or certificate-based handshake. 
It can also perform a combined SRP and server-certificate handshake. Like any handshake function, this can be called on a closed TLS connection, or on a TLS connection that is already open. If called on an open connection it performs a re-handshake. This function does not send a Hello Request message before performing the handshake, so if re-handshaking is required, the server must signal the client to begin the re-handshake through some other means. If the function completes without raising an exception, the TLS connection will be open and available for data transfer. If an exception is raised, the connection will have been automatically closed (if it was ever open). @type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB} @param sharedKeyDB: A database of shared symmetric keys associated with usernames. If the client performs a shared-key handshake, the session's sharedKeyUsername attribute will be set. @type verifierDB: L{tlslite.VerifierDB.VerifierDB} @param verifierDB: A database of SRP password verifiers associated with usernames. If the client performs an SRP handshake, the session's srpUsername attribute will be set. @type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: The certificate chain to be used if the client requests server certificate authentication. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: The private key to be used if the client requests server certificate authentication. @type reqCert: bool @param reqCert: Whether to request client certificate authentication. This only applies if the client chooses server certificate authentication; if the client chooses SRP or shared-key authentication, this will be ignored. If the client performs a client certificate authentication, the sessions's clientCertChain attribute will be set. @type sessionCache: L{tlslite.SessionCache.SessionCache} @param sessionCache: An in-memory cache of resumable sessions. The client can resume sessions from this cache. Alternatively, if the client performs a full handshake, a new session will be added to the cache. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites and SSL/TLS version chosen by the server. @type checker: L{tlslite.Checker.Checker} @param checker: A Checker instance. This instance will be invoked to examine the other party's authentication credentials, if the handshake completes succesfully. @type reqCAs: list of L{array.array} of unsigned bytes @param reqCAs: A collection of DER-encoded DistinguishedNames that will be sent along with a certificate request. This does not affect verification. @raise socket.error: If a socket error occurs. @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed without a preceding alert. @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. @raise tlslite.errors.TLSAuthenticationError: If the checker doesn't like the other party's authentication credentials. """ for result in self.handshakeServerAsync(sharedKeyDB, verifierDB, certChain, privateKey, reqCert, sessionCache, settings, checker, reqCAs, tlsIntolerant): pass def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None, certChain=None, privateKey=None, reqCert=False, sessionCache=None, settings=None, checker=None, reqCAs=None, tlsIntolerant=False): """Start a server handshake operation on the TLS connection. This function returns a generator which behaves similarly to handshakeServer(). 
Successive invocations of the generator will return 0 if it is waiting to read from the socket, 1 if it is waiting to write to the socket, or it will raise StopIteration if the handshake operation is complete. @rtype: iterable @return: A generator; see above for details. """ handshaker = self._handshakeServerAsyncHelper(\ sharedKeyDB=sharedKeyDB, verifierDB=verifierDB, certChain=certChain, privateKey=privateKey, reqCert=reqCert, sessionCache=sessionCache, settings=settings, reqCAs=reqCAs, tlsIntolerant=tlsIntolerant) for result in self._handshakeWrapperAsync(handshaker, checker): yield result def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB, certChain, privateKey, reqCert, sessionCache, settings, reqCAs, tlsIntolerant): self._handshakeStart(client=False) if (not sharedKeyDB) and (not verifierDB) and (not certChain): raise ValueError("Caller passed no authentication credentials") if certChain and not privateKey: raise ValueError("Caller passed a certChain but no privateKey") if privateKey and not certChain: raise ValueError("Caller passed a privateKey but no certChain") if reqCAs and not reqCert: raise ValueError("Caller passed reqCAs but not reqCert") if not settings: settings = HandshakeSettings() settings = settings._filter() #Initialize acceptable cipher suites cipherSuites = [] if verifierDB: if certChain: cipherSuites += \ CipherSuite.getSrpRsaSuites(settings.cipherNames) cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) if sharedKeyDB or certChain: cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) #Initialize acceptable certificate type certificateType = None if certChain: try: import cryptoIDlib.CertChain if isinstance(certChain, cryptoIDlib.CertChain.CertChain): certificateType = CertificateType.cryptoID except ImportError: pass if isinstance(certChain, X509CertChain): certificateType = CertificateType.x509 if certificateType == None: raise ValueError("Unrecognized certificate type") #Initialize locals clientCertChain = None serverCertChain = None #We may set certChain to this later postFinishedError = None #Tentatively set version to most-desirable version, so if an error #occurs parsing the ClientHello, this is what we'll use for the #error alert self.version = settings.maxVersion #Get ClientHello for result in self._getMsg(ContentType.handshake, HandshakeType.client_hello): if result in (0,1): yield result else: break clientHello = result #If client's version is too low, reject it if clientHello.client_version < settings.minVersion: self.version = settings.minVersion for result in self._sendError(\ AlertDescription.protocol_version, "Too old version: %s" % str(clientHello.client_version)): yield result if tlsIntolerant and clientHello.client_version > (3, 0): for result in self._sendError(\ AlertDescription.handshake_failure): yield result #If client's version is too high, propose my highest version elif clientHello.client_version > settings.maxVersion: self.version = settings.maxVersion else: #Set the version to the client's version self.version = clientHello.client_version #Get the client nonce; create server nonce clientRandom = clientHello.random serverRandom = getRandomBytes(32) #Calculate the first cipher suite intersection. #This is the 'privileged' ciphersuite. We'll use it if we're #doing a shared-key resumption or a new negotiation. In fact, #the only time we won't use it is if we're resuming a non-sharedkey #session, in which case we use the ciphersuite from the session. 
# #Given the current ciphersuite ordering, this means we prefer SRP #over non-SRP. for cipherSuite in cipherSuites: if cipherSuite in clientHello.cipher_suites: break else: for result in self._sendError(\ AlertDescription.handshake_failure): yield result #If resumption was requested... if clientHello.session_id and (sharedKeyDB or sessionCache): session = None #Check in the sharedKeys container if sharedKeyDB and len(clientHello.session_id)==16: try: #Trim off zero padding, if any for x in range(16): if clientHello.session_id[x]==0: break self.allegedSharedKeyUsername = bytesToString(\ clientHello.session_id[:x]) session = sharedKeyDB[self.allegedSharedKeyUsername] if not session.sharedKey: raise AssertionError() #use privileged ciphersuite session.cipherSuite = cipherSuite except KeyError: pass #Then check in the session cache if sessionCache and not session: try: session = sessionCache[bytesToString(\ clientHello.session_id)] if session.sharedKey: raise AssertionError() if not session.resumable: raise AssertionError() #Check for consistency with ClientHello if session.cipherSuite not in cipherSuites: for result in self._sendError(\ AlertDescription.handshake_failure): yield result if session.cipherSuite not in clientHello.cipher_suites: for result in self._sendError(\ AlertDescription.handshake_failure): yield result if clientHello.srp_username: if clientHello.srp_username != session.srpUsername: for result in self._sendError(\ AlertDescription.handshake_failure): yield result except KeyError: pass #If a session is found.. if session: #Set the session self.session = session #Send ServerHello serverHello = ServerHello() serverHello.create(self.version, serverRandom, session.sessionID, session.cipherSuite, certificateType) for result in self._sendMsg(serverHello): yield result #From here on, the client's messages must have the right version self._versionCheck = True #Calculate pending connection states self._calcPendingStates(clientRandom, serverRandom, settings.cipherImplementations) #Exchange ChangeCipherSpec and Finished messages for result in self._sendFinished(): yield result for result in self._getFinished(): yield result #Mark the connection as open self._handshakeDone(resumed=True) return #If not a resumption... #TRICKY: we might have chosen an RSA suite that was only deemed #acceptable because of the shared-key resumption. If the shared- #key resumption failed, because the identifier wasn't recognized, #we might fall through to here, where we have an RSA suite #chosen, but no certificate. if cipherSuite in CipherSuite.rsaSuites and not certChain: for result in self._sendError(\ AlertDescription.handshake_failure): yield result #If an RSA suite is chosen, check for certificate type intersection #(We do this check down here because if the mismatch occurs but the # client is using a shared-key session, it's okay) if cipherSuite in CipherSuite.rsaSuites + \ CipherSuite.srpRsaSuites: if certificateType not in clientHello.certificate_types: for result in self._sendError(\ AlertDescription.handshake_failure, "the client doesn't support my certificate type"): yield result #Move certChain -> serverCertChain, now that we're using it serverCertChain = certChain #Create sessionID if sessionCache: sessionID = getRandomBytes(32) else: sessionID = createByteArraySequence([]) #If we've selected an SRP suite, exchange keys and calculate #premaster secret: if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites: #If there's no SRP username... 
if not clientHello.srp_username: #Ask the client to re-send ClientHello with one for result in self._sendMsg(Alert().create(\ AlertDescription.missing_srp_username, AlertLevel.warning)): yield result #Get ClientHello for result in self._getMsg(ContentType.handshake, HandshakeType.client_hello): if result in (0,1): yield result else: break clientHello = result #Check ClientHello #If client's version is too low, reject it (COPIED CODE; BAD!) if clientHello.client_version < settings.minVersion: self.version = settings.minVersion for result in self._sendError(\ AlertDescription.protocol_version, "Too old version: %s" % str(clientHello.client_version)): yield result #If client's version is too high, propose my highest version elif clientHello.client_version > settings.maxVersion: self.version = settings.maxVersion else: #Set the version to the client's version self.version = clientHello.client_version #Recalculate the privileged cipher suite, making sure to #pick an SRP suite cipherSuites = [c for c in cipherSuites if c in \ CipherSuite.srpSuites + \ CipherSuite.srpRsaSuites] for cipherSuite in cipherSuites: if cipherSuite in clientHello.cipher_suites: break else: for result in self._sendError(\ AlertDescription.handshake_failure): yield result #Get the client nonce; create server nonce clientRandom = clientHello.random serverRandom = getRandomBytes(32) #The username better be there, this time if not clientHello.srp_username: for result in self._sendError(\ AlertDescription.illegal_parameter, "Client resent a hello, but without the SRP"\ " username"): yield result #Get username self.allegedSrpUsername = clientHello.srp_username #Get parameters from username try: entry = verifierDB[self.allegedSrpUsername] except KeyError: for result in self._sendError(\ AlertDescription.unknown_srp_username): yield result (N, g, s, v) = entry #Calculate server's ephemeral DH values (b, B) b = bytesToNumber(getRandomBytes(32)) k = makeK(N, g) B = (powMod(g, b, N) + (k*v)) % N #Create ServerKeyExchange, signing it if necessary serverKeyExchange = ServerKeyExchange(cipherSuite) serverKeyExchange.createSRP(N, g, stringToBytes(s), B) if cipherSuite in CipherSuite.srpRsaSuites: hashBytes = serverKeyExchange.hash(clientRandom, serverRandom) serverKeyExchange.signature = privateKey.sign(hashBytes) #Send ServerHello[, Certificate], ServerKeyExchange, #ServerHelloDone msgs = [] serverHello = ServerHello() serverHello.create(self.version, serverRandom, sessionID, cipherSuite, certificateType) msgs.append(serverHello) if cipherSuite in CipherSuite.srpRsaSuites: certificateMsg = Certificate(certificateType) certificateMsg.create(serverCertChain) msgs.append(certificateMsg) msgs.append(serverKeyExchange) msgs.append(ServerHelloDone()) for result in self._sendMsgs(msgs): yield result #From here on, the client's messages must have the right version self._versionCheck = True #Get and check ClientKeyExchange for result in self._getMsg(ContentType.handshake, HandshakeType.client_key_exchange, cipherSuite): if result in (0,1): yield result else: break clientKeyExchange = result A = clientKeyExchange.srp_A if A % N == 0: postFinishedError = (AlertDescription.illegal_parameter, "Suspicious A value") #Calculate u u = makeU(N, A, B) #Calculate premaster secret S = powMod((A * powMod(v,u,N)) % N, b, N) premasterSecret = numberToBytes(S) #If we've selected an RSA suite, exchange keys and calculate #premaster secret: elif cipherSuite in CipherSuite.rsaSuites: #Send ServerHello, Certificate[, CertificateRequest], #ServerHelloDone msgs = 
[] msgs.append(ServerHello().create(self.version, serverRandom, sessionID, cipherSuite, certificateType)) msgs.append(Certificate(certificateType).create(serverCertChain)) if reqCert and reqCAs: msgs.append(CertificateRequest().create([], reqCAs)) elif reqCert: msgs.append(CertificateRequest()) msgs.append(ServerHelloDone()) for result in self._sendMsgs(msgs): yield result #From here on, the client's messages must have the right version self._versionCheck = True #Get [Certificate,] (if was requested) if reqCert: if self.version == (3,0): for result in self._getMsg((ContentType.handshake, ContentType.alert), HandshakeType.certificate, certificateType): if result in (0,1): yield result else: break msg = result if isinstance(msg, Alert): #If it's not a no_certificate alert, re-raise alert = msg if alert.description != \ AlertDescription.no_certificate: self._shutdown(False) raise TLSRemoteAlert(alert) elif isinstance(msg, Certificate): clientCertificate = msg if clientCertificate.certChain and \ clientCertificate.certChain.getNumCerts()!=0: clientCertChain = clientCertificate.certChain else: raise AssertionError() elif self.version in ((3,1), (3,2)): for result in self._getMsg(ContentType.handshake, HandshakeType.certificate, certificateType): if result in (0,1): yield result else: break clientCertificate = result if clientCertificate.certChain and \ clientCertificate.certChain.getNumCerts()!=0: clientCertChain = clientCertificate.certChain else: raise AssertionError() #Get ClientKeyExchange for result in self._getMsg(ContentType.handshake, HandshakeType.client_key_exchange, cipherSuite): if result in (0,1): yield result else: break clientKeyExchange = result #Decrypt ClientKeyExchange premasterSecret = privateKey.decrypt(\ clientKeyExchange.encryptedPreMasterSecret) randomPreMasterSecret = getRandomBytes(48) versionCheck = (premasterSecret[0], premasterSecret[1]) if not premasterSecret: premasterSecret = randomPreMasterSecret elif len(premasterSecret)!=48: premasterSecret = randomPreMasterSecret elif versionCheck != clientHello.client_version: if versionCheck != self.version: #Tolerate buggy IE clients premasterSecret = randomPreMasterSecret #Get and check CertificateVerify, if relevant if clientCertChain: if self.version == (3,0): #Create a temporary session object, just for the purpose #of checking the CertificateVerify session = Session() session._calcMasterSecret(self.version, premasterSecret, clientRandom, serverRandom) verifyBytes = self._calcSSLHandshakeHash(\ session.masterSecret, "") elif self.version in ((3,1), (3,2)): verifyBytes = stringToBytes(self._handshake_md5.digest() +\ self._handshake_sha.digest()) for result in self._getMsg(ContentType.handshake, HandshakeType.certificate_verify): if result in (0,1): yield result else: break certificateVerify = result publicKey = clientCertChain.getEndEntityPublicKey() if len(publicKey) < settings.minKeySize: postFinishedError = (AlertDescription.handshake_failure, "Client's public key too small: %d" % len(publicKey)) if len(publicKey) > settings.maxKeySize: postFinishedError = (AlertDescription.handshake_failure, "Client's public key too large: %d" % len(publicKey)) if not publicKey.verify(certificateVerify.signature, verifyBytes): postFinishedError = (AlertDescription.decrypt_error, "Signature failed to verify") #Create the session object self.session = Session() self.session._calcMasterSecret(self.version, premasterSecret, clientRandom, serverRandom) self.session.sessionID = sessionID self.session.cipherSuite = cipherSuite 
self.session.srpUsername = self.allegedSrpUsername self.session.clientCertChain = clientCertChain self.session.serverCertChain = serverCertChain #Calculate pending connection states self._calcPendingStates(clientRandom, serverRandom, settings.cipherImplementations) #Exchange ChangeCipherSpec and Finished messages for result in self._getFinished(): yield result #If we were holding a post-finished error until receiving the client #finished message, send it now. We delay the call until this point #because calling sendError() throws an exception, and our caller might #shut down the socket upon receiving the exception. If he did, and the #client was still sending its ChangeCipherSpec or Finished messages, it #would cause a socket error on the client side. This is a lot of #consideration to show to misbehaving clients, but this would also #cause problems with fault-testing. if postFinishedError: for result in self._sendError(*postFinishedError): yield result for result in self._sendFinished(): yield result #Add the session object to the session cache if sessionCache and sessionID: sessionCache[bytesToString(sessionID)] = self.session #Mark the connection as open self.session._setResumable(True) self._handshakeDone(resumed=False) def _handshakeWrapperAsync(self, handshaker, checker): if not self.fault: try: for result in handshaker: yield result if checker: try: checker(self) except TLSAuthenticationError: alert = Alert().create(AlertDescription.close_notify, AlertLevel.fatal) for result in self._sendMsg(alert): yield result raise except: self._shutdown(False) raise else: try: for result in handshaker: yield result if checker: try: checker(self) except TLSAuthenticationError: alert = Alert().create(AlertDescription.close_notify, AlertLevel.fatal) for result in self._sendMsg(alert): yield result raise except socket.error, e: raise TLSFaultError("socket error!") except TLSAbruptCloseError, e: raise TLSFaultError("abrupt close error!") except TLSAlert, alert: if alert.description not in Fault.faultAlerts[self.fault]: raise TLSFaultError(str(alert)) else: pass except: self._shutdown(False) raise else: raise TLSFaultError("No error!") def _getKeyFromChain(self, certificate, settings): #Get and check cert chain from the Certificate message certChain = certificate.certChain if not certChain or certChain.getNumCerts() == 0: for result in self._sendError(AlertDescription.illegal_parameter, "Other party sent a Certificate message without "\ "certificates"): yield result #Get and check public key from the cert chain publicKey = certChain.getEndEntityPublicKey() if len(publicKey) < settings.minKeySize: for result in self._sendError(AlertDescription.handshake_failure, "Other party's public key too small: %d" % len(publicKey)): yield result if len(publicKey) > settings.maxKeySize: for result in self._sendError(AlertDescription.handshake_failure, "Other party's public key too large: %d" % len(publicKey)): yield result yield publicKey, certChain
bsd-3-clause
-1,661,798,008,255,936,800
-8,463,723,264,819,610,000
43.021014
83
0.572389
false
tensorflow/examples
tensorflow_examples/lite/model_maker/demo/image_classification_demo_test.py
1
2699
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile from unittest.mock import patch import tensorflow as tf from tensorflow_examples.lite.model_maker.core import test_util from tensorflow_examples.lite.model_maker.demo import image_classification_demo from tflite_model_maker import image_classifier from_folder_fn = image_classifier.DataLoader.from_folder def patch_data_loader(): """Patch to train partial dataset rather than all of them.""" def side_effect(*args, **kwargs): tf.compat.v1.logging.info('Train on partial dataset') data_loader = from_folder_fn(*args, **kwargs) if len(data_loader) > 10: # Trim dataset to at most 10. data_loader._size = 10 # TODO(b/171449557): Change this once the dataset is lazily loaded. data_loader._dataset = data_loader._dataset.take(10) return data_loader return patch.object( image_classifier.DataLoader, 'from_folder', side_effect=side_effect) class ImageClassificationDemoTest(tf.test.TestCase): def test_image_classification_demo(self): with patch_data_loader(): with tempfile.TemporaryDirectory() as temp_dir: # Use cached training data if exists. data_dir = image_classification_demo.download_demo_data( cache_dir=test_util.get_cache_dir(temp_dir, 'flower_photos.tgz'), file_hash='6f87fb78e9cc9ab41eff2015b380011d') tflite_filename = os.path.join(temp_dir, 'model.tflite') label_filename = os.path.join(temp_dir, 'labels.txt') image_classification_demo.run( data_dir, temp_dir, spec='efficientnet_lite0', epochs=1, batch_size=1) self.assertTrue(tf.io.gfile.exists(tflite_filename)) self.assertGreater(os.path.getsize(tflite_filename), 0) self.assertFalse(tf.io.gfile.exists(label_filename)) if __name__ == '__main__': # Load compressed models from tensorflow_hub os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED' tf.test.main()
apache-2.0
5,330,753,893,444,014,000
1,920,276,559,624,034,300
34.513158
79
0.70804
false
takis/odoo
openerp/report/report_sxw.py
217
27364
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from lxml import etree import StringIO import cStringIO import base64 from datetime import datetime import os import re import time from interface import report_rml import preprocess import logging import openerp.tools as tools import zipfile import common import openerp from openerp import SUPERUSER_ID from openerp.osv.fields import float as float_field, function as function_field, datetime as datetime_field from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT from openerp.tools.safe_eval import safe_eval as eval _logger = logging.getLogger(__name__) rml_parents = { 'tr':1, 'li':1, 'story': 0, 'section': 0 } rml_tag="para" sxw_parents = { 'table-row': 1, 'list-item': 1, 'body': 0, 'section': 0, } html_parents = { 'tr' : 1, 'body' : 0, 'div' : 0 } sxw_tag = "p" rml2sxw = { 'para': 'p', } def get_date_length(date_format=DEFAULT_SERVER_DATE_FORMAT): return len((datetime.now()).strftime(date_format)) class rml_parse(object): def __init__(self, cr, uid, name, parents=rml_parents, tag=rml_tag, context=None): if not context: context={} self.cr = cr self.uid = uid self.pool = openerp.registry(cr.dbname) user = self.pool['res.users'].browse(cr, uid, uid, context=context) self.localcontext = { 'user': user, 'setCompany': self.setCompany, 'repeatIn': self.repeatIn, 'setLang': self.setLang, 'setTag': self.setTag, 'removeParentNode': self.removeParentNode, 'format': self.format, 'formatLang': self.formatLang, 'lang' : user.company_id.partner_id.lang, 'translate' : self._translate, 'setHtmlImage' : self.set_html_image, 'strip_name' : self._strip_name, 'time' : time, 'display_address': self.display_address, # more context members are setup in setCompany() below: # - company_id # - logo } self.setCompany(user.company_id) self.localcontext.update(context) self.name = name self._node = None self.parents = parents self.tag = tag self._lang_cache = {} self.lang_dict = {} self.default_lang = {} self.lang_dict_called = False self._transl_regex = re.compile('(\[\[.+?\]\])') def setTag(self, oldtag, newtag, attrs=None): return newtag, attrs def _ellipsis(self, char, size=100, truncation_str='...'): if not char: return '' if len(char) <= size: return char return char[:size-len(truncation_str)] + truncation_str def setCompany(self, company_id): if company_id: self.localcontext['company'] = company_id self.localcontext['logo'] = company_id.logo self.rml_header = company_id.rml_header self.rml_header2 = company_id.rml_header2 self.rml_header3 = company_id.rml_header3 self.logo = company_id.logo def _strip_name(self, name, 
maxlen=50): return self._ellipsis(name, maxlen) def format(self, text, oldtag=None): return text.strip() def removeParentNode(self, tag=None): raise GeneratorExit('Skip') def set_html_image(self,id,model=None,field=None,context=None): if not id : return '' if not model: model = 'ir.attachment' try : id = int(id) res = self.pool[model].read(self.cr,self.uid,id) if field : return res[field] elif model =='ir.attachment' : return res['datas'] else : return '' except Exception: return '' def setLang(self, lang): self.localcontext['lang'] = lang self.lang_dict_called = False # re-evaluate self.objects in a different environment env = self.objects.env(self.cr, self.uid, self.localcontext) self.objects = self.objects.with_env(env) def _get_lang_dict(self): pool_lang = self.pool['res.lang'] lang = self.localcontext.get('lang', 'en_US') or 'en_US' lang_ids = pool_lang.search(self.cr,self.uid,[('code','=',lang)]) if not lang_ids: lang_ids = pool_lang.search(self.cr,self.uid,[('code','=','en_US')]) lang_obj = pool_lang.browse(self.cr,self.uid,lang_ids[0]) self.lang_dict.update({'lang_obj':lang_obj,'date_format':lang_obj.date_format,'time_format':lang_obj.time_format}) self.default_lang[lang] = self.lang_dict.copy() return True def digits_fmt(self, obj=None, f=None, dp=None): digits = self.get_digits(obj, f, dp) return "%%.%df" % (digits, ) def get_digits(self, obj=None, f=None, dp=None): d = DEFAULT_DIGITS = 2 if dp: decimal_precision_obj = self.pool['decimal.precision'] d = decimal_precision_obj.precision_get(self.cr, self.uid, dp) elif obj and f: res_digits = getattr(obj._columns[f], 'digits', lambda x: ((16, DEFAULT_DIGITS))) if isinstance(res_digits, tuple): d = res_digits[1] else: d = res_digits(self.cr)[1] elif (hasattr(obj, '_field') and\ isinstance(obj._field, (float_field, function_field)) and\ obj._field.digits): d = obj._field.digits[1] if not d and d is not 0: d = DEFAULT_DIGITS return d def formatLang(self, value, digits=None, date=False, date_time=False, grouping=True, monetary=False, dp=False, currency_obj=False): """ Assuming 'Account' decimal.precision=3: formatLang(value) -> digits=2 (default) formatLang(value, digits=4) -> digits=4 formatLang(value, dp='Account') -> digits=3 formatLang(value, digits=5, dp='Account') -> digits=5 """ if digits is None: if dp: digits = self.get_digits(dp=dp) else: digits = self.get_digits(value) if isinstance(value, (str, unicode)) and not value: return '' if not self.lang_dict_called: self._get_lang_dict() self.lang_dict_called = True if date or date_time: if not value: return '' date_format = self.lang_dict['date_format'] parse_format = DEFAULT_SERVER_DATE_FORMAT if date_time: value = value.split('.')[0] date_format = date_format + " " + self.lang_dict['time_format'] parse_format = DEFAULT_SERVER_DATETIME_FORMAT if isinstance(value, basestring): # FIXME: the trimming is probably unreliable if format includes day/month names # and those would need to be translated anyway. date = datetime.strptime(value[:get_date_length(parse_format)], parse_format) elif isinstance(value, time.struct_time): date = datetime(*value[:6]) else: date = datetime(*value.timetuple()[:6]) if date_time: # Convert datetime values to the expected client/context timezone date = datetime_field.context_timestamp(self.cr, self.uid, timestamp=date, context=self.localcontext) return date.strftime(date_format.encode('utf-8')) res = self.lang_dict['lang_obj'].format('%.' 
+ str(digits) + 'f', value, grouping=grouping, monetary=monetary)
        if currency_obj:
            if currency_obj.position == 'after':
                res = u'%s\N{NO-BREAK SPACE}%s' % (res, currency_obj.symbol)
            elif currency_obj.position == 'before':
                res = u'%s\N{NO-BREAK SPACE}%s' % (currency_obj.symbol, res)
        return res

    def display_address(self, address_record, without_company=False):
        # FIXME handle `without_company`
        return address_record.contact_address

    def repeatIn(self, lst, name, nodes_parent=False):
        ret_lst = []
        for id in lst:
            ret_lst.append({name: id})
        return ret_lst

    def _translate(self, text):
        lang = self.localcontext['lang']
        if lang and text and not text.isspace():
            transl_obj = self.pool['ir.translation']
            piece_list = self._transl_regex.split(text)
            for pn in range(len(piece_list)):
                if not self._transl_regex.match(piece_list[pn]):
                    source_string = piece_list[pn].replace('\n', ' ').strip()
                    if len(source_string):
                        translated_string = transl_obj._get_source(self.cr, self.uid, self.name, ('report', 'rml'), lang, source_string)
                        if translated_string:
                            piece_list[pn] = piece_list[pn].replace(source_string, translated_string)
            text = ''.join(piece_list)
        return text

    def _add_header(self, rml_dom, header='external'):
        if header == 'internal':
            rml_head = self.rml_header2
        elif header == 'internal landscape':
            rml_head = self.rml_header3
        else:
            rml_head = self.rml_header

        head_dom = etree.XML(rml_head)
        for tag in head_dom:
            found = rml_dom.find('.//' + tag.tag)
            if found is not None and len(found):
                if tag.get('position'):
                    found.append(tag)
                else:
                    found.getparent().replace(found, tag)
        return True

    def set_context(self, objects, data, ids, report_type=None):
        self.localcontext['data'] = data
        self.localcontext['objects'] = objects
        self.localcontext['digits_fmt'] = self.digits_fmt
        self.localcontext['get_digits'] = self.get_digits
        self.datas = data
        self.ids = ids
        self.objects = objects
        if report_type:
            if report_type == 'odt':
                self.localcontext.update({'name_space': common.odt_namespace})
            else:
                self.localcontext.update({'name_space': common.sxw_namespace})

        # WARNING: the objects[0].exists() call below is slow but necessary because
        # some broken reporting wizards pass incorrect IDs (e.g. ir.ui.menu ids)
        if objects and len(objects) == 1 and \
            objects[0].exists() and 'company_id' in objects[0] and objects[0].company_id:
            # When we print only one record, we can auto-set the correct
            # company in the localcontext. For other cases the report
            # will have to call setCompany() inside the main repeatIn loop.
            self.setCompany(objects[0].company_id)


class report_sxw(report_rml, preprocess.report):
    """
    The register=True kwarg has been added to help remove the
    openerp.netsvc.LocalService() indirection and the related
    openerp.report.interface.report_int._reports dictionary:
    report_sxw registered in XML with auto=False are also registered in
    Python. In that case, they are registered in the above dictionary. Since
    registration is automatically done upon instantiation, and that
    instantiation is needed before rendering, a way was needed to
    instantiate-without-register a report. In the future, no report should be
    registered in the above dictionary and it will be dropped.
""" def __init__(self, name, table, rml=False, parser=rml_parse, header='external', store=False, register=True): report_rml.__init__(self, name, table, rml, '', register=register) self.name = name self.parser = parser self.header = header self.store = store self.internal_header=False if header=='internal' or header=='internal landscape': self.internal_header=True def getObjects(self, cr, uid, ids, context): table_obj = openerp.registry(cr.dbname)[self.table] return table_obj.browse(cr, uid, ids, context=context) def create(self, cr, uid, ids, data, context=None): context = dict(context or {}) if self.internal_header: context.update(internal_header=self.internal_header) # skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases context.update(bin_raw=True) registry = openerp.registry(cr.dbname) ir_obj = registry['ir.actions.report.xml'] registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context) report_xml_ids = ir_obj.search(cr, uid, [('report_name', '=', self.name[7:])], context=context) if report_xml_ids: report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context) else: title = '' report_file = tools.file_open(self.tmpl, subdir=None) try: rml = report_file.read() report_type= data.get('report_type', 'pdf') class a(object): def __init__(self, *args, **argv): for key,arg in argv.items(): setattr(self, key, arg) report_xml = a(title=title, report_type=report_type, report_rml_content=rml, name=title, attachment=False, header=self.header) finally: report_file.close() # We add an attribute on the ir.actions.report.xml instance. # This attribute 'use_global_header' will be used by # the create_single_XXX function of the report engine. # This change has been done to avoid a big change of the API. setattr(report_xml, 'use_global_header', self.header if report_xml.header else False) report_type = report_xml.report_type if report_type in ['sxw','odt']: fnct = self.create_source_odt elif report_type in ['pdf','raw','txt','html']: fnct = self.create_source_pdf elif report_type=='html2html': fnct = self.create_source_html2html elif report_type=='mako2html': fnct = self.create_source_mako2html else: raise NotImplementedError(_('Unknown report type: %s') % report_type) fnct_ret = fnct(cr, uid, ids, data, report_xml, context) if not fnct_ret: return False, False return fnct_ret def create_source_odt(self, cr, uid, ids, data, report_xml, context=None): return self.create_single_odt(cr, uid, ids, data, report_xml, context or {}) def create_source_html2html(self, cr, uid, ids, data, report_xml, context=None): return self.create_single_html2html(cr, uid, ids, data, report_xml, context or {}) def create_source_mako2html(self, cr, uid, ids, data, report_xml, context=None): return self.create_single_mako2html(cr, uid, ids, data, report_xml, context or {}) def create_source_pdf(self, cr, uid, ids, data, report_xml, context=None): if not context: context={} registry = openerp.registry(cr.dbname) attach = report_xml.attachment if attach: objs = self.getObjects(cr, uid, ids, context) results = [] for obj in objs: aname = eval(attach, {'object':obj, 'time':time}) result = False if report_xml.attachment_use and aname and context.get('attachment_use', True): aids = registry['ir.attachment'].search(cr, uid, [('datas_fname','=',aname+'.pdf'),('res_model','=',self.table),('res_id','=',obj.id)]) if aids: brow_rec = registry['ir.attachment'].browse(cr, uid, aids[0]) if not brow_rec.datas: continue d = base64.decodestring(brow_rec.datas) results.append((d,'pdf')) 
continue result = self.create_single_pdf(cr, uid, [obj.id], data, report_xml, context) if not result: return False if aname: try: name = aname+'.'+result[1] # Remove the default_type entry from the context: this # is for instance used on the account.account_invoices # and is thus not intended for the ir.attachment type # field. ctx = dict(context) ctx.pop('default_type', None) registry['ir.attachment'].create(cr, uid, { 'name': aname, 'datas': base64.encodestring(result[0]), 'datas_fname': name, 'res_model': self.table, 'res_id': obj.id, }, context=ctx ) except Exception: #TODO: should probably raise a proper osv_except instead, shouldn't we? see LP bug #325632 _logger.error('Could not create saved report attachment', exc_info=True) results.append(result) if results: if results[0][1]=='pdf': from pyPdf import PdfFileWriter, PdfFileReader output = PdfFileWriter() for r in results: reader = PdfFileReader(cStringIO.StringIO(r[0])) for page in range(reader.getNumPages()): output.addPage(reader.getPage(page)) s = cStringIO.StringIO() output.write(s) return s.getvalue(), results[0][1] return self.create_single_pdf(cr, uid, ids, data, report_xml, context) def create_single_pdf(self, cr, uid, ids, data, report_xml, context=None): if not context: context={} logo = None context = context.copy() title = report_xml.name rml = report_xml.report_rml_content # if no rml file is found if not rml: return False rml_parser = self.parser(cr, uid, self.name2, context=context) objs = self.getObjects(cr, uid, ids, context) rml_parser.set_context(objs, data, ids, report_xml.report_type) processed_rml = etree.XML(rml) if report_xml.use_global_header: rml_parser._add_header(processed_rml, self.header) processed_rml = self.preprocess_rml(processed_rml,report_xml.report_type) if rml_parser.logo: logo = base64.decodestring(rml_parser.logo) create_doc = self.generators[report_xml.report_type] pdf = create_doc(etree.tostring(processed_rml),rml_parser.localcontext,logo,title.encode('utf8')) return pdf, report_xml.report_type def create_single_odt(self, cr, uid, ids, data, report_xml, context=None): context = dict(context or {}) context['parents'] = sxw_parents report_type = report_xml.report_type binary_report_content = report_xml.report_sxw_content if isinstance(report_xml.report_sxw_content, unicode): # if binary content was passed as unicode, we must # re-encode it as a 8-bit string using the pass-through # 'latin1' encoding, to restore the original byte values. 
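            # (For example, u'PK\x03\x04'.encode('latin1') returns the byte
            # string 'PK\x03\x04' unchanged, since latin-1 maps code points
            # 0-255 one-to-one onto bytes.)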
# See also osv.fields.sanitize_binary_value() binary_report_content = report_xml.report_sxw_content.encode("latin1") sxw_io = StringIO.StringIO(binary_report_content) sxw_z = zipfile.ZipFile(sxw_io, mode='r') rml = sxw_z.read('content.xml') meta = sxw_z.read('meta.xml') mime_type = sxw_z.read('mimetype') if mime_type == 'application/vnd.sun.xml.writer': mime_type = 'sxw' else : mime_type = 'odt' sxw_z.close() rml_parser = self.parser(cr, uid, self.name2, context=context) rml_parser.parents = sxw_parents rml_parser.tag = sxw_tag objs = self.getObjects(cr, uid, ids, context) rml_parser.set_context(objs, data, ids, mime_type) rml_dom_meta = node = etree.XML(meta) elements = node.findall(rml_parser.localcontext['name_space']["meta"]+"user-defined") for pe in elements: if pe.get(rml_parser.localcontext['name_space']["meta"]+"name"): if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 3": pe[0].text=data['id'] if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 4": pe[0].text=data['model'] meta = etree.tostring(rml_dom_meta, encoding='utf-8', xml_declaration=True) rml_dom = etree.XML(rml) elements = [] key1 = rml_parser.localcontext['name_space']["text"]+"p" key2 = rml_parser.localcontext['name_space']["text"]+"drop-down" for n in rml_dom.iterdescendants(): if n.tag == key1: elements.append(n) if mime_type == 'odt': for pe in elements: e = pe.findall(key2) for de in e: pp=de.getparent() if de.text or de.tail: pe.text = de.text or de.tail for cnd in de: if cnd.text or cnd.tail: if pe.text: pe.text += cnd.text or cnd.tail else: pe.text = cnd.text or cnd.tail pp.remove(de) else: for pe in elements: e = pe.findall(key2) for de in e: pp = de.getparent() if de.text or de.tail: pe.text = de.text or de.tail for cnd in de: text = cnd.get("{http://openoffice.org/2000/text}value",False) if text: if pe.text and text.startswith('[['): pe.text += text elif text.startswith('[['): pe.text = text if de.getparent(): pp.remove(de) rml_dom = self.preprocess_rml(rml_dom, mime_type) create_doc = self.generators[mime_type] odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext), encoding='utf-8', xml_declaration=True) sxw_contents = {'content.xml':odt, 'meta.xml':meta} if report_xml.use_global_header: #Add corporate header/footer rml_file = tools.file_open(os.path.join('base', 'report', 'corporate_%s_header.xml' % report_type)) try: rml = rml_file.read() rml_parser = self.parser(cr, uid, self.name2, context=context) rml_parser.parents = sxw_parents rml_parser.tag = sxw_tag objs = self.getObjects(cr, uid, ids, context) rml_parser.set_context(objs, data, ids, report_xml.report_type) rml_dom = self.preprocess_rml(etree.XML(rml),report_type) create_doc = self.generators[report_type] odt = create_doc(rml_dom,rml_parser.localcontext) if report_xml.use_global_header: rml_parser._add_header(odt) odt = etree.tostring(odt, encoding='utf-8', xml_declaration=True) sxw_contents['styles.xml'] = odt finally: rml_file.close() #created empty zip writing sxw contents to avoid duplication sxw_out = StringIO.StringIO() sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w') sxw_template_zip = zipfile.ZipFile (sxw_io, 'r') for item in sxw_template_zip.infolist(): if item.filename not in sxw_contents: buffer = sxw_template_zip.read(item.filename) sxw_out_zip.writestr(item.filename, buffer) for item_filename, buffer in sxw_contents.iteritems(): sxw_out_zip.writestr(item_filename, buffer) sxw_template_zip.close() sxw_out_zip.close() final_op = sxw_out.getvalue() sxw_io.close() sxw_out.close() 
return final_op, mime_type def create_single_html2html(self, cr, uid, ids, data, report_xml, context=None): context = dict(context or {}) context['parents'] = html_parents report_type = 'html' html = report_xml.report_rml_content html_parser = self.parser(cr, uid, self.name2, context=context) html_parser.parents = html_parents html_parser.tag = sxw_tag objs = self.getObjects(cr, uid, ids, context) html_parser.set_context(objs, data, ids, report_type) html_dom = etree.HTML(html) html_dom = self.preprocess_rml(html_dom,'html2html') create_doc = self.generators['html2html'] html = etree.tostring(create_doc(html_dom, html_parser.localcontext)) return html.replace('&amp;','&').replace('&lt;', '<').replace('&gt;', '>').replace('</br>',''), report_type def create_single_mako2html(self, cr, uid, ids, data, report_xml, context=None): mako_html = report_xml.report_rml_content html_parser = self.parser(cr, uid, self.name2, context) objs = self.getObjects(cr, uid, ids, context) html_parser.set_context(objs, data, ids, 'html') create_doc = self.generators['makohtml2html'] html = create_doc(mako_html,html_parser.localcontext) return html,'html' # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-4,344,363,551,393,206,300
-3,589,847,439,216,945,700
41.823161
155
0.556607
false
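The formatLang docstring above pins down how the digit count is resolved: an explicit digits= argument wins, then a named decimal precision, then a default of 2. A standalone sketch of that precedence; PRECISIONS stands in for the decimal.precision table and the helper name is illustrative:

DEFAULT_DIGITS = 2
PRECISIONS = {'Account': 3}  # stand-in for decimal.precision records

def resolve_digits(digits=None, dp=None):
    # Mirrors the lookup order that get_digits()/formatLang() document.
    if digits is not None:
        return digits
    if dp:
        return PRECISIONS.get(dp, DEFAULT_DIGITS)
    return DEFAULT_DIGITS

assert resolve_digits() == 2                        # formatLang(value)
assert resolve_digits(digits=4) == 4                # formatLang(value, digits=4)
assert resolve_digits(dp='Account') == 3            # formatLang(value, dp='Account')
assert resolve_digits(digits=5, dp='Account') == 5  # digits= overrides dp=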
MrLucasCardoso/pycards
tests/test_amex.py
1
2451
import json
import pytest
from pycards import CreditCard
from datetime import datetime
from pycards.settings import FIXTURES_PATH


@pytest.fixture(scope="session")
def data():
    with open(FIXTURES_PATH) as data_file:
        return json.load(data_file)['AMEX']


def test_init(data):
    assert len(data) > 0
    cards = [CreditCard(card['name'], code=card['code']) for card in data]
    assert len(cards) == len(data)


def test_is_valid(data):
    assert all(CreditCard(card['name'], code=card['code']).is_valid for card in data)


def test_brand(data):
    cards = [CreditCard(card['name'], code=card['code']) for card in data]
    assert len(cards) == len([card for card in cards if card.brand == 'Amex'])


def test_cardholder(data):
    cards = [CreditCard(card['name'], code=card['code'], cardholder='TESTE DADOS') for card in data]
    assert len(cards) == len([card for card in cards if card.cardholder == 'TESTE DADOS'])


def test_number(data):
    # The membership check is the all() predicate itself; the "cards and"
    # guard keeps an empty fixture from passing vacuously.
    numbers = [card['name'] for card in data]
    cards = [CreditCard(card['name'], code=card['code']) for card in data]
    assert cards and all(c.number in numbers for c in cards)


def test_expires(data):
    cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
    assert cards and all(isinstance(c.expires, datetime) for c in cards)


def test_expires_string(data):
    cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
    assert cards and all(c.expires_string == '07/21' for c in cards)


def test_is_not_expired(data):
    card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data][0]
    assert not card.is_expired


def test_is_expired(data):
    card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
    assert card.is_expired


def test_code_name(data):
    card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
    assert card.code_name == 'CVV'


def test_code(data):
    codes = [card['code'] for card in data]
    cards = [CreditCard(card['name'], code=card['code']) for card in data]
    assert cards and all(c.code in codes for c in cards)
mit
1,431,736,538,652,449,000
-3,123,374,025,965,673,500
35.044118
126
0.669931
false
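A hedged usage sketch of the API these tests exercise; the constructor arguments and properties are exactly the ones used above, the card number is the well-known Amex test number, and the other values are made up:

from pycards import CreditCard

card = CreditCard('378282246310005', code='1234', cardholder='JOHN DOE',
                  expire_month='7', expire_year='2030')
print(card.brand)           # 'Amex' for this number, per test_brand
print(card.code_name)       # 'CVV', per test_code_name
print(card.expires_string)  # '07/30', following test_expires_string's format
print(card.is_valid)
print(card.is_expired)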
steveklabnik/servo
components/script/dom/bindings/codegen/parser/tests/test_extended_attributes.py
149
2846
import WebIDL def WebIDLTest(parser, harness): parser.parse(""" [NoInterfaceObject] interface TestExtendedAttr { [Unforgeable] readonly attribute byte b; }; """) results = parser.finish() parser = parser.reset() parser.parse(""" [Pref="foo.bar",Pref=flop] interface TestExtendedAttr { [Pref="foo.bar"] attribute byte b; }; """) results = parser.finish() parser = parser.reset() parser.parse(""" interface TestLenientThis { [LenientThis] attribute byte b; }; """) results = parser.finish() harness.ok(results[0].members[0].hasLenientThis(), "Should have a lenient this") parser = parser.reset() threw = False try: parser.parse(""" interface TestLenientThis2 { [LenientThis=something] attribute byte b; }; """) results = parser.finish() except: threw = True harness.ok(threw, "[LenientThis] must take no arguments") parser = parser.reset() parser.parse(""" interface TestClamp { void testClamp([Clamp] long foo); void testNotClamp(long foo); }; """) results = parser.finish() # Pull out the first argument out of the arglist of the first (and # only) signature. harness.ok(results[0].members[0].signatures()[0][1][0].clamp, "Should be clamped") harness.ok(not results[0].members[1].signatures()[0][1][0].clamp, "Should not be clamped") parser = parser.reset() threw = False try: parser.parse(""" interface TestClamp2 { void testClamp([Clamp=something] long foo); }; """) results = parser.finish() except: threw = True harness.ok(threw, "[Clamp] must take no arguments") parser = parser.reset() parser.parse(""" interface TestEnforceRange { void testEnforceRange([EnforceRange] long foo); void testNotEnforceRange(long foo); }; """) results = parser.finish() # Pull out the first argument out of the arglist of the first (and # only) signature. harness.ok(results[0].members[0].signatures()[0][1][0].enforceRange, "Should be enforceRange") harness.ok(not results[0].members[1].signatures()[0][1][0].enforceRange, "Should not be enforceRange") parser = parser.reset() threw = False try: parser.parse(""" interface TestEnforceRange2 { void testEnforceRange([EnforceRange=something] long foo); }; """) results = parser.finish() except: threw = True harness.ok(threw, "[EnforceRange] must take no arguments")
mpl-2.0
-1,270,494,003,477,681,200
-2,441,060,304,974,786,600
25.598131
76
0.562895
false
lichengshuang/createvhost
others/webvirtmgr/delServer.py
1
1305
#!/usr/bin/python
#-*-encoding:utf-8-*-
#author: asher
#date: 20160429 on train D909
# This script removes server IPs from webvirtmgr in bulk.
# Otherwise each server must be deleted through the web interface,
# which is slow and tedious; batching the deletes here is much faster.
import sqlite3

conn = sqlite3.connect('../webvirtmgr.sqlite3')
cur = conn.cursor()
try:
    print "Input the server ip address like:"
    ips = raw_input("Ips 172.23.32:").strip()
    ips1 = int(raw_input("Input start last ip num: 1:>").strip())
    ips2 = int(raw_input("Input end ip num: 100:>").strip())
    # jifang = str(raw_input("DataCenter like:jxq:>").strip())
    # login = str(raw_input("User:admin or others:>").strip())
    # password = str(raw_input("Password:>").strip())
    while ips1 <= ips2:
        newip = "%s.%d" % (ips, ips1)
        # jifang1 = jifang + "_" + newip
        print "Deleting %s from database\n" % newip
        # Parameterized query instead of string interpolation: safe against
        # quoting problems, and uses standard "=" rather than sqlite's "==".
        cur.execute("delete from servers_compute where hostname = ?", (newip,))
        ips1 += 1
    conn.commit()
finally:
    allservers = cur.execute("select id,name,hostname,login,type from servers_compute").fetchall()
    for i in allservers:
        print i
    conn.close()
apache-2.0
3,390,784,429,687,761,000
-8,031,038,660,453,539,000
33.342105
95
0.603065
false
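The whole prompt-and-loop above boils down to one batched statement. A self-contained sketch of the same delete using executemany; the function and argument names are illustrative:

import sqlite3

def delete_servers(db_path, prefix, start, end):
    # Build ('172.23.32.1',), ('172.23.32.2',), ... parameter tuples.
    hosts = [("%s.%d" % (prefix, n),) for n in range(start, end + 1)]
    conn = sqlite3.connect(db_path)
    try:
        conn.executemany("delete from servers_compute where hostname = ?", hosts)
        conn.commit()
    finally:
        conn.close()

# delete_servers('../webvirtmgr.sqlite3', '172.23.32', 1, 100)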
esikachev/sahara-backup
sahara/tests/unit/swift/test_utils.py
7
1447
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from sahara.swift import utils from sahara.tests.unit import base as testbase class SwiftUtilsTest(testbase.SaharaTestCase): def setUp(self): super(SwiftUtilsTest, self).setUp() self.override_config('use_identity_api_v3', True) @mock.patch('sahara.utils.openstack.base.url_for') def test_retrieve_auth_url(self, url_for_mock): correct = "https://127.0.0.1:8080/v2.0/" def _assert(uri): url_for_mock.return_value = uri self.assertEqual(correct, utils.retrieve_auth_url()) _assert("%s/" % correct) _assert("https://127.0.0.1:8080") _assert("https://127.0.0.1:8080/") _assert("https://127.0.0.1:8080/v2.0") _assert("https://127.0.0.1:8080/v2.0/") _assert("https://127.0.0.1:8080/v42/") _assert("https://127.0.0.1:8080/foo")
apache-2.0
-1,124,731,369,562,066,300
8,266,892,987,516,646,000
33.452381
69
0.666897
false
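The test fixes the contract: whatever versioned or unversioned endpoint the service catalog hands back, retrieve_auth_url() must return the host root plus "v2.0/". A standalone sketch of one way to meet that contract (the real sahara.swift.utils implementation may differ):

import re

def normalize_auth_url(catalog_url):
    # Keep only scheme://host:port, then pin the identity API version.
    root = re.match(r'(https?://[^/]+)', catalog_url).group(1)
    return '%s/v2.0/' % root

assert normalize_auth_url('https://127.0.0.1:8080/v42/') == 'https://127.0.0.1:8080/v2.0/'
assert normalize_auth_url('https://127.0.0.1:8080') == 'https://127.0.0.1:8080/v2.0/'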
goofwear/raspberry_pwn
src/pentest/fasttrack/setup.py
16
11757
#!/usr/bin/env python
import os
import sys
import time
import subprocess
import re

def get_basepath():
    basepath = os.getcwd()
    return basepath

definepath = get_basepath()

try:
    if sys.argv[1] == 'install':
        print """
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   ~          *** Fast-Track Setup ***             ~
   ~    *** Install Fast-Track dependencies ***    ~
   ~             *** Version 2.1 ***               ~
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Fast-Track initial setup menu. You should use this whenever you update
Fast-Track, due to dependencies added when new application modules are
written into Fast-Track.

Before running Fast-Track, you must install the following:

Metasploit (for autopwn, and mass client attack)
SQLite3 (for autopwn)

There are other requirements; however, Fast-Track will check
for them and, if you're missing one, install it for you.

NOTE: pymssql is currently compiled at a lower level; higher
levels completely break Fast-Track at this point. Working on
a solution to fix the overall issues.

"""
        # Check if we're root
        if os.geteuid() != 0:
            print "\nFast-Track v4 - A new beginning...\n\n"
            print "Fast-Track Setup is not running under root. Please re-run the tool under root...\n"
            sys.exit(1)

        #print "Metasploit directory example: /pentest/exploits/framework3/"
        #print "\nMake sure you do /folder/ with the / at the end or it'll\njack some stuff up."
        print "[*] Ensure that you configure the Metasploit path in bin/config/config\n"
        #metasploitpath=raw_input("\nEnter the path to the metasploit directory\nHit enter for default (/pentest/exploits/framework3/): ")
        #if metasploitpath=='':
        #    metasploitpath="/pentest/exploits/framework3/"
        #if os.path.isfile("%smsfconsole" % (metasploitpath)): print "Metasploit files detected, moving on..."
        #if not os.path.isfile("%smsfconsole" % (metasploitpath)): print "Metasploit not detected in path specified. You should re-run setup and specify the correct path."
        #writefile=file("%s/bin/setup/metasploitconfig.file" % (definepath),'w')
        #writefile.write("%s" % (metasploitpath))
        #writefile.close()
        #print "*** Metasploit directory set..... ***\n"
        print "No guarantee this will successfully install all dependencies correctly;\nyou may need to install some manually.\n\nDifferent Linux OSes require different things to work.\n"
        installstuff = raw_input("Would you like to attempt all dependencies, yes or no: ")
        # Thanks to swc|666 for the help below
        if installstuff == 'yes':
            print '[-] Installing requirements needed for Fast-Track.. [-]'
            print '\n[-] Detecting Linux version... [-]'
            time.sleep(2)
            if os.path.isfile("/etc/apt/sources.list"):
                ### Not every sources.list file presence indicates Ubuntu (this works on all flavors of Ubuntu, Debian and Sidux @least)
                if os.path.isfile("/etc/lsb-release"):
                    pat = re.compile("=|\"", re.M | re.DOTALL)
                    distro = open("/etc/lsb-release").read()
                    distro = pat.sub("", distro).split("\n")
                    distro = [i.strip() for i in distro if i.strip() != '']
                    for n, items in enumerate(distro):
                        if "DISTRIB_DESCRIPTION" in items:
                            d1 = distro[n+0]
                            d2 = d1.strip("DISTRIB_DESCRIPTION")
                            d3 = "\n[-] " "%s " "Detected [-]\n" % (d2)
                            #print d3
                            print "[-] Installing requirements to run on " "%s" "! [-]" % (d2)
                else:
                    ### A sources.list and not a lsb-release file? >.<
                    print '\n[-] Debian-Based OS Detected [-]\n'
                print '[-] Installing requirements! [-]'
                print "Installing Subversion, Build-Essential, Python-ClientForm, FreeTds-Dev, PExpect, and Python2.5-Dev, PYMILLS, through Apt, please wait.."
                subprocess.Popen("apt-get --force-yes -y install subversion build-essential vncviewer nmap python-clientform python2.6-dev python-pexpect python-setuptools", shell=True).wait()
                subprocess.Popen("wget http://ibiblio.org/pub/Linux/ALPHA/freetds/stable/freetds-stable.tgz", shell=True).wait()
                subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
                # FreeTDS installs its client library as libsybdb.
                subprocess.Popen("tar -zxvf freetds-stable.tgz;tar -zxvf pymssql-0.8.0.tar.gz;cd freetds-0.*;./configure --enable-msdblib --with-tdsver=8.0 && make && make install; cd ..;cd pymssql-0.8.0;ln -s /usr/local/lib/libsybdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf freetds*;rm -rf pymssql*", shell=True).wait()
                print '[-] Running ldconfig.... [-]'
                subprocess.Popen("ldconfig", shell=True).wait()
                subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
                subprocess.Popen("tar -zxvf pymssql-0.8.0.tar.gz;cd pymssql-0.8.0;ln -s /usr/local/lib/libsybdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf pymssql*", shell=True).wait()
                subprocess.Popen('wget http://pypi.inqbus.de/pymills/pymills-3.4.tar.gz#md5=5741d4a5c30aaed5def2f4b4f86e92a9;tar -zxvf pymills-3.4.tar.gz;mv pymills-3.4 pymills;cd pymills/; python setup.py install', shell=True).wait()
                subprocess.Popen('rm -rf pymills; rm -rf pymills-3.4.tar.gz', shell=True).wait()
                print "Installing BeautifulSoup Python Module"
                subprocess.Popen("wget http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz;tar -zxvf BeautifulSoup.tar.gz;cd BeautifulSoup*;python setup.py install;cd ..;rm -rf BeautifulSoup*", shell=True).wait()
                print "BeautifulSoup Installed."
                # Taken from http://wiredbytes.com/node/5
                metasploitinstall = raw_input("\nWould you like Fast-Track to install Metasploit 3 for you (experimental)? yes or no: ")
                if metasploitinstall == 'yes':
                    subprocess.Popen("apt-get install build-essential ruby libruby rdoc libyaml-ruby libzlib-ruby libopenssl-ruby libdl-ruby libreadline-ruby libiconv-ruby libgtk2-ruby libglade2-ruby subversion sqlite3 libsqlite3-ruby irb", shell=True).wait()
                    subprocess.Popen("wget -c http://rubyforge.org/frs/download.php/70696/rubygems-1.3.7.tgz;tar -xvzf rubygems-1.3.7.tgz -C /tmp/;cd /tmp/rubygems-1.3.7/;ruby setup.rb", shell=True).wait()
                    subprocess.Popen("/usr/bin/gem1.8 install rails", shell=True).wait()
                    subprocess.Popen("rm rubygems-1.3.7.tgz", shell=True).wait()
                    # The checkout must happen inside the directory that was just created.
                    subprocess.Popen("mkdir /pentest/exploits/framework3;cd /pentest/exploits/framework3/;svn co http://metasploit.com/svn/framework3/trunk/ .", shell=True).wait()
                    print "Metasploit should have been installed..running ldconfig"
                    ldconfig = subprocess.Popen("ldconfig").wait()
            else:
                print "[-] Generic Linux OS detected! [-] \n[-] Installing vanilla installation for dependencies [-]"
                print '[-] Installing FreeTDS and PYMSSQL [-]'
                subprocess.Popen("wget http://ibiblio.org/pub/Linux/ALPHA/freetds/stable/freetds-stable.tgz", shell=True).wait()
                subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
                subprocess.Popen("tar -zxvf freetds-stable.tgz;tar -zxvf pymssql-0.8.0.tar.gz;cd freetds-0.*;./configure --enable-msdblib --with-tdsver=8.0 && make && make install; cd ..;cd pymssql-0.8.0;ln -s /usr/local/lib/libsybdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf freetds*;rm -rf pymssql*", shell=True).wait()
                print '[-] Running ldconfig.... [-]'
                subprocess.Popen("ldconfig", shell=True).wait()
                print '[-] Finished..moving on.. [-]'
                time.sleep(2)
                print 'Installing Module for Python Called "PExpect"'
                subprocess.Popen('wget http://downloads.sourceforge.net/pexpect/pexpect-2.3.tar.gz;tar -zxvf pexpect-2.3.tar.gz;cd pexpect-2.3;python setup.py install;cd ..;rm -rf pexpect-2.3;rm pexpect-2.3.tar.gz', shell=True).wait()
                print 'Installed! Moving on...'
                print 'Installing SQLite3'
                subprocess.Popen('cd /usr/local/bin/;ln -s tclsh8.4 tclsh', shell=True).wait()
                # tar needs the full archive name, including the .tar.gz suffix.
                subprocess.Popen('wget http://www.sqlite.org/sqlite-3.7.0.1.tar.gz;tar -zxvf sqlite-3.7.0.1.tar.gz;cd sqlite-3.7.0.1;./configure --prefix=/usr/local && make && make install;cd ..;rm sqlite-3.7.0.1.tar.gz;rm -rf sqlite-3.7.0.1', shell=True).wait()
                # The downloaded archive is sqlite-ruby-2.2.3.tar.gz; unpack and build under that name.
                subprocess.Popen('wget http://rubyforge.org/frs/download.php/2820/sqlite-ruby-2.2.3.tar.gz;tar -zxvf sqlite-ruby-2.2.3.tar.gz;cd sqlite-ruby-2.2.3;ruby setup.rb config;ruby setup.rb setup;ruby setup.rb install;cd ..;rm sqlite-ruby-2.2.3.tar.gz;rm -rf sqlite-ruby-2.2.3', shell=True).wait()
                print 'SQLite3 installed..Moving on...'
                print "Installing ClientForm Python Module"
                subprocess.Popen("svn co http://codespeak.net/svn/wwwsearch/ClientForm/trunk ClientForm;cd ClientForm;python setup.py install;cd ..;rm -rf ClientForm", shell=True).wait()
                print "ClientForm Installed, moving on.."
                print "Installing PROFTPD"
                subprocess.Popen("""wget ftp://ftp.proftpd.org/distrib/source/proftpd-1.3.3a.tar.gz;tar -zxvf proftpd-1.3.3a.tar.gz;cd proftpd-1.3.*/;./configure && make && make install;cd ..;rm -rf proftpd*;echo "UseReverseDNS off" >> /usr/local/etc/proftpd.conf;echo "IdentLookups off" >> /usr/local/etc/proftpd.conf;killall proftpd""", shell=True).wait()
                print "PROFTPD installed..Moving on..."
                print "Installing PyMills"
                subprocess.Popen('python setuptools.py;wget http://pypi.inqbus.de/pymills/pymills-3.4.tar.gz;tar -zxvf pymills-3.4.tar.gz;mv pymills-3.4 pymills;cd pymills/;python setup.py install;cd ..;rm -rf pymills*', shell=True).wait()
                print "PyMills installed..Moving on..."
                print "Installing BeautifulSoup..."
                subprocess.Popen("wget http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz;tar -zxvf BeautifulSoup.tar.gz;cd BeautifulSoup*;python setup.py install;cd ..;rm -rf BeautifulSoup*", shell=True).wait()
                print "BeautifulSoup installed..Moving on..."
                print "Finished with installations..."
                print "Running ldconfig to wrap up everything..."
                subprocess.Popen("ldconfig", shell=True).wait()
            print "\n[-] Finished with setup [-]\n[-] Try running Fast-Track now. [-]\n[-] If unsuccessful, manually compile from source the deps. [-]"
        print "[-] Re-checking dependencies... [-]"
        try:
            sys.path.append("%s/bin/setup/" % (definepath))
            import depend
            print "\n"
            print "Finished..running ldconfig to wrap everything up...\n"
            ldconfig = subprocess.Popen("ldconfig", shell=True)
            print "Fast-Track setup exiting...\n"
        except ImportError:
            print "Error importing dependency checker."

except KeyboardInterrupt:
    print "\n\nExiting Fast-Track setup...\n"

except IndexError:
    print """
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ~                                           ~
 ~     Fast-Track Setup and Installation     ~
 ~                                           ~
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This script will allow you to install the required
dependencies needed for Fast-Track to function
correctly. Note this does not install Metasploit for
you. If you want to use the automated autopwn
functionality within Metasploit, you will need to
install that yourself.

Usage: python setup.py install
"""
gpl-3.0
5,326,711,040,073,458,000
-2,705,197,054,244,403,000
68.982143
353
0.643191
false
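The distro detection above strips characters out of /etc/lsb-release with a regex. A compact standalone equivalent that parses the file into a dict; the path and field names are the standard lsb-release ones:

def read_lsb_release(path='/etc/lsb-release'):
    info = {}
    with open(path) as fh:
        for line in fh:
            if '=' in line:
                key, _, value = line.strip().partition('=')
                info[key] = value.strip('"')
    return info

# read_lsb_release().get('DISTRIB_DESCRIPTION') yields e.g. 'Ubuntu 10.04 LTS'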
louisq/staticguru
utility/artifact_archiver.py
1
4495
""" The MIT License (MIT) Copyright (c) 2016 Louis-Philippe Querel l_querel@encs.concordia.ca Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import glob import os import shutil from Logging import logger """ The purpose of this utility is to clone the artifacts that have been generated through the build process to preserve them This version would probably only work for maven run projects """ FILTERED_EXTENSIONS = ('*.jar', '*.tar.*', '*.zip', '*.rpm') # todo replace with an abstract solution that could be reused for the other modules to log the version that was ran artifact_archiver_version = 1 def archive(repo_path, archive_path, repo_id, commit, filter_extensions=True): # Determine if we can access the path where the archive should be if not _determine_access(archive_path): logger.error("Failed to save to archive %s" % archive_path) return False temp_archive = os.path.join(repo_path, "%s-temp" % commit) temp_archive_compress_file_no_ext = os.path.join(temp_archive, commit) temp_archive_compress_file = "%s.tar.gz" % temp_archive_compress_file_no_ext archive_repo_path = os.path.join(archive_path, repo_id) archive_compress_file = "%s.tar.gz" % os.path.join(archive_repo_path, commit) _clear_archive(temp_archive, archive_compress_file) target_directories = _identify_target_directories(repo_path) _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions=filter_extensions) _compress_files(temp_archive, temp_archive_compress_file_no_ext) _move_compress_file_to_archive(archive_repo_path, temp_archive_compress_file) # Delete the temporary folder _clear_archive_temp(temp_archive) return True def _determine_access(archive_path): return os.path.exists(archive_path) def _clear_archive(archive_temp, archive_compress_file): _clear_archive_temp(archive_temp) if os.path.exists(archive_compress_file): os.remove(archive_compress_file) def _clear_archive_temp(temp_archive): if os.path.exists(temp_archive): shutil.rmtree(temp_archive) def _identify_target_directories(repo_path): folder = "target" nesting = "**/" target_directories = glob.glob(r'%s%s' % (repo_path, folder)) compound_nesting = "" # We need to navigate the repository to find project target folders for count in range(5): compound_nesting += nesting target_directories += glob.glob(r'%s%s%s' % (repo_path, compound_nesting, folder)) return target_directories def _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions): # Determine if we need to filter any of the files if filter_extensions: ignore = 
shutil.ignore_patterns(*FILTERED_EXTENSIONS)  # ignore_patterns takes the patterns as separate arguments
    else:
        ignore = None

    for path in target_directories:
        folder = path[len(repo_path):]
        shutil.copytree(path, "%s/%s" % (temp_archive, folder), ignore=ignore, symlinks=True)


def _compress_files(archive_temp, temp_archive_compress_file_no_ext):
    # If the compression is changed, the file extension needs to be changed as well in the
    # parent method; 'gztar' produces the .tar.gz suffix expected there. make_archive is
    # the public shutil API for building the tarball.
    shutil.make_archive(temp_archive_compress_file_no_ext, 'gztar', root_dir=archive_temp)


def _move_compress_file_to_archive(repo_archive_path, temp_archive_compress_file):
    if not os.path.exists(repo_archive_path):
        os.makedirs(repo_archive_path)

    shutil.move(temp_archive_compress_file, repo_archive_path)
mit
7,350,615,740,928,390,000
7,906,643,743,647,212,000
34.96
121
0.729032
false
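A hedged usage sketch for the archiver above; the paths, repo id and commit are illustrative, not part of the project:

from utility import artifact_archiver

ok = artifact_archiver.archive(
    repo_path="/tmp/checkouts/myrepo/",     # cloned repository with built target/ dirs
    archive_path="/var/artifact-archive",   # must already exist (_determine_access checks this)
    repo_id="myrepo",
    commit="a1b2c3d",
    filter_extensions=True)                 # drop *.jar, *.tar.*, *.zip, *.rpm from the copy
if not ok:
    print("archive path unreachable; nothing was written")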
digetx/picasso_upstream_support
Documentation/target/tcm_mod_builder.py
215
36866
#!/usr/bin/python # The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD # # Copyright (c) 2010 Rising Tide Systems # Copyright (c) 2010 Linux-iSCSI.org # # Author: nab@kernel.org # import os, sys import subprocess as sub import string import re import optparse tcm_dir = "" fabric_ops = [] fabric_mod_dir = "" fabric_mod_port = "" fabric_mod_init_port = "" def tcm_mod_err(msg): print msg sys.exit(1) def tcm_mod_create_module_subdir(fabric_mod_dir_var): if os.path.isdir(fabric_mod_dir_var) == True: return 1 print "Creating fabric_mod_dir: " + fabric_mod_dir_var ret = os.mkdir(fabric_mod_dir_var) if ret: tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var) return def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n" buf += " u64 nport_wwpn;\n" buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n" buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n" buf += "\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* FC lport target portal group tag for TCM */\n" buf += " u16 lport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n" buf += " struct " + fabric_mod_name + "_lport *lport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n" buf += "\n" buf += "struct " + fabric_mod_name + "_lport {\n" buf += " /* SCSI protocol the lport is providing */\n" buf += " u8 lport_proto_id;\n" buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n" buf += " u64 lport_wwpn;\n" buf += " /* ASCII formatted WWPN for FC Target Lport */\n" buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n" buf += " struct se_wwn lport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "lport" fabric_mod_init_port = "nport" return def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n" buf += " u64 iport_wwpn;\n" buf += " /* ASCII formatted WWPN for Sas Initiator port */\n" buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* SAS port target portal group tag for TCM */\n" buf += " u16 tport_tpgt;\n" buf += " /* 
Pointer back to " + fabric_mod_name + "_tport */\n" buf += " struct " + fabric_mod_name + "_tport *tport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tport {\n" buf += " /* SCSI protocol the tport is providing */\n" buf += " u8 tport_proto_id;\n" buf += " /* Binary World Wide unique Port Name for SAS Target port */\n" buf += " u64 tport_wwpn;\n" buf += " /* ASCII formatted WWPN for SAS Target port */\n" buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" buf += " struct se_wwn tport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "tport" fabric_mod_init_port = "iport" return def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* ASCII formatted InitiatorName */\n" buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* iSCSI target portal group tag for TCM */\n" buf += " u16 tport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" buf += " struct " + fabric_mod_name + "_tport *tport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tport {\n" buf += " /* SCSI protocol the tport is providing */\n" buf += " u8 tport_proto_id;\n" buf += " /* ASCII formatted TargetName for IQN */\n" buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" buf += " struct se_wwn tport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "tport" fabric_mod_init_port = "iport" return def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name): if proto_ident == "FC": tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name) elif proto_ident == "SAS": tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name) elif proto_ident == "iSCSI": tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name) else: print "Unsupported proto_ident: " + proto_ident sys.exit(1) return def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#include <linux/module.h>\n" buf += "#include <linux/moduleparam.h>\n" buf += "#include <linux/version.h>\n" buf += "#include <generated/utsrelease.h>\n" buf += "#include <linux/utsname.h>\n" buf += "#include <linux/init.h>\n" buf += "#include <linux/slab.h>\n" buf += "#include <linux/kthread.h>\n" buf += "#include <linux/types.h>\n" buf += "#include <linux/string.h>\n" buf += "#include 
<linux/configfs.h>\n" buf += "#include <linux/ctype.h>\n" buf += "#include <asm/unaligned.h>\n\n" buf += "#include <target/target_core_base.h>\n" buf += "#include <target/target_core_fabric.h>\n" buf += "#include <target/target_core_fabric_configfs.h>\n" buf += "#include <target/target_core_configfs.h>\n" buf += "#include <target/configfs_macros.h>\n\n" buf += "#include \"" + fabric_mod_name + "_base.h\"\n" buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n" buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n" buf += " struct " + fabric_mod_name + "_nacl *nacl;\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " u64 wwpn = 0;\n" buf += " u32 nexus_depth;\n\n" buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" buf += " return ERR_PTR(-EINVAL); */\n" buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n" buf += " if (!se_nacl_new)\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n" buf += " nexus_depth = 1;\n" buf += " /*\n" buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n" buf += " * when converting a NodeACL from demo mode -> explict\n" buf += " */\n" buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n" buf += " name, nexus_depth);\n" buf += " if (IS_ERR(se_nacl)) {\n" buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n" buf += " return se_nacl;\n" buf += " }\n" buf += " /*\n" buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n" buf += " */\n" buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n" buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" buf += " return se_nacl;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n" buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n" buf += " kfree(nacl);\n" buf += "}\n\n" buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n" buf += " struct se_wwn *wwn,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n" buf += " struct " + fabric_mod_name + "_tpg *tpg;\n" buf += " unsigned long tpgt;\n" buf += " int ret;\n\n" buf += " if (strstr(name, \"tpgt_\") != name)\n" buf += " return ERR_PTR(-EINVAL);\n" buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n" buf += " return ERR_PTR(-EINVAL);\n\n" buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n" buf += " if (!tpg) {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n" buf += " 
return ERR_PTR(-ENOMEM);\n" buf += " }\n" buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" buf += " &tpg->se_tpg, tpg,\n" buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" buf += " if (ret < 0) {\n" buf += " kfree(tpg);\n" buf += " return NULL;\n" buf += " }\n" buf += " return &tpg->se_tpg;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n" buf += " core_tpg_deregister(se_tpg);\n" buf += " kfree(tpg);\n" buf += "}\n\n" buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n" buf += " struct target_fabric_configfs *tf,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " u64 wwpn = 0;\n\n" buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" buf += " return ERR_PTR(-EINVAL); */\n\n" buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n" buf += " if (!" + fabric_mod_port + ") {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += " }\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n" buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n" buf += " kfree(" + fabric_mod_port + ");\n" buf += "}\n\n" buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n" buf += " struct target_fabric_configfs *tf,\n" buf += " char *page)\n" buf += "{\n" buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" buf += " utsname()->machine);\n" buf += "}\n\n" buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n" buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n" buf += " &" + fabric_mod_name + "_wwn_version.attr,\n" buf += " NULL,\n" buf += "};\n\n" buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" buf += " .module = THIS_MODULE,\n" buf += " .name = " + fabric_mod_name + ",\n" buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" buf += " .tpg_get_default_depth = " + 
fabric_mod_name + "_get_default_depth,\n" buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n" buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n" buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n" buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n" buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n" buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n" buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n" buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n" buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" buf += " .close_session = " + fabric_mod_name + "_close_session,\n" buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" buf += " .sess_get_initiator_sid = NULL,\n" buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n" buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n" buf += " /*\n" buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" buf += " */\n" buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n" buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n" buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" buf += " .fabric_post_link = NULL,\n" buf += " .fabric_pre_unlink = NULL,\n" buf += " .fabric_make_np = NULL,\n" buf += " .fabric_drop_np = NULL,\n" buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" buf += "\n" buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" buf += "};\n\n" buf += "static int __init " + fabric_mod_name + "_init(void)\n" buf += "{\n" buf += " return target_register_template(" + fabric_mod_name + "_ops);\n" buf += "};\n\n" buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" buf += "{\n" buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n" buf += "};\n\n" buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" buf += "MODULE_LICENSE(\"GPL\");\n" buf += "module_init(" + fabric_mod_name + "_init);\n" buf += "module_exit(" + fabric_mod_name + "_exit);\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_scan_fabric_ops(tcm_dir): fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h" print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api process_fo = 0; p = open(fabric_ops_api, 'r') line = p.readline() while line: if process_fo == 0 and 
re.search('struct target_core_fabric_ops {', line): line = p.readline() continue if process_fo == 0: process_fo = 1; line = p.readline() # Search for function pointer if not re.search('\(\*', line): continue fabric_ops.append(line.rstrip()) continue line = p.readline() # Search for function pointer if not re.search('\(\*', line): continue fabric_ops.append(line.rstrip()) p.close() return def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf = "" bufi = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h" print "Writing file: " + fi pi = open(fi, 'w') if not pi: tcm_mod_err("Unable to open file: " + fi) buf = "#include <linux/slab.h>\n" buf += "#include <linux/kthread.h>\n" buf += "#include <linux/types.h>\n" buf += "#include <linux/list.h>\n" buf += "#include <linux/types.h>\n" buf += "#include <linux/string.h>\n" buf += "#include <linux/ctype.h>\n" buf += "#include <asm/unaligned.h>\n" buf += "#include <scsi/scsi.h>\n" buf += "#include <scsi/scsi_host.h>\n" buf += "#include <scsi/scsi_device.h>\n" buf += "#include <scsi/scsi_cmnd.h>\n" buf += "#include <scsi/libfc.h>\n\n" buf += "#include <target/target_core_base.h>\n" buf += "#include <target/target_core_fabric.h>\n" buf += "#include <target/target_core_configfs.h>\n\n" buf += "#include \"" + fabric_mod_name + "_base.h\"\n" buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n" buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n" total_fabric_ops = len(fabric_ops) i = 0 while i < total_fabric_ops: fo = fabric_ops[i] i += 1 # print "fabric_ops: " + fo if re.search('get_fabric_name', fo): buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n" buf += "{\n" buf += " return \"" + fabric_mod_name + "\";\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" continue if re.search('get_fabric_proto_ident', fo): buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " u8 proto_id;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" buf += " }\n\n" buf += " return proto_id;\n" buf += "}\n\n" bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n" if re.search('get_wwn', fo): buf += "char *" + 
fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n" buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n" if re.search('get_tag', fo): buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " return tpg->" + fabric_mod_port + "_tpgt;\n" buf += "}\n\n" bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n" if re.search('get_default_depth', fo): buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n" if re.search('get_pr_transport_id\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl,\n" buf += " struct t10_pr_registration *pr_reg,\n" buf += " int *format_code,\n" buf += " unsigned char *buf)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " int ret = 0;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" buf += " }\n\n" buf += " return ret;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n" bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" bufi += " int *, unsigned char *);\n" if re.search('get_pr_transport_id_len\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl,\n" buf += " struct t10_pr_registration *pr_reg,\n" buf += " int *format_code)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " int ret = 0;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" 
elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" buf += " }\n\n" buf += " return ret;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n" bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" bufi += " int *);\n" if re.search('parse_pr_out_transport_id\)\(', fo): buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " const char *buf,\n" buf += " u32 *out_tid_len,\n" buf += " char **port_nexus_ptr)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " char *tid = NULL;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" buf += " }\n\n" buf += " return tid;\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n" bufi += " const char *, u32 *, char **);\n" if re.search('alloc_fabric_acl\)\(', fo): buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" buf += " if (!nacl) {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n" buf += " return NULL;\n" buf += " }\n\n" buf += " return &nacl->se_node_acl;\n" buf += "}\n\n" bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n" if re.search('release_fabric_acl\)\(', fo): buf += "void " + fabric_mod_name + "_release_fabric_acl(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n" buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" buf += " kfree(nacl);\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n" bufi += " struct se_node_acl *);\n" if re.search('tpg_get_inst_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n" if re.search('\*release_cmd\)\(', fo): buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n" buf += "{\n" buf += 
" return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n" if re.search('shutdown_session\)\(', fo): buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n" if re.search('close_session\)\(', fo): buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" if re.search('sess_get_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n" if re.search('write_pending\)\(', fo): buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n" if re.search('write_pending_status\)\(', fo): buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n" if re.search('set_default_node_attributes\)\(', fo): buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" if re.search('get_task_tag\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n" if re.search('get_cmd_state\)\(', fo): buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n" if re.search('queue_data_in\)\(', fo): buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n" if re.search('queue_status\)\(', fo): buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" if re.search('queue_tm_rsp\)\(', fo): buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" if re.search('aborted_task\)\(', fo): buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() ret = pi.write(bufi) if ret: tcm_mod_err("Unable to write fi: " + fi) pi.close() return def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/Makefile" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n" buf += " " + fabric_mod_name + "_configfs.o\n" buf += "obj-$(CONFIG_" + 
fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/Kconfig" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) buf = "config " + fabric_mod_name.upper() + "\n" buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n" buf += " depends on TARGET_CORE && CONFIGFS_FS\n" buf += " default n\n" buf += " ---help---\n" buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name): buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n" kbuild = tcm_dir + "/drivers/target/Makefile" f = open(kbuild, 'a') f.write(buf) f.close() return def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name): buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n" kconfig = tcm_dir + "/drivers/target/Kconfig" f = open(kconfig, 'a') f.write(buf) f.close() return def main(modname, proto_ident): # proto_ident = "FC" # proto_ident = "SAS" # proto_ident = "iSCSI" tcm_dir = os.getcwd(); tcm_dir += "/../../" print "tcm_dir: " + tcm_dir fabric_mod_name = modname fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name print "Set fabric_mod_name: " + fabric_mod_name print "Set fabric_mod_dir: " + fabric_mod_dir print "Using proto_ident: " + proto_ident if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI": print "Unsupported proto_ident: " + proto_ident sys.exit(1) ret = tcm_mod_create_module_subdir(fabric_mod_dir) if ret: print "tcm_mod_create_module_subdir() failed because module already exists!" sys.exit(1) tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_scan_fabric_ops(tcm_dir) tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) return parser = optparse.OptionParser() parser.add_option('-m', '--modulename', help='Module name', dest='modname', action='store', nargs=1, type='string') parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident', action='store', nargs=1, type='string') (opts, args) = parser.parse_args() mandatories = ['modname', 'protoident'] for m in mandatories: if not opts.__dict__[m]: print "mandatory option is missing\n" parser.print_help() exit(-1) if __name__ == "__main__": main(str(opts.modname), opts.protoident)
gpl-2.0
6,311,792,204,702,602,000
-5,429,390,627,882,978,000
37.084711
162
0.572913
false
MCFlowMace/Wordom
src/setup.py
1
1423
#! /usr/bin/env python

# System imports
from distutils.core import *
from distutils import sysconfig

# Third-party modules - we depend on numpy
import numpy
# ... and on numpy.distutils in order to check whether LAPACK is present
import numpy.distutils.system_info as sysinfo

# Obtain the numpy include directory. This works across numpy versions.
try:
    numpy_include = numpy.get_include()
except AttributeError:
    numpy_include = numpy.get_numpy_include()

# wordom extension module (the SWIG wrapper needs the numpy headers in
# both cases; LAPACK support is only compiled in when the libraries exist)
if len(sysinfo.get_info('lapack')) == 0:
    _wordom = Extension("_wordom",
                        ["wordom.i", "fileio.c", "tools.c", "qcprot.c",
                         "xdrfile.c", "xdrfile_xtc.c"],
                        include_dirs = [numpy_include],
                        )
else:
    _wordom = Extension("_wordom",
                        ["wordom.i", "fileio.c", "tools.c", "qcprot.c",
                         "xdrfile.c", "xdrfile_xtc.c"],
                        include_dirs = [numpy_include],
                        extra_compile_args = ["-D LAPACK"],
                        libraries = ['lapack', 'blas']
                        )

# NumpyTypemapTests setup
setup(name = "wordom",
      description = "wordom is a molecular structure and data manipulation program/library",
      author = "Michele Seeber & colleagues",
      url = "http://wordom.sf.net",
      author_email = "mseeber@gmail.com",
      license = "GPL",
      version = "0.23",
      ext_modules = [_wordom],
      py_modules = ['wordom'])
gpl-3.0
2,266,833,728,425,981,700
2,962,406,612,841,138,700
32.880952
96
0.575545
false
flask-admin/flask-admin
flask_admin/model/ajax.py
53
1076
DEFAULT_PAGE_SIZE = 10


class AjaxModelLoader(object):
    """
        Ajax related model loader. Override this to implement custom loading behavior.
    """
    def __init__(self, name, options):
        """
            Constructor.

            :param name:
                Field name
        """
        self.name = name
        self.options = options

    def format(self, model):
        """
            Return (id, name) tuple from the model.
        """
        raise NotImplementedError()

    def get_one(self, pk):
        """
            Find model by its primary key.

            :param pk:
                Primary key value
        """
        raise NotImplementedError()

    def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
        """
            Return models that match `query`.

            :param query:
                Query string
            :param offset:
                Offset
            :param limit:
                Limit
        """
        raise NotImplementedError()
bsd-3-clause
5,906,359,382,339,655,000
6,990,034,192,882,428,000
22.391304
86
0.48513
false
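# A minimal sketch of a concrete AjaxModelLoader backed by a plain in-memory
# list, to make the abstract contract above concrete. `Item`, `ITEMS`, and
# `ListModelLoader` are invented stand-ins, not part of Flask-Admin.
from collections import namedtuple

from flask_admin.model.ajax import DEFAULT_PAGE_SIZE, AjaxModelLoader

Item = namedtuple('Item', ['id', 'name'])
ITEMS = [Item(1, 'apple'), Item(2, 'banana'), Item(3, 'cherry')]


class ListModelLoader(AjaxModelLoader):
    def format(self, model):
        # (id, name) tuple consumed by the ajax select widget
        return (model.id, model.name) if model else None

    def get_one(self, pk):
        # a linear scan is fine for a toy list
        return next((i for i in ITEMS if i.id == int(pk)), None)

    def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
        matches = [i for i in ITEMS if query.lower() in i.name.lower()]
        return matches[offset:offset + limit]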
benob/icsisumm
icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/classify/__init__.py
9
3871
# Natural Language Toolkit: Classifiers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT

"""
Classes and interfaces for labeling tokens with category labels (or
X{class labels}).  Typically, labels are represented with strings
(such as C{'health'} or C{'sports'}).  Classifiers can be used to
perform a wide range of classification tasks.  For example,
classifiers can be used...

  - to classify documents by topic.
  - to classify ambiguous words by which word sense is intended.
  - to classify acoustic signals by which phoneme they represent.
  - to classify sentences by their author.

Features
--------
In order to decide which category label is appropriate for a given
token, classifiers examine one or more 'features' of the token.  These
X{features} are typically chosen by hand, and indicate which aspects
of the token are relevant to the classification decision.  For
example, a document classifier might use a separate feature for each
word, recording how often that word occurred in the document.

Featuresets
-----------
The features describing a token are encoded using a X{featureset},
which is a dictionary that maps from X{feature names} to X{feature
values}.  Feature names are unique strings that indicate what aspect
of the token is encoded by the feature.  Examples include
C{'prevword'}, for a feature whose value is the previous word; and
C{'contains-word(library)'} for a feature that is true when a document
contains the word C{'library'}.  Feature values are typically
booleans, numbers, or strings, depending on which feature they
describe.

Featuresets are typically constructed using a X{feature extraction
function}, which takes a token as its input, and returns a featureset
describing that token.  This feature extraction function is applied
to each token before it is fed to the classifier:

    >>> # Define a feature extraction function.
    >>> def document_features(document):
    ...     return dict([('contains-word(%s)' % w, True) for w in document])

    >>> # Classify each Gutenberg document.
    >>> for file in gutenberg.files():
    ...     doc = gutenberg.tokenized(file)
    ...     print file, classifier.classify(document_features(doc))

Training Classifiers
--------------------
Most classifiers are built by training them on a list of hand-labeled
examples, known as the X{training set}.  Training sets are represented
as lists of C{(featuredict, label)} tuples.
"""

from api import *
from util import *
from naivebayes import *
from decisiontree import *
from weka import *

from nltk.internals import deprecated, Deprecated

__all__ = [
    # Classifier Interfaces
    'ClassifierI', 'MultiClassifierI',

    # Classifiers
    'NaiveBayesClassifier', 'DecisionTreeClassifier', 'WekaClassifier',

    # Utility functions.  Note that accuracy() is intentionally
    # omitted -- it should be accessed as nltk.classify.accuracy();
    # similarly for log_likelihood() and attested_labels().
    'config_weka',

    # Demos -- not included.
    ]

try:
    import numpy
    from maxent import *
    __all__ += ['ConditionalExponentialClassifier', 'train_maxent_classifier',]
except ImportError:
    pass

######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated

class ClassifyI(ClassifierI, Deprecated):
    """Use nltk.ClassifierI instead."""

@deprecated("Use nltk.classify.accuracy() instead.")
def classifier_accuracy(classifier, gold):
    return accuracy(classifier, gold)

@deprecated("Use nltk.classify.log_likelihood() instead.")
def classifier_log_likelihood(classifier, gold):
    return log_likelihood(classifier, gold)
gpl-3.0
-5,441,857,334,581,198,000
-728,252,215,570,302,300
35.866667
79
0.709894
false
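# A toy illustration of the (featureset, label) training-set shape described
# in the module docstring above, using the NaiveBayesClassifier.train() API
# it exports. The two documents and their labels are invented; Python 2
# syntax, matching the module.
from nltk.classify import NaiveBayesClassifier

def document_features(document):
    return dict([('contains-word(%s)' % w, True) for w in document])

train_set = [
    (document_features(['ball', 'team', 'score']), 'sports'),
    (document_features(['vote', 'election', 'senate']), 'politics'),
]
classifier = NaiveBayesClassifier.train(train_set)
print classifier.classify(document_features(['team', 'score', 'goal']))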
LoHChina/nova
nova/tests/functional/v3/test_migrate_server.py
27
3462
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg

from nova.conductor import manager as conductor_manager
from nova import db
from nova.tests.functional.v3 import test_servers
from nova import utils

CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.extensions')


class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
    extension_name = "os-migrate-server"
    ctype = 'json'
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'

    def _get_flags(self):
        f = super(MigrateServerSamplesJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.admin_actions.'
            'Admin_actions')
        return f

    def setUp(self):
        """setUp Method for MigrateServer api samples extension

        This method creates the server that will be used in each test.
        """
        super(MigrateServerSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
    def test_post_migrate(self, mock_cold_migrate):
        # Get api samples to migrate server request.
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'migrate-server', {})
        self.assertEqual(202, response.status_code)

    def test_post_live_migrate_server(self):
        # Get api samples to server live migrate request.
        def fake_live_migrate(_self, context, instance, scheduler_hint,
                              block_migration, disk_over_commit):
            self.assertEqual(self.uuid, instance["uuid"])
            host = scheduler_hint["host"]
            self.assertEqual(self.compute.host, host)

        self.stubs.Set(conductor_manager.ComputeTaskManager,
                       '_live_migrate', fake_live_migrate)

        def fake_get_compute(context, host):
            service = dict(host=host,
                           binary='nova-compute',
                           topic='compute',
                           report_count=1,
                           updated_at='foo',
                           hypervisor_type='bar',
                           hypervisor_version=utils.convert_version_to_int('1.0'),
                           disabled=False)
            return {'compute_node': [service]}

        self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)

        response = self._do_post('servers/%s/action' % self.uuid,
                                 'live-migrate-server',
                                 {'hostname': self.compute.host})
        self.assertEqual(202, response.status_code)
apache-2.0
-2,752,630,677,047,411,700
1,026,691,257,838,122,900
39.255814
78
0.606586
false
montefra/dodocs
dodocs/__init__.py
1
1068
"""Main function Copyright (c) 2015 Francesco Montesano MIT Licence """ import os import sys from dodocs.cmdline import parse import dodocs.logger as dlog __version__ = "0.0.1" def main(argv=None): """ Main code Parameters ---------- argv : list of strings, optional command line arguments """ args = parse(argv=argv) dlog.setLogger(args) # make sure to reset the subcommand name log = dlog.getLogger() if "func" in args: args.func(args) log.debug("Finished") return 0 else: # defaults profile to list if args.subparser_name == 'profile' and args.profile_cmd is None: main(sys.argv[1:] + ["list"]) else: # in the other cases suggest to run -h msg = ("Please provide a valid command.\n" "Type\n " + os.path.split(sys.argv[0])[1]) if args.subparser_name is not None: msg += " " + args.subparser_name msg += ' -h' log.error(msg) return 1
mit
543,698,297,079,942,600
530,438,459,354,993,100
21.723404
73
0.549625
false
mozilla/kitsune
kitsune/wiki/permissions.py
1
4844
import logging

from django.conf import settings


log = logging.getLogger("k.wiki")


# Why is this a mixin if it can only be used for the Document model?
# Good question! My only good reason is to keep the permission related
# code organized and contained in one place.
class DocumentPermissionMixin(object):
    """Adds permission checking methods to the Document model."""

    def allows(self, user, action):
        """Check if the user has the permission on the document."""
        # If this is kicking up a KeyError it's probably because you typoed!
        return getattr(self, "_allows_%s" % action)(user)

    def _allows_create_revision(self, user):
        """Can the user create a revision for the document?"""
        # For now (ever?), creating revisions isn't restricted at all.
        return True

    def _allows_edit(self, user):
        """Can the user edit the document?"""
        # Document editing isn't restricted until it has an approved
        # revision.
        if not self.current_revision:
            return True

        # Locale leaders and reviewers can edit in their locale.
        locale = self.locale
        if _is_leader(locale, user) or _is_reviewer(locale, user):
            return True

        # And finally, fallback to the actual django permission.
        return user.has_perm("wiki.change_document")

    def _allows_delete(self, user):
        """Can the user delete the document?"""
        # Locale leaders can delete documents in their locale.
        locale = self.locale
        if _is_leader(locale, user):
            return True

        # Fallback to the django permission.
        return user.has_perm("wiki.delete_document")

    def _allows_archive(self, user):
        """Can the user archive the document?"""
        # Just use the django permission.
        return user.has_perm("wiki.archive_document")

    def _allows_edit_keywords(self, user):
        """Can the user edit the document's keywords?"""
        # If the document is in the default locale, just use the
        # django permission.
        # Editing keywords isn't restricted in other locales.
        return self.locale != settings.WIKI_DEFAULT_LANGUAGE or user.has_perm("wiki.edit_keywords")

    def _allows_edit_needs_change(self, user):
        """Can the user edit the needs change fields for the document?"""
        # If the document is in the default locale, just use the
        # django permission.
        # Needs change isn't used for other locales (yet?).
        return self.locale == settings.WIKI_DEFAULT_LANGUAGE and user.has_perm(
            "wiki.edit_needs_change"
        )

    def _allows_mark_ready_for_l10n(self, user):
        """Can the user mark the document as ready for localization?"""
        # If the document is localizable and the user has the django
        # permission, then the user can mark as ready for l10n.
        return self.is_localizable and user.has_perm("wiki.mark_ready_for_l10n")

    def _allows_review_revision(self, user):
        """Can the user review a revision for the document?"""
        # Locale leaders and reviewers can review revisions in their
        # locale.
        locale = self.locale
        if _is_leader(locale, user) or _is_reviewer(locale, user):
            return True

        # Fallback to the django permission.
        return user.has_perm("wiki.review_revision")

    def _allows_delete_revision(self, user):
        """Can the user delete a document's revisions?"""
        # Locale leaders and reviewers can delete revisions in their
        # locale.
        locale = self.locale
        if _is_leader(locale, user) or _is_reviewer(locale, user):
            return True

        # Fallback to the django permission.
        return user.has_perm("wiki.delete_revision")


def _is_leader(locale, user):
    """Checks if the user is a leader for the given locale.

    Returns False if the locale doesn't exist. This should only happen
    if we forgot to insert a new locale when enabling it or during
    testing.
    """
    from kitsune.wiki.models import Locale

    try:
        locale_team = Locale.objects.get(locale=locale)
    except Locale.DoesNotExist:
        log.warning("Locale not created for %s" % locale)
        return False

    return user in locale_team.leaders.all()


def _is_reviewer(locale, user):
    """Checks if the user is a reviewer for the given locale.

    Returns False if the locale doesn't exist. This should only happen
    if we forgot to insert a new locale when enabling it or during
    testing.
    """
    from kitsune.wiki.models import Locale

    try:
        locale_team = Locale.objects.get(locale=locale)
    except Locale.DoesNotExist:
        log.warning("Locale not created for %s" % locale)
        return False

    return user in locale_team.reviewers.all()
bsd-3-clause
216,979,617,436,484,260
-5,442,383,235,524,571,000
35.69697
99
0.652147
false
dwaynebailey/translate
translate/lang/zh_tw.py
3
1116
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

"""This module represents the Chinese language (traditional).

.. seealso:: http://en.wikipedia.org/wiki/Chinese_language
"""

from __future__ import unicode_literals

from translate.lang.zh import zh


class zh_tw(zh):
    specialchars = "←→↔×÷©…—‘’“”「」『』【】《》"

    ignoretests = {
        'all': ["acronyms", "simplecaps", "startcaps"],
    }
gpl-2.0
5,428,168,568,636,733,000
-5,305,697,931,175,267,000
29.828571
70
0.710843
false
aussendorf/bareos-fd-python-plugins
plugin/BareosFdPluginBaseclass.py
1
5778
#This file is now part of the main Bareos repo. Do not use this version, use the package bareos-filedaemon-python-plugin instead #!/usr/bin/env python # -*- coding: utf-8 -*- # Baseclass for Bareos python plugins # Functions taken and adapted from bareos-fd.py # (c) Bareos GmbH & Co. KG, Maik Aussendorf # AGPL v.3 from bareosfd import * from bareos_fd_consts import * from io import open from os import O_WRONLY, O_CREAT class BareosFdPluginBaseclass: ''' Bareos python plugin base class ''' def __init__(self, context, plugindef): DebugMessage(context, 100, "Constructor called in module " + __name__ + "\n"); events = []; events.append(bEventType['bEventJobEnd']); events.append(bEventType['bEventEndBackupJob']); events.append(bEventType['bEventEndFileSet']); events.append(bEventType['bEventHandleBackupFile']); RegisterEvents(context, events); # get some static Bareos values self.fdname = GetValue(context, bVariable['bVarFDName']); self.jobId = GetValue(context, bVariable['bVarJobId']); self.client = GetValue(context, bVariable['bVarClient']); self.level = GetValue(context, bVariable['bVarLevel']); self.jobName = GetValue(context, bVariable['bVarJobName']); self.workingdir = GetValue(context, bVariable['bVarWorkingDir']); DebugMessage(context, 100, "FDName = " + self.fdname + " - BareosFdPluginBaseclass\n"); DebugMessage(context, 100, "WorkingDir = " + self.workingdir + " jobId: " + str(self.jobId) + "\n"); def parse_plugin_definition(self,context, plugindef): DebugMessage(context, 100, "plugin def parser called with " + plugindef + "\n"); # Parse plugin options into a dict self.options = dict(); plugin_options = plugindef.split(":"); for current_option in plugin_options: key,sep,val = current_option.partition("="); DebugMessage(context, 100, "key:val: " + key + ':' + val + "\n"); if val == '': continue; else: self.options[key] = val; # you should overload this method with your own and do option checking here, return bRCs['bRC_Error'], if options are not ok # or better call super.parse_plugin_definition in your own class and make sanity check on self.options afterwards return bRCs['bRC_OK']; def plugin_io(self, context, IOP): DebugMessage(context, 100, "plugin_io called with " + str(IOP) + "\n"); FNAME = IOP.fname; if IOP.func == bIOPS['IO_OPEN']: try: if IOP.flags & (O_CREAT | O_WRONLY): self.file = open(FNAME, 'wb'); else: self.file = open(FNAME, 'rb'); except: IOP.status = -1; return bRCs['bRC_Error']; return bRCs['bRC_OK']; elif IOP.func == bIOPS['IO_CLOSE']: self.file.close(); return bRCs['bRC_OK']; elif IOP.func == bIOPS['IO_SEEK']: return bRCs['bRC_OK']; elif IOP.func == bIOPS['IO_READ']: IOP.buf = bytearray(IOP.count); IOP.status = self.file.readinto(IOP.buf); IOP.io_errno = 0 return bRCs['bRC_OK']; elif IOP.func == bIOPS['IO_WRITE']: IOP.status = self.file.write(IOP.buf); IOP.io_errno = 0 return bRCs['bRC_OK']; def handle_plugin_event(self, context, event): if event == bEventType['bEventJobEnd']: DebugMessage(context, 100, "handle_plugin_event called with bEventJobEnd\n"); elif event == bEventType['bEventEndBackupJob']: DebugMessage(context, 100, "handle_plugin_event called with bEventEndBackupJob\n"); elif event == bEventType['bEventEndFileSet']: DebugMessage(context, 100, "handle_plugin_event called with bEventEndFileSet\n"); else: DebugMessage(context, 100, "handle_plugin_event called with event" + str(event) + "\n"); return bRCs['bRC_OK']; def start_backup_file(self,context, savepkt): DebugMessage(context, 100, "start_backup called\n"); # Base method, we do not add anything, 
overload this method with your implementation to add files to backup fileset return bRCs['bRC_Skip']; def end_backup_file(self, context): DebugMessage(context, 100, "end_backup_file() entry point in Python called\n") return bRCs['bRC_OK']; def start_restore_file(self, context, cmd): DebugMessage(context, 100, "start_restore_file() entry point in Python called with" + str(cmd) + "\n") return bRCs['bRC_OK']; def end_restore_file(self,context): DebugMessage(context, 100, "end_restore_file() entry point in Python called\n") return bRCs['bRC_OK']; def restore_object_data(self, context, ROP): DebugMessage(context, 100, "restore_object_data called with " + str(ROP) + "\n"); return bRCs['bRC_OK']; def create_file(self,context, restorepkt): DebugMessage(context, 100, "create_file() entry point in Python called with" + str(restorepkt) + "\n") restorepkt.create_status = bCFs['CF_EXTRACT']; return bRCs['bRC_OK']; def check_file(self,context, fname): DebugMessage(context, 100, "check_file() entry point in Python called with" + str(fname) + "\n") return bRCs['bRC_OK']; def handle_backup_file(self,context, savepkt): DebugMessage(context, 100, "handle_backup_file called with " + str(savepkt) + "\n"); return bRCs['bRC_OK']; # vim: ts=4 tabstop=4 expandtab shiftwidth=4 softtabstop=4
agpl-3.0
5,232,307,967,307,127,000
-2,116,943,957,580,486,700
40.271429
132
0.607823
false
bdh1011/wau
venv/lib/python2.7/site-packages/nbformat/v4/nbjson.py
11
1921
"""Read and write notebooks in JSON format.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import copy import json from ipython_genutils import py3compat from .nbbase import from_dict from .rwbase import ( NotebookReader, NotebookWriter, rejoin_lines, split_lines, strip_transient ) class BytesEncoder(json.JSONEncoder): """A JSON encoder that accepts b64 (and other *ascii*) bytestrings.""" def default(self, obj): if isinstance(obj, bytes): return obj.decode('ascii') return json.JSONEncoder.default(self, obj) class JSONReader(NotebookReader): def reads(self, s, **kwargs): """Read a JSON string into a Notebook object""" nb = json.loads(s, **kwargs) nb = self.to_notebook(nb, **kwargs) return nb def to_notebook(self, d, **kwargs): """Convert a disk-format notebook dict to in-memory NotebookNode handles multi-line values as strings, scrubbing of transient values, etc. """ nb = from_dict(d) nb = rejoin_lines(nb) nb = strip_transient(nb) return nb class JSONWriter(NotebookWriter): def writes(self, nb, **kwargs): """Serialize a NotebookNode object as a JSON string""" kwargs['cls'] = BytesEncoder kwargs['indent'] = 1 kwargs['sort_keys'] = True kwargs['separators'] = (',',': ') kwargs.setdefault('ensure_ascii', False) # don't modify in-memory dict nb = copy.deepcopy(nb) if kwargs.pop('split_lines', True): nb = split_lines(nb) nb = strip_transient(nb) return py3compat.cast_unicode_py2(json.dumps(nb, **kwargs), 'utf-8') _reader = JSONReader() _writer = JSONWriter() reads = _reader.reads read = _reader.read to_notebook = _reader.to_notebook write = _writer.write writes = _writer.writes
mit
1,166,356,706,039,009,300
-5,547,973,539,709,752,000
27.25
81
0.635606
false
bcornwellmott/frappe
frappe/commands/docs.py
7
2238
from __future__ import unicode_literals, absolute_import
import click
import os
import frappe
from frappe.commands import pass_context


@click.command('write-docs')
@pass_context
@click.argument('app')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
def write_docs(context, app, target=None, local=False):
    "Setup docs in target folder of target app"
    from frappe.utils.setup_docs import setup_docs

    if not target:
        target = os.path.abspath(os.path.join("..", "docs", app))

    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            make = setup_docs(app)
            make.make_docs(target, local)
        finally:
            frappe.destroy()


@click.command('build-docs')
@pass_context
@click.argument('app')
@click.option('--docs-version', default='current')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
@click.option('--watch', default=False, is_flag=True, help='Watch for changes and rewrite')
def build_docs(context, app, docs_version="current", target=None, local=False, watch=False):
    "Setup docs in target folder of target app"
    from frappe.utils import watch as start_watch

    if not target:
        target = os.path.abspath(os.path.join("..", "docs", app))

    for site in context.sites:
        _build_docs_once(site, app, docs_version, target, local)

        if watch:
            def trigger_make(source_path, event_type):
                if "/templates/autodoc/" in source_path:
                    _build_docs_once(site, app, docs_version, target, local)

                elif ("/docs.css" in source_path
                        or "/docs/" in source_path
                        or "docs.py" in source_path):
                    _build_docs_once(site, app, docs_version, target, local,
                                     only_content_updated=True)

            apps_path = frappe.get_app_path(app, "..", "..")
            start_watch(apps_path, handler=trigger_make)


def _build_docs_once(site, app, docs_version, target, local, only_content_updated=False):
    from frappe.utils.setup_docs import setup_docs

    try:
        frappe.init(site=site)
        frappe.connect()
        make = setup_docs(app)

        if not only_content_updated:
            make.build(docs_version)

        make.make_docs(target, local)
    finally:
        frappe.destroy()


commands = [
    build_docs,
    write_docs,
]
mit
-8,906,996,303,488,342,000
224,511,305,887,796,770
27.692308
92
0.70286
false
uclouvain/osis_louvain
manage.py
1
1269
#!/usr/bin/env python
import os
import sys

import dotenv

if __name__ == "__main__":
    if 'test' in sys.argv:
        os.environ.setdefault('TESTING', 'True')

    dotenv.read_dotenv()
    SETTINGS_FILE = os.environ.get('DJANGO_SETTINGS_MODULE', 'backoffice.settings.local')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", SETTINGS_FILE)

    from django.core.management import execute_from_command_line

    try:
        execute_from_command_line(sys.argv)
    except KeyError as ke:
        print("Error loading application.")
        print("The following environment var is not defined : {}".format(str(ke)))
        print("Check the following possible causes :")
        print(" - You don't have a .env file. You can copy .env.example to .env to use default")
        print(" - Mandatory variables are not defined in your .env file.")
        sys.exit("SettingsKeyError")
    except ImportError as ie:
        print("Error loading application : {}".format(str(ie)))
        print("Check the following possible causes :")
        print(" - The DJANGO_SETTINGS_MODULE defined in your .env doesn't exist")
        print(" - No DJANGO_SETTINGS_MODULE is defined and the default 'backoffice.settings.local' doesn't exist ")
        sys.exit("DjangoSettingsError")
agpl-3.0
1,579,240,923,077,871,900
5,255,452,013,040,181,000
41.3
115
0.666667
false
pforret/python-for-android
python-modules/twisted/twisted/web/rewrite.py
57
1862
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#

from twisted.web import resource


class RewriterResource(resource.Resource):

    def __init__(self, orig, *rewriteRules):
        resource.Resource.__init__(self)
        self.resource = orig
        self.rewriteRules = list(rewriteRules)

    def _rewrite(self, request):
        for rewriteRule in self.rewriteRules:
            rewriteRule(request)

    def getChild(self, path, request):
        request.postpath.insert(0, path)
        request.prepath.pop()
        self._rewrite(request)
        path = request.postpath.pop(0)
        request.prepath.append(path)
        return self.resource.getChildWithDefault(path, request)

    def render(self, request):
        self._rewrite(request)
        return self.resource.render(request)


def tildeToUsers(request):
    if request.postpath and request.postpath[0][:1] == '~':
        request.postpath[:1] = ['users', request.postpath[0][1:]]
        request.path = '/' + '/'.join(request.prepath + request.postpath)


def alias(aliasPath, sourcePath):
    """
    I am not a very good aliaser. But I'm the best I can be. If I'm
    aliasing to a Resource that generates links, and it uses any parts
    of request.prepath to do so, the links will not be relative to the
    aliased path, but rather to the aliased-to path. Also, I can't
    alias static.File directory listings that nicely. However, I can
    still be useful, as many resources will play nice.
    """
    sourcePath = sourcePath.split('/')
    aliasPath = aliasPath.split('/')

    def rewriter(request):
        if request.postpath[:len(aliasPath)] == aliasPath:
            after = request.postpath[len(aliasPath):]
            request.postpath = sourcePath + after
            request.path = '/' + '/'.join(request.prepath + request.postpath)

    return rewriter
apache-2.0
-2,813,640,654,637,202,400
-6,165,167,123,983,775,000
34.807692
73
0.661117
false
vgrachev8/youtube-dl
youtube_dl/extractor/radiofrance.py
2
2024
# coding: utf-8
import re

from .common import InfoExtractor


class RadioFranceIE(InfoExtractor):
    _VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
    IE_NAME = u'radiofrance'

    _TEST = {
        u'url': u'http://maison.radiofrance.fr/radiovisions/one-one',
        u'file': u'one-one.ogg',
        u'md5': u'bdbb28ace95ed0e04faab32ba3160daf',
        u'info_dict': {
            u"title": u"One to one",
            u"description": u"Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
            u"uploader": u"Thomas Hercouët",
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, u'title')
        description = self._html_search_regex(
            r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
            webpage, u'description', fatal=False)
        uploader = self._html_search_regex(
            r'<div class="credit">&nbsp;&nbsp;&copy;&nbsp;(.*?)</div>',
            webpage, u'uploader', fatal=False)

        formats_str = self._html_search_regex(
            r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
            webpage, u'audio URLs')
        formats = [
            {
                'format_id': fm[0],
                'url': fm[1],
                'vcodec': 'none',
            }
            for fm in
            re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)
        ]
        # No sorting, we don't know any more about these formats

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
        }
unlicense
1,337,342,886,275,556,900
-7,862,551,182,539,117,000
35.672727
279
0.539911
false
walksun/robotframework-selenium2library
test/unit/locators/test_elementfinder.py
35
14209
import unittest
import os
from Selenium2Library.locators import ElementFinder
from mockito import *


class ElementFinderTests(unittest.TestCase):

    def test_find_with_invalid_prefix(self):
        finder = ElementFinder()
        browser = mock()
        try:
            self.assertRaises(ValueError, finder.find, browser, "something=test1")
        except ValueError as e:
            self.assertEqual(e.message, "Element locator with prefix 'something' is not supported")

    def test_find_with_null_browser(self):
        finder = ElementFinder()
        self.assertRaises(AssertionError, finder.find, None, "id=test1")

    def test_find_with_null_locator(self):
        finder = ElementFinder()
        browser = mock()
        self.assertRaises(AssertionError, finder.find, browser, None)

    def test_find_with_empty_locator(self):
        finder = ElementFinder()
        browser = mock()
        self.assertRaises(AssertionError, finder.find, browser, "")

    def test_find_with_no_tag(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test1")
        verify(browser).find_elements_by_xpath("//*[(@id='test1' or @name='test1')]")

    def test_find_with_tag(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test1", tag='div')
        verify(browser).find_elements_by_xpath("//div[(@id='test1' or @name='test1')]")

    def test_find_with_locator_with_apos(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test '1'")
        verify(browser).find_elements_by_xpath("//*[(@id=\"test '1'\" or @name=\"test '1'\")]")

    def test_find_with_locator_with_quote(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test \"1\"")
        verify(browser).find_elements_by_xpath("//*[(@id='test \"1\"' or @name='test \"1\"')]")

    def test_find_with_locator_with_quote_and_apos(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test \"1\" and '2'")
        verify(browser).find_elements_by_xpath(
            "//*[(@id=concat('test \"1\" and ', \"'\", '2', \"'\", '') or @name=concat('test \"1\" and ', \"'\", '2', \"'\", ''))]")

    def test_find_with_a(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='a')
        verify(browser).find_elements_by_xpath(
            "//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")

    def test_find_with_link_synonym(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='link')
        verify(browser).find_elements_by_xpath(
            "//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")

    def test_find_with_img(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='img')
        verify(browser).find_elements_by_xpath(
            "//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")

    def test_find_with_image_synonym(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='image')
        verify(browser).find_elements_by_xpath(
            "//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")

    def test_find_with_input(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='input')
        verify(browser).find_elements_by_xpath(
            "//input[(@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")

    def test_find_with_radio_button_synonym(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='radio button')
        verify(browser).find_elements_by_xpath(
            "//input[@type='radio' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")

    def test_find_with_checkbox_synonym(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='checkbox')
        verify(browser).find_elements_by_xpath(
            "//input[@type='checkbox' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")

    def test_find_with_file_upload_synonym(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='file upload')
        verify(browser).find_elements_by_xpath(
            "//input[@type='file' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")

    def test_find_with_text_field_synonym(self):
        finder = ElementFinder()
        browser = mock()
        when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
        finder.find(browser, "test1", tag='text field')
        verify(browser).find_elements_by_xpath(
            "//input[@type='text' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")

    def test_find_with_button(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test1", tag='button')
        verify(browser).find_elements_by_xpath(
            "//button[(@id='test1' or @name='test1' or @value='test1' or normalize-space(descendant-or-self::text())='test1')]")

    def test_find_with_select(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test1", tag='select')
        verify(browser).find_elements_by_xpath(
            "//select[(@id='test1' or @name='test1')]")

    def test_find_with_list_synonym(self):
        finder = ElementFinder()
        browser = mock()
        finder.find(browser, "test1", tag='list')
        verify(browser).find_elements_by_xpath(
            "//select[(@id='test1' or @name='test1')]")

    def test_find_with_implicit_xpath(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)

        result = finder.find(browser, "//*[(@test='1')]")
        self.assertEqual(result, elements)
        result = finder.find(browser, "//*[(@test='1')]", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_identifier(self):
        finder = ElementFinder()
        browser = mock()

        id_elements = self._make_mock_elements('div', 'a')
        name_elements = self._make_mock_elements('span', 'a')
        when(browser).find_elements_by_id("test1").thenReturn(
            list(id_elements)).thenReturn(list(id_elements))
        when(browser).find_elements_by_name("test1").thenReturn(
            list(name_elements)).thenReturn(list(name_elements))

        all_elements = list(id_elements)
        all_elements.extend(name_elements)

        result = finder.find(browser, "identifier=test1")
        self.assertEqual(result, all_elements)
        result = finder.find(browser, "identifier=test1", tag='a')
        self.assertEqual(result, [id_elements[1], name_elements[1]])

    def test_find_by_id(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_id("test1").thenReturn(elements)

        result = finder.find(browser, "id=test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, "id=test1", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_name(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_name("test1").thenReturn(elements)

        result = finder.find(browser, "name=test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, "name=test1", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_xpath(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)

        result = finder.find(browser, "xpath=//*[(@test='1')]")
        self.assertEqual(result, elements)
        result = finder.find(browser, "xpath=//*[(@test='1')]", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_dom(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).execute_script("return document.getElementsByTagName('a');").thenReturn(
            [elements[1], elements[3]])

        result = finder.find(browser, "dom=document.getElementsByTagName('a')")
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_link_text(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_link_text("my link").thenReturn(elements)

        result = finder.find(browser, "link=my link")
        self.assertEqual(result, elements)
        result = finder.find(browser, "link=my link", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_css_selector(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_css_selector("#test1").thenReturn(elements)

        result = finder.find(browser, "css=#test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, "css=#test1", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_by_tag_name(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_tag_name("div").thenReturn(elements)

        result = finder.find(browser, "tag=div")
        self.assertEqual(result, elements)
        result = finder.find(browser, "tag=div", tag='a')
        self.assertEqual(result, [elements[1], elements[3]])

    def test_find_with_sloppy_prefix(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_id("test1").thenReturn(elements)

        result = finder.find(browser, "ID=test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, "iD=test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, "id=test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, " id =test1")
        self.assertEqual(result, elements)

    def test_find_with_sloppy_criteria(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'a', 'span', 'a')
        when(browser).find_elements_by_id("test1").thenReturn(elements)

        result = finder.find(browser, "id= test1  ")
        self.assertEqual(result, elements)

    def test_find_by_id_with_synonym_and_constraints(self):
        finder = ElementFinder()
        browser = mock()

        elements = self._make_mock_elements('div', 'input', 'span', 'input',
                                            'a', 'input', 'div', 'input')
        elements[1].set_attribute('type', 'radio')
        elements[3].set_attribute('type', 'checkbox')
        elements[5].set_attribute('type', 'text')
        elements[7].set_attribute('type', 'file')
        when(browser).find_elements_by_id("test1").thenReturn(elements)

        result = finder.find(browser, "id=test1")
        self.assertEqual(result, elements)
        result = finder.find(browser, "id=test1", tag='input')
        self.assertEqual(result, [elements[1], elements[3], elements[5], elements[7]])
        result = finder.find(browser, "id=test1", tag='radio button')
        self.assertEqual(result, [elements[1]])
        result = finder.find(browser, "id=test1", tag='checkbox')
        self.assertEqual(result, [elements[3]])
        result = finder.find(browser, "id=test1", tag='text field')
        self.assertEqual(result, [elements[5]])
        result = finder.find(browser, "id=test1", tag='file upload')
        self.assertEqual(result, [elements[7]])

    def _make_mock_elements(self, *tags):
        elements = []
        for tag in tags:
            element = self._make_mock_element(tag)
            elements.append(element)
        return elements

    def _make_mock_element(self, tag):
        element = mock()
        element.tag_name = tag
        element.attributes = {}

        def set_attribute(name, value):
            element.attributes[name] = value
        element.set_attribute = set_attribute

        def get_attribute(name):
            return element.attributes[name]
        element.get_attribute = get_attribute

        return element
apache-2.0
-5,054,073,404,308,270,000
-7,972,308,768,501,539,000
40.914454
156
0.604124
false
yuyichao/pyscical
pyscical/utils.py
1
1930
# Copyright (C) 2012~2014 by Yichao Yu
# yyc1992@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import numpy as np


def cffi_ptr(obj, _ffi, writable=False, retain=False):
    if isinstance(obj, bytes):
        if writable:
            # bytes is not writable
            raise TypeError('expected an object with a writable '
                            'buffer interface.')
        if retain:
            buf = _ffi.new('char[]', obj)
            return (buf, len(obj), buf)
        return (obj, len(obj), obj)
    elif isinstance(obj, np.ndarray):
        # numpy array
        return (_ffi.cast('void*', obj.__array_interface__['data'][0]),
                obj.nbytes, obj)
    elif isinstance(obj, np.generic):
        if writable or retain:
            raise TypeError('expected an object with a writable '
                            'buffer interface.')
        # numpy scalar
        #
        # * obj.__array_interface__ exists in CPython although requires
        #   holding a reference to the dynamically created
        #   __array_interface__ object
        #
        # * does not exist (yet?) in numpypy.
        s_array = obj[()]
        return (_ffi.cast('void*', s_array.__array_interface__['data'][0]),
                s_array.nbytes, s_array)
    raise TypeError("Only numpy arrays and bytes can be converted")
gpl-3.0
-5,205,479,219,224,619,000
-1,848,436,878,634,194,200
40.06383
75
0.622798
false
potzenheimer/meetshaus
src/meetshaus.sitetheme/meetshaus/sitetheme/tests.py
1
1419
import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc

from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()

import meetshaus.sitetheme


class TestCase(ptc.PloneTestCase):

    class layer(PloneSite):

        @classmethod
        def setUp(cls):
            fiveconfigure.debug_mode = True
            ztc.installPackage(meetshaus.sitetheme)
            fiveconfigure.debug_mode = False

        @classmethod
        def tearDown(cls):
            pass


def test_suite():
    return unittest.TestSuite([

        # Unit tests
        #doctestunit.DocFileSuite(
        #    'README.txt', package='meetshaus.sitetheme',
        #    setUp=testing.setUp, tearDown=testing.tearDown),

        #doctestunit.DocTestSuite(
        #    module='meetshaus.sitetheme.mymodule',
        #    setUp=testing.setUp, tearDown=testing.tearDown),

        # Integration tests that use PloneTestCase
        #ztc.ZopeDocFileSuite(
        #    'README.txt', package='meetshaus.sitetheme',
        #    test_class=TestCase),

        #ztc.FunctionalDocFileSuite(
        #    'browser.txt', package='meetshaus.sitetheme',
        #    test_class=TestCase),

        ])


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
mit
-2,716,257,425,855,948,300
-547,817,883,070,154,750
24.8
61
0.653982
false
yglazner/jy-dev
jy_dev/docopt.py
11
19946
"""Pythonic command-line interface parser that will make you smile. * http://docopt.org * Repository and issue-tracker: https://github.com/docopt/docopt * Licensed under terms of MIT license (see LICENSE-MIT) * Copyright (c) 2013 Vladimir Keleshev, vladimir@keleshev.com """ import sys import re __all__ = ['docopt'] __version__ = '0.6.1' class DocoptLanguageError(Exception): """Error in construction of usage-message by developer.""" class DocoptExit(SystemExit): """Exit in case user invoked program with incorrect arguments.""" usage = '' def __init__(self, message=''): SystemExit.__init__(self, (message + '\n' + self.usage).strip()) class Pattern(object): def __eq__(self, other): return repr(self) == repr(other) def __hash__(self): return hash(repr(self)) def fix(self): self.fix_identities() self.fix_repeating_arguments() return self def fix_identities(self, uniq=None): """Make pattern-tree tips point to same object if they are equal.""" if not hasattr(self, 'children'): return self uniq = list(set(self.flat())) if uniq is None else uniq for i, c in enumerate(self.children): if not hasattr(c, 'children'): assert c in uniq self.children[i] = uniq[uniq.index(c)] else: c.fix_identities(uniq) def fix_repeating_arguments(self): """Fix elements that should accumulate/increment values.""" either = [list(c.children) for c in self.either.children] for case in either: for e in [c for c in case if case.count(c) > 1]: if type(e) is Argument or type(e) is Option and e.argcount: if e.value is None: e.value = [] elif type(e.value) is not list: e.value = e.value.split() if type(e) is Command or type(e) is Option and e.argcount == 0: e.value = 0 return self @property def either(self): """Transform pattern into an equivalent, with only top-level Either.""" # Currently the pattern will not be equivalent, but more "narrow", # although good enough to reason about list arguments. 
ret = [] groups = [[self]] while groups: children = groups.pop(0) types = [type(c) for c in children] if Either in types: either = [c for c in children if type(c) is Either][0] children.pop(children.index(either)) for c in either.children: groups.append([c] + children) elif Required in types: required = [c for c in children if type(c) is Required][0] children.pop(children.index(required)) groups.append(list(required.children) + children) elif Optional in types: optional = [c for c in children if type(c) is Optional][0] children.pop(children.index(optional)) groups.append(list(optional.children) + children) elif AnyOptions in types: optional = [c for c in children if type(c) is AnyOptions][0] children.pop(children.index(optional)) groups.append(list(optional.children) + children) elif OneOrMore in types: oneormore = [c for c in children if type(c) is OneOrMore][0] children.pop(children.index(oneormore)) groups.append(list(oneormore.children) * 2 + children) else: ret.append(children) return Either(*[Required(*e) for e in ret]) class ChildPattern(Pattern): def __init__(self, name, value=None): self.name = name self.value = value def __repr__(self): return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value) def flat(self, *types): return [self] if not types or type(self) in types else [] def match(self, left, collected=None): collected = [] if collected is None else collected pos, match = self.single_match(left) if match is None: return False, left, collected left_ = left[:pos] + left[pos + 1:] same_name = [a for a in collected if a.name == self.name] if type(self.value) in (int, list): if type(self.value) is int: increment = 1 else: increment = ([match.value] if type(match.value) is str else match.value) if not same_name: match.value = increment return True, left_, collected + [match] same_name[0].value += increment return True, left_, collected return True, left_, collected + [match] class ParentPattern(Pattern): def __init__(self, *children): self.children = list(children) def __repr__(self): return '%s(%s)' % (self.__class__.__name__, ', '.join(repr(a) for a in self.children)) def flat(self, *types): if type(self) in types: return [self] return sum([c.flat(*types) for c in self.children], []) class Argument(ChildPattern): def single_match(self, left): for n, p in enumerate(left): if type(p) is Argument: return n, Argument(self.name, p.value) return None, None @classmethod def parse(class_, source): name = re.findall('(<\S*?>)', source)[0] value = re.findall('\[default: (.*)\]', source, flags=re.I) return class_(name, value[0] if value else None) class Command(Argument): def __init__(self, name, value=False): self.name = name self.value = value def single_match(self, left): for n, p in enumerate(left): if type(p) is Argument: if p.value == self.name: return n, Command(self.name, True) else: break return None, None class Option(ChildPattern): def __init__(self, short=None, long=None, argcount=0, value=False): assert argcount in (0, 1) self.short, self.long = short, long self.argcount, self.value = argcount, value self.value = None if value is False and argcount else value @classmethod def parse(class_, option_description): short, long, argcount, value = None, None, 0, False options, _, description = option_description.strip().partition(' ') options = options.replace(',', ' ').replace('=', ' ') for s in options.split(): if s.startswith('--'): long = s elif s.startswith('-'): short = s else: argcount = 1 if argcount: matched = re.findall('\[default: (.*)\]', description, 
flags=re.I) value = matched[0] if matched else None return class_(short, long, argcount, value) def single_match(self, left): for n, p in enumerate(left): if self.name == p.name: return n, p return None, None @property def name(self): return self.long or self.short def __repr__(self): return 'Option(%r, %r, %r, %r)' % (self.short, self.long, self.argcount, self.value) class Required(ParentPattern): def match(self, left, collected=None): collected = [] if collected is None else collected l = left c = collected for p in self.children: matched, l, c = p.match(l, c) if not matched: return False, left, collected return True, l, c class Optional(ParentPattern): def match(self, left, collected=None): collected = [] if collected is None else collected for p in self.children: m, left, collected = p.match(left, collected) return True, left, collected class AnyOptions(Optional): """Marker/placeholder for [options] shortcut.""" class OneOrMore(ParentPattern): def match(self, left, collected=None): assert len(self.children) == 1 collected = [] if collected is None else collected l = left c = collected l_ = None matched = True times = 0 while matched: # could it be that something didn't match but changed l or c? matched, l, c = self.children[0].match(l, c) times += 1 if matched else 0 if l_ == l: break l_ = l if times >= 1: return True, l, c return False, left, collected class Either(ParentPattern): def match(self, left, collected=None): collected = [] if collected is None else collected outcomes = [] for p in self.children: matched, _, _ = outcome = p.match(left, collected) if matched: outcomes.append(outcome) if outcomes: return min(outcomes, key=lambda outcome: len(outcome[1])) return False, left, collected class TokenStream(list): def __init__(self, source, error): self += source.split() if hasattr(source, 'split') else source self.error = error def move(self): return self.pop(0) if len(self) else None def current(self): return self[0] if len(self) else None def parse_long(tokens, options): """long ::= '--' chars [ ( ' ' | '=' ) chars ] ;""" long, eq, value = tokens.move().partition('=') assert long.startswith('--') value = None if eq == value == '' else value similar = [o for o in options if o.long == long] if tokens.error is DocoptExit and similar == []: # if no exact match similar = [o for o in options if o.long and o.long.startswith(long)] if len(similar) > 1: # might be simply specified ambiguously 2+ times? raise tokens.error('%s is not a unique prefix: %s?' 
% (long, ', '.join(o.long for o in similar))) elif len(similar) < 1: argcount = 1 if eq == '=' else 0 o = Option(None, long, argcount) options.append(o) if tokens.error is DocoptExit: o = Option(None, long, argcount, value if argcount else True) else: o = Option(similar[0].short, similar[0].long, similar[0].argcount, similar[0].value) if o.argcount == 0: if value is not None: raise tokens.error('%s must not have an argument' % o.long) else: if value is None: if tokens.current() is None: raise tokens.error('%s requires argument' % o.long) value = tokens.move() if tokens.error is DocoptExit: o.value = value if value is not None else True return [o] def parse_shorts(tokens, options): """shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;""" token = tokens.move() assert token.startswith('-') and not token.startswith('--') left = token.lstrip('-') parsed = [] while left != '': short, left = '-' + left[0], left[1:] similar = [o for o in options if o.short == short] if len(similar) > 1: raise tokens.error('%s is specified ambiguously %d times' % (short, len(similar))) elif len(similar) < 1: o = Option(short, None, 0) options.append(o) if tokens.error is DocoptExit: o = Option(short, None, 0, True) else: # why copying is necessary here? o = Option(short, similar[0].long, similar[0].argcount, similar[0].value) value = None if o.argcount != 0: if left == '': if tokens.current() is None: raise tokens.error('%s requires argument' % short) value = tokens.move() else: value = left left = '' if tokens.error is DocoptExit: o.value = value if value is not None else True parsed.append(o) return parsed def parse_pattern(source, options): tokens = TokenStream(re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source), DocoptLanguageError) result = parse_expr(tokens, options) if tokens.current() is not None: raise tokens.error('unexpected ending: %r' % ' '.join(tokens)) return Required(*result) def parse_expr(tokens, options): """expr ::= seq ( '|' seq )* ;""" seq = parse_seq(tokens, options) if tokens.current() != '|': return seq result = [Required(*seq)] if len(seq) > 1 else seq while tokens.current() == '|': tokens.move() seq = parse_seq(tokens, options) result += [Required(*seq)] if len(seq) > 1 else seq return [Either(*result)] if len(result) > 1 else result def parse_seq(tokens, options): """seq ::= ( atom [ '...' ] )* ;""" result = [] while tokens.current() not in [None, ']', ')', '|']: atom = parse_atom(tokens, options) if tokens.current() == '...': atom = [OneOrMore(*atom)] tokens.move() result += atom return result def parse_atom(tokens, options): """atom ::= '(' expr ')' | '[' expr ']' | 'options' | long | shorts | argument | command ; """ token = tokens.current() result = [] if token in '([': tokens.move() matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token] result = pattern(*parse_expr(tokens, options)) if tokens.move() != matching: raise tokens.error("unmatched '%s'" % token) return [result] elif token == 'options': tokens.move() return [AnyOptions()] elif token.startswith('--') and token != '--': return parse_long(tokens, options) elif token.startswith('-') and token not in ('-', '--'): return parse_shorts(tokens, options) elif token.startswith('<') and token.endswith('>') or token.isupper(): return [Argument(tokens.move())] else: return [Command(tokens.move())] def parse_argv(tokens, options, options_first=False): """Parse command-line argument vector. 
If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; """ parsed = [] while tokens.current() is not None: if tokens.current() == '--': return parsed + [Argument(None, v) for v in tokens] elif tokens.current().startswith('--'): parsed += parse_long(tokens, options) elif tokens.current().startswith('-') and tokens.current() != '-': parsed += parse_shorts(tokens, options) elif options_first: return parsed + [Argument(None, v) for v in tokens] else: parsed.append(Argument(None, tokens.move())) return parsed def parse_defaults(doc): # in python < 2.7 you can't pass flags=re.MULTILINE split = re.split('\n *(<\S+?>|-\S+?)', doc)[1:] split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])] options = [Option.parse(s) for s in split if s.startswith('-')] #arguments = [Argument.parse(s) for s in split if s.startswith('<')] #return options, arguments return options def printable_usage(doc): # in python < 2.7 you can't pass flags=re.IGNORECASE usage_split = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc) if len(usage_split) < 3: raise DocoptLanguageError('"usage:" (case-insensitive) not found.') if len(usage_split) > 3: raise DocoptLanguageError('More than one "usage:" (case-insensitive).') return re.split(r'\n\s*\n', ''.join(usage_split[1:]))[0].strip() def formal_usage(printable_usage): pu = printable_usage.split()[1:] # split and drop "usage:" return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )' def extras(help, version, options, doc): if help and any((o.name in ('-h', '--help')) and o.value for o in options): print(doc.strip("\n")) sys.exit() if version and any(o.name == '--version' and o.value for o in options): print(version) sys.exit() class Dict(dict): def __repr__(self): return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items())) def docopt(doc, argv=None, help=True, version=None, options_first=False): """Parse `argv` based on command-line interface described in `doc`. `docopt` creates your command-line interface based on its description that you pass as `doc`. Such description can contain --options, <positional-argument>, commands, which could be [optional], (required), (mutually | exclusive) or repeated... Parameters ---------- doc : str Description of your command-line interface. argv : list of str, optional Argument vector to be parsed. sys.argv[1:] is used if not provided. help : bool (default: True) Set to False to disable automatic help on -h or --help options. version : any object If passed, the object will be printed if --version is in `argv`. options_first : bool (default: False) Set to True to require options preceed positional arguments, i.e. to forbid options and positional arguments intermix. Returns ------- args : dict A dictionary, where keys are names of command-line elements such as e.g. "--verbose" and "<path>", and values are the parsed values of those elements. Example ------- >>> from docopt import docopt >>> doc = ''' Usage: my_program tcp <host> <port> [--timeout=<seconds>] my_program serial <port> [--baud=<n>] [--timeout=<seconds>] my_program (-h | --help | --version) Options: -h, --help Show this screen and exit. 
--baud=<n> Baudrate [default: 9600] ''' >>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30'] >>> docopt(doc, argv) {'--baud': '9600', '--help': False, '--timeout': '30', '--version': False, '<host>': '127.0.0.1', '<port>': '80', 'serial': False, 'tcp': True} See also -------- * For video introduction see http://docopt.org * Full documentation is available in README.rst as well as online at https://github.com/docopt/docopt#readme """ if argv is None: argv = sys.argv[1:] DocoptExit.usage = printable_usage(doc) options = parse_defaults(doc) pattern = parse_pattern(formal_usage(DocoptExit.usage), options) # [default] syntax for argument is disabled #for a in pattern.flat(Argument): # same_name = [d for d in arguments if d.name == a.name] # if same_name: # a.value = same_name[0].value argv = parse_argv(TokenStream(argv, DocoptExit), list(options), options_first) pattern_options = set(pattern.flat(Option)) for ao in pattern.flat(AnyOptions): doc_options = parse_defaults(doc) ao.children = list(set(doc_options) - pattern_options) #if any_options: # ao.children += [Option(o.short, o.long, o.argcount) # for o in argv if type(o) is Option] extras(help, version, argv, doc) matched, left, collected = pattern.fix().match(argv) if matched and left == []: # better error message if left? return Dict((a.name, a.value) for a in (pattern.flat() + collected)) raise DocoptExit()
mit
8,182,519,606,230,127,000
8,831,182,021,706,009,000
33.44905
79
0.553194
false
mikedh/trimesh
examples/voxel.py
2
4350
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import numpy as np
import inspect

import trimesh
from trimesh.exchange.binvox import voxelize_mesh
from trimesh import voxel as v


dir_current = os.path.dirname(
    os.path.abspath(
        inspect.getfile(
            inspect.currentframe())))
# the absolute path for our reference models
dir_models = os.path.abspath(
    os.path.join(dir_current, '..', 'models'))


def show(chair_mesh, chair_voxels, colors=(1, 1, 1, 0.3)):
    scene = chair_mesh.scene()
    scene.add_geometry(chair_voxels.as_boxes(colors=colors))
    scene.show()


if __name__ == '__main__':
    base_name = 'chair_model'
    chair_mesh = trimesh.load(os.path.join(dir_models, '%s.obj' % base_name))
    if isinstance(chair_mesh, trimesh.scene.Scene):
        chair_mesh = trimesh.util.concatenate([
            trimesh.Trimesh(mesh.vertices, mesh.faces)
            for mesh in chair_mesh.geometry.values()])

    binvox_path = os.path.join(dir_models, '%s.binvox' % base_name)
    chair_voxels = trimesh.load(binvox_path)
    chair_voxels = v.VoxelGrid(chair_voxels.encoding.dense,
                               chair_voxels.transform)

    print('white: voxelized chair (binvox, exact)')
    show(chair_mesh, voxelize_mesh(chair_mesh, exact=True),
         colors=(1, 1, 1, 0.3))

    print('red: binvox-loaded chair')
    show(chair_mesh, chair_voxels, colors=(1, 0, 0, 0.3))

    voxelized_chair_mesh = chair_mesh.voxelized(np.max(chair_mesh.extents) / 32)
    print('green: voxelized chair (default).')
    show(chair_mesh, voxelized_chair_mesh, colors=(0, 1, 0, 0.3))

    shape = (50, 17, 63)
    revox = chair_voxels.revoxelized(shape)
    print('cyan: revoxelized.')
    show(chair_mesh, revox, colors=(0, 1, 1, 0.3))

    values = chair_voxels.encoding.dense.copy()
    values[:values.shape[0] // 2] = 0
    stripped = v.VoxelGrid(values, chair_voxels.transform.copy()).strip()
    print('yellow: stripped halved voxel grid. Transform is updated appropriately')
    show(chair_mesh, stripped, colors=(1, 1, 0, 0.3))

    transform = np.eye(4)
    transform[:3] += np.random.normal(size=(3, 4)) * 0.2
    transformed_chair_mesh = chair_mesh.copy().apply_transform(transform)
    print('original transform volume: %s' % str(chair_voxels.element_volume))
    chair_voxels.apply_transform(transform)
    print('warped transform volume: %s' % str(chair_voxels.element_volume))
    print('blue: transformed voxels. Transformation is lazy, and each voxel is '
          'no longer a cube.')
    show(transformed_chair_mesh, chair_voxels, colors=(0, 0, 1, 0.3))

    voxelized = chair_mesh.voxelized(pitch=0.02, method='subdivide').fill()
    print('green: subdivided')
    show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))

    voxelized = chair_mesh.voxelized(pitch=0.02, method='ray')
    print('red: ray. Poor performance on thin structures')
    show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))

    voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox')
    print('red: binvox (default). Poor performance on thin structures')
    show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))

    voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', wireframe=True)
    print('green: binvox (wireframe). Still doesn\'t capture all thin structures')
    show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))

    voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True)
    print('blue: binvox (exact). Does a good job')
    show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3))

    voxelized = chair_mesh.voxelized(
        pitch=0.02, method='binvox', exact=True,
        downsample_factor=2, downsample_threshold=1)
    print('red: binvox (exact downsampled) surface')
    show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))

    chair_voxels = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True)

    voxelized = chair_voxels.copy().fill(method='base')
    print('blue: binvox (exact) filled (base). Gets a bit overly excited')
    show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3))

    voxelized = chair_voxels.copy().fill(method='orthographic')
    print('green: binvox (exact) filled (orthographic). '
          'Doesn\'t do much as should be expected')
    show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
mit
3,157,090,749,167,827,000
1,561,008,479,858,500,000
37.495575
83
0.663678
false
zyga/ubuntu-make
tests/large/test_ide.py
6
18449
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
#  Didier Roche
#  Tin Tvrtković
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Tests for the IDE category"""
import logging
import platform
import subprocess
import os
import pexpect

from tests.large import LargeFrameworkTests
from tests.tools import UMAKE

logger = logging.getLogger(__name__)


class EclipseIDETests(LargeFrameworkTests):
    """The Eclipse distribution from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/eclipse")
        self.desktop_filename = "eclipse.desktop"

    @property
    def arch_option(self):
        """we return the expected arch call on command line"""
        return platform.machine()

    def test_default_eclipse_ide_install(self):
        """Install eclipse from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide eclipse'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        # on 64 bits, there is a java subprocess, we kill that one with SIGKILL (eclipse isn't reliable on SIGTERM)
        if self.arch_option == "x86_64":
            self.check_and_kill_process(["java", self.arch_option, self.installed_path],
                                        wait_before=self.TIMEOUT_START, send_sigkill=True)
        else:
            self.check_and_kill_process([self.exec_path],
                                        wait_before=self.TIMEOUT_START, send_sigkill=True)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide eclipse'.format(UMAKE)))
        self.expect_and_no_warn("Eclipse is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class IdeaIDETests(LargeFrameworkTests):
    """IntelliJ Idea from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/idea")
        self.desktop_filename = 'jetbrains-idea.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide idea'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide idea'.format(UMAKE)))
        self.expect_and_no_warn("Idea is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class IdeaUltimateIDETests(LargeFrameworkTests):
    """IntelliJ Idea Ultimate from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/idea-ultimate")
        self.desktop_filename = 'jetbrains-idea.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide idea-ultimate'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide idea-ultimate'.format(UMAKE)))
        self.expect_and_no_warn("Idea Ultimate is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class PyCharmIDETests(LargeFrameworkTests):
    """PyCharm from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/pycharm")
        self.desktop_filename = 'jetbrains-pycharm.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide pycharm'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide pycharm'.format(UMAKE)))
        self.expect_and_no_warn("PyCharm is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class PyCharmEducationalIDETests(LargeFrameworkTests):
    """PyCharm Educational from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/pycharm-educational")
        self.desktop_filename = 'jetbrains-pycharm.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide pycharm-educational'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide pycharm-educational'.format(UMAKE)))
        self.expect_and_no_warn("PyCharm Educational is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class PyCharmProfessionalIDETests(LargeFrameworkTests):
    """PyCharm Professional from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/pycharm-professional")
        self.desktop_filename = 'jetbrains-pycharm.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide pycharm-professional'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide pycharm-professional'.format(UMAKE)))
        self.expect_and_no_warn("PyCharm Professional is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class RubyMineIDETests(LargeFrameworkTests):
    """RubyMine from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/rubymine")
        self.desktop_filename = 'jetbrains-rubymine.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide rubymine'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide rubymine'.format(UMAKE)))
        self.expect_and_no_warn("RubyMine is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class WebStormIDETests(LargeFrameworkTests):
    """WebStorm from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/webstorm")
        self.desktop_filename = 'jetbrains-webstorm.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide webstorm'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide webstorm'.format(UMAKE)))
        self.expect_and_no_warn("WebStorm is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class PhpStormIDETests(LargeFrameworkTests):
    """PhpStorm from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/phpstorm")
        self.desktop_filename = 'jetbrains-phpstorm.desktop'

    def test_default_install(self):
        """Install from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide phpstorm'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()
        logger.info("Installed, running...")

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", self.installed_path],
                                    wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide phpstorm'.format(UMAKE)))
        self.expect_and_no_warn("PhpStorm is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()


class ArduinoIDETests(LargeFrameworkTests):
    """The Arduino Software distribution from the IDE collection."""

    TIMEOUT_INSTALL_PROGRESS = 120
    TIMEOUT_START = 60
    TIMEOUT_STOP = 60

    def setUp(self):
        super().setUp()
        self.installed_path = os.path.expanduser("~/tools/ide/arduino")
        self.desktop_filename = "arduino.desktop"

    @property
    def arch_option(self):
        """we return the expected arch call on command line"""
        return platform.machine()

    def test_default_install(self):
        """Install the distribution from scratch test case"""
        self.child = pexpect.spawnu(self.command('{} ide arduino'.format(UMAKE)))
        self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
        self.child.sendline("")
        self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
        self.wait_and_no_warn()

        # we have an installed launcher, added to the launcher and an icon file
        self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assert_exec_exists()
        self.assert_icon_exists()
        self.assertTrue(self.is_in_group("dialout"))

        # launch it, send SIGTERM and check that it exits fine
        proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        self.check_and_kill_process(["java", "processing.app.Base"], wait_before=self.TIMEOUT_START)
        proc.wait(self.TIMEOUT_STOP)

        # ensure that it's detected as installed:
        self.child = pexpect.spawnu(self.command('{} ide arduino'.format(UMAKE)))
        self.expect_and_no_warn("Arduino is already installed.*\[.*\] ")
        self.child.sendline()
        self.wait_and_no_warn()
gpl-3.0
-102,023,082,346,891,600
3,056,912,784,696,880,600
39.814159
115
0.655247
false
cloudnautique/cloud-cattle
code/agent/src/agents/pyagent/cattle/plugins/core/event_handlers.py
11
2000
import os
import subprocess

from cattle import utils
from cattle import Config
from cattle.type_manager import types
from cattle.progress import Progress


def _should_handle(handler, event):
    name = event.name.split(';', 1)[0]
    if name not in handler.events() or event.replyTo is None:
        return False
    return True


class PingHandler:
    def __init__(self):
        pass

    def events(self):
        return ['ping']

    def execute(self, event):
        if not _should_handle(self, event):
            return

        resp = utils.reply(event)
        if Config.do_ping():
            for type in types():
                if hasattr(type, 'on_ping'):
                    type.on_ping(event, resp)

        return resp


class ConfigUpdateHandler:
    def __init__(self):
        pass

    def events(self):
        return ['config.update']

    def execute(self, event):
        if not _should_handle(self, event):
            return

        if len(event.data.items) == 0:
            return utils.reply(event)

        item_names = []

        for item in event.data.items:
            # For development, don't let the server kill your agent
            if item.name != 'pyagent' or Config.config_update_pyagent():
                item_names.append(item.name)

        home = Config.home()
        env = dict(os.environ)
        env['CATTLE_ACCESS_KEY'] = Config.access_key()
        env['CATTLE_SECRET_KEY'] = Config.secret_key()
        env['CATTLE_CONFIG_URL'] = Config.config_url()
        env['CATTLE_HOME'] = home

        args = [Config.config_sh()] + item_names

        try:
            output = utils.get_command_output(args, cwd=home, env=env)

            return utils.reply(event, {
                'exitCode': 0,
                'output': output
            })
        except subprocess.CalledProcessError as e:
            Progress(event).update('Update Failed', data={
                'exitCode': e.returncode,
                'output': e.output
            })
apache-2.0
3,698,947,250,957,020,000
1,327,422,006,872,625,400
24.641026
72
0.5565
false
capchu/TextRPGOnline
rpgonline/env/lib/python2.7/site-packages/pip/vendor/html5lib/treebuilders/dom.py
249
11328
from __future__ import absolute_import, division, unicode_literals


from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE
import weakref

from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory


def getDomBuilder(DomImplementation):
    Dom = DomImplementation

    class AttrList(object):
        def __init__(self, element):
            self.element = element

        def __iter__(self):
            return list(self.element.attributes.items()).__iter__()

        def __setitem__(self, name, value):
            self.element.setAttribute(name, value)

        def __len__(self):
            return len(list(self.element.attributes.items()))

        def items(self):
            return [(item[0], item[1]) for item in
                    list(self.element.attributes.items())]

        def keys(self):
            return list(self.element.attributes.keys())

        def __getitem__(self, name):
            return self.element.getAttribute(name)

        def __contains__(self, name):
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                return self.element.hasAttribute(name)

    class NodeBuilder(_base.Node):
        def __init__(self, element):
            _base.Node.__init__(self, element.nodeName)
            self.element = element

        namespace = property(lambda self: hasattr(self.element, "namespaceURI")
                             and self.element.namespaceURI or None)

        def appendChild(self, node):
            node.parent = self
            self.element.appendChild(node.element)

        def insertText(self, data, insertBefore=None):
            text = self.element.ownerDocument.createTextNode(data)
            if insertBefore:
                self.element.insertBefore(text, insertBefore.element)
            else:
                self.element.appendChild(text)

        def insertBefore(self, node, refNode):
            self.element.insertBefore(node.element, refNode.element)
            node.parent = self

        def removeChild(self, node):
            if node.element.parentNode == self.element:
                self.element.removeChild(node.element)
            node.parent = None

        def reparentChildren(self, newParent):
            while self.element.hasChildNodes():
                child = self.element.firstChild
                self.element.removeChild(child)
                newParent.element.appendChild(child)
            self.childNodes = []

        def getAttributes(self):
            return AttrList(self.element)

        def setAttributes(self, attributes):
            if attributes:
                for name, value in list(attributes.items()):
                    if isinstance(name, tuple):
                        if name[0] is not None:
                            qualifiedName = (name[0] + ":" + name[1])
                        else:
                            qualifiedName = name[1]
                        self.element.setAttributeNS(name[2], qualifiedName,
                                                    value)
                    else:
                        self.element.setAttribute(
                            name, value)
        attributes = property(getAttributes, setAttributes)

        def cloneNode(self):
            return NodeBuilder(self.element.cloneNode(False))

        def hasContent(self):
            return self.element.hasChildNodes()

        def getNameTuple(self):
            if self.namespace is None:
                return namespaces["html"], self.name
            else:
                return self.namespace, self.name

        nameTuple = property(getNameTuple)

    class TreeBuilder(_base.TreeBuilder):
        def documentClass(self):
            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
            return weakref.proxy(self)

        def insertDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]

            domimpl = Dom.getDOMImplementation()
            doctype = domimpl.createDocumentType(name, publicId, systemId)
            self.document.appendChild(NodeBuilder(doctype))
            if Dom == minidom:
                doctype.ownerDocument = self.dom

        def elementClass(self, name, namespace=None):
            if namespace is None and self.defaultNamespace is None:
                node = self.dom.createElement(name)
            else:
                node = self.dom.createElementNS(namespace, name)

            return NodeBuilder(node)

        def commentClass(self, data):
            return NodeBuilder(self.dom.createComment(data))

        def fragmentClass(self):
            return NodeBuilder(self.dom.createDocumentFragment())

        def appendChild(self, node):
            self.dom.appendChild(node.element)

        def testSerializer(self, element):
            return testSerializer(element)

        def getDocument(self):
            return self.dom

        def getFragment(self):
            return _base.TreeBuilder.getFragment(self).element

        def insertText(self, data, parent=None):
            data = data
            if parent != self:
                _base.TreeBuilder.insertText(self, data, parent)
            else:
                # HACK: allow text nodes as children of the document node
                if hasattr(self.dom, '_child_node_types'):
                    if not Node.TEXT_NODE in self.dom._child_node_types:
                        self.dom._child_node_types = list(self.dom._child_node_types)
                        self.dom._child_node_types.append(Node.TEXT_NODE)
                self.dom.appendChild(self.dom.createTextNode(data))

        implementation = DomImplementation
        name = None

    def testSerializer(element):
        element.normalize()
        rv = []

        def serializeElement(element, indent=0):
            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
                if element.name:
                    if element.publicId or element.systemId:
                        publicId = element.publicId or ""
                        systemId = element.systemId or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, element.name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif element.nodeType == Node.DOCUMENT_NODE:
                rv.append("#document")
            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
                rv.append("#document-fragment")
            elif element.nodeType == Node.COMMENT_NODE:
                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
            elif element.nodeType == Node.TEXT_NODE:
                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
            else:
                if (hasattr(element, "namespaceURI") and
                        element.namespaceURI is not None):
                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                      element.nodeName)
                else:
                    name = element.nodeName
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.hasAttributes():
                    attributes = []
                    for i in range(len(element.attributes)):
                        attr = element.attributes.item(i)
                        name = attr.nodeName
                        value = attr.value
                        ns = attr.namespaceURI
                        if ns:
                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
                        else:
                            name = attr.nodeName
                        attributes.append((name, value))
                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
            indent += 2
            for child in element.childNodes:
                serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)

    def dom2sax(node, handler, nsmap={'xml': XML_NAMESPACE}):
        if node.nodeType == Node.ELEMENT_NODE:
            if not nsmap:
                handler.startElement(node.nodeName, node.attributes)
                for child in node.childNodes:
                    dom2sax(child, handler, nsmap)
                handler.endElement(node.nodeName)
            else:
                attributes = dict(node.attributes.itemsNS())

                # gather namespace declarations
                prefixes = []
                for attrname in list(node.attributes.keys()):
                    attr = node.getAttributeNode(attrname)
                    if (attr.namespaceURI == XMLNS_NAMESPACE or
                        (attr.namespaceURI is None and
                         attr.nodeName.startswith('xmlns'))):
                        prefix = (attr.nodeName != 'xmlns' and
                                  attr.nodeName or None)
                        handler.startPrefixMapping(prefix, attr.nodeValue)
                        prefixes.append(prefix)
                        nsmap = nsmap.copy()
                        nsmap[prefix] = attr.nodeValue
                        del attributes[(attr.namespaceURI, attr.nodeName)]

                # apply namespace declarations
                for attrname in list(node.attributes.keys()):
                    attr = node.getAttributeNode(attrname)
                    if attr.namespaceURI is None and ':' in attr.nodeName:
                        prefix = attr.nodeName.split(':')[0]
                        if prefix in nsmap:
                            del attributes[(attr.namespaceURI, attr.nodeName)]
                            attributes[(nsmap[prefix], attr.nodeName)] = attr.nodeValue

                # SAX events
                ns = node.namespaceURI or nsmap.get(None, None)
                handler.startElementNS((ns, node.nodeName), node.nodeName, attributes)
                for child in node.childNodes:
                    dom2sax(child, handler, nsmap)
                handler.endElementNS((ns, node.nodeName), node.nodeName)
                for prefix in prefixes:
                    handler.endPrefixMapping(prefix)

        elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
            handler.characters(node.nodeValue)

        elif node.nodeType == Node.DOCUMENT_NODE:
            handler.startDocument()
            for child in node.childNodes:
                dom2sax(child, handler, nsmap)
            handler.endDocument()

        elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
            for child in node.childNodes:
                dom2sax(child, handler, nsmap)

        else:
            # ATTRIBUTE_NODE
            # ENTITY_NODE
            # PROCESSING_INSTRUCTION_NODE
            # COMMENT_NODE
            # DOCUMENT_TYPE_NODE
            # NOTATION_NODE
            pass

    return locals()


# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
gpl-3.0
4,689,508,213,571,617,000
8,652,364,982,675,161,000
38.062069
90
0.532309
false
mwstobo/marshmallow
tests/test_exceptions.py
7
2753
# -*- coding: utf-8 -*-
import pytest

from marshmallow.exceptions import ValidationError, MarshallingError, UnmarshallingError
from marshmallow import fields, Schema


class TestValidationError:

    def test_stores_message_in_list(self):
        err = ValidationError('foo')
        assert err.messages == ['foo']

    def test_can_pass_list_of_messages(self):
        err = ValidationError(['foo', 'bar'])
        assert err.messages == ['foo', 'bar']

    def test_stores_dictionaries(self):
        messages = {'user': {'email': ['email is invalid']}}
        err = ValidationError(messages)
        assert err.messages == messages

    def test_can_store_field_names(self):
        err = ValidationError('invalid email', field_names='email')
        assert err.field_names == ['email']
        err = ValidationError('invalid email', field_names=['email'])
        assert err.field_names == ['email']

    def test_str(self):
        err = ValidationError('invalid email')
        assert str(err) == 'invalid email'

        err2 = ValidationError('invalid email', 'email')
        assert str(err2) == 'invalid email'


class TestMarshallingError:

    def test_deprecated(self):
        pytest.deprecated_call(MarshallingError, 'foo')

    def test_can_store_field_and_field_name(self):
        field_name = 'foo'
        field = fields.Str()
        err = MarshallingError('something went wrong', fields=[field],
                               field_names=[field_name])
        assert err.fields == [field]
        assert err.field_names == [field_name]

    def test_can_be_raised_by_custom_field(self):
        class MyField(fields.Field):
            def _serialize(self, val, attr, obj):
                raise MarshallingError('oops')

        class MySchema(Schema):
            foo = MyField()

        s = MySchema()
        result = s.dump({'foo': 42})
        assert 'foo' in result.errors
        assert result.errors['foo'] == ['oops']


class TestUnmarshallingError:

    def test_deprecated(self):
        pytest.deprecated_call(UnmarshallingError, 'foo')

    def test_can_store_field_and_field_name(self):
        field_name = 'foo'
        field = fields.Str()
        err = UnmarshallingError('something went wrong', fields=[field],
                                 field_names=[field_name])
        assert err.fields == [field]
        assert err.field_names == [field_name]

    def test_can_be_raised_by_validator(self):
        def validator(val):
            raise UnmarshallingError('oops')

        class MySchema(Schema):
            foo = fields.Field(validate=[validator])

        s = MySchema()
        result = s.load({'foo': 42})
        assert 'foo' in result.errors
        assert result.errors['foo'] == ['oops']
mit
-5,019,912,320,156,841,000
158,914,714,628,558,270
31.011628
88
0.601162
false
juanka1331/VAN-applied-to-Nifti-images
final_scripts/tests_over_3dmask_generator.py
1
1589
import sys
import os

from lib.data_loader import utils_mask3d

sys.path.append(os.path.dirname(os.getcwd()))
from lib.utils import output_utils
from lib.data_loader import mri_atlas
from lib.data_loader import pet_atlas
from lib.data_loader import PET_stack_NORAD
from lib.data_loader import MRI_stack_NORAD
from lib.utils.os_aux import create_directories
import settings

region = 75
#images = "MRI"
images = "PET"

path_folder3D = os.path.join(settings.path_to_project, "folder3D")
path_folder_masks3d = os.path.join(path_folder3D, "masks3D")
path_mask = os.path.join(
    path_folder_masks3d, "{1}_region:{0}".format(region, images))
create_directories([path_folder3D, path_folder_masks3d])

atlas = None
reshape_kind = None
colour_kind = None
stack_dict = None

if images == "MRI":
    stack_dict = MRI_stack_NORAD.get_gm_stack()
    reshape_kind = "A"
    colour_kind = "Greys"
    atlas = mri_atlas.load_atlas_mri()
elif images == "PET":
    stack_dict = PET_stack_NORAD.get_full_stack()
    reshape_kind = "F"
    colour_kind = "jet"
    # Caveat: this branch never assigns `atlas` (the imported `pet_atlas`
    # module is unused here), so `atlas[region]` below fails for PET input.

total_size = stack_dict['total_size']
imgsize = stack_dict['imgsize']
voxels_index = stack_dict['voxel_index']

map_region_voxels = atlas[region]
# index refered to nbground voxels
no_bg_region_voxels_index = voxels_index[map_region_voxels]

mask3d = utils_mask3d.generate_region_3dmaskatlas(
    no_bg_region_voxels_index=no_bg_region_voxels_index,
    reshape_kind=reshape_kind,
    imgsize=imgsize,
    totalsize=total_size)

output_utils.from_3d_image_to_nifti_file(
    path_to_save=path_mask,
    image3d=mask3d)
gpl-2.0
6,054,961,113,930,599,000
-8,128,945,527,352,054,000
27.375
69
0.713027
false
boberfly/gaffer
python/GafferUI/Button.py
7
6312
##########################################################################
#
#  Copyright (c) 2011, John Haddon. All rights reserved.
#  Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import six

import Gaffer
import GafferUI

from Qt import QtGui
from Qt import QtWidgets
from Qt import QtCore

class Button( GafferUI.Widget ) :

	__palette = None

	def __init__( self, text="", image=None, hasFrame=True, highlightOnOver=True, **kw ) :

		GafferUI.Widget.__init__( self, QtWidgets.QPushButton(), **kw )

		self.__highlightForHover = False

		self._qtWidget().setAttribute( QtCore.Qt.WA_LayoutUsesWidgetRect )

		# allow return and enter keys to click button
		self._qtWidget().setAutoDefault( True )

		self.setText( text )
		self.setImage( image )
		self.setHasFrame( hasFrame )

		# using a WeakMethod to avoid circular references which would otherwise
		# never be broken.
		self._qtWidget().clicked.connect( Gaffer.WeakMethod( self.__clicked ) )

		self.__clickedSignal = GafferUI.WidgetSignal()

		# buttons appear to totally ignore the etch-disabled-text stylesheet option,
		# and we really don't like the etching. the only effective way of disabling it
		# seems to be to apply this palette which makes the etched text transparent.
		if Button.__palette is None :
			Button.__palette = QtGui.QPalette( QtWidgets.QApplication.instance().palette( self._qtWidget() ) )
			Button.__palette.setColor( QtGui.QPalette.Disabled, QtGui.QPalette.Light, QtGui.QColor( 0, 0, 0, 0 ) )

		self._qtWidget().setPalette( Button.__palette )

		if highlightOnOver :
			self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
			self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )

	def setHighlighted( self, highlighted ) :

		GafferUI.Widget.setHighlighted( self, highlighted )
		self.__updateIcon()

	def setText( self, text ) :

		assert( isinstance( text, six.string_types ) )

		self._qtWidget().setText( text )

	def getText( self ) :

		return self._qtWidget().text()

	def setImage( self, imageOrImageFileName ) :

		assert( isinstance( imageOrImageFileName, ( six.string_types, GafferUI.Image, type( None ) ) ) )

		if isinstance( imageOrImageFileName, six.string_types ) :
			self.__image = GafferUI.Image( imageOrImageFileName )
		else :
			self.__image = imageOrImageFileName

		self.__updateIcon()

	def getImage( self ) :

		return self.__image

	def setHasFrame( self, hasFrame ) :

		self._qtWidget().setProperty( "gafferWithFrame", hasFrame )
		self._qtWidget().setSizePolicy(
			QtWidgets.QSizePolicy.Minimum if hasFrame else QtWidgets.QSizePolicy.Fixed,
			QtWidgets.QSizePolicy.Fixed
		)
		self._repolish()

	def getHasFrame( self ) :

		return self._qtWidget().property( "gafferWithFrame" )

	def setEnabled( self, enabled ) :

		# Once we're disabled, mouse leave events will be skipped, and we'll
		# remain in a highlighted state once re-enabled.
		if not enabled and self.__highlightForHover :
			self.__highlightForHover = False
			self.__updateIcon()

		GafferUI.Widget.setEnabled( self, enabled )

	def clickedSignal( self ) :

		return self.__clickedSignal

	def __clicked( self, *unusedArgs ) :

		# currently PyQt passes a "checked" argument and PySide doesn't

		# workaround problem whereby not all text fields will have committed their contents
		# into plugs when the button is pressed - this occurs particularly in the OpDialogue, and causes
		# the op to run without the values the user sees in the ui. normally editingFinished is emitted by
		# the text widget itself on a loss of focus, but unfortunately clicking on a button doesn't cause that
		# focus loss. so we helpfully emit the signal ourselves here.
		focusWidget = GafferUI.Widget._owner( QtWidgets.QApplication.focusWidget() )
		if focusWidget is not None and hasattr( focusWidget, "editingFinishedSignal" ) :
			focusWidget.editingFinishedSignal()( focusWidget )

		self.clickedSignal()( self )

	def __updateIcon( self ) :

		if self.__image is None :
			self._qtWidget().setIcon( QtGui.QIcon() )
			return

		# Qt's built-in disabled state generation doesn't work well with dark schemes.
		# There is no built-in support for QtGui.QIcon.Active in the default
		# painter, which is why we have to juggle it here.
		icon = self.__image._qtIcon( highlighted = self.getHighlighted() or self.__highlightForHover )

		self._qtWidget().setIcon( icon )
		self._qtWidget().setIconSize( self.__image._qtPixmap().size() )

	def __enter( self, widget ) :

		self.__highlightForHover = True
		self.__updateIcon()

	def __leave( self, widget ) :

		self.__highlightForHover = False
		self.__updateIcon()
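A minimal usage sketch for the class above. Only `Button`, `setText` and `clickedSignal` come from the file itself; the window scaffolding and event-loop call follow common GafferUI idioms but are assumptions about the surrounding application, not part of `Button`:

import GafferUI

with GafferUI.Window( "Button demo" ) as window :
	button = GafferUI.Button( text = "Click me" )

def onClick( b ) :
	b.setText( "Clicked!" )

# scoped = False matches how Button connects its own internal signals.
button.clickedSignal().connect( onClick, scoped = False )

window.setVisible( True )
GafferUI.EventLoop.mainEventLoop().start()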
bsd-3-clause
4,864,726,481,588,132,000
8,574,442,265,392,795,000
34.863636
105
0.709601
false
xxd3vin/spp-sdk
opt/Python27/Lib/site-packages/numpy/ma/core.py
22
226541
""" numpy.ma : a package to handle missing or invalid values. This package was initially written for numarray by Paul F. Dubois at Lawrence Livermore National Laboratory. In 2006, the package was completely rewritten by Pierre Gerard-Marchant (University of Georgia) to make the MaskedArray class a subclass of ndarray, and to improve support of structured arrays. Copyright 1999, 2000, 2001 Regents of the University of California. Released for unlimited redistribution. * Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. * Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant (pgmdevlist_AT_gmail_DOT_com) * Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) .. moduleauthor:: Pierre Gerard-Marchant """ # pylint: disable-msg=E1002 __author__ = "Pierre GF Gerard-Marchant" __docformat__ = "restructuredtext en" __all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray', 'bool_', 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'around', 'array', 'asarray', 'asanyarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide', 'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2', 'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', 'masked_array', 'masked_equal', 'masked_greater', 'masked_greater_equal', 'masked_inside', 'masked_invalid', 'masked_less', 'masked_less_equal', 'masked_not_equal', 'masked_object', 'masked_outside', 'masked_print_option', 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', 'mod', 'multiply', 'mvoid', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', 'right_shift', 'round_', 'round', 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', 'var', 'where', 'zeros'] import cPickle import numpy as np from numpy import ndarray, amax, amin, iscomplexobj, bool_ from numpy import array as narray import numpy.core.umath as umath import numpy.core.numerictypes as ntypes from numpy.compat import getargspec, formatargspec from numpy import expand_dims as n_expand_dims import warnings import sys if sys.version_info[0] >= 3: from functools import reduce MaskType = np.bool_ nomask = MaskType(0) def doc_note(initialdoc, note): """ Adds a Notes section to an existing docstring. 
""" if initialdoc is None: return if note is None: return initialdoc newdoc = """ %s Notes ----- %s """ return newdoc % (initialdoc, note) def get_object_signature(obj): """ Get the signature from obj """ try: sig = formatargspec(*getargspec(obj)) except TypeError, errmsg: sig = '' # msg = "Unable to retrieve the signature of %s '%s'\n"\ # "(Initial error message: %s)" # warnings.warn(msg % (type(obj), # getattr(obj, '__name__', '???'), # errmsg)) return sig #####-------------------------------------------------------------------------- #---- --- Exceptions --- #####-------------------------------------------------------------------------- class MAError(Exception): """Class for masked array related errors.""" pass class MaskError(MAError): "Class for mask related errors." pass #####-------------------------------------------------------------------------- #---- --- Filling options --- #####-------------------------------------------------------------------------- # b: boolean - c: complex - f: floats - i: integer - O: object - S: string default_filler = {'b': True, 'c' : 1.e20 + 0.0j, 'f' : 1.e20, 'i' : 999999, 'O' : '?', 'S' : 'N/A', 'u' : 999999, 'V' : '???', 'U' : 'N/A', } max_filler = ntypes._minvals max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) min_filler = ntypes._maxvals min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) if 'float128' in ntypes.typeDict: max_filler.update([(np.float128, -np.inf)]) min_filler.update([(np.float128, +np.inf)]) def default_fill_value(obj): """ Return the default fill value for the argument object. The default filling value depends on the datatype of the input array or the type of the input scalar: ======== ======== datatype default ======== ======== bool True int 999999 float 1.e20 complex 1.e20+0j object '?' string 'N/A' ======== ======== Parameters ---------- obj : ndarray, dtype or scalar The array data-type or scalar for which the default fill value is returned. Returns ------- fill_value : scalar The default fill value. Examples -------- >>> np.ma.default_fill_value(1) 999999 >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) 1e+20 >>> np.ma.default_fill_value(np.dtype(complex)) (1e+20+0j) """ if hasattr(obj, 'dtype'): defval = _check_fill_value(None, obj.dtype) elif isinstance(obj, np.dtype): if obj.subdtype: defval = default_filler.get(obj.subdtype[0].kind, '?') else: defval = default_filler.get(obj.kind, '?') elif isinstance(obj, float): defval = default_filler['f'] elif isinstance(obj, int) or isinstance(obj, long): defval = default_filler['i'] elif isinstance(obj, str): defval = default_filler['S'] elif isinstance(obj, unicode): defval = default_filler['U'] elif isinstance(obj, complex): defval = default_filler['c'] else: defval = default_filler['O'] return defval def _recursive_extremum_fill_value(ndtype, extremum): names = ndtype.names if names: deflist = [] for name in names: fval = _recursive_extremum_fill_value(ndtype[name], extremum) deflist.append(fval) return tuple(deflist) return extremum[ndtype] def minimum_fill_value(obj): """ Return the maximum value that can be represented by the dtype of an object. This function is useful for calculating a fill value suitable for taking the minimum of an array with a given dtype. Parameters ---------- obj : ndarray or dtype An object that can be queried for it's numeric type. Returns ------- val : scalar The maximum representable value. Raises ------ TypeError If `obj` isn't a suitable numeric type. 
See Also -------- maximum_fill_value : The inverse function. set_fill_value : Set the filling value of a masked array. MaskedArray.fill_value : Return current fill value. Examples -------- >>> import numpy.ma as ma >>> a = np.int8() >>> ma.minimum_fill_value(a) 127 >>> a = np.int32() >>> ma.minimum_fill_value(a) 2147483647 An array of numeric data can also be passed. >>> a = np.array([1, 2, 3], dtype=np.int8) >>> ma.minimum_fill_value(a) 127 >>> a = np.array([1, 2, 3], dtype=np.float32) >>> ma.minimum_fill_value(a) inf """ errmsg = "Unsuitable type for calculating minimum." if hasattr(obj, 'dtype'): return _recursive_extremum_fill_value(obj.dtype, min_filler) elif isinstance(obj, float): return min_filler[ntypes.typeDict['float_']] elif isinstance(obj, int): return min_filler[ntypes.typeDict['int_']] elif isinstance(obj, long): return min_filler[ntypes.typeDict['uint']] elif isinstance(obj, np.dtype): return min_filler[obj] else: raise TypeError(errmsg) def maximum_fill_value(obj): """ Return the minimum value that can be represented by the dtype of an object. This function is useful for calculating a fill value suitable for taking the maximum of an array with a given dtype. Parameters ---------- obj : {ndarray, dtype} An object that can be queried for it's numeric type. Returns ------- val : scalar The minimum representable value. Raises ------ TypeError If `obj` isn't a suitable numeric type. See Also -------- minimum_fill_value : The inverse function. set_fill_value : Set the filling value of a masked array. MaskedArray.fill_value : Return current fill value. Examples -------- >>> import numpy.ma as ma >>> a = np.int8() >>> ma.maximum_fill_value(a) -128 >>> a = np.int32() >>> ma.maximum_fill_value(a) -2147483648 An array of numeric data can also be passed. >>> a = np.array([1, 2, 3], dtype=np.int8) >>> ma.maximum_fill_value(a) -128 >>> a = np.array([1, 2, 3], dtype=np.float32) >>> ma.maximum_fill_value(a) -inf """ errmsg = "Unsuitable type for calculating maximum." if hasattr(obj, 'dtype'): return _recursive_extremum_fill_value(obj.dtype, max_filler) elif isinstance(obj, float): return max_filler[ntypes.typeDict['float_']] elif isinstance(obj, int): return max_filler[ntypes.typeDict['int_']] elif isinstance(obj, long): return max_filler[ntypes.typeDict['uint']] elif isinstance(obj, np.dtype): return max_filler[obj] else: raise TypeError(errmsg) def _recursive_set_default_fill_value(dtypedescr): deflist = [] for currentdescr in dtypedescr: currenttype = currentdescr[1] if isinstance(currenttype, list): deflist.append(tuple(_recursive_set_default_fill_value(currenttype))) else: deflist.append(default_fill_value(np.dtype(currenttype))) return tuple(deflist) def _recursive_set_fill_value(fillvalue, dtypedescr): fillvalue = np.resize(fillvalue, len(dtypedescr)) output_value = [] for (fval, descr) in zip(fillvalue, dtypedescr): cdtype = descr[1] if isinstance(cdtype, list): output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) else: output_value.append(np.array(fval, dtype=cdtype).item()) return tuple(output_value) def _check_fill_value(fill_value, ndtype): """ Private function validating the given `fill_value` for the given dtype. If fill_value is None, it is set to the default corresponding to the dtype if this latter is standard (no fields). If the datatype is flexible (named fields), fill_value is set to a tuple whose elements are the default fill values corresponding to each field. If fill_value is not None, its value is forced to the given dtype. 
""" ndtype = np.dtype(ndtype) fields = ndtype.fields if fill_value is None: if fields: descr = ndtype.descr fill_value = np.array(_recursive_set_default_fill_value(descr), dtype=ndtype,) else: fill_value = default_fill_value(ndtype) elif fields: fdtype = [(_[0], _[1]) for _ in ndtype.descr] if isinstance(fill_value, (ndarray, np.void)): try: fill_value = np.array(fill_value, copy=False, dtype=fdtype) except ValueError: err_msg = "Unable to transform %s to dtype %s" raise ValueError(err_msg % (fill_value, fdtype)) else: descr = ndtype.descr fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, descr), dtype=ndtype) else: if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'): fill_value = default_fill_value(ndtype) else: # In case we want to convert 1e+20 to int... try: fill_value = np.array(fill_value, copy=False, dtype=ndtype)#.item() except OverflowError: fill_value = default_fill_value(ndtype) return np.array(fill_value) def set_fill_value(a, fill_value): """ Set the filling value of a, if a is a masked array. This function changes the fill value of the masked array `a` in place. If `a` is not a masked array, the function returns silently, without doing anything. Parameters ---------- a : array_like Input array. fill_value : dtype Filling value. A consistency test is performed to make sure the value is compatible with the dtype of `a`. Returns ------- None Nothing returned by this function. See Also -------- maximum_fill_value : Return the default fill value for a dtype. MaskedArray.fill_value : Return current fill value. MaskedArray.set_fill_value : Equivalent method. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> a = ma.masked_where(a < 3, a) >>> a masked_array(data = [-- -- -- 3 4], mask = [ True True True False False], fill_value=999999) >>> ma.set_fill_value(a, -999) >>> a masked_array(data = [-- -- -- 3 4], mask = [ True True True False False], fill_value=-999) Nothing happens if `a` is not a masked array. >>> a = range(5) >>> a [0, 1, 2, 3, 4] >>> ma.set_fill_value(a, 100) >>> a [0, 1, 2, 3, 4] >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> ma.set_fill_value(a, 100) >>> a array([0, 1, 2, 3, 4]) """ if isinstance(a, MaskedArray): a.set_fill_value(fill_value) return def get_fill_value(a): """ Return the filling value of a, if any. Otherwise, returns the default filling value for that type. """ if isinstance(a, MaskedArray): result = a.fill_value else: result = default_fill_value(a) return result def common_fill_value(a, b): """ Return the common filling value of two masked arrays, if any. If ``a.fill_value == b.fill_value``, return the fill value, otherwise return None. Parameters ---------- a, b : MaskedArray The masked arrays for which to compare fill values. Returns ------- fill_value : scalar or None The common fill value, or None. Examples -------- >>> x = np.ma.array([0, 1.], fill_value=3) >>> y = np.ma.array([0, 1.], fill_value=3) >>> np.ma.common_fill_value(x, y) 3.0 """ t1 = get_fill_value(a) t2 = get_fill_value(b) if t1 == t2: return t1 return None #####-------------------------------------------------------------------------- def filled(a, fill_value=None): """ Return input as an array with masked data replaced by a fill value. If `a` is not a `MaskedArray`, `a` itself is returned. If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to ``a.fill_value``. Parameters ---------- a : MaskedArray or array_like An input object. 
fill_value : scalar, optional Filling value. Default is None. Returns ------- a : ndarray The filled array. See Also -------- compressed Examples -------- >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> x.filled() array([[999999, 1, 2], [999999, 4, 5], [ 6, 7, 8]]) """ if hasattr(a, 'filled'): return a.filled(fill_value) elif isinstance(a, ndarray): # Should we check for contiguity ? and a.flags['CONTIGUOUS']: return a elif isinstance(a, dict): return np.array(a, 'O') else: return np.array(a) #####-------------------------------------------------------------------------- def get_masked_subclass(*arrays): """ Return the youngest subclass of MaskedArray from a list of (masked) arrays. In case of siblings, the first listed takes over. """ if len(arrays) == 1: arr = arrays[0] if isinstance(arr, MaskedArray): rcls = type(arr) else: rcls = MaskedArray else: arrcls = [type(a) for a in arrays] rcls = arrcls[0] if not issubclass(rcls, MaskedArray): rcls = MaskedArray for cls in arrcls[1:]: if issubclass(cls, rcls): rcls = cls # Don't return MaskedConstant as result: revert to MaskedArray if rcls.__name__ == 'MaskedConstant': return MaskedArray return rcls #####-------------------------------------------------------------------------- def getdata(a, subok=True): """ Return the data of a masked array as an ndarray. Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, else return `a` as a ndarray or subclass (depending on `subok`) if not. Parameters ---------- a : array_like Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. subok : bool Whether to force the output to be a `pure` ndarray (False) or to return a subclass of ndarray if appropriate (True, default). See Also -------- getmask : Return the mask of a masked array, or nomask. getmaskarray : Return the mask of a masked array, or full array of False. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array(data = [[1 --] [3 4]], mask = [[False True] [False False]], fill_value=999999) >>> ma.getdata(a) array([[1, 2], [3, 4]]) Equivalently use the ``MaskedArray`` `data` attribute. >>> a.data array([[1, 2], [3, 4]]) """ try: data = a._data except AttributeError: data = np.array(a, copy=False, subok=subok) if not subok: return data.view(ndarray) return data get_data = getdata def fix_invalid(a, mask=nomask, copy=True, fill_value=None): """ Return input with invalid data masked and replaced by a fill value. Invalid data means values of `nan`, `inf`, etc. Parameters ---------- a : array_like Input array, a (subclass of) ndarray. copy : bool, optional Whether to use a copy of `a` (True) or to fix `a` in place (False). Default is True. fill_value : scalar, optional Value used for fixing invalid data. Default is None, in which case the ``a.fill_value`` is used. Returns ------- b : MaskedArray The input array with invalid entries fixed. Notes ----- A copy is performed by default. 
Examples -------- >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data = [-- -1.0 nan inf], mask = [ True False False False], fill_value = 1e+20) >>> np.ma.fix_invalid(x) masked_array(data = [-- -1.0 -- --], mask = [ True False True True], fill_value = 1e+20) >>> fixed = np.ma.fix_invalid(x) >>> fixed.data array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20, 1.00000000e+20]) >>> x.data array([ 1., -1., NaN, Inf]) """ a = masked_array(a, copy=copy, mask=mask, subok=True) #invalid = (numpy.isnan(a._data) | numpy.isinf(a._data)) invalid = np.logical_not(np.isfinite(a._data)) if not invalid.any(): return a a._mask |= invalid if fill_value is None: fill_value = a.fill_value a._data[invalid] = fill_value return a #####-------------------------------------------------------------------------- #---- --- Ufuncs --- #####-------------------------------------------------------------------------- ufunc_domain = {} ufunc_fills = {} class _DomainCheckInterval: """ Define a valid interval, so that : ``domain_check_interval(a,b)(x) == True`` where ``x < a`` or ``x > b``. """ def __init__(self, a, b): "domain_check_interval(a,b)(x) = true where x < a or y > b" if (a > b): (a, b) = (b, a) self.a = a self.b = b def __call__ (self, x): "Execute the call behavior." return umath.logical_or(umath.greater (x, self.b), umath.less(x, self.a)) class _DomainTan: """Define a valid interval for the `tan` function, so that: ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` """ def __init__(self, eps): "domain_tan(eps) = true where abs(cos(x)) < eps)" self.eps = eps def __call__ (self, x): "Executes the call behavior." return umath.less(umath.absolute(umath.cos(x)), self.eps) class _DomainSafeDivide: """Define a domain for safe division.""" def __init__ (self, tolerance=None): self.tolerance = tolerance def __call__ (self, a, b): # Delay the selection of the tolerance to here in order to reduce numpy # import times. The calculation of these parameters is a substantial # component of numpy's import time. if self.tolerance is None: self.tolerance = np.finfo(float).tiny return umath.absolute(a) * self.tolerance >= umath.absolute(b) class _DomainGreater: """DomainGreater(v)(x) is True where x <= v.""" def __init__(self, critical_value): "DomainGreater(v)(x) = true where x <= v" self.critical_value = critical_value def __call__ (self, x): "Executes the call behavior." return umath.less_equal(x, self.critical_value) class _DomainGreaterEqual: """DomainGreaterEqual(v)(x) is True where x < v.""" def __init__(self, critical_value): "DomainGreaterEqual(v)(x) = true where x < v" self.critical_value = critical_value def __call__ (self, x): "Executes the call behavior." return umath.less(x, self.critical_value) #.............................................................................. class _MaskedUnaryOperation: """ Defines masked version of unary operations, where invalid values are pre-masked. Parameters ---------- mufunc : callable The function for which to define a masked version. Made available as ``_MaskedUnaryOperation.f``. fill : scalar, optional Filling value, default is 0. domain : class instance Domain for the function. Should be one of the ``_Domain*`` classes. Default is None. """ def __init__ (self, mufunc, fill=0, domain=None): """ _MaskedUnaryOperation(aufunc, fill=0, domain=None) aufunc(fill) must be defined self(x) returns aufunc(x) with masked values where domain(x) is true or getmask(x) is true. 
""" self.f = mufunc self.fill = fill self.domain = domain self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) self.__name__ = getattr(mufunc, "__name__", str(mufunc)) ufunc_domain[mufunc] = domain ufunc_fills[mufunc] = fill # def __call__ (self, a, *args, **kwargs): "Execute the call behavior." d = getdata(a) # Case 1.1. : Domained function if self.domain is not None: # Save the error status err_status_ini = np.geterr() try: np.seterr(divide='ignore', invalid='ignore') result = self.f(d, *args, **kwargs) finally: np.seterr(**err_status_ini) # Make a mask m = ~umath.isfinite(result) m |= self.domain(d) m |= getmask(a) # Case 1.2. : Function without a domain else: # Get the result and the mask result = self.f(d, *args, **kwargs) m = getmask(a) # Case 2.1. : The result is scalarscalar if not result.ndim: if m: return masked return result # Case 2.2. The result is an array # We need to fill the invalid data back w/ the input # Now, that's plain silly: in C, we would just skip the element and keep # the original, but we do have to do it that way in Python if m is not nomask: # In case result has a lower dtype than the inputs (as in equal) try: np.putmask(result, m, d) except TypeError: pass # Transform to if isinstance(a, MaskedArray): subtype = type(a) else: subtype = MaskedArray result = result.view(subtype) result._mask = m result._update_from(a) return result # def __str__ (self): return "Masked version of %s. [Invalid values are masked]" % str(self.f) class _MaskedBinaryOperation: """ Define masked version of binary operations, where invalid values are pre-masked. Parameters ---------- mbfunc : function The function for which to define a masked version. Made available as ``_MaskedBinaryOperation.f``. domain : class instance Default domain for the function. Should be one of the ``_Domain*`` classes. Default is None. fillx : scalar, optional Filling value for the first argument, default is 0. filly : scalar, optional Filling value for the second argument, default is 0. """ def __init__ (self, mbfunc, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ self.f = mbfunc self.fillx = fillx self.filly = filly self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) ufunc_domain[mbfunc] = None ufunc_fills[mbfunc] = (fillx, filly) def __call__ (self, a, b, *args, **kwargs): "Execute the call behavior." # Get the data, as ndarray (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) # Get the mask (ma, mb) = (getmask(a), getmask(b)) if ma is nomask: if mb is nomask: m = nomask else: m = umath.logical_or(getmaskarray(a), mb) elif mb is nomask: m = umath.logical_or(ma, getmaskarray(b)) else: m = umath.logical_or(ma, mb) # Get the result err_status_ini = np.geterr() try: np.seterr(divide='ignore', invalid='ignore') result = self.f(da, db, *args, **kwargs) finally: np.seterr(**err_status_ini) # Case 1. : scalar if not result.ndim: if m: return masked return result # Case 2. 
: array # Revert result to da where masked if m.any(): np.putmask(result, m, 0) # This only makes sense if the operation preserved the dtype if result.dtype == da.dtype: result += m * da # Transforms to a (subclass of) MaskedArray result = result.view(get_masked_subclass(a, b)) result._mask = m # Update the optional info from the inputs if isinstance(b, MaskedArray): if isinstance(a, MaskedArray): result._update_from(a) else: result._update_from(b) elif isinstance(a, MaskedArray): result._update_from(a) return result def reduce(self, target, axis=0, dtype=None): """Reduce `target` along the given `axis`.""" if isinstance(target, MaskedArray): tclass = type(target) else: tclass = MaskedArray m = getmask(target) t = filled(target, self.filly) if t.shape == (): t = t.reshape(1) if m is not nomask: m = make_mask(m, copy=1) m.shape = (1,) if m is nomask: return self.f.reduce(t, axis).view(tclass) t = t.view(tclass) t._mask = m tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype) mr = umath.logical_and.reduce(m, axis) tr = tr.view(tclass) if mr.ndim > 0: tr._mask = mr return tr elif mr: return masked return tr def outer (self, a, b): """Return the function applied to the outer product of a and b. """ ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: m = nomask else: ma = getmaskarray(a) mb = getmaskarray(b) m = umath.logical_or.outer(ma, mb) if (not m.ndim) and m: return masked (da, db) = (getdata(a), getdata(b)) d = self.f.outer(da, db) if m is not nomask: np.putmask(d, m, da) if d.shape: d = d.view(get_masked_subclass(a, b)) d._mask = m return d def accumulate (self, target, axis=0): """Accumulate `target` along `axis` after filling with y fill value. """ if isinstance(target, MaskedArray): tclass = type(target) else: tclass = MaskedArray t = filled(target, self.filly) return self.f.accumulate(t, axis).view(tclass) def __str__ (self): return "Masked version of " + str(self.f) class _DomainedBinaryOperation: """ Define binary operations that have a domain, like divide. They have no reduce, outer or accumulate. Parameters ---------- mbfunc : function The function for which to define a masked version. Made available as ``_DomainedBinaryOperation.f``. domain : class instance Default domain for the function. Should be one of the ``_Domain*`` classes. fillx : scalar, optional Filling value for the first argument, default is 0. filly : scalar, optional Filling value for the second argument, default is 0. """ def __init__ (self, dbfunc, domain, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ self.f = dbfunc self.domain = domain self.fillx = fillx self.filly = filly self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) ufunc_domain[dbfunc] = domain ufunc_fills[dbfunc] = (fillx, filly) def __call__(self, a, b, *args, **kwargs): "Execute the call behavior." 
        # Get the data and the mask
        (da, db) = (getdata(a, subok=False), getdata(b, subok=False))
        (ma, mb) = (getmask(a), getmask(b))
        # Get the result
        err_status_ini = np.geterr()
        try:
            np.seterr(divide='ignore', invalid='ignore')
            result = self.f(da, db, *args, **kwargs)
        finally:
            np.seterr(**err_status_ini)
        # Get the mask as a combination of ma, mb and invalid
        m = ~umath.isfinite(result)
        m |= ma
        m |= mb
        # Apply the domain
        domain = ufunc_domain.get(self.f, None)
        if domain is not None:
            m |= filled(domain(da, db), True)
        # Take care of the scalar case first
        if (not m.ndim):
            if m:
                return masked
            else:
                return result
        # When the mask is True, put back da
        np.putmask(result, m, 0)
        result += m * da
        result = result.view(get_masked_subclass(a, b))
        result._mask = m
        if isinstance(b, MaskedArray):
            if isinstance(a, MaskedArray):
                result._update_from(a)
            else:
                result._update_from(b)
        elif isinstance(a, MaskedArray):
            result._update_from(a)
        return result

    def __str__ (self):
        return "Masked version of " + str(self.f)

#..............................................................................
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs .......................................................
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
                             _DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
                            _DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
                             _DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
                              _DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
                            _DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
                                _DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
                                _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs ...............................................................
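# Illustrative aside (not part of the original module): the domain
# machinery above, and the domained binary operations declared next, are
# what make the public masked ufuncs silently mask invalid inputs instead
# of raising or warning. Exact repr spacing varies by numpy version, but
# roughly:
#
#   >>> import numpy as np
#   >>> import numpy.ma as ma
#   >>> ma.sqrt(np.array([-1.0, 0.25, 4.0]))   # _DomainGreaterEqual(0.0)
#   masked_array(data = [-- 0.5 2.0], mask = [ True False False], fill_value = 1e+20)
#   >>> ma.divide(np.array([1.0, 2.0]), np.array([0.0, 4.0]))   # _DomainSafeDivide
#   masked_array(data = [-- 0.5], mask = [ True False], fill_value = 1e+20)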
add = _MaskedBinaryOperation(umath.add) subtract = _MaskedBinaryOperation(umath.subtract) multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) equal = _MaskedBinaryOperation(umath.equal) equal.reduce = None not_equal = _MaskedBinaryOperation(umath.not_equal) not_equal.reduce = None less_equal = _MaskedBinaryOperation(umath.less_equal) less_equal.reduce = None greater_equal = _MaskedBinaryOperation(umath.greater_equal) greater_equal.reduce = None less = _MaskedBinaryOperation(umath.less) less.reduce = None greater = _MaskedBinaryOperation(umath.greater) greater.reduce = None logical_and = _MaskedBinaryOperation(umath.logical_and) alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce logical_or = _MaskedBinaryOperation(umath.logical_or) sometrue = logical_or.reduce logical_xor = _MaskedBinaryOperation(umath.logical_xor) bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) hypot = _MaskedBinaryOperation(umath.hypot) # Domained binary ufuncs ...................................................... divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) true_divide = _DomainedBinaryOperation(umath.true_divide, _DomainSafeDivide(), 0, 1) floor_divide = _DomainedBinaryOperation(umath.floor_divide, _DomainSafeDivide(), 0, 1) remainder = _DomainedBinaryOperation(umath.remainder, _DomainSafeDivide(), 0, 1) fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) #####-------------------------------------------------------------------------- #---- --- Mask creation functions --- #####-------------------------------------------------------------------------- def _recursive_make_descr(datatype, newtype=bool_): "Private function allowing recursion in make_descr." # Do we have some name fields ? if datatype.names: descr = [] for name in datatype.names: field = datatype.fields[name] if len(field) == 3: # Prepend the title to the name name = (field[-1], name) descr.append((name, _recursive_make_descr(field[0], newtype))) return descr # Is this some kind of composite a la (np.float,2) elif datatype.subdtype: mdescr = list(datatype.subdtype) mdescr[0] = newtype return tuple(mdescr) else: return newtype def make_mask_descr(ndtype): """ Construct a dtype description list from a given dtype. Returns a new dtype object, with the type of all fields in `ndtype` to a boolean type. Field names are not altered. Parameters ---------- ndtype : dtype The dtype to convert. Returns ------- result : dtype A dtype that looks like `ndtype`, the type of all fields is boolean. Examples -------- >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], 'formats':[np.float32, np.int]}) >>> dtype dtype([('foo', '<f4'), ('bar', '<i4')]) >>> ma.make_mask_descr(dtype) dtype([('foo', '|b1'), ('bar', '|b1')]) >>> ma.make_mask_descr(np.float32) <type 'numpy.bool_'> """ # Make sure we do have a dtype if not isinstance(ndtype, np.dtype): ndtype = np.dtype(ndtype) return np.dtype(_recursive_make_descr(ndtype, np.bool)) def getmask(a): """ Return the mask of a masked array, or nomask. Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the mask is not `nomask`, else return `nomask`. To guarantee a full array of booleans of the same shape as a, use `getmaskarray`. 
Parameters ---------- a : array_like Input `MaskedArray` for which the mask is required. See Also -------- getdata : Return the data of a masked array as an ndarray. getmaskarray : Return the mask of a masked array, or full array of False. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array(data = [[1 --] [3 4]], mask = [[False True] [False False]], fill_value=999999) >>> ma.getmask(a) array([[False, True], [False, False]], dtype=bool) Equivalently use the `MaskedArray` `mask` attribute. >>> a.mask array([[False, True], [False, False]], dtype=bool) Result when mask == `nomask` >>> b = ma.masked_array([[1,2],[3,4]]) >>> b masked_array(data = [[1 2] [3 4]], mask = False, fill_value=999999) >>> ma.nomask False >>> ma.getmask(b) == ma.nomask True >>> b.mask == ma.nomask True """ return getattr(a, '_mask', nomask) get_mask = getmask def getmaskarray(arr): """ Return the mask of a masked array, or full boolean array of False. Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and the mask is not `nomask`, else return a full boolean array of False of the same shape as `arr`. Parameters ---------- arr : array_like Input `MaskedArray` for which the mask is required. See Also -------- getmask : Return the mask of a masked array, or nomask. getdata : Return the data of a masked array as an ndarray. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array(data = [[1 --] [3 4]], mask = [[False True] [False False]], fill_value=999999) >>> ma.getmaskarray(a) array([[False, True], [False, False]], dtype=bool) Result when mask == ``nomask`` >>> b = ma.masked_array([[1,2],[3,4]]) >>> b masked_array(data = [[1 2] [3 4]], mask = False, fill_value=999999) >>> >ma.getmaskarray(b) array([[False, False], [False, False]], dtype=bool) """ mask = getmask(arr) if mask is nomask: mask = make_mask_none(np.shape(arr), getdata(arr).dtype) return mask def is_mask(m): """ Return True if m is a valid, standard mask. This function does not check the contents of the input, only that the type is MaskType. In particular, this function returns False if the mask has a flexible dtype. Parameters ---------- m : array_like Array to test. Returns ------- result : bool True if `m.dtype.type` is MaskType, False otherwise. See Also -------- isMaskedArray : Test whether input is an instance of MaskedArray. Examples -------- >>> import numpy.ma as ma >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> m masked_array(data = [-- 1 -- 2 3], mask = [ True False True False False], fill_value=999999) >>> ma.is_mask(m) False >>> ma.is_mask(m.mask) True Input must be an ndarray (or have similar attributes) for it to be considered a valid mask. >>> m = [False, True, False] >>> ma.is_mask(m) False >>> m = np.array([False, True, False]) >>> m array([False, True, False], dtype=bool) >>> ma.is_mask(m) True Arrays with complex dtypes don't return True. >>> dtype = np.dtype({'names':['monty', 'pithon'], 'formats':[np.bool, np.bool]}) >>> dtype dtype([('monty', '|b1'), ('pithon', '|b1')]) >>> m = np.array([(True, False), (False, True), (True, False)], dtype=dtype) >>> m array([(True, False), (False, True), (True, False)], dtype=[('monty', '|b1'), ('pithon', '|b1')]) >>> ma.is_mask(m) False """ try: return m.dtype.type is MaskType except AttributeError: return False def make_mask(m, copy=False, shrink=True, dtype=MaskType): """ Create a boolean mask from an array. Return `m` as a boolean mask, creating a copy if necessary or requested. 
The function can accept any sequence that is convertible to integers, or ``nomask``. Does not require that contents must be 0s and 1s, values of 0 are interepreted as False, everything else as True. Parameters ---------- m : array_like Potential mask. copy : bool, optional Whether to return a copy of `m` (True) or `m` itself (False). shrink : bool, optional Whether to shrink `m` to ``nomask`` if all its values are False. dtype : dtype, optional Data-type of the output mask. By default, the output mask has a dtype of MaskType (bool). If the dtype is flexible, each field has a boolean dtype. Returns ------- result : ndarray A boolean mask derived from `m`. Examples -------- >>> import numpy.ma as ma >>> m = [True, False, True, True] >>> ma.make_mask(m) array([ True, False, True, True], dtype=bool) >>> m = [1, 0, 1, 1] >>> ma.make_mask(m) array([ True, False, True, True], dtype=bool) >>> m = [1, 0, 2, -3] >>> ma.make_mask(m) array([ True, False, True, True], dtype=bool) Effect of the `shrink` parameter. >>> m = np.zeros(4) >>> m array([ 0., 0., 0., 0.]) >>> ma.make_mask(m) False >>> ma.make_mask(m, shrink=False) array([False, False, False, False], dtype=bool) Using a flexible `dtype`. >>> m = [1, 0, 1, 1] >>> n = [0, 1, 0, 0] >>> arr = [] >>> for man, mouse in zip(m, n): ... arr.append((man, mouse)) >>> arr [(1, 0), (0, 1), (1, 0), (1, 0)] >>> dtype = np.dtype({'names':['man', 'mouse'], 'formats':[np.int, np.int]}) >>> arr = np.array(arr, dtype=dtype) >>> arr array([(1, 0), (0, 1), (1, 0), (1, 0)], dtype=[('man', '<i4'), ('mouse', '<i4')]) >>> ma.make_mask(arr, dtype=dtype) array([(True, False), (False, True), (True, False), (True, False)], dtype=[('man', '|b1'), ('mouse', '|b1')]) """ if m is nomask: return nomask elif isinstance(m, ndarray): # We won't return after this point to make sure we can shrink the mask # Fill the mask in case there are missing data m = filled(m, True) # Make sure the input dtype is valid dtype = make_mask_descr(dtype) if m.dtype == dtype: if copy: result = m.copy() else: result = m else: result = np.array(m, dtype=dtype, copy=copy) else: result = np.array(filled(m, True), dtype=MaskType) # Bas les masques ! if shrink and (not result.dtype.names) and (not result.any()): return nomask else: return result def make_mask_none(newshape, dtype=None): """ Return a boolean mask of the given shape, filled with False. This function returns a boolean ndarray with all entries False, that can be used in common mask manipulations. If a complex dtype is specified, the type of each field is converted to a boolean type. Parameters ---------- newshape : tuple A tuple indicating the shape of the mask. dtype: {None, dtype}, optional If None, use a MaskType instance. Otherwise, use a new datatype with the same fields as `dtype`, converted to boolean types. Returns ------- result : ndarray An ndarray of appropriate shape and dtype, filled with False. See Also -------- make_mask : Create a boolean mask from an array. make_mask_descr : Construct a dtype description list from a given dtype. Examples -------- >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False], dtype=bool) Defining a more complex dtype. 
>>> dtype = np.dtype({'names':['foo', 'bar'], 'formats':[np.float32, np.int]}) >>> dtype dtype([('foo', '<f4'), ('bar', '<i4')]) >>> ma.make_mask_none((3,), dtype=dtype) array([(False, False), (False, False), (False, False)], dtype=[('foo', '|b1'), ('bar', '|b1')]) """ if dtype is None: result = np.zeros(newshape, dtype=MaskType) else: result = np.zeros(newshape, dtype=make_mask_descr(dtype)) return result def mask_or (m1, m2, copy=False, shrink=True): """ Combine two masks with the ``logical_or`` operator. The result may be a view on `m1` or `m2` if the other is `nomask` (i.e. False). Parameters ---------- m1, m2 : array_like Input masks. copy : bool, optional If copy is False and one of the inputs is `nomask`, return a view of the other input mask. Defaults to False. shrink : bool, optional Whether to shrink the output to `nomask` if all its values are False. Defaults to True. Returns ------- mask : output mask The result masks values that are masked in either `m1` or `m2`. Raises ------ ValueError If `m1` and `m2` have different flexible dtypes. Examples -------- >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) array([ True, True, True, False], dtype=bool) """ def _recursive_mask_or(m1, m2, newmask): names = m1.dtype.names for name in names: current1 = m1[name] if current1.dtype.names: _recursive_mask_or(current1, m2[name], newmask[name]) else: umath.logical_or(current1, m2[name], newmask[name]) return # if (m1 is nomask) or (m1 is False): dtype = getattr(m2, 'dtype', MaskType) return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) if (m2 is nomask) or (m2 is False): dtype = getattr(m1, 'dtype', MaskType) return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): return m1 (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) if (dtype1 != dtype2): raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) if dtype1.names: newmask = np.empty_like(m1) _recursive_mask_or(m1, m2, newmask) return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) def flatten_mask(mask): """ Returns a completely flattened version of the mask, where nested fields are collapsed. Parameters ---------- mask : array_like Input array, which will be interpreted as booleans. Returns ------- flattened_mask : ndarray of bools The flattened input. Examples -------- >>> mask = np.array([0, 0, 1], dtype=np.bool) >>> flatten_mask(mask) array([False, False, True], dtype=bool) >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) >>> flatten_mask(mask) array([False, False, False, True], dtype=bool) >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) >>> flatten_mask(mask) array([False, False, False, False, False, True], dtype=bool) """ # def _flatmask(mask): "Flatten the mask and returns a (maybe nested) sequence of booleans." mnames = mask.dtype.names if mnames: return [flatten_mask(mask[name]) for name in mnames] else: return mask # def _flatsequence(sequence): "Generates a flattened version of the sequence." 
try: for element in sequence: if hasattr(element, '__iter__'): for f in _flatsequence(element): yield f else: yield element except TypeError: yield sequence # mask = np.asarray(mask) flattened = _flatsequence(_flatmask(mask)) return np.array([_ for _ in flattened], dtype=bool) def _check_mask_axis(mask, axis): "Check whether there are masked values along the given axis" if mask is not nomask: return mask.all(axis=axis) return nomask #####-------------------------------------------------------------------------- #--- --- Masking functions --- #####-------------------------------------------------------------------------- def masked_where(condition, a, copy=True): """ Mask an array where a condition is met. Return `a` as an array masked where `condition` is True. Any masked values of `a` or `condition` are also masked in the output. Parameters ---------- condition : array_like Masking condition. When `condition` tests floating point values for equality, consider using ``masked_values`` instead. a : array_like Array to mask. copy : bool If True (default) make a copy of `a` in the result. If False modify `a` in place and return a view. Returns ------- result : MaskedArray The result of masking `a` where `condition` is True. See Also -------- masked_values : Mask using floating point equality. masked_equal : Mask where equal to a given value. masked_not_equal : Mask where `not` equal to a given value. masked_less_equal : Mask where less than or equal to a given value. masked_greater_equal : Mask where greater than or equal to a given value. masked_less : Mask where less than a given value. masked_greater : Mask where greater than a given value. masked_inside : Mask inside a given interval. masked_outside : Mask outside a given interval. masked_invalid : Mask invalid values (NaNs or infs). Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_where(a <= 2, a) masked_array(data = [-- -- -- 3], mask = [ True True True False], fill_value=999999) Mask array `b` conditional on `a`. >>> b = ['a', 'b', 'c', 'd'] >>> ma.masked_where(a == 2, b) masked_array(data = [a b -- d], mask = [False False True False], fill_value=N/A) Effect of the `copy` argument. >>> c = ma.masked_where(a <= 2, a) >>> c masked_array(data = [-- -- -- 3], mask = [ True True True False], fill_value=999999) >>> c[0] = 99 >>> c masked_array(data = [99 -- -- 3], mask = [False True True False], fill_value=999999) >>> a array([0, 1, 2, 3]) >>> c = ma.masked_where(a <= 2, a, copy=False) >>> c[0] = 99 >>> c masked_array(data = [99 -- -- 3], mask = [False True True False], fill_value=999999) >>> a array([99, 1, 2, 3]) When `condition` or `a` contain masked values. >>> a = np.arange(4) >>> a = ma.masked_where(a == 2, a) >>> a masked_array(data = [0 1 -- 3], mask = [False False True False], fill_value=999999) >>> b = np.arange(4) >>> b = ma.masked_where(b == 0, b) >>> b masked_array(data = [-- 1 2 3], mask = [ True False False False], fill_value=999999) >>> ma.masked_where(a == 3, b) masked_array(data = [-- 1 -- --], mask = [ True False True True], fill_value=999999) """ # Make sure that condition is a valid standard-type mask. 
cond = make_mask(condition) a = np.array(a, copy=copy, subok=True) (cshape, ashape) = (cond.shape, a.shape) if cshape and cshape != ashape: raise IndexError("Inconsistant shape between the condition and the input"\ " (got %s and %s)" % (cshape, ashape)) if hasattr(a, '_mask'): cond = mask_or(cond, a._mask) cls = type(a) else: cls = MaskedArray result = a.view(cls) result._mask = cond return result def masked_greater(x, value, copy=True): """ Mask an array where greater than a given value. This function is a shortcut to ``masked_where``, with `condition` = (x > value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater(a, 2) masked_array(data = [0 1 2 --], mask = [False False False True], fill_value=999999) """ return masked_where(greater(x, value), x, copy=copy) def masked_greater_equal(x, value, copy=True): """ Mask an array where greater than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x >= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater_equal(a, 2) masked_array(data = [0 1 -- --], mask = [False False True True], fill_value=999999) """ return masked_where(greater_equal(x, value), x, copy=copy) def masked_less(x, value, copy=True): """ Mask an array where less than a given value. This function is a shortcut to ``masked_where``, with `condition` = (x < value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less(a, 2) masked_array(data = [-- -- 2 3], mask = [ True True False False], fill_value=999999) """ return masked_where(less(x, value), x, copy=copy) def masked_less_equal(x, value, copy=True): """ Mask an array where less than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x <= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less_equal(a, 2) masked_array(data = [-- -- -- 3], mask = [ True True True False], fill_value=999999) """ return masked_where(less_equal(x, value), x, copy=copy) def masked_not_equal(x, value, copy=True): """ Mask an array where `not` equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x != value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_not_equal(a, 2) masked_array(data = [-- -- 2 --], mask = [ True True False True], fill_value=999999) """ return masked_where(not_equal(x, value), x, copy=copy) def masked_equal(x, value, copy=True): """ Mask an array where equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x == value). For floating point arrays, consider using ``masked_values(x, value)``. See Also -------- masked_where : Mask where a condition is met. masked_values : Mask using floating point equality. 
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_equal(a, 2) masked_array(data = [0 1 -- 3], mask = [False False True False], fill_value=999999) """ # An alternative implementation relies on filling first: probably not needed. # d = filled(x, 0) # c = umath.equal(d, value) # m = mask_or(c, getmask(x)) # return array(d, mask=m, copy=copy) output = masked_where(equal(x, value), x, copy=copy) output.fill_value = value return output def masked_inside(x, v1, v2, copy=True): """ Mask an array inside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` inside the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf >= v1) & (xf <= v2) return masked_where(condition, x, copy=copy) def masked_outside(x, v1, v2, copy=True): """ Mask an array outside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` outside the interval [v1,v2] (x < v1)|(x > v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) masked_array(data = [-- -- 0.01 0.2 -- --], mask = [ True True False False True True], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_outside(x, 0.3, -0.3) masked_array(data = [-- -- 0.01 0.2 -- --], mask = [ True True False False True True], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf < v1) | (xf > v2) return masked_where(condition, x, copy=copy) def masked_object(x, value, copy=True, shrink=True): """ Mask the array `x` where the data are exactly equal to value. This function is similar to `masked_values`, but only suitable for object arrays: for floating point, use `masked_values` instead. Parameters ---------- x : array_like Array to mask value : object Comparison value copy : {True, False}, optional Whether to return a copy of `x`. shrink : {True, False}, optional Whether to collapse a mask full of False to nomask Returns ------- result : MaskedArray The result of masking `x` where equal to `value`. See Also -------- masked_where : Mask where a condition is met. masked_equal : Mask where equal to a given value (integers). masked_values : Mask using floating point equality. Examples -------- >>> import numpy.ma as ma >>> food = np.array(['green_eggs', 'ham'], dtype=object) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> print eat [-- ham] >>> # plain ol` ham is boring >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> print eat [cheese ham pineapple] Note that `mask` is set to ``nomask`` if possible. 
>>> eat masked_array(data = [cheese ham pineapple], mask = False, fill_value=?) """ if isMaskedArray(x): condition = umath.equal(x._data, value) mask = x._mask else: condition = umath.equal(np.asarray(x), value) mask = nomask mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(x, mask=mask, copy=copy, fill_value=value) def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): """ Mask using floating point equality. Return a MaskedArray, masked where the data in array `x` are approximately equal to `value`, i.e. where the following condition is True (abs(x - value) <= atol+rtol*abs(value)) The fill_value is set to `value` and the mask is set to ``nomask`` if possible. For integers, consider using ``masked_equal``. Parameters ---------- x : array_like Array to mask. value : float Masking value. rtol : float, optional Tolerance parameter. atol : float, optional Tolerance parameter (1e-8). copy : bool, optional Whether to return a copy of `x`. shrink : bool, optional Whether to collapse a mask full of False to ``nomask``. Returns ------- result : MaskedArray The result of masking `x` where approximately equal to `value`. See Also -------- masked_where : Mask where a condition is met. masked_equal : Mask where equal to a given value (integers). Examples -------- >>> import numpy.ma as ma >>> x = np.array([1, 1.1, 2, 1.1, 3]) >>> ma.masked_values(x, 1.1) masked_array(data = [1.0 -- 2.0 -- 3.0], mask = [False True False True False], fill_value=1.1) Note that `mask` is set to ``nomask`` if possible. >>> ma.masked_values(x, 1.5) masked_array(data = [ 1. 1.1 2. 1.1 3. ], mask = False, fill_value=1.5) For integers, the fill value will be different in general to the result of ``masked_equal``. >>> x = np.arange(5) >>> x array([0, 1, 2, 3, 4]) >>> ma.masked_values(x, 2) masked_array(data = [0 1 -- 3 4], mask = [False False True False False], fill_value=2) >>> ma.masked_equal(x, 2) masked_array(data = [0 1 -- 3 4], mask = [False False True False False], fill_value=999999) """ mabs = umath.absolute xnew = filled(x, value) if issubclass(xnew.dtype.type, np.floating): condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value)) mask = getattr(x, '_mask', nomask) else: condition = umath.equal(xnew, value) mask = nomask mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(xnew, mask=mask, copy=copy, fill_value=value) def masked_invalid(a, copy=True): """ Mask an array where invalid values occur (NaNs or infs). This function is a shortcut to ``masked_where``, with `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. Only applies to arrays with a dtype where NaNs or infs make sense (i.e. floating point types), but accepts any array_like object. See Also -------- masked_where : Mask where a condition is met. 
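
    Notes
    -----
    For input of integer dtype, no value can be NaN or infinite, so the
    result carries no masked entries beyond those of a pre-existing mask.
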
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(5, dtype=np.float) >>> a[2] = np.NaN >>> a[3] = np.PINF >>> a array([ 0., 1., NaN, Inf, 4.]) >>> ma.masked_invalid(a) masked_array(data = [0.0 1.0 -- -- 4.0], mask = [False False True True False], fill_value=1e+20) """ a = np.array(a, copy=copy, subok=True) mask = getattr(a, '_mask', None) if mask is not None: condition = ~(np.isfinite(getdata(a))) if mask is not nomask: condition |= mask cls = type(a) else: condition = ~(np.isfinite(a)) cls = MaskedArray result = a.view(cls) result._mask = condition return result #####-------------------------------------------------------------------------- #---- --- Printing options --- #####-------------------------------------------------------------------------- class _MaskedPrintOption: """ Handle the string used to represent missing data in a masked array. """ def __init__ (self, display): "Create the masked_print_option object." self._display = display self._enabled = True def display(self): "Display the string to print for masked values." return self._display def set_display (self, s): "Set the string to print for masked values." self._display = s def enabled(self): "Is the use of the display value enabled?" return self._enabled def enable(self, shrink=1): "Set the enabling shrink to `shrink`." self._enabled = shrink def __str__ (self): return str(self._display) __repr__ = __str__ #if you single index into a masked location you get this object. masked_print_option = _MaskedPrintOption('--') def _recursive_printoption(result, mask, printopt): """ Puts printoptions in result where mask is True. Private function allowing for recursion """ names = result.dtype.names for name in names: (curdata, curmask) = (result[name], mask[name]) if curdata.dtype.names: _recursive_printoption(curdata, curmask, printopt) else: np.putmask(curdata, curmask, printopt) return _print_templates = dict(long_std="""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """, short_std="""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """, long_flx="""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """, short_flx="""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """) #####-------------------------------------------------------------------------- #---- --- MaskedArray class --- #####-------------------------------------------------------------------------- def _recursive_filled(a, mask, fill_value): """ Recursively fill `a` with `fill_value`. Private function """ names = a.dtype.names for name in names: current = a[name] if current.dtype.names: _recursive_filled(current, mask[name], fill_value[name]) else: np.putmask(current, mask[name], fill_value[name]) def flatten_structured_array(a): """ Flatten a structured array. The data type of the output is chosen such that it can represent all of the (nested) fields. Parameters ---------- a : structured array Returns ------- output : masked array or ndarray A flattened masked array if the input is a masked array, otherwise a standard ndarray. 
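
    Notes
    -----
    Nested fields are flattened recursively, and the shape of the input is
    preserved in the leading dimensions of the output.
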
Examples -------- >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> flatten_structured_array(a) array([[1., 1.], [2., 2.]]) """ # def flatten_sequence(iterable): """Flattens a compound of nested iterables.""" for elm in iter(iterable): if hasattr(elm, '__iter__'): for f in flatten_sequence(elm): yield f else: yield elm # a = np.asanyarray(a) inishape = a.shape a = a.ravel() if isinstance(a, MaskedArray): out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) out = out.view(MaskedArray) out._mask = np.array([tuple(flatten_sequence(d.item())) for d in getmaskarray(a)]) else: out = np.array([tuple(flatten_sequence(d.item())) for d in a]) if len(inishape) > 1: newshape = list(out.shape) newshape[0] = inishape out.shape = tuple(flatten_sequence(newshape)) return out class _arraymethod(object): """ Define a wrapper for basic array methods. Upon call, returns a masked array, where the new ``_data`` array is the output of the corresponding method called on the original ``_data``. If `onmask` is True, the new mask is the output of the method called on the initial mask. Otherwise, the new mask is just a reference to the initial mask. Attributes ---------- _onmask : bool Holds the `onmask` parameter. obj : object The object calling `_arraymethod`. Parameters ---------- funcname : str Name of the function to apply on data. onmask : bool Whether the mask must be processed also (True) or left alone (False). Default is True. Make available as `_onmask` attribute. """ def __init__(self, funcname, onmask=True): self.__name__ = funcname self._onmask = onmask self.obj = None self.__doc__ = self.getdoc() # def getdoc(self): "Return the doc of the function (from the doc of the method)." methdoc = getattr(ndarray, self.__name__, None) or \ getattr(np, self.__name__, None) if methdoc is not None: return methdoc.__doc__ # def __get__(self, obj, objtype=None): self.obj = obj return self # def __call__(self, *args, **params): methodname = self.__name__ instance = self.obj # Fallback : if the instance has not been initialized, use the first arg if instance is None: args = list(args) instance = args.pop(0) data = instance._data mask = instance._mask cls = type(instance) result = getattr(data, methodname)(*args, **params).view(cls) result._update_from(instance) if result.ndim: if not self._onmask: result.__setmask__(mask) elif mask is not nomask: result.__setmask__(getattr(mask, methodname)(*args, **params)) else: if mask.ndim and (not mask.dtype.names and mask.all()): return masked return result class MaskedIterator(object): """ Flat iterator object to iterate over masked arrays. A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array `x`. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in C-contiguous style, with the last index varying the fastest. The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- MaskedArray.flat : Return a flat iterator over an array. MaskedArray.flatten : Returns a flattened copy of an array. Notes ----- `MaskedIterator` is not exported by the `ma` module. Instead of instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. Examples -------- >>> x = np.ma.array(arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) <class 'numpy.ma.core.MaskedIterator'> >>> for item in fl: ... print item ... 
    0
    1
    2
    3
    4
    5

    Extracting more than a single element by indexing the `MaskedIterator`
    returns a masked array:

    >>> fl[2:4]
    masked_array(data = [2 3],
                 mask = False,
           fill_value = 999999)

    """
    def __init__(self, ma):
        self.ma = ma
        self.dataiter = ma._data.flat
        #
        if ma._mask is nomask:
            self.maskiter = None
        else:
            self.maskiter = ma._mask.flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        result = self.dataiter.__getitem__(indx).view(type(self.ma))
        if self.maskiter is not None:
            _mask = self.maskiter.__getitem__(indx)
            _mask.shape = result.shape
            result._mask = _mask
        return result

    ### This won't work if ravel makes a copy
    def __setitem__(self, index, value):
        self.dataiter[index] = getdata(value)
        if self.maskiter is not None:
            self.maskiter[index] = getmaskarray(value)

    def next(self):
        """
        Return the next value, or raise StopIteration.

        Examples
        --------
        >>> x = np.ma.array([3, 2], mask=[0, 1])
        >>> fl = x.flat
        >>> fl.next()
        3
        >>> fl.next()
        masked_array(data = --,
                     mask = True,
               fill_value = 1e+20)
        >>> fl.next()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
            d = self.dataiter.next()
        StopIteration

        """
        d = self.dataiter.next()
        if self.maskiter is not None and self.maskiter.next():
            d = masked
        return d


class MaskedArray(ndarray):
    """
    An array class with possibly masked values.

    Masked values of True exclude the corresponding element from any
    computation.

    Construction::

      x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
                      ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
                      shrink=True)

    Parameters
    ----------
    data : array_like
        Input data.
    mask : sequence, optional
        Mask.  Must be convertible to an array of booleans with the same
        shape as `data`.  True indicates a masked (i.e. invalid) data.
    dtype : dtype, optional
        Data type of the output.
        If `dtype` is None, the type of the data argument (``data.dtype``)
        is used.  If `dtype` is not None and different from ``data.dtype``,
        a copy is performed.
    copy : bool, optional
        Whether to copy the input data (True), or to use a reference instead.
        Default is False.
    subok : bool, optional
        Whether to return a subclass of `MaskedArray` if possible (True) or a
        plain `MaskedArray`.  Default is True.
    ndmin : int, optional
        Minimum number of dimensions.  Default is 0.
    fill_value : scalar, optional
        Value used to fill in the masked values when necessary.
        If None, a default based on the data-type is used.
    keep_mask : bool, optional
        Whether to combine `mask` with the mask of the input data, if any
        (True), or to use only `mask` for the output (False).  Default is True.
    hard_mask : bool, optional
        Whether to use a hard mask or not.  With a hard mask, masked values
        cannot be unmasked.  Default is False.
    shrink : bool, optional
        Whether to force compression of an empty mask.  Default is True.

    """

    __array_priority__ = 15
    _defaultmask = nomask
    _defaulthardmask = False
    _baseclass = ndarray

    def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
                subok=True, ndmin=0, fill_value=None,
                keep_mask=True, hard_mask=None, shrink=True,
                **options):
        """
        Create a new masked array from scratch.

        Notes
        -----
        A masked array can also be created by taking a .view(MaskedArray).

        """
        # Process data............
        _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
        _baseclass = getattr(data, '_baseclass', type(_data))
        # Check that we're not erasing the mask..........
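        # If the input is a masked array whose shape was changed (e.g. through
        # `ndmin`), the original mask no longer matches the data: force a copy
        # below so the stale mask is not shared.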
if isinstance(data, MaskedArray) and (data.shape != _data.shape): copy = True # Careful, cls might not always be MaskedArray... if not isinstance(data, cls) or not subok: _data = ndarray.view(_data, cls) else: _data = ndarray.view(_data, type(data)) # Backwards compatibility w/ numpy.core.ma ....... if hasattr(data, '_mask') and not isinstance(data, ndarray): _data._mask = data._mask _sharedmask = True # Process mask ............................... # Number of named fields (or zero if none) names_ = _data.dtype.names or () # Type of the mask if names_: mdtype = make_mask_descr(_data.dtype) else: mdtype = MaskType # Case 1. : no mask in input ............ if mask is nomask: # Erase the current mask ? if not keep_mask: # With a reduced version if shrink: _data._mask = nomask # With full version else: _data._mask = np.zeros(_data.shape, dtype=mdtype) # Check whether we missed something elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array mask = np.array([getmaskarray(m) for m in data], dtype=mdtype) except ValueError: # If data is nested mask = nomask # Force shrinking of the mask if needed (and possible) if (mdtype == MaskType) and mask.any(): _data._mask = mask _data._sharedmask = False else: if copy: _data._mask = _data._mask.copy() _data._sharedmask = False # Reset the shape of the original mask if getmask(data) is not nomask: data._mask.shape = data.shape else: _data._sharedmask = True # Case 2. : With a mask in input ........ else: # Read the mask with the current mdtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Make sure the mask and the data have the same shape if mask.shape != _data.shape: (nd, nm) = (_data.size, mask.size) if nm == 1: mask = np.resize(mask, _data.shape) elif nm == nd: mask = np.reshape(mask, _data.shape) else: msg = "Mask and data not compatible: data size is %i, " + \ "mask size is %i." raise MaskError, msg % (nd, nm) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy else: if not keep_mask: _data._mask = mask _data._sharedmask = not copy else: if names_: def _recursive_or(a, b): "do a|=b on each field of a, recursively" for name in a.dtype.names: (af, bf) = (a[name], b[name]) if af.dtype.names: _recursive_or(af, bf) else: af |= bf return _recursive_or(_data._mask, mask) else: _data._mask = np.logical_or(mask, _data._mask) _data._sharedmask = False # Update fill_value....... if fill_value is None: fill_value = getattr(data, '_fill_value', None) # But don't run the check unless we have something to check.... if fill_value is not None: _data._fill_value = _check_fill_value(fill_value, _data.dtype) # Process extra options .. if hard_mask is None: _data._hardmask = getattr(data, '_hardmask', False) else: _data._hardmask = hard_mask _data._baseclass = _baseclass return _data # def _update_from(self, obj): """Copies some attributes of obj to self. 
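
        The copied attributes include ``_fill_value``, ``_hardmask``,
        ``_sharedmask``, ``_isfield``, ``_baseclass`` and the ``_optinfo``
        dictionary, so that views and ufunc results keep the metadata of
        the array they derive from.
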
""" if obj is not None and isinstance(obj, ndarray): _baseclass = type(obj) else: _baseclass = ndarray # We need to copy the _basedict to avoid backward propagation _optinfo = {} _optinfo.update(getattr(obj, '_optinfo', {})) _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) _dict = dict(_fill_value=getattr(obj, '_fill_value', None), _hardmask=getattr(obj, '_hardmask', False), _sharedmask=getattr(obj, '_sharedmask', False), _isfield=getattr(obj, '_isfield', False), _baseclass=getattr(obj, '_baseclass', _baseclass), _optinfo=_optinfo, _basedict=_optinfo) self.__dict__.update(_dict) self.__dict__.update(_optinfo) return def __array_finalize__(self, obj): """Finalizes the masked array. """ # Get main attributes ......... self._update_from(obj) if isinstance(obj, ndarray): odtype = obj.dtype if odtype.names: _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype)) else: _mask = getattr(obj, '_mask', nomask) else: _mask = nomask self._mask = _mask # Finalize the mask ........... if self._mask is not nomask: try: self._mask.shape = self.shape except ValueError: self._mask = nomask except (TypeError, AttributeError): # When _mask.shape is not writable (because it's a void) pass # Finalize the fill_value for structured arrays if self.dtype.names: if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) return def __array_wrap__(self, obj, context=None): """ Special hook for ufuncs. Wraps the numpy array and sets the mask according to context. """ result = obj.view(type(self)) result._update_from(self) #.......... if context is not None: result._mask = result._mask.copy() (func, args, _) = context m = reduce(mask_or, [getmaskarray(arg) for arg in args]) # Get the domain mask................ domain = ufunc_domain.get(func, None) if domain is not None: # Take the domain, and make sure it's a ndarray if len(args) > 2: d = filled(reduce(domain, args), True) else: d = filled(domain(*args), True) # Fill the result where the domain is wrong try: # Binary domain: take the last value fill_value = ufunc_fills[func][-1] except TypeError: # Unary domain: just use this one fill_value = ufunc_fills[func] except KeyError: # Domain not recognized, use fill_value instead fill_value = self.fill_value result = result.copy() np.putmask(result, d, fill_value) # Update the mask if m is nomask: if d is not nomask: m = d else: # Don't modify inplace, we risk back-propagation m = (m | d) # Make sure the mask has the proper size if result.shape == () and m: return masked else: result._mask = m result._sharedmask = False #.... return result def view(self, dtype=None, type=None): if dtype is None: if type is None: output = ndarray.view(self) else: output = ndarray.view(self, type) elif type is None: try: if issubclass(dtype, ndarray): output = ndarray.view(self, dtype) dtype = None else: output = ndarray.view(self, dtype) except TypeError: output = ndarray.view(self, dtype) else: output = ndarray.view(self, dtype, type) # Should we update the mask ? 
if (getattr(output, '_mask', nomask) is not nomask): if dtype is None: dtype = output.dtype mdtype = make_mask_descr(dtype) output._mask = self._mask.view(mdtype, ndarray) # Try to reset the shape of the mask (if we don't have a void) try: output._mask.shape = output.shape except (AttributeError, TypeError): pass # Make sure to reset the _fill_value if needed if getattr(output, '_fill_value', None) is not None: output._fill_value = None return output view.__doc__ = ndarray.view.__doc__ def astype(self, newtype): """ Returns a copy of the MaskedArray cast to given newtype. Returns ------- output : MaskedArray A copy of self cast to input newtype. The returned record shape matches self.shape. Examples -------- >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print x [[1.0 -- 3.1] [-- 5.0 --] [7.0 -- 9.0]] >>> print x.astype(int32) [[1 -- 3] [-- 5 --] [7 -- 9]] """ newtype = np.dtype(newtype) output = self._data.astype(newtype).view(type(self)) output._update_from(self) names = output.dtype.names if names is None: output._mask = self._mask.astype(bool) else: if self._mask is nomask: output._mask = nomask else: output._mask = self._mask.astype([(n, bool) for n in names]) # Don't check _fill_value if it's None, that'll speed things up if self._fill_value is not None: output._fill_value = _check_fill_value(self._fill_value, newtype) return output def __getitem__(self, indx): """x.__getitem__(y) <==> x[y] Return the item described by i, as a masked array. """ # This test is useful, but we should keep things light... # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" # raise IndexError, msg _data = ndarray.view(self, ndarray) dout = ndarray.__getitem__(_data, indx) # We could directly use ndarray.__getitem__ on self... # But then we would have to modify __array_finalize__ to prevent the # mask of being reshaped if it hasn't been set up properly yet... # So it's easier to stick to the current version _mask = self._mask if not getattr(dout, 'ndim', False): # A record ................ if isinstance(dout, np.void): mask = _mask[indx] # If we can make mvoid a subclass of np.void, that'd be what we'd need # return mvoid(dout, mask=mask) if flatten_mask(mask).any(): dout = mvoid(dout, mask=mask) else: return dout # Just a scalar............ elif _mask is not nomask and _mask[indx]: return masked else: # Force dout to MA ........ dout = dout.view(type(self)) # Inherit attributes from self dout._update_from(self) # Check the fill_value .... if isinstance(indx, basestring): if self._fill_value is not None: dout._fill_value = self._fill_value[indx] dout._isfield = True # Update the mask if needed if _mask is not nomask: dout._mask = _mask[indx] dout._sharedmask = True # Note: Don't try to check for m.any(), that'll take too long... return dout def __setitem__(self, indx, value): """x.__setitem__(i, y) <==> x[i]=y Set item described by index. If value is masked, masks those locations. """ if self is masked: raise MaskError, 'Cannot alter the masked element.' # This test is useful, but we should keep things light... # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" 
#            raise IndexError, msg
        _data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass'))
        _mask = ndarray.__getattribute__(self, '_mask')
        if isinstance(indx, basestring):
            ndarray.__setitem__(_data, indx, value)
            if _mask is nomask:
                self._mask = _mask = make_mask_none(self.shape, self.dtype)
            _mask[indx] = getmask(value)
            return
        #........................................
        _dtype = ndarray.__getattribute__(_data, 'dtype')
        nbfields = len(_dtype.names or ())
        #........................................
        if value is masked:
            # The mask wasn't set: create a full version...
            if _mask is nomask:
                _mask = self._mask = make_mask_none(self.shape, _dtype)
            # Now, set the mask to its value.
            if nbfields:
                _mask[indx] = tuple([True] * nbfields)
            else:
                _mask[indx] = True
            if not self._isfield:
                self._sharedmask = False
            return
        #........................................
        # Get the _data part of the new value
        dval = value
        # Get the _mask part of the new value
        mval = getattr(value, '_mask', nomask)
        if nbfields and mval is nomask:
            mval = tuple([False] * nbfields)
        if _mask is nomask:
            # Set the data, then the mask
            ndarray.__setitem__(_data, indx, dval)
            if mval is not nomask:
                _mask = self._mask = make_mask_none(self.shape, _dtype)
                ndarray.__setitem__(_mask, indx, mval)
        elif not self._hardmask:
            # Unshare the mask if necessary to avoid propagation
            if not self._isfield:
                self.unshare_mask()
                _mask = ndarray.__getattribute__(self, '_mask')
            # Set the data, then the mask
            ndarray.__setitem__(_data, indx, dval)
            ndarray.__setitem__(_mask, indx, mval)
        elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
            indx = indx * umath.logical_not(_mask)
            ndarray.__setitem__(_data, indx, dval)
        else:
            if nbfields:
                err_msg = "Flexible 'hard' masks are not yet supported..."
                raise NotImplementedError(err_msg)
            mindx = mask_or(_mask[indx], mval, copy=True)
            dindx = self._data[indx]
            if dindx.size > 1:
                dindx[~mindx] = dval
            elif mindx is nomask:
                dindx = dval
            ndarray.__setitem__(_data, indx, dindx)
            _mask[indx] = mindx
        return

    def __getslice__(self, i, j):
        """x.__getslice__(i, j) <==> x[i:j]

        Return the slice described by (i, j).  The use of negative
        indices is not supported.

        """
        return self.__getitem__(slice(i, j))

    def __setslice__(self, i, j, value):
        """x.__setslice__(i, j, value) <==> x[i:j]=value

        Set the slice (i,j) of a to value. If value is masked, mask
        those locations.

        """
        self.__setitem__(slice(i, j), value)

    def __setmask__(self, mask, copy=False):
        """Set the mask.

        """
        idtype = ndarray.__getattribute__(self, 'dtype')
        current_mask = ndarray.__getattribute__(self, '_mask')
        if mask is masked:
            mask = True
        # Make sure the mask is set
        if (current_mask is nomask):
            # Just don't do anything if there's nothing to do...
            if mask is nomask:
                return
            current_mask = self._mask = make_mask_none(self.shape, idtype)
        # No named fields.........
        if idtype.names is None:
            # Hardmask: don't unmask the data
            if self._hardmask:
                current_mask |= mask
            # Softmask: set everything to False
            else:
                current_mask.flat = mask
        # Named fields w/ ............
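        # For structured dtypes the mask is itself structured: coerce the
        # new mask to the mask dtype, broadcasting booleans and scalar
        # records as needed, before assigning it (field by field under a
        # hard mask).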
else: mdtype = current_mask.dtype mask = np.array(mask, copy=False) # Mask is a singleton if not mask.ndim: # It's a boolean : make a record if mask.dtype.kind == 'b': mask = np.array(tuple([mask.item()]*len(mdtype)), dtype=mdtype) # It's a record: make sure the dtype is correct else: mask = mask.astype(mdtype) # Mask is a sequence else: # Make sure the new mask is a ndarray with the proper dtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Hardmask: don't unmask the data if self._hardmask: for n in idtype.names: current_mask[n] |= mask[n] # Softmask: set everything to False else: current_mask.flat = mask # Reshape if needed if current_mask.shape: current_mask.shape = self.shape return _set_mask = __setmask__ #.... def _get_mask(self): """Return the current mask. """ # We could try to force a reshape, but that wouldn't work in some cases. # return self._mask.reshape(self.shape) return self._mask mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") def _get_recordmask(self): """ Return the mask of the records. A record is masked when all the fields are masked. """ _mask = ndarray.__getattribute__(self, '_mask').view(ndarray) if _mask.dtype.names is None: return _mask return np.all(flatten_structured_array(_mask), axis= -1) def _set_recordmask(self): """Return the mask of the records. A record is masked when all the fields are masked. """ raise NotImplementedError("Coming soon: setting the mask per records!") recordmask = property(fget=_get_recordmask) #............................................ def harden_mask(self): """ Force the mask to hard. Whether the mask of a masked array is hard or soft is determined by its `hardmask` property. `harden_mask` sets `hardmask` to True. See Also -------- hardmask """ self._hardmask = True return self def soften_mask(self): """ Force the mask to soft. Whether the mask of a masked array is hard or soft is determined by its `hardmask` property. `soften_mask` sets `hardmask` to False. See Also -------- hardmask """ self._hardmask = False return self hardmask = property(fget=lambda self: self._hardmask, doc="Hardness of the mask") def unshare_mask(self): """ Copy the mask and set the sharedmask flag to False. Whether the mask is shared between masked arrays can be seen from the `sharedmask` property. `unshare_mask` ensures the mask is not shared. A copy of the mask is only made if it was shared. See Also -------- sharedmask """ if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False return self sharedmask = property(fget=lambda self: self._sharedmask, doc="Share status of the mask (read-only).") def shrink_mask(self): """ Reduce a mask to nomask when possible. Parameters ---------- None Returns ------- None Examples -------- >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], [False, False]], dtype=bool) >>> x.shrink_mask() >>> x.mask False """ m = self._mask if m.ndim and not m.any(): self._mask = nomask return self #............................................ baseclass = property(fget=lambda self:self._baseclass, doc="Class of the underlying data (read-only).") def _get_data(self): """Return the current data, as a view of the original underlying data. """ return ndarray.view(self, self._baseclass) _data = property(fget=_get_data) data = property(fget=_get_data) def _get_flat(self): "Return a flat iterator." 
return MaskedIterator(self) # def _set_flat (self, value): "Set a flattened version of self to value." y = self.ravel() y[:] = value # flat = property(fget=_get_flat, fset=_set_flat, doc="Flat version of the array.") def get_fill_value(self): """ Return the filling value of the masked array. Returns ------- fill_value : scalar The filling value. Examples -------- >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... 999999 999999 1e+20 (1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.get_fill_value() -inf """ if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) return self._fill_value[()] def set_fill_value(self, value=None): """ Set the filling value of the masked array. Parameters ---------- value : scalar, optional The new filling value. Default is None, in which case a default based on the data type is used. See Also -------- ma.set_fill_value : Equivalent function. Examples -------- >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.fill_value -inf >>> x.set_fill_value(np.pi) >>> x.fill_value 3.1415926535897931 Reset to default: >>> x.set_fill_value() >>> x.fill_value 1e+20 """ target = _check_fill_value(value, self.dtype) _fill_value = self._fill_value if _fill_value is None: # Create the attribute if it was undefined self._fill_value = target else: # Don't overwrite the attribute, just fill it (for propagation) _fill_value[()] = target fill_value = property(fget=get_fill_value, fset=set_fill_value, doc="Filling value.") def filled(self, fill_value=None): """ Return a copy of self, with masked values filled with a given value. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries (None by default). If None, the `fill_value` attribute of the array is used instead. Returns ------- filled_array : ndarray A copy of ``self`` with invalid entries replaced by *fill_value* (be it the function argument or the attribute of ``self``. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([1, 2, -999, 4, -999]) >>> type(x.filled()) <type 'numpy.ndarray'> Subclassing is preserved. This means that if the data part of the masked array is a matrix, `filled` returns a matrix: >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.filled() matrix([[ 1, 999999], [999999, 4]]) """ m = self._mask if m is nomask: return self._data # if fill_value is None: fill_value = self.fill_value else: fill_value = _check_fill_value(fill_value, self.dtype) # if self is masked_singleton: return np.asanyarray(fill_value) # if m.dtype.names: result = self._data.copy() _recursive_filled(result, self._mask, fill_value) elif not m.any(): return self._data else: result = self._data.copy() try: np.putmask(result, m, fill_value) except (TypeError, AttributeError): fill_value = narray(fill_value, dtype=object) d = result.astype(object) result = np.choose(m, (d, fill_value)) except IndexError: #ok, if scalar if self._data.shape: raise elif m: result = np.array(fill_value, dtype=self.dtype) else: result = self._data return result def compressed(self): """ Return all the non-masked data as a 1-D array. Returns ------- data : ndarray A new `ndarray` holding the non-masked data is returned. Notes ----- The result is **not** a MaskedArray! 
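
        If the array has no masked values, the raveled data are returned in
        full: no entries are dropped.
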
Examples -------- >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) >>> type(x.compressed()) <type 'numpy.ndarray'> """ data = ndarray.ravel(self._data) if self._mask is not nomask: data = data.compress(np.logical_not(ndarray.ravel(self._mask))) return data def compress(self, condition, axis=None, out=None): """ Return `a` where condition is ``True``. If condition is a `MaskedArray`, missing values are considered as ``False``. Parameters ---------- condition : var Boolean 1-d array selecting which entries to return. If len(condition) is less than the size of a along the axis, then output is truncated to length of condition array. axis : {None, int}, optional Axis along which the operation must be performed. out : {None, ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Returns ------- result : MaskedArray A :class:`MaskedArray` object. Notes ----- Please note the difference with :meth:`compressed` ! The output of :meth:`compress` has a mask, the output of :meth:`compressed` does not. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print x [[1 -- 3] [-- 5 --] [7 -- 9]] >>> x.compress([1, 0, 1]) masked_array(data = [1 3], mask = [False False], fill_value=999999) >>> x.compress([1, 0, 1], axis=1) masked_array(data = [[1 3] [-- --] [7 9]], mask = [[False False] [ True True] [False False]], fill_value=999999) """ # Get the basic components (_data, _mask) = (self._data, self._mask) # Force the condition to a regular ndarray (forget the missing values...) condition = np.array(condition, copy=False, subok=False) # _new = _data.compress(condition, axis=axis, out=out).view(type(self)) _new._update_from(self) if _mask is not nomask: _new._mask = _mask.compress(condition, axis=axis) return _new #............................................ def __str__(self): """String representation. """ if masked_print_option.enabled(): f = masked_print_option if self is masked: return str(f) m = self._mask if m is nomask: res = self._data else: if m.shape == (): if m.dtype.names: m = m.view((bool, len(m.dtype))) if m.any(): r = np.array(self._data.tolist(), dtype=object) np.putmask(r, m, f) return str(tuple(r)) else: return str(self._data) elif m: return str(f) else: return str(self._data) # convert to object array to make filled work names = self.dtype.names if names is None: res = self._data.astype("|O8") res[m] = f else: rdtype = _recursive_make_descr(self.dtype, "|O8") res = self._data.astype(rdtype) _recursive_printoption(res, m, f) else: res = self.filled(self.fill_value) return str(res) def __repr__(self): """Literal string representation. 
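
        The representation is built from one of the ``_print_templates``
        defined above, chosen according to the dimensionality of the array
        and to whether its dtype is structured.
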
""" n = len(self.shape) name = repr(self._data).split('(')[0] parameters = dict(name=name, nlen=" " * len(name), data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype)) if self.dtype.names: if n <= 1: return _print_templates['short_flx'] % parameters return _print_templates['long_flx'] % parameters elif n <= 1: return _print_templates['short_std'] % parameters return _print_templates['long_std'] % parameters def __eq__(self, other): "Check whether other equals self elementwise" if self is masked: return masked omask = getattr(other, '_mask', nomask) if omask is nomask: check = ndarray.__eq__(self.filled(0), other) try: check = check.view(type(self)) check._mask = self._mask except AttributeError: # Dang, we have a bool instead of an array: return the bool return check else: odata = filled(other, 0) check = ndarray.__eq__(self.filled(0), odata).view(type(self)) if self._mask is nomask: check._mask = omask else: mask = mask_or(self._mask, omask) if mask.dtype.names: if mask.size > 1: axis = 1 else: axis = None try: mask = mask.view((bool_, len(self.dtype))).all(axis) except ValueError: mask = np.all([[f[n].all() for n in mask.dtype.names] for f in mask], axis=axis) check._mask = mask return check # def __ne__(self, other): "Check whether other doesn't equal self elementwise" if self is masked: return masked omask = getattr(other, '_mask', nomask) if omask is nomask: check = ndarray.__ne__(self.filled(0), other) try: check = check.view(type(self)) check._mask = self._mask except AttributeError: # In case check is a boolean (or a numpy.bool) return check else: odata = filled(other, 0) check = ndarray.__ne__(self.filled(0), odata).view(type(self)) if self._mask is nomask: check._mask = omask else: mask = mask_or(self._mask, omask) if mask.dtype.names: if mask.size > 1: axis = 1 else: axis = None try: mask = mask.view((bool_, len(self.dtype))).all(axis) except ValueError: mask = np.all([[f[n].all() for n in mask.dtype.names] for f in mask], axis=axis) check._mask = mask return check # def __add__(self, other): "Add other to self, and return a new masked array." return add(self, other) # def __radd__(self, other): "Add other to self, and return a new masked array." return add(self, other) # def __sub__(self, other): "Subtract other to self, and return a new masked array." return subtract(self, other) # def __rsub__(self, other): "Subtract other to self, and return a new masked array." return subtract(other, self) # def __mul__(self, other): "Multiply other by self, and return a new masked array." return multiply(self, other) # def __rmul__(self, other): "Multiply other by self, and return a new masked array." return multiply(self, other) # def __div__(self, other): "Divide other into self, and return a new masked array." return divide(self, other) # def __truediv__(self, other): "Divide other into self, and return a new masked array." return true_divide(self, other) # def __rtruediv__(self, other): "Divide other into self, and return a new masked array." return true_divide(other, self) # def __floordiv__(self, other): "Divide other into self, and return a new masked array." return floor_divide(self, other) # def __rfloordiv__(self, other): "Divide other into self, and return a new masked array." 
        return floor_divide(other, self)
    #
    def __pow__(self, other):
        "Raise self to the power other, masking the potential NaNs/Infs"
        return power(self, other)
    #
    def __rpow__(self, other):
        "Raise other to the power self, masking the potential NaNs/Infs"
        return power(other, self)
    #............................................
    def __iadd__(self, other):
        "Add other to self in-place."
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        else:
            if m is not nomask:
                self._mask += m
        ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
        return self
    #....
    def __isub__(self, other):
        "Subtract other from self in-place."
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        elif m is not nomask:
            self._mask += m
        ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
        return self
    #....
    def __imul__(self, other):
        "Multiply self by other in-place."
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        elif m is not nomask:
            self._mask += m
        ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
        return self
    #....
    def __idiv__(self, other):
        "Divide self by other in-place."
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.divide]
            other_data = np.where(dom_mask, fval, other_data)
#        self._mask = mask_or(self._mask, new_mask)
        self._mask |= new_mask
        ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
        return self
    #....
    def __ifloordiv__(self, other):
        "Floor divide self by other in-place."
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.floor_divide]
            other_data = np.where(dom_mask, fval, other_data)
#        self._mask = mask_or(self._mask, new_mask)
        self._mask |= new_mask
        ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
        return self
    #....
    def __itruediv__(self, other):
        "True divide self by other in-place."
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.true_divide]
            other_data = np.where(dom_mask, fval, other_data)
#        self._mask = mask_or(self._mask, new_mask)
        self._mask |= new_mask
        ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
        return self
    #...
    def __ipow__(self, other):
        "Raise self to the power other, in place."
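        # Invalid results (e.g. a negative base raised to a fractional power)
        # are silenced during the computation and masked afterwards.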
other_data = getdata(other) other_mask = getmask(other) err_status = np.geterr() try: np.seterr(divide='ignore', invalid='ignore') ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data)) finally: np.seterr(**err_status) invalid = np.logical_not(np.isfinite(self._data)) if invalid.any(): if self._mask is not nomask: self._mask |= invalid else: self._mask = invalid np.putmask(self._data, invalid, self.fill_value) new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) return self #............................................ def __float__(self): "Convert to float." if self.size > 1: raise TypeError("Only length-1 arrays can be converted "\ "to Python scalars") elif self._mask: warnings.warn("Warning: converting a masked element to nan.") return np.nan return float(self.item()) def __int__(self): "Convert to int." if self.size > 1: raise TypeError("Only length-1 arrays can be converted "\ "to Python scalars") elif self._mask: raise MaskError, 'Cannot convert masked element to a Python int.' return int(self.item()) def get_imag(self): """ Return the imaginary part of the masked array. The returned array is a view on the imaginary part of the `MaskedArray` whose `get_imag` method is called. Parameters ---------- None Returns ------- result : MaskedArray The imaginary part of the masked array. See Also -------- get_real, real, imag Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.get_imag() masked_array(data = [1.0 -- 1.6], mask = [False True False], fill_value = 1e+20) """ result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result imag = property(fget=get_imag, doc="Imaginary part.") def get_real(self): """ Return the real part of the masked array. The returned array is a view on the real part of the `MaskedArray` whose `get_real` method is called. Parameters ---------- None Returns ------- result : MaskedArray The real part of the masked array. See Also -------- get_imag, real, imag Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.get_real() masked_array(data = [1.0 -- 3.45], mask = [False True False], fill_value = 1e+20) """ result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result real = property(fget=get_real, doc="Real part") #............................................ def count(self, axis=None): """ Count the non-masked elements of the array along the given axis. Parameters ---------- axis : int, optional Axis along which to count the non-masked elements. If `axis` is `None`, all non-masked elements are counted. Returns ------- result : int or ndarray If `axis` is `None`, an integer count is returned. When `axis` is not `None`, an array with shape determined by the lengths of the remaining axes, is returned. See Also -------- count_masked : Count masked elements in array or along a given axis. Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(6).reshape((2, 3)) >>> a[1, :] = ma.masked >>> a masked_array(data = [[0 1 2] [-- -- --]], mask = [[False False False] [ True True True]], fill_value = 999999) >>> a.count() 3 When the `axis` keyword is specified an array of appropriate size is returned. 
>>> a.count(axis=0) array([1, 1, 1]) >>> a.count(axis=1) array([3, 0]) """ m = self._mask s = self.shape ls = len(s) if m is nomask: if ls == 0: return 1 if ls == 1: return s[0] if axis is None: return self.size else: n = s[axis] t = list(s) del t[axis] return np.ones(t) * n n1 = np.size(m, axis) n2 = m.astype(int).sum(axis) if axis is None: return (n1 - n2) else: return narray(n1 - n2) #............................................ flatten = _arraymethod('flatten') # def ravel(self): """ Returns a 1D version of self, as a view. Returns ------- MaskedArray Output view is of shape ``(self.size,)`` (or ``(np.ma.product(self.shape),)``). Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print x [[1 -- 3] [-- 5 --] [7 -- 9]] >>> print x.ravel() [1 -- 3 -- 5 -- 7 -- 9] """ r = ndarray.ravel(self._data).view(type(self)) r._update_from(self) if self._mask is not nomask: r._mask = ndarray.ravel(self._mask).reshape(r.shape) else: r._mask = nomask return r # repeat = _arraymethod('repeat') # def reshape (self, *s, **kwargs): """ Give a new shape to the array without changing its data. Returns a masked array containing the same data, but with a new shape. The result is a view on the original array; if this is not possible, a ValueError is raised. Parameters ---------- shape : int or tuple of ints The new shape should be compatible with the original shape. If an integer is supplied, then the result will be a 1-D array of that length. order : {'C', 'F'}, optional Determines whether the array data should be viewed as in C (row-major) or FORTRAN (column-major) order. Returns ------- reshaped_array : array A new view on the array. See Also -------- reshape : Equivalent function in the masked array module. numpy.ndarray.reshape : Equivalent method on ndarray object. numpy.reshape : Equivalent function in the NumPy module. Notes ----- The reshaping operation cannot guarantee that a copy will not be made, to modify the shape in place, use ``a.shape = s`` Examples -------- >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> print x [[-- 2] [3 --]] >>> x = x.reshape((4,1)) >>> print x [[--] [2] [3] [--]] """ kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask if mask is not nomask: result._mask = mask.reshape(*s, **kwargs) return result # def resize(self, newshape, refcheck=True, order=False): """ .. warning:: This method does nothing, except raise a ValueError exception. A masked array does not own its data and therefore cannot safely be resized in place. Use the `numpy.ma.resize` function instead. This method is difficult to implement safely and may be deprecated in future releases of NumPy. """ # Note : the 'order' keyword looks broken, let's just drop it # try: # ndarray.resize(self, newshape, refcheck=refcheck) # if self.mask is not nomask: # self._mask.resize(newshape, refcheck=refcheck) # except ValueError: # raise ValueError("Cannot resize an array that has been referenced " # "or is referencing another array in this way.\n" # "Use the numpy.ma.resize function.") # return None errmsg = "A masked array does not own its data "\ "and therefore cannot be resized.\n" \ "Use the numpy.ma.resize function instead." raise ValueError(errmsg) # def put(self, indices, values, mode='raise'): """ Set storage-indexed locations to corresponding values. Sets self._data.flat[n] = values[n] for each n in indices. If `values` is shorter than `indices` then it will repeat. 
If `values` has some masked values, the initial mask is updated in consequence, else the corresponding values are unmasked. Parameters ---------- indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error. 'wrap' : wrap around. 'clip' : clip to the range. Notes ----- `values` can be a scalar or length 1 array. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print x [[1 -- 3] [-- 5 --] [7 -- 9]] >>> x.put([0,4,8],[10,20,30]) >>> print x [[10 -- 3] [-- 20 --] [7 -- 30]] >>> x.put(4,999) >>> print x [[10 -- 3] [-- 999 --] [7 -- 30]] """ m = self._mask # Hard mask: Get rid of the values/indices that fall on masked data if self._hardmask and self._mask is not nomask: mask = self._mask[indices] indices = narray(indices, copy=False) values = narray(values, copy=False, subok=True) values.resize(indices.shape) indices = indices[~mask] values = values[~mask] #.... self._data.put(indices, values, mode=mode) #.... if m is nomask: m = getmask(values) else: m = m.copy() if getmask(values) is nomask: m.put(indices, False, mode=mode) else: m.put(indices, values._mask, mode=mode) m = make_mask(m, copy=False, shrink=True) self._mask = m #............................................ def ids (self): """ Return the addresses of the data and mask areas. Parameters ---------- None Examples -------- >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) If the array has no mask, the address of `nomask` is returned. This address is typically not close to the data in memory: >>> x = np.ma.array([1, 2, 3]) >>> x.ids() (166691080, 3083169284L) """ if self._mask is nomask: return (self.ctypes.data, id(nomask)) return (self.ctypes.data, self._mask.ctypes.data) def iscontiguous(self): """ Return a boolean indicating whether the data is contiguous. Parameters ---------- None Examples -------- >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True `iscontiguous` returns one of the flags of the masked array: >>> x.flags C_CONTIGUOUS : True F_CONTIGUOUS : True OWNDATA : False WRITEABLE : True ALIGNED : True UPDATEIFCOPY : False """ return self.flags['CONTIGUOUS'] #............................................ def all(self, axis=None, out=None): """ Check if all of the elements of `a` are true. Performs a :func:`logical_and` over the given axis and returns the result. Masked values are considered as True during computation. For convenience, the output array is masked where ALL the values along the current axis are masked: if the output would have been a scalar and that all the values are masked, then the output is `masked`. Parameters ---------- axis : {None, integer} Axis to perform the operation over. If None, perform over flattened array. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. 
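
        Returns
        -------
        all : MaskedArray or scalar
            The result, masked where all the values along the given axis are
            masked.  If the output would have been a scalar and all the
            values are masked, `masked` is returned.
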
See Also -------- all : equivalent function Examples -------- >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) >>> (a.all() is np.ma.masked) True """ mask = _check_mask_axis(self._mask, axis) if out is None: d = self.filled(True).all(axis=axis).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: return masked return d self.filled(True).all(axis=axis, out=out) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out def any(self, axis=None, out=None): """ Check if any of the elements of `a` are true. Performs a logical_or over the given axis and returns the result. Masked values are considered as False during computation. Parameters ---------- axis : {None, integer} Axis to perform the operation over. If None, perform over flattened array and return a scalar. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. See Also -------- any : equivalent function """ mask = _check_mask_axis(self._mask, axis) if out is None: d = self.filled(False).any(axis=axis).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: d = masked return d self.filled(False).any(axis=axis, out=out) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out def nonzero(self): """ Return the indices of unmasked elements that are not zero. Returns a tuple of arrays, one for each dimension, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: a[a.nonzero()] To group the indices by element, rather than dimension, use instead:: np.transpose(a.nonzero()) The result of this is always a 2d array, with a row for each non-zero element. Parameters ---------- None Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- numpy.nonzero : Function operating on ndarrays. flatnonzero : Return indices that are non-zero in the flattened version of the input array. ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x masked_array(data = [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.]], mask = False, fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) Masked elements are ignored. >>> x[1, 1] = ma.masked >>> x masked_array(data = [[1.0 0.0 0.0] [0.0 -- 0.0] [0.0 0.0 1.0]], mask = [[False False False] [False True False] [False False False]], fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) Indices can also be grouped by element. >>> np.transpose(x.nonzero()) array([[0, 0], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, ma.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 masked_array(data = [[False False False] [ True True True] [ True True True]], mask = False, fill_value=999999) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the condition array can also be called. 
>>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ return narray(self.filled(0), copy=False).nonzero() def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ #!!!: implement out + test! m = self._mask if m is nomask: result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, axis2=axis2, out=out) return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=None, out=out) trace.__doc__ = ndarray.trace.__doc__ def sum(self, axis=None, dtype=None, out=None): """ Return the sum of the array elements over the given axis. Masked elements are set to 0 internally. Parameters ---------- axis : {None, -1, int}, optional Axis along which the sum is computed. The default (`axis` = None) is to compute over the flattened array. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are summed. If dtype has the value None and the type of a is an integer type of precision less than the default platform integer, then the default platform integer precision is used. Otherwise, the dtype is the same as that of a. out : {None, ndarray}, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. Returns ------- sum_along_axis : MaskedArray or scalar An array with the same shape as self, with the specified axis removed. If self is a 0-d array, or if `axis` is None, a scalar is returned. If an output array is specified, a reference to `out` is returned. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print x [[1 -- 3] [-- 5 --] [7 -- 9]] >>> print x.sum() 25 >>> print x.sum(axis=1) [4 5 16] >>> print x.sum(axis=0) [8 5 12] >>> print type(x.sum(axis=0, dtype=np.int64)[0]) <type 'numpy.int64'> """ _mask = ndarray.__getattribute__(self, '_mask') newmask = _check_mask_axis(_mask, axis) # No explicit output if out is None: result = self.filled(0).sum(axis, dtype=dtype) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(0).sum(axis, dtype=dtype, out=out) if isinstance(out, MaskedArray): outmask = getattr(out, '_mask', nomask) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out def cumsum(self, axis=None, dtype=None, out=None): """ Return the cumulative sum of the elements along the given axis. The cumulative sum is calculated over the flattened array by default, otherwise over the specified axis. Masked values are set to 0 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Parameters ---------- axis : {None, -1, int}, optional Axis along which the sum is computed. The default (`axis` = None) is to compute over the flattened array. `axis` may be negative, in which case it counts from the last to the first axis. dtype : {None, dtype}, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. 
out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. Returns ------- cumsum : ndarray. A new array holding the result is returned unless ``out`` is specified, in which case a reference to ``out`` is returned. Notes ----- The mask is lost if `out` is not a valid :class:`MaskedArray` ! Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> print marr.cumsum() [0 1 3 -- -- -- 9 16 24 33] """ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self.mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def prod(self, axis=None, dtype=None, out=None): """ Return the product of the array elements over the given axis. Masked elements are set to 1 internally for computation. Parameters ---------- axis : {None, int}, optional Axis over which the product is taken. If None is used, then the product is over all the array elements. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are multiplied. If ``dtype`` has the value ``None`` and the type of a is an integer type of precision less than the default platform integer, then the default platform integer precision is used. Otherwise, the dtype is the same as that of a. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Returns ------- product_along_axis : {array, scalar}, see dtype parameter above. Returns an array whose shape is the same as a with the specified axis removed. Returns a 0d array when a is 1d or axis=None. Returns a reference to the specified output array if specified. See Also -------- prod : equivalent function Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> np.prod([1.,2.]) 2.0 >>> np.prod([1.,2.], dtype=np.int32) 2 >>> np.prod([[1.,2.],[3.,4.]]) 24.0 >>> np.prod([[1.,2.],[3.,4.]], axis=1) array([ 2., 12.]) """ _mask = ndarray.__getattribute__(self, '_mask') newmask = _check_mask_axis(_mask, axis) # No explicit output if out is None: result = self.filled(1).prod(axis, dtype=dtype) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(1).prod(axis, dtype=dtype, out=out) if isinstance(out, MaskedArray): outmask = getattr(out, '_mask', nomask) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out product = prod def cumprod(self, axis=None, dtype=None, out=None): """ Return the cumulative product of the elements along the given axis. The cumulative product is taken over the flattened array by default, otherwise over the specified axis. Masked values are set to 1 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Parameters ---------- axis : {None, -1, int}, optional Axis along which the product is computed. The default (`axis` = None) is to compute over the flattened array. 
dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are multiplied. If ``dtype`` has the value ``None`` and the type of ``a`` is an integer type of precision less than the default platform integer, then the default platform integer precision is used. Otherwise, the dtype is the same as that of ``a``. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. Returns ------- cumprod : ndarray A new array holding the result is returned unless out is specified, in which case a reference to out is returned. Notes ----- The mask is lost if `out` is not a valid MaskedArray ! Arithmetic is modular when using integer types, and no error is raised on overflow. """ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self._mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def mean(self, axis=None, dtype=None, out=None): """ Returns the average of the array elements. Masked entries are ignored. The average is taken over the flattened array by default, otherwise over the specified axis. Refer to `numpy.mean` for the full documentation. Parameters ---------- a : array_like Array containing numbers whose mean is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the means are computed. The default is to compute the mean of the flattened array. dtype : dtype, optional Type to use in computing the mean. For integer inputs, the default is float64; for floating point, inputs it is the same as the input dtype. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Returns ------- mean : ndarray, see dtype parameter above If `out=None`, returns a new array containing the mean values, otherwise a reference to the output array is returned. See Also -------- numpy.ma.mean : Equivalent function. numpy.mean : Equivalent function on non-masked arrays. numpy.ma.average: Weighted average. Examples -------- >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data = [1 2 --], mask = [False False True], fill_value = 999999) >>> a.mean() 1.5 """ if self._mask is nomask: result = super(MaskedArray, self).mean(axis=axis, dtype=dtype) else: dsum = self.sum(axis=axis, dtype=dtype) cnt = self.count(axis=axis) if cnt.shape == () and (cnt == 0): result = masked else: result = dsum * 1. / cnt if out is not None: out.flat = result if isinstance(out, MaskedArray): outmask = getattr(out, '_mask', nomask) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = getattr(result, '_mask', nomask) return out return result def anom(self, axis=None, dtype=None): """ Compute the anomalies (deviations from the arithmetic mean) along the given axis. Returns an array of anomalies, with the same shape as the input and where the arithmetic mean is computed along the given axis. Parameters ---------- axis : int, optional Axis over which the anomalies are taken. The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type the default is float32; for arrays of float types it is the same as the array type. 
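
        Returns
        -------
        anom : MaskedArray
            Array of anomalies, with the same shape and type as the input
            (section added for completeness, summarizing the code below).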
        See Also
        --------
        mean : Compute the mean of the array.

        Examples
        --------
        >>> a = np.ma.array([1,2,3])
        >>> a.anom()
        masked_array(data = [-1.  0.  1.],
                     mask = False,
               fill_value = 1e+20)

        """
        m = self.mean(axis, dtype)
        if not axis:
            return (self - m)
        else:
            return (self - expand_dims(m, axis))

    def var(self, axis=None, dtype=None, out=None, ddof=0):
        ""
        # Easy case: nomask, business as usual
        if self._mask is nomask:
            return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
        # Some data are masked, yay!
        cnt = self.count(axis=axis) - ddof
        danom = self.anom(axis=axis, dtype=dtype)
        if iscomplexobj(self):
            danom = umath.absolute(danom) ** 2
        else:
            danom *= danom
        dvar = divide(danom.sum(axis), cnt).view(type(self))
        # Apply the mask if it's not a scalar
        if dvar.ndim:
            dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
            dvar._update_from(self)
        elif getattr(dvar, '_mask', False):
            # Make sure that masked is returned when the scalar is masked.
            dvar = masked
            if out is not None:
                if isinstance(out, MaskedArray):
                    # Give the output a defined value before masking it all.
                    out.flat = 0
                    out.__setmask__(True)
                elif out.dtype.kind in 'biu':
                    errmsg = "Masked data information would be lost in one or "\
                             "more location."
                    raise MaskError(errmsg)
                else:
                    out.flat = np.nan
                return out
        # In case we have an explicit output
        if out is not None:
            # Set the data
            out.flat = dvar
            # Set the mask if needed
            if isinstance(out, MaskedArray):
                out.__setmask__(dvar.mask)
            return out
        return dvar
    var.__doc__ = np.var.__doc__

    def std(self, axis=None, dtype=None, out=None, ddof=0):
        ""
        dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
        if dvar is not masked:
            dvar = sqrt(dvar)
            if out is not None:
                out **= 0.5
                return out
        return dvar
    std.__doc__ = np.std.__doc__

    #............................................
    def round(self, decimals=0, out=None):
        """
        Return each element rounded to the given number of decimals.

        Refer to `numpy.around` for full documentation.

        See Also
        --------
        numpy.around : equivalent function

        """
        result = self._data.round(decimals=decimals, out=out).view(type(self))
        result._mask = self._mask
        result._update_from(self)
        # No explicit output: we're done
        if out is None:
            return result
        if isinstance(out, MaskedArray):
            out.__setmask__(self._mask)
        return out
    round.__doc__ = ndarray.round.__doc__

    #............................................
    def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
        """
        Return an ndarray of indices that sort the array along the
        specified axis.  Masked values are filled beforehand to
        `fill_value`.

        Parameters
        ----------
        axis : int, optional
            Axis along which to sort.  If None (the default), the flattened
            array is used.
        fill_value : var, optional
            Value used to fill the array before sorting.
            The default is the `fill_value` attribute of the input array.
        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
            Sorting algorithm.
        order : list, optional
            When `a` is an array with fields defined, this argument specifies
            which fields to compare first, second, etc.  Not all fields need
            be specified.

        Returns
        -------
        index_array : ndarray, int
            Array of indices that sort `a` along the specified axis.
            In other words, ``a[index_array]`` yields a sorted `a`.

        See Also
        --------
        sort : Describes sorting algorithms used.
        lexsort : Indirect stable sort with multiple keys.
        ndarray.sort : Inplace sort.

        Notes
        -----
        See `sort` for notes on the different sorting algorithms.
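
        By default, masked values are filled with ``default_fill_value(self)``,
        which is a large positive value for numeric dtypes, so masked entries
        normally end up sorted to the end.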
Examples -------- >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data = [3 2 --], mask = [False False True], fill_value = 999999) >>> a.argsort() array([1, 0, 2]) """ if fill_value is None: fill_value = default_fill_value(self) d = self.filled(fill_value).view(ndarray) return d.argsort(axis=axis, kind=kind, order=order) def argmin(self, axis=None, fill_value=None, out=None): """ Return array of indices to the minimum values along the given axis. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : {var}, optional Value used to fill in the masked values. If None, the output of minimum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- {ndarray, scalar} If multi-dimension input, returns a new ndarray of indices to the minimum values along the given axis. Otherwise, returns a scalar of index to the minimum values along the given axis. Examples -------- >>> x = np.ma.array(arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) >>> print x [[-- --] [2 3]] >>> print x.argmin(axis=0, fill_value=-1) [0 0] >>> print x.argmin(axis=0, fill_value=9) [1 1] """ if fill_value is None: fill_value = minimum_fill_value(self) d = self.filled(fill_value).view(ndarray) return d.argmin(axis, out=out) def argmax(self, axis=None, fill_value=None, out=None): """ Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : {var}, optional Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- index_array : {integer_array} Examples -------- >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 >>> a.argmax(0) array([1, 1, 1]) >>> a.argmax(1) array([2, 2]) """ if fill_value is None: fill_value = maximum_fill_value(self._data) d = self.filled(fill_value).view(ndarray) return d.argmax(axis, out=out) def sort(self, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None): """ Sort the array, in-place Parameters ---------- a : array_like Array to be sorted. axis : int, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. Default is 'quicksort'. order : list, optional When `a` is a structured array, this argument specifies which fields to compare first, second, and so on. This list does not need to include all of the fields. endwith : {True, False}, optional Whether missing values (if any) should be forced in the upper indices (at the end of the array) (True) or lower indices (at the beginning). fill_value : {var}, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- ndarray.sort : Method to sort an array in-place. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. 
searchsorted : Find elements in a sorted array. Notes ----- See ``sort`` for notes on the different sorting algorithms. Examples -------- >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() >>> print a [1 3 5 -- --] >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Put missing values in the front >>> a.sort(endwith=False) >>> print a [-- -- 1 3 5] >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # fill_value takes over endwith >>> a.sort(endwith=False, fill_value=3) >>> print a [1 -- -- 3 5] """ if self._mask is nomask: ndarray.sort(self, axis=axis, kind=kind, order=order) else: if self is masked: return self if fill_value is None: if endwith: filler = minimum_fill_value(self) else: filler = maximum_fill_value(self) else: filler = fill_value idx = np.indices(self.shape) idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind, order=order) idx_l = idx.tolist() tmp_mask = self._mask[idx_l].flat tmp_data = self._data[idx_l].flat self._data.flat = tmp_data self._mask.flat = tmp_mask return #............................................ def min(self, axis=None, out=None, fill_value=None): """ Return the minimum along a given axis. Parameters ---------- axis : {None, int}, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : {var}, optional Value used to fill in the masked values. If None, use the output of `minimum_fill_value`. Returns ------- amin : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- minimum_fill_value Returns the minimum filling value for a given datatype. """ _mask = ndarray.__getattribute__(self, '_mask') newmask = _check_mask_axis(_mask, axis) if fill_value is None: fill_value = minimum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).min(axis=axis, out=out).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: np.putmask(result, newmask, result.fill_value) elif newmask: result = masked return result # Explicit output result = self.filled(fill_value).min(axis=axis, out=out) if isinstance(out, MaskedArray): outmask = getattr(out, '_mask', nomask) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.putmask(out, newmask, np.nan) return out def mini(self, axis=None): """ Return the array minimum along the specified axis. Parameters ---------- axis : int, optional The axis along which to find the minima. Default is None, in which case the minimum value in the whole array is returned. Returns ------- min : scalar or MaskedArray If `axis` is None, the result is a scalar. Otherwise, if `axis` is given and the array is at least 2-D, the result is a masked array with dimension one smaller than the array on which `mini` is called. Examples -------- >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) >>> print x [[0 --] [2 3] [4 --]] >>> x.mini() 0 >>> x.mini(axis=0) masked_array(data = [0 3], mask = [False False], fill_value = 999999) >>> print x.mini(axis=1) [0 2 4] """ if axis is None: return minimum(self) else: return minimum.reduce(self, axis) #........................ 
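    # A small illustrative sketch (hypothetical session, values worked out by
    # hand): `mini` defers to the module-level `minimum` extrema object
    # defined further down, so ``x.mini(0)`` and ``minimum.reduce(x, 0)``
    # are interchangeable:
    #     >>> x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
    #     >>> x.mini(axis=0).tolist()
    #     [1, 4]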
    def max(self, axis=None, out=None, fill_value=None):
        """
        Return the maximum along a given axis.

        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to operate.  By default, ``axis`` is None and the
            flattened input is used.
        out : array_like, optional
            Alternative output array in which to place the result.  Must
            be of the same shape and buffer length as the expected output.
        fill_value : {var}, optional
            Value used to fill in the masked values.
            If None, use the output of maximum_fill_value().

        Returns
        -------
        amax : array_like
            New array holding the result.
            If ``out`` was specified, ``out`` is returned.

        See Also
        --------
        maximum_fill_value
            Returns the maximum filling value for a given datatype.

        """
        _mask = ndarray.__getattribute__(self, '_mask')
        newmask = _check_mask_axis(_mask, axis)
        if fill_value is None:
            fill_value = maximum_fill_value(self)
        # No explicit output
        if out is None:
            result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
            if result.ndim:
                # Set the mask
                result.__setmask__(newmask)
                # Get rid of Infs
                if newmask.ndim:
                    np.putmask(result, newmask, result.fill_value)
            elif newmask:
                result = masked
            return result
        # Explicit output
        result = self.filled(fill_value).max(axis=axis, out=out)
        if isinstance(out, MaskedArray):
            outmask = getattr(out, '_mask', nomask)
            if (outmask is nomask):
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        else:
            if out.dtype.kind in 'biu':
                errmsg = "Masked data information would be lost in one or more"\
                         " location."
                raise MaskError(errmsg)
            np.putmask(out, newmask, np.nan)
        return out

    def ptp(self, axis=None, out=None, fill_value=None):
        """
        Return (maximum - minimum) along the given dimension
        (i.e. peak-to-peak value).

        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to find the peaks.  If None (default) the
            flattened array is used.
        out : {None, array_like}, optional
            Alternative output array in which to place the result. It must
            have the same shape and buffer length as the expected output
            but the type will be cast if necessary.
        fill_value : {var}, optional
            Value used to fill in the masked values.

        Returns
        -------
        ptp : ndarray
            A new array holding the result, unless ``out`` was
            specified, in which case a reference to ``out`` is returned.
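
        Examples
        --------
        A hand-checked sketch (the masked entry is ignored by both the
        maximum and the minimum):

        >>> x = np.ma.array([1, 5, 3], mask=[0, 0, 1])
        >>> x.ptp()
        4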
""" if out is None: result = self.max(axis=axis, fill_value=fill_value) result -= self.min(axis=axis, fill_value=fill_value) return result out.flat = self.max(axis=axis, out=out, fill_value=fill_value) out -= self.min(axis=axis, fill_value=fill_value) return out def take(self, indices, axis=None, out=None, mode='raise'): """ """ (_data, _mask) = (self._data, self._mask) cls = type(self) # Make sure the indices are not masked maskindices = getattr(indices, '_mask', nomask) if maskindices is not nomask: indices = indices.filled(0) # Get the data if out is None: out = _data.take(indices, axis=axis, mode=mode).view(cls) else: np.take(_data, indices, axis=axis, mode=mode, out=out) # Get the mask if isinstance(out, MaskedArray): if _mask is nomask: outmask = maskindices else: outmask = _mask.take(indices, axis=axis, mode=mode) outmask |= maskindices out.__setmask__(outmask) return out # Array methods --------------------------------------- copy = _arraymethod('copy') diagonal = _arraymethod('diagonal') transpose = _arraymethod('transpose') T = property(fget=lambda self:self.transpose()) swapaxes = _arraymethod('swapaxes') clip = _arraymethod('clip', onmask=False) copy = _arraymethod('copy') squeeze = _arraymethod('squeeze') #-------------------------------------------- def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to `fill_value`. If `fill_value` is None, the corresponding entries in the output list will be ``None``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. Examples -------- >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] """ _mask = self._mask # No mask ? Just return .data.tolist ? if _mask is nomask: return self._data.tolist() # Explicit fill_value: fill the array and get the list if fill_value is not None: return self.filled(fill_value).tolist() # Structured array ............. names = self.dtype.names if names: result = self._data.astype([(_, object) for _ in names]) for n in names: result[n][_mask[n]] = None return result.tolist() # Standard arrays ............... if _mask is nomask: return [None] # Set temps to save time when dealing w/ marrays... inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None result.shape = inishape return result.tolist() # if fill_value is not None: # return self.filled(fill_value).tolist() # result = self.filled().tolist() # # Set temps to save time when dealing w/ mrecarrays... # _mask = self._mask # if _mask is nomask: # return result # nbdims = self.ndim # dtypesize = len(self.dtype) # if nbdims == 0: # return tuple([None] * dtypesize) # elif nbdims == 1: # maskedidx = _mask.nonzero()[0].tolist() # if dtypesize: # nodata = tuple([None] * dtypesize) # else: # nodata = None # [operator.setitem(result, i, nodata) for i in maskedidx] # else: # for idx in zip(*[i.tolist() for i in _mask.nonzero()]): # tmp = result # for i in idx[:-1]: # tmp = tmp[i] # tmp[idx[-1]] = None # return result #........................ def tostring(self, fill_value=None, order='C'): """ Return the array data as a string containing the raw bytes in the array. 
The array is filled with a fill value before the string conversion. Parameters ---------- fill_value : scalar, optional Value used to fill in the masked values. Deafult is None, in which case `MaskedArray.fill_value` is used. order : {'C','F','A'}, optional Order of the data item in the copy. Default is 'C'. - 'C' -- C order (row major). - 'F' -- Fortran order (column major). - 'A' -- Any, current order of array. - None -- Same as 'A'. See Also -------- ndarray.tostring tolist, tofile Notes ----- As for `ndarray.tostring`, information about the shape, dtype, etc., but also about `fill_value`, will be lost. Examples -------- >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tostring() '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00' """ return self.filled(fill_value).tostring(order=order) #........................ def tofile(self, fid, sep="", format="%s"): """ Save a masked array to a file in binary format. .. warning:: This function is not implemented yet. Raises ------ NotImplementedError When `tofile` is called. """ raise NotImplementedError("Not implemented yet, sorry...") def toflex(self): """ Transforms a masked array into a flexible-type array. The flexible type array that is returned will have two fields: * the ``_data`` field stores the ``_data`` part of the array. * the ``_mask`` field stores the ``_mask`` part of the array. Parameters ---------- None Returns ------- record : ndarray A new flexible-type `ndarray` with two fields: the first element containing a value, the second element containing the corresponding mask boolean. The returned record shape matches self.shape. Notes ----- A side-effect of transforming a masked array into a flexible `ndarray` is that meta information (``fill_value``, ...) will be lost. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print x [[1 -- 3] [-- 5 --] [7 -- 9]] >>> print x.toflex() [[(1, False) (2, True) (3, False)] [(4, True) (5, False) (6, True)] [(7, False) (8, True) (9, False)]] """ # Get the basic dtype .... ddtype = self.dtype # Make sure we have a mask _mask = self._mask if _mask is None: _mask = make_mask_none(self.shape, ddtype) # And get its dtype mdtype = self._mask.dtype # record = np.ndarray(shape=self.shape, dtype=[('_data', ddtype), ('_mask', mdtype)]) record['_data'] = self._data record['_mask'] = self._mask return record torecords = toflex #-------------------------------------------- # Pickling def __getstate__(self): """Return the internal state of the masked array, for pickling purposes. """ cf = 'CF'[self.flags.fnc] state = (1, self.shape, self.dtype, self.flags.fnc, self._data.tostring(cf), #self._data.tolist(), getmaskarray(self).tostring(cf), #getmaskarray(self).tolist(), self._fill_value, ) return state # def __setstate__(self, state): """Restore the internal state of the masked array, for pickling purposes. ``state`` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask. """ (_, shp, typ, isf, raw, msk, flv) = state ndarray.__setstate__(self, (shp, typ, isf, raw)) self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv # def __reduce__(self): """Return a 3-tuple for pickling a MaskedArray. 
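
        The tuple holds the reconstructor (``_mareconstruct``), the arguments
        needed to build an empty placeholder array, and the state produced by
        ``__getstate__``, as expected by the pickle protocol.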
""" return (_mareconstruct, (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) # def __deepcopy__(self, memo=None): from copy import deepcopy copied = MaskedArray.__new__(type(self), self, copy=True) if memo is None: memo = {} memo[id(self)] = copied for (k, v) in self.__dict__.iteritems(): copied.__dict__[k] = deepcopy(v, memo) return copied def _mareconstruct(subtype, baseclass, baseshape, basetype,): """Internal function that builds a new MaskedArray from the information stored in a pickle. """ _data = ndarray.__new__(baseclass, baseshape, basetype) _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) class mvoid(MaskedArray): """ Fake a 'void' object to use for masked array with structured dtypes. """ # def __new__(self, data, mask=nomask, dtype=None, fill_value=None): dtype = dtype or data.dtype _data = ndarray((), dtype=dtype) _data[()] = data _data = _data.view(self) if mask is not nomask: if isinstance(mask, np.void): _data._mask = mask else: try: # Mask is already a 0D array _data._mask = np.void(mask) except TypeError: # Transform the mask to a void mdtype = make_mask_descr(dtype) _data._mask = np.array(mask, dtype=mdtype)[()] if fill_value is not None: _data.fill_value = fill_value return _data def _get_data(self): # Make sure that the _data part is a np.void return self.view(ndarray)[()] _data = property(fget=_get_data) def __getitem__(self, indx): "Get the index..." m = self._mask if m is not nomask and m[indx]: return masked return self._data[indx] def __setitem__(self, indx, value): self._data[indx] = value self._mask[indx] |= getattr(value, "_mask", False) def __str__(self): m = self._mask if (m is nomask): return self._data.__str__() m = tuple(m) if (not any(m)): return self._data.__str__() r = self._data.tolist() p = masked_print_option if not p.enabled(): p = 'N/A' else: p = str(p) r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)] return "(%s)" % ", ".join(r) def __repr__(self): m = self._mask if (m is nomask): return self._data.__repr__() m = tuple(m) if not any(m): return self._data.__repr__() p = masked_print_option if not p.enabled(): return self.filled(self.fill_value).__repr__() p = str(p) r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)] return "(%s)" % ", ".join(r) def __iter__(self): "Defines an iterator for mvoid" (_data, _mask) = (self._data, self._mask) if _mask is nomask: for d in _data: yield d else: for (d, m) in zip(_data, _mask): if m: yield masked else: yield d def filled(self, fill_value=None): """ Return a copy with masked fields filled with a given value. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries (None by default). If None, the `fill_value` attribute is used instead. Returns ------- filled_void: A `np.void` object See Also -------- MaskedArray.filled """ return asarray(self).filled(fill_value)[()] def tolist(self): """ Transforms the mvoid object into a tuple. Masked fields are replaced by None. 
Returns ------- returned_tuple Tuple of fields """ _mask = self._mask if _mask is nomask: return self._data.tolist() result = [] for (d, m) in zip(self._data, self._mask): if m: result.append(None) else: # .item() makes sure we return a standard Python object result.append(d.item()) return tuple(result) #####-------------------------------------------------------------------------- #---- --- Shortcuts --- #####--------------------------------------------------------------------------- def isMaskedArray(x): """ Test whether input is an instance of MaskedArray. This function returns True if `x` is an instance of MaskedArray and returns False otherwise. Any object is accepted as input. Parameters ---------- x : object Object to test. Returns ------- result : bool True if `x` is a MaskedArray. See Also -------- isMA : Alias to isMaskedArray. isarray : Alias to isMaskedArray. Examples -------- >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> m = ma.masked_values(a, 0) >>> m masked_array(data = [[1.0 -- --] [-- 1.0 --] [-- -- 1.0]], mask = [[False True True] [ True False True] [ True True False]], fill_value=0.0) >>> ma.isMaskedArray(a) False >>> ma.isMaskedArray(m) True >>> ma.isMaskedArray([0, 1, 2]) False """ return isinstance(x, MaskedArray) isarray = isMaskedArray isMA = isMaskedArray #backward compatibility # We define the masked singleton as a float for higher precedence... # Note that it can be tricky sometimes w/ type comparison class MaskedConstant(MaskedArray): # _data = data = np.array(0.) _mask = mask = np.array(True) _baseclass = ndarray # def __new__(self): return self._data.view(self) # def __array_finalize__(self, obj): return # def __array_wrap__(self, obj): return self # def __str__(self): return str(masked_print_option._display) # def __repr__(self): return 'masked' # def flatten(self): return masked_array([self._data], dtype=float, mask=[True]) masked = masked_singleton = MaskedConstant() masked_array = MaskedArray def array(data, dtype=None, copy=False, order=False, mask=nomask, fill_value=None, keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0, ): """array(data, dtype=None, copy=False, order=False, mask=nomask, fill_value=None, keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0) Acts as shortcut to MaskedArray, with options in a different order for convenience. And backwards compatibility... """ #!!!: we should try to put 'order' somwehere return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink) array.__doc__ = masked_array.__doc__ def is_masked(x): """ Determine whether input has masked values. Accepts any object as input, but always returns False unless the input is a MaskedArray containing masked values. Parameters ---------- x : array_like Array to check for masked values. Returns ------- result : bool True if `x` is a MaskedArray with masked values, False otherwise. Examples -------- >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x masked_array(data = [-- 1 -- 2 3], mask = [ True False True False False], fill_value=999999) >>> ma.is_masked(x) True >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) >>> x masked_array(data = [0 1 0 2 3], mask = False, fill_value=999999) >>> ma.is_masked(x) False Always returns False if `x` isn't a MaskedArray. 
>>> x = [False, True, False] >>> ma.is_masked(x) False >>> x = 'a string' >>> ma.is_masked(x) False """ m = getmask(x) if m is nomask: return False elif m.any(): return True return False #####--------------------------------------------------------------------------- #---- --- Extrema functions --- #####--------------------------------------------------------------------------- class _extrema_operation(object): """ Generic class for maximum/minimum functions. .. note:: This is the base class for `_maximum_operation` and `_minimum_operation`. """ def __call__(self, a, b=None): "Executes the call behavior." if b is None: return self.reduce(a) return where(self.compare(a, b), a, b) #......... def reduce(self, target, axis=None): "Reduce target along the given axis." target = narray(target, copy=False, subok=True) m = getmask(target) if axis is not None: kargs = { 'axis' : axis } else: kargs = {} target = target.ravel() if not (m is nomask): m = m.ravel() if m is nomask: t = self.ufunc.reduce(target, **kargs) else: target = target.filled(self.fill_value_func(target)).view(type(target)) t = self.ufunc.reduce(target, **kargs) m = umath.logical_and.reduce(m, **kargs) if hasattr(t, '_mask'): t._mask = m elif m: t = masked return t #......... def outer (self, a, b): "Return the function applied to the outer product of a and b." ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: m = nomask else: ma = getmaskarray(a) mb = getmaskarray(b) m = logical_or.outer(ma, mb) result = self.ufunc.outer(filled(a), filled(b)) if not isinstance(result, MaskedArray): result = result.view(MaskedArray) result._mask = m return result #............................ class _minimum_operation(_extrema_operation): "Object to calculate minima" def __init__ (self): """minimum(a, b) or minimum(a) In one argument case, returns the scalar minimum. """ self.ufunc = umath.minimum self.afunc = amin self.compare = less self.fill_value_func = minimum_fill_value #............................ class _maximum_operation(_extrema_operation): "Object to calculate maxima" def __init__ (self): """maximum(a, b) or maximum(a) In one argument case returns the scalar maximum. """ self.ufunc = umath.maximum self.afunc = amax self.compare = greater self.fill_value_func = maximum_fill_value #.......................................................... 
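# Illustrative sketch (hypothetical session, values worked out by hand): the
# `minimum`/`maximum` objects instantiated below reduce when given a single
# argument and compare element-wise when given two:
#     >>> minimum(masked_array([3, 1, 2], mask=[0, 0, 1]))
#     1
#     >>> maximum(masked_array([1, 5]), masked_array([4, 2])).tolist()
#     [4, 5]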
def min(obj, axis=None, out=None, fill_value=None):
    try:
        return obj.min(axis=axis, fill_value=fill_value, out=out)
    except (AttributeError, TypeError):
        # If obj doesn't have a min method,
        # ...or if the method doesn't accept a fill_value argument
        return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__

def max(obj, axis=None, out=None, fill_value=None):
    try:
        return obj.max(axis=axis, fill_value=fill_value, out=out)
    except (AttributeError, TypeError):
        # If obj doesn't have a max method,
        # ...or if the method doesn't accept a fill_value argument
        return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
max.__doc__ = MaskedArray.max.__doc__

def ptp(obj, axis=None, out=None, fill_value=None):
    """a.ptp(axis=None) = a.max(axis)-a.min(axis)"""
    try:
        return obj.ptp(axis, out=out, fill_value=fill_value)
    except (AttributeError, TypeError):
        # If obj doesn't have a ptp method,
        # ...or if the method doesn't accept a fill_value argument
        return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__


#####---------------------------------------------------------------------------
#---- --- Definition of functions from the corresponding methods ---
#####---------------------------------------------------------------------------
class _frommethod:
    """
    Define functions from existing MaskedArray methods.

    Parameters
    ----------
    methodname : str
        Name of the method to transform.

    """
    def __init__(self, methodname):
        self.__name__ = methodname
        self.__doc__ = self.getdoc()
    #
    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        meth = getattr(MaskedArray, self.__name__, None) or\
               getattr(np, self.__name__, None)
        signature = self.__name__ + get_object_signature(meth)
        if meth is not None:
            doc = """    %s\n%s""" % (signature, getattr(meth, '__doc__', None))
            return doc
    #
    def __call__(self, a, *args, **params):
        # Get the method from the array (if possible)
        method_name = self.__name__
        method = getattr(a, method_name, None)
        if method is not None:
            return method(*args, **params)
        # Still here ? Then a is not a MaskedArray
        method = getattr(MaskedArray, method_name, None)
        if method is not None:
            return method(MaskedArray(a), *args, **params)
        # Still here ? OK, let's call the corresponding np function
        method = getattr(np, method_name)
        return method(a, *args, **params)

all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress')
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')

def take(a, indices, axis=None, out=None, mode='raise'):
    "Function version of the eponymous method."
    a = masked_array(a)
    return a.take(indices, axis=axis, out=out, mode=mode)

#..............................................................................
def power(a, b, third=None):
    """
    Returns element-wise base array raised to power from second array.
    This is the masked array version of `numpy.power`. For details see
    `numpy.power`.

    See Also
    --------
    numpy.power

    Notes
    -----
    The *out* argument to `numpy.power` is not supported; `third` has to be
    None.

    """
    if third is not None:
        raise MaskError, "3-argument power not supported."
    # Get the masks
    ma = getmask(a)
    mb = getmask(b)
    m = mask_or(ma, mb)
    # Get the rawdata
    fa = getdata(a)
    fb = getdata(b)
    # Get the type of the result (so that we preserve subclasses)
    if isinstance(a, MaskedArray):
        basetype = type(a)
    else:
        basetype = MaskedArray
    # Get the result and view it as a (subclass of) MaskedArray
    err_status = np.geterr()
    try:
        np.seterr(divide='ignore', invalid='ignore')
        result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
    finally:
        np.seterr(**err_status)
    result._update_from(a)
    # Find where we're in trouble w/ NaNs and Infs
    invalid = np.logical_not(np.isfinite(result.view(ndarray)))
    # Add the initial mask
    if m is not nomask:
        if not (result.ndim):
            return masked
        result._mask = np.logical_or(m, invalid)
    # Fix the invalid parts
    if invalid.any():
        if not result.ndim:
            return masked
        elif result._mask is nomask:
            result._mask = invalid
        result._data[invalid] = result.fill_value
    return result

#    if fb.dtype.char in typecodes["Integer"]:
#        return masked_array(umath.power(fa, fb), m)
#    m = mask_or(m, (fa < 0) & (fb != fb.astype(int)))
#    if m is nomask:
#        return masked_array(umath.power(fa, fb))
#    else:
#        fa = fa.copy()
#        if m.all():
#            fa.flat = 1
#        else:
#            np.putmask(fa,m,1)
#        return masked_array(umath.power(fa, fb), m)

#..............................................................................
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
    "Function version of the eponymous method."
    if fill_value is None:
        fill_value = default_fill_value(a)
    d = filled(a, fill_value)
    if axis is None:
        return d.argsort(kind=kind, order=order)
    return d.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__

def argmin(a, axis=None, fill_value=None):
    "Function version of the eponymous method."
    if fill_value is None:
        fill_value = default_fill_value(a)
    d = filled(a, fill_value)
    return d.argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__

def argmax(a, axis=None, fill_value=None):
    "Function version of the eponymous method."
    if fill_value is None:
        fill_value = default_fill_value(a)
        try:
            fill_value = -fill_value
        except:
            pass
    d = filled(a, fill_value)
    return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__

def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None):
    "Function version of the eponymous method."
    a = narray(a, copy=True, subok=True)
    if axis is None:
        a = a.flatten()
        axis = 0
    if fill_value is None:
        if endwith:
            filler = minimum_fill_value(a)
        else:
            filler = maximum_fill_value(a)
    else:
        filler = fill_value
#    return
    indx = np.indices(a.shape).tolist()
    indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
    return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__


def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.

    """
    if getmask(x) is nomask:
        return np.asanyarray(x)
    else:
        return x.compressed()

def concatenate(arrays, axis=0):
    """
    Concatenate a sequence of arrays along the given axis.
Parameters ---------- arrays : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- result : MaskedArray The concatenated array with any masked entries preserved. See Also -------- numpy.concatenate : Equivalent function in the top-level NumPy module. Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked >>> b = ma.arange(2, 5) >>> a masked_array(data = [0 -- 2], mask = [False True False], fill_value = 999999) >>> b masked_array(data = [2 3 4], mask = False, fill_value = 999999) >>> ma.concatenate([a, b]) masked_array(data = [0 -- 2 2 3 4], mask = [False True False False False False], fill_value = 999999) """ d = np.concatenate([getdata(a) for a in arrays], axis) rcls = get_masked_subclass(*arrays) data = d.view(rcls) # Check whether one of the arrays has a non-empty mask... for x in arrays: if getmask(x) is not nomask: break else: return data # OK, so we have to concatenate the masks dm = np.concatenate([getmaskarray(a) for a in arrays], axis) # If we decide to keep a '_shrinkmask' option, we want to check that ... # ... all of them are True, and then check for dm.any() # shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays]) # if shrink and not dm.any(): if not dm.dtype.fields and not dm.any(): data._mask = nomask else: data._mask = dm.reshape(d.shape) return data def count(a, axis=None): if isinstance(a, MaskedArray): return a.count(axis) return masked_array(a, copy=False).count(axis) count.__doc__ = MaskedArray.count.__doc__ def diag(v, k=0): """ Extract a diagonal or construct a diagonal array. This function is the equivalent of `numpy.diag` that takes masked values into account, see `numpy.diag` for details. See Also -------- numpy.diag : Equivalent function for ndarrays. """ output = np.diag(v, k).view(MaskedArray) if getmask(v) is not nomask: output._mask = np.diag(v._mask, k) return output def expand_dims(x, axis): """ Expand the shape of an array. Expands the shape of the array by including a new axis before the one specified by the `axis` parameter. This function behaves the same as `numpy.expand_dims` but preserves masked elements. See Also -------- numpy.expand_dims : Equivalent function in top-level NumPy module. Examples -------- >>> import numpy.ma as ma >>> x = ma.array([1, 2, 4]) >>> x[1] = ma.masked >>> x masked_array(data = [1 -- 4], mask = [False True False], fill_value = 999999) >>> np.expand_dims(x, axis=0) array([[1, 2, 4]]) >>> ma.expand_dims(x, axis=0) masked_array(data = [[1 -- 4]], mask = [[False True False]], fill_value = 999999) The same result can be achieved using slicing syntax with `np.newaxis`. >>> x[np.newaxis, :] masked_array(data = [[1 -- 4]], mask = [[False True False]], fill_value = 999999) """ result = n_expand_dims(x, axis) if isinstance(x, MaskedArray): new_shape = result.shape result = x.view() result.shape = new_shape if result._mask is not nomask: result._mask.shape = new_shape return result #...................................... def left_shift (a, n): """ Shift the bits of an integer to the left. This is the masked array version of `numpy.left_shift`, for details see that function. 
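
    Parameters
    ----------
    a : array_like
        Input values (treated as integers).
    n : int or array_like of ints
        Number of bits to shift (a sketch summarizing `numpy.left_shift`'s
        signature; see that function for the authoritative description).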
    See Also
    --------
    numpy.left_shift

    """
    m = getmask(a)
    if m is nomask:
        d = umath.left_shift(filled(a), n)
        return masked_array(d)
    else:
        d = umath.left_shift(filled(a, 0), n)
        return masked_array(d, mask=m)

def right_shift (a, n):
    """
    Shift the bits of an integer to the right.

    This is the masked array version of `numpy.right_shift`, for details
    see that function.

    See Also
    --------
    numpy.right_shift

    """
    m = getmask(a)
    if m is nomask:
        d = umath.right_shift(filled(a), n)
        return masked_array(d)
    else:
        d = umath.right_shift(filled(a, 0), n)
        return masked_array(d, mask=m)

#......................................
def put(a, indices, values, mode='raise'):
    """
    Set storage-indexed locations to corresponding values.

    This function is equivalent to `MaskedArray.put`, see that method
    for details.

    See Also
    --------
    MaskedArray.put

    """
    # We can't use 'frommethod', the order of arguments is different
    try:
        return a.put(indices, values, mode=mode)
    except AttributeError:
        return narray(a, copy=False).put(indices, values, mode=mode)

def putmask(a, mask, values): #, mode='raise'):
    """
    Changes elements of an array based on conditional and input values.

    This is the masked array version of `numpy.putmask`, for details see
    `numpy.putmask`.

    See Also
    --------
    numpy.putmask

    Notes
    -----
    Using a masked array as `values` will **not** transform a `ndarray` into
    a `MaskedArray`.

    """
    # We can't use 'frommethod', the order of arguments is different
    if not isinstance(a, MaskedArray):
        a = a.view(MaskedArray)
    (valdata, valmask) = (getdata(values), getmask(values))
    if getmask(a) is nomask:
        if valmask is not nomask:
            a._sharedmask = True
            a._mask = make_mask_none(a.shape, a.dtype)
            np.putmask(a._mask, mask, valmask)
    elif a._hardmask:
        if valmask is not nomask:
            m = a._mask.copy()
            np.putmask(m, mask, valmask)
            a.mask |= m
    else:
        if valmask is nomask:
            valmask = getmaskarray(values)
        np.putmask(a._mask, mask, valmask)
    np.putmask(a._data, mask, valdata)
    return

def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    This function is exactly equivalent to `numpy.transpose`.

    See Also
    --------
    numpy.transpose : Equivalent function in top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.arange(4).reshape((2,2))
    >>> x[1, 1] = ma.masked
    >>> x
    masked_array(data =
     [[0 1]
     [2 --]],
                 mask =
     [[False False]
     [False  True]],
           fill_value = 999999)
    >>> ma.transpose(x)
    masked_array(data =
     [[0 2]
     [1 --]],
                 mask =
     [[False False]
     [False  True]],
           fill_value = 999999)

    """
    # We can't use 'frommethod', as 'transpose' doesn't take keywords
    try:
        return a.transpose(axes)
    except AttributeError:
        return narray(a, copy=False).transpose(axes).view(MaskedArray)

def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function

    """
    # We can't use 'frommethod': 'reshape' rejects some of the keyword
    # arguments it would be passed.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        _tmp = narray(a, copy=False).reshape(new_shape, order=order)
        return _tmp.view(MaskedArray)

def resize(x, new_shape):
    """
    Return a new masked array with the specified size and shape.

    This is the masked equivalent of the `numpy.resize` function. The new
    array is filled with repeated copies of `x` (in the order that the
    data are stored in memory). If `x` is masked, the new array will be
    masked, and the new mask will be a repetition of the old one.

    See Also
    --------
    numpy.resize : Equivalent function in the top level NumPy module.
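
    Notes
    -----
    The mask, if any, is resized with the same repetition rule as the data.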
Examples -------- >>> import numpy.ma as ma >>> a = ma.array([[1, 2] ,[3, 4]]) >>> a[0, 1] = ma.masked >>> a masked_array(data = [[1 --] [3 4]], mask = [[False True] [False False]], fill_value = 999999) >>> np.resize(a, (3, 3)) array([[1, 2, 3], [4, 1, 2], [3, 4, 1]]) >>> ma.resize(a, (3, 3)) masked_array(data = [[1 -- 3] [4 1 --] [3 4 1]], mask = [[False True False] [False False True] [False False False]], fill_value = 999999) A MaskedArray is always returned, regardless of the input type. >>> a = np.array([[1, 2] ,[3, 4]]) >>> ma.resize(a, (3, 3)) masked_array(data = [[1 2 3] [4 1 2] [3 4 1]], mask = False, fill_value = 999999) """ # We can't use _frommethods here, as N.resize is notoriously whiny. m = getmask(x) if m is not nomask: m = np.resize(m, new_shape) result = np.resize(x, new_shape).view(get_masked_subclass(x)) if result.ndim: result._mask = m return result #................................................ def rank(obj): "maskedarray version of the numpy function." return np.rank(getdata(obj)) rank.__doc__ = np.rank.__doc__ # def shape(obj): "maskedarray version of the numpy function." return np.shape(getdata(obj)) shape.__doc__ = np.shape.__doc__ # def size(obj, axis=None): "maskedarray version of the numpy function." return np.size(getdata(obj), axis) size.__doc__ = np.size.__doc__ #................................................ #####-------------------------------------------------------------------------- #---- --- Extra functions --- #####-------------------------------------------------------------------------- def where (condition, x=None, y=None): """ Return a masked array with elements from x or y, depending on condition. Returns a masked array, shaped like condition, where the elements are from `x` when `condition` is True, and from `y` otherwise. If neither `x` nor `y` are given, the function returns a tuple of indices where `condition` is True (the result of ``condition.nonzero()``). Parameters ---------- condition : array_like, bool The condition to meet. For each True element, yield the corresponding element from `x`, otherwise from `y`. x, y : array_like, optional Values from which to choose. `x` and `y` need to have the same shape as condition, or be broadcast-able to that shape. Returns ------- out : MaskedArray or tuple of ndarrays The resulting masked array if `x` and `y` were given, otherwise the result of ``condition.nonzero()``. See Also -------- numpy.where : Equivalent function in the top-level NumPy module. Examples -------- >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... [0, 1, 0]]) >>> print x [[0.0 -- 2.0] [-- 4.0 --] [6.0 -- 8.0]] >>> np.ma.where(x > 5) # return the indices where x > 5 (array([2, 2]), array([0, 2])) >>> print np.ma.where(x > 5, x, -3.1416) [[-3.1416 -- -3.1416] [-- -3.1416 --] [6.0 -- 8.0]] """ if x is None and y is None: return filled(condition, 0).nonzero() elif x is None or y is None: raise ValueError, "Either both or neither x and y should be given." # Get the condition ............... fc = filled(condition, 0).astype(MaskType) notfc = np.logical_not(fc) # Get the data ...................................... 
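    # (Sketch of the steps below: data from `x` is written where the
    # condition holds, data from `y` elsewhere, and the output mask combines
    # the masks of `x`, `y` and `condition`.)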
xv = getdata(x) yv = getdata(y) if x is masked: ndtype = yv.dtype elif y is masked: ndtype = xv.dtype else: ndtype = np.find_common_type([xv.dtype, yv.dtype], []) # Construct an empty array and fill it d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray) _data = d._data np.putmask(_data, fc, xv.astype(ndtype)) np.putmask(_data, notfc, yv.astype(ndtype)) # Create an empty mask and fill it _mask = d._mask = np.zeros(fc.shape, dtype=MaskType) np.putmask(_mask, fc, getmask(x)) np.putmask(_mask, notfc, getmask(y)) _mask |= getmaskarray(condition) if not _mask.any(): d._mask = nomask return d def choose (indices, choices, out=None, mode='raise'): """ Use an index array to construct a new array from a set of choices. Given an array of integers and a set of n choice arrays, this method will create a new array that merges each of the choice arrays. Where a value in `a` is i, the new array will have the value that choices[i] contains in the same place. Parameters ---------- a : ndarray of ints This array must contain integers in ``[0, n-1]``, where n is the number of choices. choices : sequence of arrays Choice arrays. The index array and all of the choices should be broadcastable to the same shape. out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and `dtype`. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' : raise an error * 'wrap' : wrap around * 'clip' : clip to the range Returns ------- merged_array : array See Also -------- choose : equivalent function Examples -------- >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) masked_array(data = [3 2 1], mask = False, fill_value=999999) """ def fmask (x): "Returns the filled array, or True if masked." if x is masked: return True return filled(x) def nmask (x): "Returns the mask, True if ``masked``, False if ``nomask``." if x is masked: return True return getmask(x) # Get the indices...... c = filled(indices, 0) # Get the masks........ masks = [nmask(x) for x in choices] data = [fmask(x) for x in choices] # Construct the mask outputmask = np.choose(c, masks, mode=mode) outputmask = make_mask(mask_or(outputmask, getmask(indices)), copy=0, shrink=True) # Get the choices...... d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(outputmask) return out d.__setmask__(outputmask) return d def round_(a, decimals=0, out=None): """ Return a copy of a, rounded to 'decimals' places. When 'decimals' is negative, it specifies the number of positions to the left of the decimal point. The real and imaginary parts of complex numbers are rounded separately. Nothing is done if the array is not of float type and 'decimals' is greater than or equal to 0. Parameters ---------- decimals : int Number of decimals to round to. May be negative. out : array_like Existing array to use for output. If not given, returns a default copy of a. Notes ----- If out is given and does not have a mask attribute, the mask of a is lost! """ if out is None: return np.round_(a, decimals, out) else: np.round_(getdata(a), decimals, out) if hasattr(out, '_mask'): out._mask = getmask(a) return out round = round_ def inner(a, b): """ Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension of a and b. Notes ----- The first argument is not conjugated. 
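
    Examples
    --------
    A minimal hand-checked sketch (masked entries are filled with 0, so they
    contribute nothing to the product sum):

    >>> a = masked_array([[1, 2, 3], [4, 5, 6]], mask=[[0, 0, 1], [0, 0, 0]])
    >>> inner(a, [1, 1, 1]).tolist()
    [3, 15]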
""" fa = filled(a, 0) fb = filled(b, 0) if len(fa.shape) == 0: fa.shape = (1,) if len(fb.shape) == 0: fb.shape = (1,) return np.inner(fa, fb).view(MaskedArray) inner.__doc__ = doc_note(np.inner.__doc__, "Masked values are replaced by 0.") innerproduct = inner def outer(a, b): "maskedarray version of the numpy function." fa = filled(a, 0).ravel() fb = filled(b, 0).ravel() d = np.outer(fa, fb) ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: return masked_array(d) ma = getmaskarray(a) mb = getmaskarray(b) m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0) return masked_array(d, mask=m) outer.__doc__ = doc_note(np.outer.__doc__, "Masked values are replaced by 0.") outerproduct = outer def allequal (a, b, fill_value=True): """ Return True if all entries of a and b are equal, using fill_value as a truth value where either or both are masked. Parameters ---------- a, b : array_like Input arrays to compare. fill_value : bool, optional Whether masked values in a or b are considered equal (True) or not (False). Returns ------- y : bool Returns True if the two arrays are equal within the given tolerance, False otherwise. If either array contains NaN, then False is returned. See Also -------- all, any numpy.ma.allclose Examples -------- >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data = [10000000000.0 1e-07 --], mask = [False False True], fill_value=1e+20) >>> b = array([1e10, 1e-7, -42.0]) >>> b array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) >>> ma.allequal(a, b, fill_value=False) False >>> ma.allequal(a, b) True """ m = mask_or(getmask(a), getmask(b)) if m is nomask: x = getdata(a) y = getdata(b) d = umath.equal(x, y) return d.all() elif fill_value: x = getdata(a) y = getdata(b) d = umath.equal(x, y) dm = array(d, mask=m, copy=False) return dm.filled(True).all(None) else: return False def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8): """ Returns True if two arrays are element-wise equal within a tolerance. This function is equivalent to `allclose` except that masked values are treated as equal (default) or unequal, depending on the `masked_equal` argument. Parameters ---------- a, b : array_like Input arrays to compare. masked_equal : bool, optional Whether masked values in `a` and `b` are considered equal (True) or not (False). They are considered equal by default. rtol : float, optional Relative tolerance. The relative difference is equal to ``rtol * b``. Default is 1e-5. atol : float, optional Absolute tolerance. The absolute difference is equal to `atol`. Default is 1e-8. Returns ------- y : bool Returns True if the two arrays are equal within the given tolerance, False otherwise. If either array contains NaN, then False is returned. See Also -------- all, any numpy.allclose : the non-masked `allclose`. Notes ----- If the following equation is element-wise True, then `allclose` returns True:: absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) Return True if all elements of `a` and `b` are equal subject to given tolerances. Examples -------- >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data = [10000000000.0 1e-07 --], mask = [False False True], fill_value = 1e+20) >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) >>> ma.allclose(a, b) False >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) >>> ma.allclose(a, b) True >>> ma.allclose(a, b, masked_equal=False) False Masked values are not compared directly. 
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) >>> ma.allclose(a, b) True >>> ma.allclose(a, b, masked_equal=False) False """ x = masked_array(a, copy=False) y = masked_array(b, copy=False) m = mask_or(getmask(x), getmask(y)) xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) # If we have some infs, they should fall at the same place. if not np.all(xinf == filled(np.isinf(y), False)): return False # No infs at all if not np.any(xinf): d = filled(umath.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)), masked_equal) return np.all(d) if not np.all(filled(x[xinf] == y[xinf], masked_equal)): return False x = x[~xinf] y = y[~xinf] d = filled(umath.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)), masked_equal) return np.all(d) #.............................................................................. def asarray(a, dtype=None, order=None): """ Convert the input to a masked array of the given data-type. No copy is performed if the input is already an `ndarray`. If `a` is a subclass of `MaskedArray`, a base class `MaskedArray` is returned. Parameters ---------- a : array_like Input data, in any form that can be converted to a masked array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists, ndarrays and masked arrays. dtype : dtype, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major ('C') or column-major ('FORTRAN') memory representation. Default is 'C'. Returns ------- out : MaskedArray Masked array interpretation of `a`. See Also -------- asanyarray : Similar to `asarray`, but conserves subclasses. Examples -------- >>> x = np.arange(10.).reshape(2, 5) >>> x array([[ 0., 1., 2., 3., 4.], [ 5., 6., 7., 8., 9.]]) >>> np.ma.asarray(x) masked_array(data = [[ 0. 1. 2. 3. 4.] [ 5. 6. 7. 8. 9.]], mask = False, fill_value = 1e+20) >>> type(np.ma.asarray(x)) <class 'numpy.ma.core.MaskedArray'> """ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) def asanyarray(a, dtype=None): """ Convert the input to a masked array, conserving subclasses. If `a` is a subclass of `MaskedArray`, its class is conserved. No copy is performed if the input is already an `ndarray`. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major ('C') or column-major ('FORTRAN') memory representation. Default is 'C'. Returns ------- out : MaskedArray MaskedArray interpretation of `a`. See Also -------- asarray : Similar to `asanyarray`, but does not conserve subclass. Examples -------- >>> x = np.arange(10.).reshape(2, 5) >>> x array([[ 0., 1., 2., 3., 4.], [ 5., 6., 7., 8., 9.]]) >>> np.ma.asanyarray(x) masked_array(data = [[ 0. 1. 2. 3. 4.] [ 5. 6. 7. 8. 9.]], mask = False, fill_value = 1e+20) >>> type(np.ma.asanyarray(x)) <class 'numpy.ma.core.MaskedArray'> """ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) #####-------------------------------------------------------------------------- #---- --- Pickling --- #####-------------------------------------------------------------------------- def dump(a, F): """ Pickle a masked array to a file. This is a wrapper around ``cPickle.dump``. Parameters ---------- a : MaskedArray The array to be pickled. 
F : str or file-like object The file to pickle `a` to. If a string, the full path to the file. """ if not hasattr(F, 'readline'): F = open(F, 'w') return cPickle.dump(a, F) def dumps(a): """ Return a string corresponding to the pickling of a masked array. This is a wrapper around ``cPickle.dumps``. Parameters ---------- a : MaskedArray The array for which the string representation of the pickle is returned. """ return cPickle.dumps(a) def load(F): """ Wrapper around ``cPickle.load`` which accepts either a file-like object or a filename. Parameters ---------- F : str or file The file or file name to load. See Also -------- dump : Pickle an array Notes ----- This is different from `numpy.load`, which does not use cPickle but loads the NumPy binary .npy format. """ if not hasattr(F, 'readline'): F = open(F, 'r') return cPickle.load(F) def loads(strg): """ Load a pickle from the current string. The result of ``cPickle.loads(strg)`` is returned. Parameters ---------- strg : str The string to load. See Also -------- dumps : Return a string corresponding to the pickling of a masked array. """ return cPickle.loads(strg) ################################################################################ def fromfile(file, dtype=float, count= -1, sep=''): raise NotImplementedError("Not yet implemented. Sorry") def fromflex(fxarray): """ Build a masked array from a suitable flexible-type array. The input array has to have a data-type with ``_data`` and ``_mask`` fields. This type of array is output by `MaskedArray.toflex`. Parameters ---------- fxarray : ndarray The structured input array, containing ``_data`` and ``_mask`` fields. If present, other fields are discarded. Returns ------- result : MaskedArray The constructed masked array. See Also -------- MaskedArray.toflex : Build a flexible-type array from a masked array. Examples -------- >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) >>> rec = x.toflex() >>> rec array([[(0, False), (1, True), (2, False)], [(3, True), (4, False), (5, True)], [(6, False), (7, True), (8, False)]], dtype=[('_data', '<i4'), ('_mask', '|b1')]) >>> x2 = np.ma.fromflex(rec) >>> x2 masked_array(data = [[0 -- 2] [-- 4 --] [6 -- 8]], mask = [[False True False] [ True False True] [False True False]], fill_value = 999999) Extra fields can be present in the structured array but are discarded: >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')] >>> rec2 = np.zeros((2, 2), dtype=dt) >>> rec2 array([[(0, False, 0.0), (0, False, 0.0)], [(0, False, 0.0), (0, False, 0.0)]], dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]) >>> y = np.ma.fromflex(rec2) >>> y masked_array(data = [[0 0] [0 0]], mask = [[False False] [False False]], fill_value = 999999) """ return masked_array(fxarray['_data'], mask=fxarray['_mask']) class _convert2ma: """ Convert functions from numpy to numpy.ma. Parameters ---------- _methodname : string Name of the method to transform. """ __doc__ = None # def __init__(self, funcname, params=None): self._func = getattr(np, funcname) self.__doc__ = self.getdoc() self._extras = params or {} # def getdoc(self): "Return the doc of the function (from the doc of the method)." 
doc = getattr(self._func, '__doc__', None) sig = get_object_signature(self._func) if doc: # Add the signature of the function at the beginning of the doc if sig: sig = "%s%s\n" % (self._func.__name__, sig) doc = sig + doc return doc # def __call__(self, a, *args, **params): # Find the common parameters to the call and the definition _extras = self._extras common_params = set(params).intersection(_extras) # Drop the common parameters from the call for p in common_params: _extras[p] = params.pop(p) # Get the result result = self._func.__call__(a, *args, **params).view(MaskedArray) if "fill_value" in common_params: result.fill_value = _extras.get("fill_value", None) if "hardmask" in common_params: result._hardmask = bool(_extras.get("hard_mask", False)) return result arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False)) clip = np.clip diff = np.diff empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False)) empty_like = _convert2ma('empty_like') frombuffer = _convert2ma('frombuffer') fromfunction = _convert2ma('fromfunction') identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False)) indices = np.indices ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False)) ones_like = np.ones_like squeeze = np.squeeze zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False)) zeros_like = np.zeros_like ###############################################################################
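To make the comparison semantics above concrete, here is a minimal, self-contained usage sketch of allclose and allequal through the public numpy.ma namespace (not part of the original module; the values are taken from the docstrings above, and a NumPy installation is assumed):

import numpy as np
import numpy.ma as ma

# allclose: masked slots compare equal by default (masked_equal=True).
a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
print(ma.allclose(a, b))                      # True
print(ma.allclose(a, b, masked_equal=False))  # False

# allequal: fill_value is used as the truth value wherever either side is masked.
x = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
y = np.array([1e10, 1e-7, -42.0])
print(ma.allequal(x, y))                    # True
print(ma.allequal(x, y, fill_value=False))  # False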
mit
1,502,629,442,599,724,500
7,520,388,733,710,198,000
30.617725
86
0.530509
false
hkawasaki/kawasaki-aio8-2
cms/djangoapps/contentstore/management/commands/check_course.py
22
2740
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location


class Command(BaseCommand):
    help = '''Traverses the course and finds common errors'''

    def handle(self, *args, **options):
        if len(args) != 1:
            raise CommandError("check_course requires one argument: <location>")

        loc_str = args[0]

        loc = CourseDescriptor.id_to_location(loc_str)
        store = modulestore()

        course = store.get_item(loc, depth=3)

        err_cnt = 0

        def _xlint_metadata(module):
            err_cnt = check_module_metadata_editability(module)
            for child in module.get_children():
                err_cnt = err_cnt + _xlint_metadata(child)
            return err_cnt
        err_cnt = err_cnt + _xlint_metadata(course)

        # we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
        def _check_xml_attributes_field(module):
            err_cnt = 0
            if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
                print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location.url())
                err_cnt = err_cnt + 1
            for child in module.get_children():
                err_cnt = err_cnt + _check_xml_attributes_field(child)
            return err_cnt
        err_cnt = err_cnt + _check_xml_attributes_field(course)

        # check for dangling discussion items; these can cause errors in the forums
        def _get_discussion_items(module):
            discussion_items = []
            if module.location.category == 'discussion':
                discussion_items = discussion_items + [module.location.url()]

            for child in module.get_children():
                discussion_items = discussion_items + _get_discussion_items(child)

            return discussion_items

        discussion_items = _get_discussion_items(course)

        # now query all discussion items via get_items() and compare with the tree traversal
        queried_discussion_items = store.get_items(
            Location(
                'i4x',
                course.location.org,
                course.location.course,
                'discussion',
                None,
                None
            )
        )

        for item in queried_discussion_items:
            if item.location.url() not in discussion_items:
                print 'Found dangling discussion module = {0}'.format(item.location.url())
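The command above relies on a simple depth-first traversal pattern (_xlint_metadata, _check_xml_attributes_field, _get_discussion_items all share it). Below is a standalone sketch of that pattern, using a hypothetical Node class in place of real XModule descriptors, so it runs without the edx-platform dependencies:

class Node(object):
    # Toy stand-in for an XModule descriptor (hypothetical, for illustration).
    def __init__(self, category, children=None):
        self.category = category
        self.children = children or []

    def get_children(self):
        return self.children


def collect(module, category):
    # Depth-first collection, mirroring _get_discussion_items above.
    found = [module] if module.category == category else []
    for child in module.get_children():
        found += collect(child, category)
    return found


course = Node('course', [Node('chapter', [Node('discussion'), Node('html')])])
print(len(collect(course, 'discussion')))  # 1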
agpl-3.0
4,032,228,437,053,391,400
4,340,766,295,705,339,000
37.591549
118
0.613869
false
bbcf/bbcflib
bein/tests/test_bein.py
1
13588
import socket import re import sys import random from unittest2 import TestCase, TestSuite, main, TestLoader, skipIf from bein import * from bein.util import touch M = MiniLIMS("testing_lims") def hostname_contains(pattern): hostname = socket.gethostbyaddr(socket.gethostname())[0] if re.search(pattern, hostname) == None: return False else: return True try: if hostname_contains('vital-it.ch'): not_vital_it = False else: not_vital_it = True except: not_vital_it = True @program def count_lines(filename): """Count the number of lines in *filename* (equivalent to ``wc -l``).""" def parse_output(p): m = re.search(r'^\s*(\d+)\s+' + filename + r'\s*$', ''.join(p.stdout)) if m == None: return None else: return int(m.groups()[-1]) # in case of a weird line in LSF return {"arguments": ["wc","-l",filename], "return_value": parse_output} class TestProgramBinding(TestCase): def test_binding_works(self): with execution(None) as ex: with open('boris','w') as f: f.write("This is a test\nof the emergency broadcast\nsystem.\n") self.assertEqual(count_lines(ex, 'boris'), 3) def test_local_works(self): with execution(None) as ex: with open('boris','w') as f: f.write("This is a test\nof the emergency broadcast\nsystem.\n") q = count_lines._local(ex, 'boris') self.assertEqual(str(q.__class__), "<class 'bein.Future'>") self.assertEqual(q.wait(), 3) @skipIf(not_vital_it, "Not on VITAL-IT.") def test_lsf_works(self): with execution(None) as ex: with open('boris','w') as f: f.write("This is a test\nof the emergency broadcast\nsystem.\n") q = count_lines._lsf(ex, 'boris') self.assertEqual(str(q.__class__), "<class 'bein.Future'>") self.assertEqual(q.wait(), 3) def test_nonblocking_with_via_local(self): with execution(None) as ex: with open('boris','w') as f: f.write("This is a test\nof the emergency broadcast\nsystem.\n") q = count_lines.nonblocking(ex, 'boris', via='local') self.assertEqual(str(q.__class__), "<class 'bein.Future'>") self.assertEqual(q.wait(), 3) @skipIf(not_vital_it, "Not on VITAL-IT") def test_nonblocking_with_via_lsf(self): with execution(None) as ex: with open('boris','w') as f: f.write("This is a test\nof the emergency broadcast\nsystem.\n") q = count_lines.nonblocking(ex, 'boris', via='lsf') self.assertEqual(str(q.__class__), "<class 'bein.Future'>") self.assertEqual(q.wait(), 3) def test_syntaxerror_outside_execution(self): with execution(M) as ex: pass M.delete_execution(ex.id) with self.assertRaises(SyntaxError): touch(ex) def test_syntaxerror_outside_execution_nonblocking(self): with execution(M) as ex: pass M.delete_execution(ex.id) with self.assertRaises(SyntaxError): touch.nonblocking(ex) class TestUniqueFilenameIn(TestCase): def test_state_determines_filename(self): with execution(None) as ex: st = random.getstate() f = unique_filename_in() random.setstate(st) g = unique_filename_in() self.assertEqual(f, g) def test_unique_filename_exact_match(self): with execution(None) as ex: st = random.getstate() f = touch(ex) random.setstate(st) g = touch(ex) self.assertNotEqual(f, g) def test_unique_filename_beginnings_match(self): with execution(None) as ex: st = random.getstate() f = unique_filename_in() touch(ex, f + 'abcdefg') random.setstate(st) g = touch(ex) self.assertNotEqual(f, g) class TestMiniLIMS(TestCase): def test_resolve_alias_exception_on_no_file(self): with execution(None) as ex: M = MiniLIMS("boris") self.assertRaises(ValueError, M.resolve_alias, 55) def test_resolve_alias_returns_int_if_exists(self): with execution(None) as ex: f = touch(ex) M = MiniLIMS("boris") a = 
M.import_file(f) self.assertEqual(M.resolve_alias(a), a) def test_resolve_alias_with_alias(self): with execution(None) as ex: f = touch(ex) M = MiniLIMS("boris") a = M.import_file(f) M.add_alias(a, 'hilda') self.assertEqual(M.resolve_alias('hilda'), a) def test_path_to_file_on_execution(self): with execution(None) as ignoreme: f = touch(ignoreme) M = MiniLIMS("boris") fid = M.import_file(f) mpath = M.path_to_file(fid) with execution(M) as ex: fpath = ex.path_to_file(fid) self.assertEqual(mpath, fpath) def test_search_files(self): f_desc = unique_filename_in() t1 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) f_id = M.import_file("../LICENSE", description=f_desc) t2 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) f_found = M.search_files(with_text="LICENSE", with_description=f_desc, older_than=t2, source="import", newer_than=t1) self.assertIn(f_id, f_found) M.delete_file(f_id) f_desc = {"name":"test_search_files_by_dict", "m":5, "n":15} f_id = M.import_file("../LICENSE", description=f_desc) f_found = M.search_files(with_description=f_desc) self.assertIn(f_id, f_found) M.delete_file(f_id) def test_search_executions(self): with execution(M, description="desc_test") as ex: pass ex_found = M.search_executions(with_description="desc_test") self.assertIn(ex.id,ex_found) M.delete_execution(ex.id) ex_desc = {"name":"test_search_ex_by_dict", "m":5, "n":15} with execution(M, description=ex_desc) as ex: pass ex_found = M.search_executions(with_description=ex_desc) self.assertIn(ex.id, ex_found) try: with execution(M, description="desc_test_fail") as ex_nofail: 3/0 except: pass ex_found_nofail = M.search_executions(with_description="desc_test", fails=False) for e in ex_found_nofail: error = M.fetch_execution(e)["exception_string"] self.assertIsNone(error) ex_found_fail = M.search_executions(with_description="desc_test", fails=True) for e in ex_found_fail: error = M.fetch_execution(e)["exception_string"] self.assertIsNotNone(error) M.delete_execution(ex.id) M.delete_execution(ex_nofail.id) def test_browse_files(self): f_desc = "browse_file_test" f_id = M.import_file("../LICENSE", description=f_desc) f_found = M.browse_files(with_description=f_desc) #self.assertIn(f_id,f_found) M.delete_file(f_id) def test_browse_executions(self): ex_desc = "browse_ex_test" with execution(M, description=ex_desc) as ex: touch(ex,"boris") ex_found = M.browse_executions(with_description=ex_desc) #self.assertIs(ex.id,ex_found) M.delete_execution(ex.id) class TestExportFile(TestCase): def test_export_file(self): filea = M.import_file("../LICENSE") #file ID fileb = M.import_file("../doc/bein.rst") testdir = "testing.files" if not os.path.isdir(testdir): os.mkdir(testdir) M.associate_file(fileb,filea,template="%s.linked") M.export_file(filea, dst=os.path.join(testdir,"exportedfile"), with_associated=True) #test with file name given self.assertTrue(os.path.isfile(os.path.join(testdir,"exportedfile"+".linked"))) os.remove(os.path.join(testdir,"exportedfile")) os.remove(os.path.join(testdir,"exportedfile"+".linked")) M.export_file(filea, dst=testdir, with_associated=True) #test with directory given filename = M.fetch_file(filea)['repository_name'] self.assertTrue(os.path.isfile(os.path.join(testdir, filename +".linked"))) os.remove(os.path.join(testdir, filename)) os.remove(os.path.join(testdir, filename +".linked")) @program def echo(s): return {'arguments': ['echo',str(s)], 'return_value': None} class TestStdoutStderrRedirect(TestCase): def test_stdout_redirected(self): try: with execution(M) as ex: f = 
unique_filename_in() echo(ex, "boris!", stdout=f) with open(f) as q: l = q.readline() self.assertEqual(l, 'boris!\n') finally: M.delete_execution(ex.id) def test_stdout_local_redirected(self): try: with execution(M) as ex: f = unique_filename_in() m = echo.nonblocking(ex, "boris!", stdout=f) m.wait() with open(f) as q: l = q.readline() self.assertEqual(l, 'boris!\n') finally: M.delete_execution(ex.id) class TestNoSuchProgramError(TestCase): @program def nonexistent(): return {"arguments": ["meepbarf","hilda"], "return_value": None} def test_nonexistent(self): with execution(None) as ex: self.assertRaises(ValueError, self.nonexistent, ex) def test_nonexistent_local(self): with execution(None) as ex: f = self.nonexistent.nonblocking(ex, via="local") self.assertRaises(ValueError, f.wait) class TestImmutabilityDropped(TestCase): def test_immutability_dropped(self): executions = [] with execution(M) as ex: touch(ex, "boris") ex.add("boris") exid1 = ex.id borisid = M.search_files(source=('execution',ex.id))[0] self.assertFalse(M.fetch_file(borisid)['immutable']) with execution(M) as ex: ex.use(borisid) exid2 = ex.id self.assertTrue(M.fetch_file(borisid)['immutable']) M.delete_execution(exid2) self.assertFalse(M.fetch_file(borisid)['immutable']) M.delete_execution(exid1) self.assertEqual(M.search_files(source=('execution',exid1)), []) class TestAssociatePreservesFilenames(TestCase): def test_associate_with_names(self): try: with execution(M) as ex: touch(ex, "boris") touch(ex, "hilda") ex.add("boris") ex.add("hilda", associate_to_filename="boris", template="%s.meep") boris_id = M.search_files(source=('execution',ex.id), with_text="boris")[0] hilda_id = M.search_files(source=('execution',ex.id), with_text="hilda")[0] boris_name = M.fetch_file(boris_id)['repository_name'] hilda_name = M.fetch_file(hilda_id)['repository_name'] self.assertEqual("%s.meep" % boris_name, hilda_name) finally: try: M.delete_execution(ex.id) except: pass def test_associate_with_id(self): try: fid = M.import_file('test.py') with execution(M) as ex: touch(ex, "hilda") ex.add("hilda", associate_to_id=fid, template="%s.meep") hilda_id = M.search_files(source=('execution',ex.id))[0] hilda_name = M.fetch_file(hilda_id)['repository_name'] fid_name = M.fetch_file(fid)['repository_name'] self.assertEqual("%s.meep" % fid_name, hilda_name) finally: try: M.delete_execution(ex.id) M.delete_file(fid) except: pass def test_hierarchical_association(self): try: with execution(M) as ex: touch(ex, "a") touch(ex, "b") touch(ex, "c") ex.add("a") ex.add("b", associate_to_filename="a", template="%s.step") ex.add("c", associate_to_filename="b", template="%s.step") a_id = M.search_files(source=('execution',ex.id), with_text='a')[0] b_id = M.search_files(source=('execution',ex.id), with_text='b')[0] c_id = M.search_files(source=('execution',ex.id), with_text='c')[0] a_name = M.fetch_file(a_id)['repository_name'] b_name = M.fetch_file(b_id)['repository_name'] c_name = M.fetch_file(c_id)['repository_name'] self.assertEqual("%s.step" % a_name, b_name) self.assertEqual("%s.step.step" % a_name, c_name) finally: try: M.delete_execution(ex.id) except: pass #def test_given(tests): # module = sys.modules[__name__] # if tests == None: # defaultTest = None # else: # loader = TestLoader() # defaultTest = TestSuite() # tests = loader.loadTestsFromNames(tests, module) # defaultTest.addTests(tests) # main(defaultTest=defaultTest) if __name__ == '__main__': if len(sys.argv) > 1: test_given(sys.argv[1:]) else: test_given(None)
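The host-dependent skipIf guard used in TestProgramBinding above is a generally useful pattern on its own. Here is a minimal standalone version written against the stdlib unittest (the original uses unittest2; the hostname pattern is just the one from the tests above, kept for illustration):

import re
import socket
import unittest


def hostname_contains(pattern):
    # True if this machine's fully qualified hostname matches the pattern.
    try:
        hostname = socket.gethostbyaddr(socket.gethostname())[0]
    except socket.error:
        return False
    return re.search(pattern, hostname) is not None


class ClusterOnly(unittest.TestCase):
    @unittest.skipIf(not hostname_contains('vital-it.ch'), "Not on VITAL-IT.")
    def test_runs_only_on_cluster(self):
        self.assertTrue(True)


if __name__ == '__main__':
    unittest.main()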
gpl-3.0
2,153,683,741,581,797,000
-3,067,358,302,844,734,500
35.04244
125
0.562629
false
Turkingwang/dpkt
dpkt/snoop.py
22
2963
# $Id$ """Snoop file format.""" import sys, time import dpkt # RFC 1761 SNOOP_MAGIC = 0x736E6F6F70000000L SNOOP_VERSION = 2 SDL_8023 = 0 SDL_8024 = 1 SDL_8025 = 2 SDL_8026 = 3 SDL_ETHER = 4 SDL_HDLC = 5 SDL_CHSYNC = 6 SDL_IBMCC = 7 SDL_FDDI = 8 SDL_OTHER = 9 dltoff = { SDL_ETHER:14 } class PktHdr(dpkt.Packet): """snoop packet header.""" __byte_order__ = '!' __hdr__ = ( ('orig_len', 'I', 0), ('incl_len', 'I', 0), ('rec_len', 'I', 0), ('cum_drops', 'I', 0), ('ts_sec', 'I', 0), ('ts_usec', 'I', 0), ) class FileHdr(dpkt.Packet): """snoop file header.""" __byte_order__ = '!' __hdr__ = ( ('magic', 'Q', SNOOP_MAGIC), ('v', 'I', SNOOP_VERSION), ('linktype', 'I', SDL_ETHER), ) class Writer(object): """Simple snoop dumpfile writer.""" def __init__(self, fileobj, linktype=SDL_ETHER): self.__f = fileobj fh = FileHdr(linktype=linktype) self.__f.write(str(fh)) def writepkt(self, pkt, ts=None): if ts is None: ts = time.time() s = str(pkt) n = len(s) pad_len = 4 - n % 4 if n % 4 else 0 ph = PktHdr(orig_len=n,incl_len=n, rec_len=PktHdr.__hdr_len__+n+pad_len, ts_sec=int(ts), ts_usec=int((int(ts) - float(ts)) * 1000000.0)) self.__f.write(str(ph)) self.__f.write(s + '\0' * pad_len) def close(self): self.__f.close() class Reader(object): """Simple pypcap-compatible snoop file reader.""" def __init__(self, fileobj): self.name = fileobj.name self.fd = fileobj.fileno() self.__f = fileobj buf = self.__f.read(FileHdr.__hdr_len__) self.__fh = FileHdr(buf) self.__ph = PktHdr if self.__fh.magic != SNOOP_MAGIC: raise ValueError, 'invalid snoop header' self.dloff = dltoff[self.__fh.linktype] self.filter = '' def fileno(self): return self.fd def datalink(self): return self.__fh.linktype def setfilter(self, value, optimize=1): return NotImplementedError def readpkts(self): return list(self) def dispatch(self, cnt, callback, *args): if cnt > 0: for i in range(cnt): ts, pkt = self.next() callback(ts, pkt, *args) else: for ts, pkt in self: callback(ts, pkt, *args) def loop(self, callback, *args): self.dispatch(0, callback, *args) def __iter__(self): self.__f.seek(FileHdr.__hdr_len__) while 1: buf = self.__f.read(PktHdr.__hdr_len__) if not buf: break hdr = self.__ph(buf) buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__) yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
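Snoop records are padded so that rec_len stays 4-byte aligned. A quick standalone check of the pad_len arithmetic used in Writer.writepkt above:

# Verify that n + pad_len is always a multiple of 4, as the file format requires.
for n in range(1, 13):
    pad_len = 4 - n % 4 if n % 4 else 0
    assert (n + pad_len) % 4 == 0
    print(n, pad_len)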
bsd-3-clause
-4,186,239,982,362,686,000
-2,990,828,894,621,371,400
24.110169
78
0.497131
false
amanikamail/flexx
docs/scripts/genuiclasses.py
20
3063
""" Generate docs for ui classes. """ import os from types import ModuleType from flexx import ui, app THIS_DIR = os.path.dirname(os.path.abspath(__file__)) DOC_DIR = os.path.abspath(os.path.join(THIS_DIR, '..')) OUTPUT_DIR = os.path.join(DOC_DIR, 'ui') created_files = [] def main(): pages = {} class_names = [] # Get all pages and class names for mod in ui.__dict__.values(): if isinstance(mod, ModuleType): classes = [] for w in mod.__dict__.values(): if isinstance(w, type) and issubclass(w, ui.Widget): if w.__module__ == mod.__name__: classes.append(w) if classes: classes.sort(key=lambda x: len(x.mro())) class_names.extend([w.__name__ for w in classes]) pages[mod.__name__] = classes # Create page for each module for module_name, classes in sorted(pages.items()): page_name = module_name.split('.')[-1].strip('_').capitalize() docs = '%s\n%s\n\n' % (page_name, '-' * len(page_name)) docs += '.. automodule:: %s\n\n' % module_name docs += '----\n\n' for cls in classes: name = cls.__name__ # Insert info on base clases if 'Inherits from' not in cls.__doc__: bases = [':class:`%s <flexx.ui.%s>`' % (bcls.__name__, bcls.__name__) for bcls in cls.__bases__] line = 'Inherits from: ' + ', '.join(bases) cls.__doc__ = line + '\n\n' + (cls.__doc__ or '') # Create doc for class docs += '.. autoclass:: flexx.ui.%s\n' % name docs += ' :members:\n\n' # Write doc page filename = os.path.join(OUTPUT_DIR, page_name.lower() + '.rst') created_files.append(filename) open(filename, 'wt').write(docs) # Create overview doc page docs = 'Ui API' docs += '\n' + '=' * len(docs) + '\n\n' docs += 'This is a list of all widget classes provided by ``flexx.ui``. ' docs += ':class:`Widget <flexx.ui.Widget>` is the base class of all widgets. ' docs += 'There is one document per widget type. Each document contains ' docs += 'examples with the widget(s) defined within.\n\n' for name in sorted(class_names): docs += '* :class:`%s <flexx.ui.%s>`\n' % (name, name) docs += '\n.. toctree::\n :maxdepth: 1\n :hidden:\n\n' for module_name in sorted(pages.keys()): docs += ' %s\n' % module_name.split('.')[-1].strip('_').lower() # Write overview doc page filename = os.path.join(OUTPUT_DIR, 'api.rst') created_files.append(filename) open(filename, 'wt').write(docs) print(' generated widget docs with %i pages and %i widgets' % (len(pages), len(class_names))) def clean(): while created_files: filename = created_files.pop() if os.path.isfile(filename): os.remove(filename)
bsd-2-clause
-9,217,519,785,993,282,000
7,058,974,379,772,733,000
34.616279
98
0.522037
false
eeshangarg/oh-mainline
vendor/packages/django-extensions/django_extensions/management/commands/sqldiff.py
17
30783
""" sqldiff.py - Prints the (approximated) difference between models and database TODO: - better support for relations - better support for constraints (mainly postgresql?) - support for table spaces with postgresql - when a table is not managed (meta.managed==False) then only do a one-way sqldiff ? show differences from db->table but not the other way around since it's not managed. KNOWN ISSUES: - MySQL has by far the most problems with introspection. Please be carefull when using MySQL with sqldiff. - Booleans are reported back as Integers, so there's know way to know if there was a real change. - Varchar sizes are reported back without unicode support so their size may change in comparison to the real length of the varchar. - Some of the 'fixes' to counter these problems might create false positives or false negatives. """ from django.core.management.base import BaseCommand from django.core.management import sql as _sql from django.core.management import CommandError from django.core.management.color import no_style from django.db import transaction, connection from django.db.models.fields import IntegerField from optparse import make_option ORDERING_FIELD = IntegerField('_order', null=True) def flatten(l, ltypes=(list, tuple)): ltype = type(l) l = list(l) i = 0 while i < len(l): while isinstance(l[i], ltypes): if not l[i]: l.pop(i) i -= 1 break else: l[i:i + 1] = l[i] i += 1 return ltype(l) def all_local_fields(meta): all_fields = meta.local_fields[:] for parent in meta.parents: all_fields.extend(all_local_fields(parent._meta)) return all_fields class SQLDiff(object): DATA_TYPES_REVERSE_OVERRIDE = {} DIFF_TYPES = [ 'error', 'comment', 'table-missing-in-db', 'field-missing-in-db', 'field-missing-in-model', 'index-missing-in-db', 'index-missing-in-model', 'unique-missing-in-db', 'unique-missing-in-model', 'field-type-differ', 'field-parameter-differ', ] DIFF_TEXTS = { 'error': 'error: %(0)s', 'comment': 'comment: %(0)s', 'table-missing-in-db': "table '%(0)s' missing in database", 'field-missing-in-db': "field '%(1)s' defined in model but missing in database", 'field-missing-in-model': "field '%(1)s' defined in database but missing in model", 'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database", 'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model", 'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database", 'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model", 'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'", 'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'", } SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2])) SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1]))) SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1]))) # FIXME: need to lookup index name instead of just appending _idx to table + fieldname SQL_INDEX_MISSING_IN_MODEL = lambda self, style, 
qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2])))) SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1]))) # FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2]))))) SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2])) SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2])) SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0])) SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0])) SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0]) def __init__(self, app_models, options): self.app_models = app_models self.options = options self.dense = options.get('dense_output', False) try: self.introspection = connection.introspection except AttributeError: from django.db import get_introspection_module self.introspection = get_introspection_module() self.cursor = connection.cursor() self.django_tables = self.get_django_tables(options.get('only_existing', True)) self.db_tables = self.introspection.get_table_list(self.cursor) self.differences = [] self.unknown_db_fields = {} self.DIFF_SQL = { 'error': self.SQL_ERROR, 'comment': self.SQL_COMMENT, 'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB, 'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB, 'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL, 'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB, 'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL, 'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB, 'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL, 'field-type-differ': self.SQL_FIELD_TYPE_DIFFER, 'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER, } def add_app_model_marker(self, app_label, model_name): self.differences.append((app_label, model_name, [])) def add_difference(self, diff_type, *args): assert diff_type in self.DIFF_TYPES, 'Unknown difference type' self.differences[-1][-1].append((diff_type, args)) def get_django_tables(self, only_existing): try: django_tables = self.introspection.django_table_names(only_existing=only_existing) except AttributeError: # backwards compatibility for before introspection refactoring (r8296) try: django_tables = _sql.django_table_names(only_existing=only_existing) except AttributeError: # backwards compatibility for before svn r7568 django_tables = _sql.django_table_list(only_existing=only_existing) return django_tables def sql_to_dict(self, query, param): """ sql_to_dict(query, param) -> list of dicts code from snippet at http://www.djangosnippets.org/snippets/1383/ """ cursor = connection.cursor() cursor.execute(query, param) fieldnames = [name[0] for name in 
cursor.description] result = [] for row in cursor.fetchall(): rowset = [] for field in zip(fieldnames, row): rowset.append(field) result.append(dict(rowset)) return result def get_field_model_type(self, field): return field.db_type(connection=connection) def get_field_db_type(self, description, field=None, table_name=None): from django.db import models # DB-API cursor.description #(name, type_code, display_size, internal_size, precision, scale, null_ok) = description type_code = description[1] if type_code in self.DATA_TYPES_REVERSE_OVERRIDE: reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code] else: try: try: reverse_type = self.introspection.data_types_reverse[type_code] except AttributeError: # backwards compatibility for before introspection refactoring (r8296) reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code) except KeyError: # type_code not found in data_types_reverse map key = (self.differences[-1][:2], description[:2]) if key not in self.unknown_db_fields: self.unknown_db_fields[key] = 1 self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code)) return None kwargs = {} if isinstance(reverse_type, tuple): kwargs.update(reverse_type[1]) reverse_type = reverse_type[0] if reverse_type == "CharField" and description[3]: kwargs['max_length'] = description[3] if reverse_type == "DecimalField": kwargs['max_digits'] = description[4] kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5] if description[6]: kwargs['blank'] = True if not reverse_type in ('TextField', 'CharField'): kwargs['null'] = True if '.' in reverse_type: from django.utils import importlib # TODO: when was importlib added to django.utils ? and do we # need to add backwards compatibility code ? 
module_path, package_name = reverse_type.rsplit('.', 1) module = importlib.import_module(module_path) field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection) else: field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection) return field_db_type def strip_parameters(self, field_type): if field_type and field_type != 'double precision': return field_type.split(" ")[0].split("(")[0] return field_type def find_unique_missing_in_db(self, meta, table_indexes, table_name): for field in all_local_fields(meta): if field.unique: attname = field.db_column or field.attname if attname in table_indexes and table_indexes[attname]['unique']: continue self.add_difference('unique-missing-in-db', table_name, attname) def find_unique_missing_in_model(self, meta, table_indexes, table_name): # TODO: Postgresql does not list unique_togethers in table_indexes # MySQL does fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)]) for att_name, att_opts in table_indexes.iteritems(): if att_opts['unique'] and att_name in fields and not fields[att_name]: if att_name in flatten(meta.unique_together): continue self.add_difference('unique-missing-in-model', table_name, att_name) def find_index_missing_in_db(self, meta, table_indexes, table_name): for field in all_local_fields(meta): if field.db_index: attname = field.db_column or field.attname if not attname in table_indexes: self.add_difference('index-missing-in-db', table_name, attname) def find_index_missing_in_model(self, meta, table_indexes, table_name): fields = dict([(field.name, field) for field in all_local_fields(meta)]) for att_name, att_opts in table_indexes.iteritems(): if att_name in fields: field = fields[att_name] if field.db_index: continue if att_opts['primary_key'] and field.primary_key: continue if att_opts['unique'] and field.unique: continue if att_opts['unique'] and att_name in flatten(meta.unique_together): continue self.add_difference('index-missing-in-model', table_name, att_name) def find_field_missing_in_model(self, fieldmap, table_description, table_name): for row in table_description: if row[0] not in fieldmap: self.add_difference('field-missing-in-model', table_name, row[0]) def find_field_missing_in_db(self, fieldmap, table_description, table_name): db_fields = [row[0] for row in table_description] for field_name, field in fieldmap.iteritems(): if field_name not in db_fields: self.add_difference('field-missing-in-db', table_name, field_name, field.db_type(connection=connection)) def find_field_type_differ(self, meta, table_description, table_name, func=None): db_fields = dict([(row[0], row) for row in table_description]) for field in all_local_fields(meta): if field.name not in db_fields: continue description = db_fields[field.name] model_type = self.strip_parameters(self.get_field_model_type(field)) db_type = self.strip_parameters(self.get_field_db_type(description, field)) # use callback function if defined if func: model_type, db_type = func(field, description, model_type, db_type) if not model_type == db_type: self.add_difference('field-type-differ', table_name, field.name, model_type, db_type) def find_field_parameter_differ(self, meta, table_description, table_name, func=None): db_fields = dict([(row[0], row) for row in table_description]) for field in all_local_fields(meta): if field.name not in db_fields: continue description = db_fields[field.name] model_type = self.get_field_model_type(field) db_type = 
self.get_field_db_type(description, field, table_name) if not self.strip_parameters(model_type) == self.strip_parameters(db_type): continue # use callback function if defined if func: model_type, db_type = func(field, description, model_type, db_type) if not model_type == db_type: self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type) @transaction.commit_manually def find_differences(self): cur_app_label = None for app_model in self.app_models: meta = app_model._meta table_name = meta.db_table app_label = meta.app_label if cur_app_label != app_label: # Marker indicating start of difference scan for this table_name self.add_app_model_marker(app_label, app_model.__name__) #if not table_name in self.django_tables: if not table_name in self.db_tables: # Table is missing from database self.add_difference('table-missing-in-db', table_name) continue table_indexes = self.introspection.get_indexes(self.cursor, table_name) fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)]) # add ordering field if model uses order_with_respect_to if meta.order_with_respect_to: fieldmap['_order'] = ORDERING_FIELD try: table_description = self.introspection.get_table_description(self.cursor, table_name) except Exception, e: self.add_difference('error', 'unable to introspect table: %s' % str(e).strip()) transaction.rollback() # reset transaction continue else: transaction.commit() # Fields which are defined in database but not in model # 1) find: 'unique-missing-in-model' self.find_unique_missing_in_model(meta, table_indexes, table_name) # 2) find: 'index-missing-in-model' self.find_index_missing_in_model(meta, table_indexes, table_name) # 3) find: 'field-missing-in-model' self.find_field_missing_in_model(fieldmap, table_description, table_name) # Fields which are defined in models but not in database # 4) find: 'field-missing-in-db' self.find_field_missing_in_db(fieldmap, table_description, table_name) # 5) find: 'unique-missing-in-db' self.find_unique_missing_in_db(meta, table_indexes, table_name) # 6) find: 'index-missing-in-db' self.find_index_missing_in_db(meta, table_indexes, table_name) # Fields which have a different type or parameters # 7) find: 'type-differs' self.find_field_type_differ(meta, table_description, table_name) # 8) find: 'type-parameter-differs' self.find_field_parameter_differ(meta, table_description, table_name) def print_diff(self, style=no_style()): """ print differences to stdout """ if self.options.get('sql', True): self.print_diff_sql(style) else: self.print_diff_text(style) def print_diff_text(self, style): cur_app_label = None for app_label, model_name, diffs in self.differences: if not diffs: continue if not self.dense and cur_app_label != app_label: print style.NOTICE("+ Application:"), style.SQL_TABLE(app_label) cur_app_label = app_label if not self.dense: print style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name) for diff in diffs: diff_type, diff_args = diff text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args)) text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'"))) if not self.dense: print style.NOTICE("|--+"), text else: print style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text def print_diff_sql(self, style): cur_app_label = None qn = connection.ops.quote_name has_differences = max([len(diffs) for app_label, model_name, diffs in 
self.differences]) if not has_differences: if not self.dense: print style.SQL_KEYWORD("-- No differences") else: print style.SQL_KEYWORD("BEGIN;") for app_label, model_name, diffs in self.differences: if not diffs: continue if not self.dense and cur_app_label != app_label: print style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label)) cur_app_label = app_label if not self.dense: print style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name)) for diff in diffs: diff_type, diff_args = diff text = self.DIFF_SQL[diff_type](style, qn, diff_args) if self.dense: text = text.replace("\n\t", " ") print text print style.SQL_KEYWORD("COMMIT;") class GenericSQLDiff(SQLDiff): pass class MySQLDiff(SQLDiff): # All the MySQL hacks together create something of a problem # Fixing one bug in MySQL creates another issue. So just keep in mind # that this is way unreliable for MySQL atm. def get_field_db_type(self, description, field=None, table_name=None): from MySQLdb.constants import FIELD_TYPE # weird bug? in mysql db-api where it returns three times the correct value for field length # if i remember correctly it had something todo with unicode strings # TODO: Fix this is a more meaningful and better understood manner description = list(description) if description[1] not in [FIELD_TYPE.TINY, FIELD_TYPE.SHORT]: # exclude tinyints from conversion. description[3] = description[3] / 3 description[4] = description[4] / 3 db_type = super(MySQLDiff, self).get_field_db_type(description) if not db_type: return if field: if field.primary_key and (db_type == 'integer' or db_type == 'bigint'): db_type += ' AUTO_INCREMENT' # MySQL isn't really sure about char's and varchar's like sqlite field_type = self.get_field_model_type(field) # Fix char/varchar inconsistencies if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar': db_type = db_type.lstrip("var") # They like to call 'bool's 'tinyint(1)' and introspection makes that a integer # just convert it back to it's proper type, a bool is a bool and nothing else. if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1: db_type = 'bool' if db_type == 'integer' and description[1] == FIELD_TYPE.SHORT: db_type = 'smallint UNSIGNED' # FIXME: what about if it's not UNSIGNED ? return db_type class SqliteSQLDiff(SQLDiff): # Unique does not seem to be implied on Sqlite for Primary_key's # if this is more generic among databases this might be usefull # to add to the superclass's find_unique_missing_in_db method def find_unique_missing_in_db(self, meta, table_indexes, table_name): for field in all_local_fields(meta): if field.unique: attname = field.db_column or field.attname if attname in table_indexes and table_indexes[attname]['unique']: continue if attname in table_indexes and table_indexes[attname]['primary_key']: continue self.add_difference('unique-missing-in-db', table_name, attname) # Finding Indexes by using the get_indexes dictionary doesn't seem to work # for sqlite. 
def find_index_missing_in_db(self, meta, table_indexes, table_name): pass def find_index_missing_in_model(self, meta, table_indexes, table_name): pass def get_field_db_type(self, description, field=None, table_name=None): db_type = super(SqliteSQLDiff, self).get_field_db_type(description) if not db_type: return if field: field_type = self.get_field_model_type(field) # Fix char/varchar inconsistencies if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar': db_type = db_type.lstrip("var") return db_type class PostgresqlSQLDiff(SQLDiff): DATA_TYPES_REVERSE_OVERRIDE = { 1042: 'CharField', # postgis types (TODO: support is very incomplete) 17506: 'django.contrib.gis.db.models.fields.PointField', 55902: 'django.contrib.gis.db.models.fields.MultiPolygonField', } # Hopefully in the future we can add constraint checking and other more # advanced checks based on this database. SQL_LOAD_CONSTRAINTS = """ SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid) FROM pg_constraint INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey) INNER JOIN pg_class ON conrelid=pg_class.oid INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname; """ SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2])) SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2])) def __init__(self, app_models, options): SQLDiff.__init__(self, app_models, options) self.check_constraints = {} self.load_constraints() def load_constraints(self): for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []): key = (dct['nspname'], dct['relname'], dct['attname']) if 'CHECK' in dct['pg_get_constraintdef']: self.check_constraints[key] = dct def get_field_db_type(self, description, field=None, table_name=None): db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description) if not db_type: return if field: if field.primary_key and db_type == 'integer': db_type = 'serial' if table_name: tablespace = field.db_tablespace if tablespace == "": tablespace = "public" check_constraint = self.check_constraints.get((tablespace, table_name, field.attname), {}).get('pg_get_constraintdef', None) if check_constraint: check_constraint = check_constraint.replace("((", "(") check_constraint = check_constraint.replace("))", ")") check_constraint = '("'.join([')' in e and '" '.join(e.split(" ", 1)) or e for e in check_constraint.split("(")]) # TODO: might be more then one constraint in definition ? 
db_type += ' ' + check_constraint
        return db_type

    """
    def find_field_type_differ(self, meta, table_description, table_name):
        def callback(field, description, model_type, db_type):
            if field.primary_key and db_type=='integer':
                db_type = 'serial'
            return model_type, db_type
        super(PostgresqlSQLDiff, self).find_field_type_differs(meta, table_description, table_name, callback)
    """

DATABASE_SQLDIFF_CLASSES = {
    'postgresql_psycopg2' : PostgresqlSQLDiff,
    'postgresql': PostgresqlSQLDiff,
    'mysql': MySQLDiff,
    'sqlite3': SqliteSQLDiff,
    'oracle': GenericSQLDiff
}

class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--all-applications', '-a', action='store_true', dest='all_applications',
                    help="Automatically include all applications from INSTALLED_APPS."),
        make_option('--not-only-existing', '-e', action='store_false', dest='only_existing',
                    help="Check all tables that exist in the database, not only tables that should exist based on models."),
        make_option('--dense-output', '-d', action='store_true', dest='dense_output',
                    help="Shows the output in dense format; normally output is spread over multiple lines."),
        make_option('--output_text', '-t', action='store_false', dest='sql', default=True,
                    help="Outputs the differences as descriptive text instead of SQL"),
    )

    help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).

It indicates how columns in the database are different from the sql that
would be generated by Django. This command is not a database migration tool
(though it can certainly help); its purpose is to show the current differences
as a way to check/debug your models compared to the real database tables and
columns."""

    output_transaction = False
    args = '<appname appname ...>'

    def handle(self, *app_labels, **options):
        from django import VERSION
        if VERSION[:2] < (1, 0):
            raise CommandError("SQLDiff only supports Django 1.0 or higher!")
        from django.db import models
        from django.conf import settings
        if settings.DATABASE_ENGINE == 'dummy':
            # This must be the "dummy" database backend, which means the user
            # hasn't set DATABASE_ENGINE.
            raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
                               "because you haven't specified the DATABASE_ENGINE setting.\n" +
                               "Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.")

        if options.get('all_applications', False):
            app_models = models.get_models()
        else:
            if not app_labels:
                raise CommandError('Enter at least one appname.')
            try:
                app_list = [models.get_app(app_label) for app_label in app_labels]
            except (models.ImproperlyConfigured, ImportError), e:
                raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)

            app_models = []
            for app in app_list:
                app_models.extend(models.get_models(app))

        ## remove all models that are not managed by Django
        #app_models = [model for model in app_models if getattr(model._meta, 'managed', True)]

        if not app_models:
            raise CommandError('Unable to execute sqldiff: no models found.')

        engine = settings.DATABASE_ENGINE
        if not engine:
            engine = connection.__module__.split('.')[-2]

        cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
        sqldiff_instance = cls(app_models, options)
        sqldiff_instance.find_differences()
        sqldiff_instance.print_diff(self.style)
        return
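The type comparison above hinges on strip_parameters, which drops length/precision qualifiers so that, e.g., 'varchar(32)' and 'varchar(64)' compare as the same base type. A standalone check of that behavior (the function body is copied from the class above; the inputs are illustrative):

def strip_parameters(field_type):
    # 'double precision' contains a space but is a single type name, so it is
    # exempted from the split; everything else loses its '(...)' suffix and
    # any trailing modifiers after the first space.
    if field_type and field_type != 'double precision':
        return field_type.split(" ")[0].split("(")[0]
    return field_type


assert strip_parameters('varchar(32)') == 'varchar'
assert strip_parameters('numeric(10, 2)') == 'numeric'
assert strip_parameters('integer UNSIGNED') == 'integer'
assert strip_parameters('double precision') == 'double precision'
print('ok')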
agpl-3.0
2,417,731,144,190,329,000
2,602,208,542,603,239,400
48.095694
266
0.608615
false
jjs0sbw/CSPLN
apps/scaffolding/linux/web2py/gluon/contrib/fpdf/php.py
13
1256
#!/usr/bin/env python # -*- coding: latin-1 -*- # fpdf php helpers: def substr(s, start, length=-1): if length < 0: length=len(s)-start return s[start:start+length] def sprintf(fmt, *args): return fmt % args def print_r(array): if not isinstance(array, dict): array = dict([(k, k) for k in array]) for k, v in array.items(): print "[%s] => %s" % (k, v), def UTF8ToUTF16BE(instr, setbom=True): "Converts UTF-8 strings to UTF16-BE." outstr = "" if (setbom): outstr += "\xFE\xFF"; if not isinstance(instr, unicode): instr = instr.decode('UTF-8') outstr += instr.encode('UTF-16BE') return outstr def UTF8StringToArray(instr): "Converts UTF-8 strings to codepoints array" return [ord(c) for c in instr] # ttfints php helpers: def die(msg): raise RuntimeError(msg) def str_repeat(s, count): return s * count def str_pad(s, pad_length=0, pad_char= " ", pad_type= +1 ): if pad_type<0: # pad left return s.rjust(pad_length, pad_char) elif pad_type>0: # pad right return s.ljust(pad_length, pad_char) else: # pad both return s.center(pad_length, pad_char) strlen = count = lambda s: len(s)
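A few quick standalone checks of the PHP-compatibility helpers above, showing the substr/str_pad semantics they mirror (the bodies are copied from the module; the asserts run under the Python 2 the module targets and under Python 3 as well):

def substr(s, start, length=-1):
    if length < 0:
        length = len(s) - start
    return s[start:start + length]


def str_pad(s, pad_length=0, pad_char=" ", pad_type=+1):
    if pad_type < 0:    # pad left, like PHP's STR_PAD_LEFT
        return s.rjust(pad_length, pad_char)
    elif pad_type > 0:  # pad right (PHP's default)
        return s.ljust(pad_length, pad_char)
    return s.center(pad_length, pad_char)


assert substr("abcdef", 2) == "cdef"
assert substr("abcdef", 1, 3) == "bcd"
assert str_pad("7", 3, "0", -1) == "007"
assert str_pad("ab", 5) == "ab   "
print("ok")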
gpl-3.0
2,279,895,118,370,422,500
-2,336,792,224,682,680,000
24.653061
59
0.590764
false
rismalrv/edx-platform
common/djangoapps/course_modes/views.py
62
10994
""" Views for the course_mode module """ import decimal from ipware.ip import get_ip from django.core.urlresolvers import reverse from django.http import HttpResponse, HttpResponseBadRequest from django.shortcuts import redirect from django.views.generic.base import View from django.utils.translation import ugettext as _ from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from edxmako.shortcuts import render_to_response from course_modes.models import CourseMode from courseware.access import has_access from student.models import CourseEnrollment from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.keys import CourseKey from util.db import commit_on_success_with_read_committed from xmodule.modulestore.django import modulestore from embargo import api as embargo_api class ChooseModeView(View): """View used when the user is asked to pick a mode. When a get request is used, shows the selection page. When a post request is used, assumes that it is a form submission from the selection page, parses the response, and then sends user to the next step in the flow. """ @method_decorator(login_required) def get(self, request, course_id, error=None): """Displays the course mode choice page. Args: request (`Request`): The Django Request object. course_id (unicode): The slash-separated course key. Keyword Args: error (unicode): If provided, display this error message on the page. Returns: Response """ course_key = CourseKey.from_string(course_id) # Check whether the user has access to this course # based on country access rules. embargo_redirect = embargo_api.redirect_if_blocked( course_key, user=request.user, ip_address=get_ip(request), url=request.path ) if embargo_redirect: return redirect(embargo_redirect) enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(request.user, course_key) modes = CourseMode.modes_for_course_dict(course_key) # We assume that, if 'professional' is one of the modes, it is the *only* mode. # If we offer more modes alongside 'professional' in the future, this will need to route # to the usual "choose your track" page same is true for no-id-professional mode. has_enrolled_professional = (CourseMode.is_professional_slug(enrollment_mode) and is_active) if CourseMode.has_professional_mode(modes) and not has_enrolled_professional: return redirect( reverse( 'verify_student_start_flow', kwargs={'course_id': unicode(course_key)} ) ) # If there isn't a verified mode available, then there's nothing # to do on this page. The user has almost certainly been auto-registered # in the "honor" track by this point, so we send the user # to the dashboard. if not CourseMode.has_verified_mode(modes): return redirect(reverse('dashboard')) # If a user has already paid, redirect them to the dashboard. if is_active and (enrollment_mode in CourseMode.VERIFIED_MODES + [CourseMode.NO_ID_PROFESSIONAL_MODE]): return redirect(reverse('dashboard')) donation_for_course = request.session.get("donation_for_course", {}) chosen_price = donation_for_course.get(unicode(course_key), None) course = modulestore().get_course(course_key) # When a credit mode is available, students will be given the option # to upgrade from a verified mode to a credit mode at the end of the course. # This allows students who have completed photo verification to be eligible # for univerity credit. 
# Since credit isn't one of the selectable options on the track selection page, # we need to check *all* available course modes in order to determine whether # a credit mode is available. If so, then we show slightly different messaging # for the verified track. has_credit_upsell = any( CourseMode.is_credit_mode(mode) for mode in CourseMode.modes_for_course(course_key, only_selectable=False) ) context = { "course_modes_choose_url": reverse("course_modes_choose", kwargs={'course_id': course_key.to_deprecated_string()}), "modes": modes, "has_credit_upsell": has_credit_upsell, "course_name": course.display_name_with_default, "course_org": course.display_org_with_default, "course_num": course.display_number_with_default, "chosen_price": chosen_price, "error": error, "responsive": True, "nav_hidden": True, } if "verified" in modes: context["suggested_prices"] = [ decimal.Decimal(x.strip()) for x in modes["verified"].suggested_prices.split(",") if x.strip() ] context["currency"] = modes["verified"].currency.upper() context["min_price"] = modes["verified"].min_price context["verified_name"] = modes["verified"].name context["verified_description"] = modes["verified"].description return render_to_response("course_modes/choose.html", context) @method_decorator(login_required) @method_decorator(commit_on_success_with_read_committed) def post(self, request, course_id): """Takes the form submission from the page and parses it. Args: request (`Request`): The Django Request object. course_id (unicode): The slash-separated course key. Returns: Status code 400 when the requested mode is unsupported. When the honor mode is selected, redirects to the dashboard. When the verified mode is selected, returns error messages if the indicated contribution amount is invalid or below the minimum, otherwise redirects to the verification flow. """ course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) user = request.user # This is a bit redundant with logic in student.views.change_enrollment, # but I don't really have the time to refactor it more nicely and test. course = modulestore().get_course(course_key) if not has_access(user, 'enroll', course): error_msg = _("Enrollment is closed") return self.get(request, course_id, error=error_msg) requested_mode = self._get_requested_mode(request.POST) allowed_modes = CourseMode.modes_for_course_dict(course_key) if requested_mode not in allowed_modes: return HttpResponseBadRequest(_("Enrollment mode not supported")) if requested_mode == 'honor': # The user will have already been enrolled in the honor mode at this # point, so we just redirect them to the dashboard, thereby avoiding # hitting the database a second time attempting to enroll them. 
return redirect(reverse('dashboard')) mode_info = allowed_modes[requested_mode] if requested_mode == 'verified': amount = request.POST.get("contribution") or \ request.POST.get("contribution-other-amt") or 0 try: # Validate the amount passed in and force it into two digits amount_value = decimal.Decimal(amount).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN) except decimal.InvalidOperation: error_msg = _("Invalid amount selected.") return self.get(request, course_id, error=error_msg) # Check for minimum pricing if amount_value < mode_info.min_price: error_msg = _("No selected price or selected price is too low.") return self.get(request, course_id, error=error_msg) donation_for_course = request.session.get("donation_for_course", {}) donation_for_course[unicode(course_key)] = amount_value request.session["donation_for_course"] = donation_for_course return redirect( reverse( 'verify_student_start_flow', kwargs={'course_id': unicode(course_key)} ) ) def _get_requested_mode(self, request_dict): """Get the user's requested mode Args: request_dict (`QueryDict`): A dictionary-like object containing all given HTTP POST parameters. Returns: The course mode slug corresponding to the choice in the POST parameters, None if the choice in the POST parameters is missing or is an unsupported mode. """ if 'verified_mode' in request_dict: return 'verified' if 'honor_mode' in request_dict: return 'honor' else: return None def create_mode(request, course_id): """Add a mode to the course corresponding to the given course ID. Only available when settings.FEATURES['MODE_CREATION_FOR_TESTING'] is True. Attempts to use the following querystring parameters from the request: `mode_slug` (str): The mode to add, either 'honor', 'verified', or 'professional' `mode_display_name` (str): Describes the new course mode `min_price` (int): The minimum price a user must pay to enroll in the new course mode `suggested_prices` (str): Comma-separated prices to suggest to the user. `currency` (str): The currency in which to list prices. By default, this endpoint will create an 'honor' mode for the given course with display name 'Honor Code', a minimum price of 0, no suggested prices, and using USD as the currency. Args: request (`Request`): The Django Request object. course_id (unicode): A course ID. Returns: Response """ PARAMETERS = { 'mode_slug': u'honor', 'mode_display_name': u'Honor Code Certificate', 'min_price': 0, 'suggested_prices': u'', 'currency': u'usd', } # Try pulling querystring parameters out of the request for parameter, default in PARAMETERS.iteritems(): PARAMETERS[parameter] = request.GET.get(parameter, default) # Attempt to create the new mode for the given course course_key = CourseKey.from_string(course_id) CourseMode.objects.get_or_create(course_id=course_key, **PARAMETERS) # Return a success message and a 200 response return HttpResponse("Mode '{mode_slug}' created for '{course}'.".format( mode_slug=PARAMETERS['mode_slug'], course=course_id ))
agpl-3.0
4,716,586,360,564,254,000
3,576,299,761,298,551,000
40.330827
127
0.641714
false
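The contribution handling in ChooseModeView.post above (fall back to 0, truncate to two decimal places with ROUND_DOWN, then enforce the mode's minimum price) is easy to exercise in isolation. A minimal sketch; validate_contribution is a hypothetical helper written for illustration, not part of the edx-platform code:

import decimal

def validate_contribution(raw_amount, min_price):
    # Mirrors the POST handling above: coerce the raw form value,
    # truncate to cents, then check against the mode's minimum price.
    try:
        value = decimal.Decimal(raw_amount or 0).quantize(
            decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
    except decimal.InvalidOperation:
        return None, "Invalid amount selected."
    if value < min_price:
        return None, "No selected price or selected price is too low."
    return value, None

print(validate_contribution("49.999", 25))    # (Decimal('49.99'), None)
print(validate_contribution("nonsense", 25))  # (None, 'Invalid amount selected.')
print(validate_contribution("10", 25))        # (None, '...price is too low.')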
stevenwudi/Kernelized_Correlation_Filter
CNN_training.py
1
3640
import numpy as np from keras.optimizers import SGD from models.CNN_CIFAR import cnn_cifar_batchnormalisation, cnn_cifar_small, cnn_cifar_nodropout, \ cnn_cifar_small_batchnormalisation from models.DataLoader import DataLoader from scripts.progress_bar import printProgress from time import time, localtime # this is a predefined dataloader loader = DataLoader(batch_size=32) # construct the model here (pre-defined model) model = cnn_cifar_small_batchnormalisation(loader.image_shape) print(model.name) nb_epoch = 200 early_stopping = True early_stopping_count = 0 early_stopping_wait = 3 train_loss = [] valid_loss = [] learning_rate = [0.0001, 0.001, 0.01] # let's train the model using SGD + momentum (how original). sgd = SGD(lr=learning_rate[-1], decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='mean_squared_error', optimizer=sgd) # load validation data from the h5py file (heavy lifting here) x_valid, y_valid = loader.get_valid() best_valid = np.inf for e in range(nb_epoch): print("epoch %d" % e) loss_list = [] time_list = [] time_start = time() for i in range(loader.n_iter_train): time_start_batch = time() X_batch, Y_batch = loader.next_train_batch() loss_list.append(model.train_on_batch(X_batch, Y_batch)) # calculate some time information time_list.append(time() - time_start_batch) eta = (loader.n_iter_train - i) * np.array(time_list).mean() printProgress(i, loader.n_iter_train-1, prefix='Progress:', suffix='batch error: %0.5f, ETA: %0.2f sec.'%(np.array(loss_list).mean(), eta), barLength=50) printProgress(i, loader.n_iter_train - 1, prefix='Progress:', suffix='batch error: %0.5f' % (np.array(loss_list).mean()), barLength=50) train_loss.append(np.asarray(loss_list).mean()) print('training loss is %f, one epoch uses: %0.2f sec' % (train_loss[-1], time() - time_start)) valid_loss.append(model.evaluate(x_valid, y_valid)) print('valid loss is %f' % valid_loss[-1]) if best_valid > valid_loss[-1]: early_stopping_count = 0 print('saving best valid result...') best_valid = valid_loss[-1] model.save('./models/CNN_Model_OBT100_multi_cnn_best_valid_'+model.name+'.h5') else: # we wait for early stopping loop until a certain time early_stopping_count += 1 if early_stopping_count > early_stopping_wait: early_stopping_count = 0 if len(learning_rate) > 1: learning_rate.pop() print('decreasing the learning rate to: %f'%learning_rate[-1]) model.optimizer.lr.set_value(learning_rate[-1]) else: break lt = localtime() lt_str = str(lt.tm_year)+"."+str(lt.tm_mon).zfill(2)+"." \ +str(lt.tm_mday).zfill(2)+"."+str(lt.tm_hour).zfill(2)+"."\ +str(lt.tm_min).zfill(2)+"."+str(lt.tm_sec).zfill(2) np.savetxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt', train_loss) np.savetxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt', valid_loss) model.save('./models/CNN_Model_OBT100_multi_cnn_'+model.name+'_final.h5') print("done") #### we show some visualisation here import matplotlib.pyplot as plt import matplotlib.patches as mpatches train_loss = np.loadtxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt') valid_loss = np.loadtxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt') plt.plot(train_loss, 'b') plt.plot(valid_loss, 'r') blue_label = mpatches.Patch(color='blue', label='train_loss') red_label = mpatches.Patch(color='red', label='valid_loss') plt.legend(handles=[blue_label, red_label])
gpl-3.0
-672,818,046,616,284,800
8,985,820,513,662,046,000
39.898876
161
0.657143
false
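The training loop above implements its own patience-based early stopping with a staged learning-rate schedule: after `early_stopping_wait` epochs without a new best validation loss, it pops the next (smaller) rate from the list, and stops once no rates remain. The schedule logic, pulled out as a standalone sketch (names and sample losses are illustrative, not from the repository):

def lr_schedule(valid_losses, rates, wait=3):
    # rates is ordered smallest-first, e.g. [0.0001, 0.001, 0.01];
    # the rate currently in use is rates[-1], as in the loop above.
    best = float('inf')
    count = 0
    rates = list(rates)
    for loss in valid_losses:
        if loss < best:
            best, count = loss, 0
        else:
            count += 1
            if count > wait:
                count = 0
                if len(rates) > 1:
                    rates.pop()
                    print('decreasing the learning rate to: %f' % rates[-1])
                else:
                    print('early stopping')
                    break
    return rates[-1]

lr_schedule([1.0, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0],
            [0.0001, 0.001, 0.01])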
wakatime/wakatime
wakatime/packages/py27/pygments/lexers/sas.py
4
9449
# -*- coding: utf-8 -*- """ pygments.lexers.sas ~~~~~~~~~~~~~~~~~~~ Lexer for SAS. :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, words from pygments.token import Comment, Keyword, Name, Number, String, Text, \ Other, Generic __all__ = ['SASLexer'] class SASLexer(RegexLexer): """ For `SAS <http://www.sas.com/>`_ files. .. versionadded:: 2.2 """ # Syntax from syntax/sas.vim by James Kidd <james.kidd@covance.com> name = 'SAS' aliases = ['sas'] filenames = ['*.SAS', '*.sas'] mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas'] flags = re.IGNORECASE | re.MULTILINE builtins_macros = ( "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp", "display", "do", "else", "end", "eval", "global", "goto", "if", "index", "input", "keydef", "label", "left", "length", "let", "local", "lowcase", "macro", "mend", "nrquote", "nrstr", "put", "qleft", "qlowcase", "qscan", "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan", "str", "substr", "superq", "syscall", "sysevalf", "sysexec", "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput", "then", "to", "trim", "unquote", "until", "upcase", "verify", "while", "window" ) builtins_conditionals = ( "do", "if", "then", "else", "end", "until", "while" ) builtins_statements = ( "abort", "array", "attrib", "by", "call", "cards", "cards4", "catname", "continue", "datalines", "datalines4", "delete", "delim", "delimiter", "display", "dm", "drop", "endsas", "error", "file", "filename", "footnote", "format", "goto", "in", "infile", "informat", "input", "keep", "label", "leave", "length", "libname", "link", "list", "lostcard", "merge", "missing", "modify", "options", "output", "out", "page", "put", "redirect", "remove", "rename", "replace", "retain", "return", "select", "set", "skip", "startsas", "stop", "title", "update", "waitsas", "where", "window", "x", "systask" ) builtins_sql = ( "add", "and", "alter", "as", "cascade", "check", "create", "delete", "describe", "distinct", "drop", "foreign", "from", "group", "having", "index", "insert", "into", "in", "key", "like", "message", "modify", "msgtype", "not", "null", "on", "or", "order", "primary", "references", "reset", "restrict", "select", "set", "table", "unique", "update", "validate", "view", "where" ) builtins_functions = ( "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc", "attrn", "band", "betainv", "blshift", "bnot", "bor", "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv", "close", "cnonct", "collate", "compbl", "compound", "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb", "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date", "datejul", "datepart", "datetime", "day", "dclose", "depdb", "depdbsl", "depsl", "depsyd", "deptab", "dequote", "dhms", "dif", "digamma", "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum", "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp", "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs", "fexist", "fget", "fileexist", "filename", "fileref", "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor", "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint", "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz", "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn", "hbound", "hms", "hosthelp", "hour", "ibessel", "index", "indexc", "indexw", "input", "inputc", "inputn", "int", "intck", "intnx", "intrr", "irr", "jbessel", "juldate", "kurtosis", "lag", "lbound", "left", 
"length", "lgamma", "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf", "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute", "mod", "month", "mopen", "mort", "n", "netpv", "nmiss", "normal", "note", "npv", "open", "ordinal", "pathname", "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke", "probbeta", "probbnml", "probchi", "probf", "probgam", "probhypr", "probit", "probnegb", "probnorm", "probt", "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau", "ranexp", "rangam", "range", "rank", "rannor", "ranpoi", "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse", "rewind", "right", "round", "saving", "scan", "sdf", "second", "sign", "sin", "sinh", "skewness", "soundex", "spedis", "sqrt", "std", "stderr", "stfips", "stname", "stnamel", "substr", "sum", "symget", "sysget", "sysmsg", "sysprod", "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv", "tnonct", "today", "translate", "tranwrd", "trigamma", "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var", "varfmt", "varinfmt", "varlabel", "varlen", "varname", "varnum", "varray", "varrayx", "vartype", "verify", "vformat", "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw", "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat", "vinformatd", "vinformatdx", "vinformatn", "vinformatnx", "vinformatw", "vinformatwx", "vinformatx", "vlabel", "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype", "vtypex", "weekday", "year", "yyq", "zipfips", "zipname", "zipnamel", "zipstate" ) tokens = { 'root': [ include('comments'), include('proc-data'), include('cards-datalines'), include('logs'), include('general'), (r'.', Text), ], # SAS is multi-line regardless, but * is ended by ; 'comments': [ (r'^\s*\*.*?;', Comment), (r'/\*.*?\*/', Comment), (r'^\s*\*(.|\n)*?;', Comment.Multiline), (r'/[*](.|\n)*?[*]/', Comment.Multiline), ], # Special highlight for proc, data, quit, run 'proc-data': [ (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]', Keyword.Reserved), ], # Special highlight cards and datalines 'cards-datalines': [ (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'), ], 'data': [ (r'(.|\n)*^\s*;\s*$', Other, '#pop'), ], # Special highlight for put NOTE|ERROR|WARNING (order matters) 'logs': [ (r'\n?^\s*%?put ', Keyword, 'log-messages'), ], 'log-messages': [ (r'NOTE(:|-).*', Generic, '#pop'), (r'WARNING(:|-).*', Generic.Emph, '#pop'), (r'ERROR(:|-).*', Generic.Error, '#pop'), include('general'), ], 'general': [ include('keywords'), include('vars-strings'), include('special'), include('numbers'), ], # Keywords, statements, functions, macros 'keywords': [ (words(builtins_statements, prefix = r'\b', suffix = r'\b'), Keyword), (words(builtins_sql, prefix = r'\b', suffix = r'\b'), Keyword), (words(builtins_conditionals, prefix = r'\b', suffix = r'\b'), Keyword), (words(builtins_macros, prefix = r'%', suffix = r'\b'), Name.Builtin), (words(builtins_functions, prefix = r'\b', suffix = r'\('), Name.Builtin), ], # Strings and user-defined variables and macros (order matters) 'vars-strings': [ (r'&[a-z_]\w{0,31}\.?', Name.Variable), (r'%[a-z_]\w{0,31}', Name.Function), (r'\'', String, 'string_squote'), (r'"', String, 'string_dquote'), ], 'string_squote': [ ('\'', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # AFAIK, macro variables are not evaluated in single quotes # (r'&', Name.Variable, 'validvar'), (r'[^$\'\\]+', String), (r'[$\'\\]', String), ], 'string_dquote': [ (r'"', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), (r'&', Name.Variable, 'validvar'), (r'[^$&"\\]+', String), 
(r'[$"\\]', String), ], 'validvar': [ (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'), ], # SAS numbers and special variables 'numbers': [ (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b', Number), ], 'special': [ (r'(null|missing|_all_|_automatic_|_character_|_n_|' r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)', Keyword.Constant), ], # 'operators': [ # (r'(-|=|<=|>=|<|>|<>|&|!=|' # r'\||\*|\+|\^|/|!|~|~=)', Operator) # ], }
bsd-3-clause
-4,895,580,682,165,869,000
-7,325,306,904,841,019,000
40.442982
78
0.465975
false
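Once this module ships with Pygments (2.2 and later, per the versionadded note), the lexer is used through the standard highlight API rather than instantiated by hand. A small usage example with an HTML formatter:

from pygments import highlight
from pygments.lexers.sas import SASLexer
from pygments.formatters import HtmlFormatter

code = '''
data work.demo;
    set sashelp.class;
    bmi = weight / (height * height) * 703;
run;

proc print data=work.demo; run;
'''

# Returns an HTML fragment with token classes driven by the
# state machine defined in the `tokens` dict above.
print(highlight(code, SASLexer(), HtmlFormatter()))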
Cubitect/ASMModSuit
ASMVillageMarker.py
1
5318
import SRenderLib from asmutils import * def create_mod(util): print '\nSearching for mappings for ASMVillageMarker...' SRenderLib.setup_lib(util) lines = util.readj('World') pos = findOps(lines,0,[['.field','protected',';'],['.field','protected','Z'],['.field','protected',';'],['.field','protected',';']]) util.setmap('VillageCollection',betweenr(lines[pos],'L',';')) util.setmap('World.villageCollectionObj',endw(lines[pos],2)) pos = findOps(lines,pos+1,[['.method','public','()L'+util.getmap('VillageCollection')]]) if pos is not None: util.setmap('World.getVillageCollection',endw(lines[pos],3)) lines = util.readj('VillageCollection') pos = findOps(lines,0,[['.method','public','()Ljava/util/List']]) util.setmap('VillageCollection.getVillageList',endw(lines[pos],3)) pos = findOps(lines,pos+1,[['.method','public',')L']]) util.setmap('Village',betweenr(lines[pos],')L',';')) lines = util.readj('Village') pos = findOps(lines,0,[['.method','public','()L']]) util.setmap('Village.getCenter',endw(lines[pos],3)) util.setmap('BlockPos',betweenr(lines[pos],')L',';')) pos = findOps(lines,pos+1,[['.method','public','()I']]) util.setmap('Village.getVillageRadius',endw(lines[pos],3)) pos = findOps(lines,pos+1,[['.method','public','()Ljava/util/List']]) util.setmap('Village.getVillageDoorInfoList',endw(lines[pos],3)) pos = findOps(lines,pos+1,[['.method','public',')L']]) util.setmap('VillageDoorInfo',betweenr(lines[pos],')L',';')) lines = util.readj('VillageDoorInfo') pos = findOps(lines,0,[['.method','public','()L']]) util.setmap('VillageDoorInfo.getDoorBlockPos',endw(lines[pos],3)) lines = util.readj('BlockPos') pos = findOps(lines,0,[['.super']]) util.setmap('Vec3i',endw(lines[pos],1)) lines = util.readj('Vec3i') pos = findOps(lines,0, [['.method','public','()I'],['stack 1 locals 1']]) util.setmap('Vec3i.getX',endw(lines[pos-1],3)) pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']]) util.setmap('Vec3i.getY',endw(lines[pos-1],3)) pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']]) util.setmap('Vec3i.getZ',endw(lines[pos-1],3)) print 'Applying ASMVillageMarker patch...' 
util.setmap('ASMVillageMarker','villagemarker/ASMVillageMarker') lines = util.readt('ASMVillageMarker') lines = '\1'.join(lines) lines = lines.replace('net/minecraft/server/integrated/IntegratedServer', util.getmap('IntegratedServer')) lines = lines.replace('net/minecraft/client/entity/EntityPlayerSP', util.getmap('EntityPlayerSP')) lines = lines.replace('net/minecraft/client/Minecraft', util.getmap('Minecraft')) lines = lines.replace('net/minecraft/world/WorldServer', util.getmap('WorldServer')) lines = lines.replace('net/minecraft/util/math/BlockPos', util.getmap('BlockPos')) lines = lines.replace('net/minecraft/village/VillageCollection', util.getmap('VillageCollection')) lines = lines.replace('net/minecraft/village/VillageDoorInfo', util.getmap('VillageDoorInfo')) lines = lines.replace('net/minecraft/village/Village', util.getmap('Village')) lines = lines.replace('thePlayer', util.getmap('Minecraft.thePlayer')) lines = lines.replace('dimension', util.getmap('Entity.dimension')) lines = lines.replace('isSingleplayer', util.getmap('Minecraft.isSingleplayer')) lines = lines.replace('worldServerForDimension', util.getmap('MinecraftServer.worldServerForDimension')) lines = lines.replace('getVillageDoorInfoList', util.getmap('Village.getVillageDoorInfoList')) lines = lines.replace('getVillageCollection', util.getmap('World.getVillageCollection')) lines = lines.replace('getVillageRadius', util.getmap('Village.getVillageRadius')) lines = lines.replace('getVillageList', util.getmap('VillageCollection.getVillageList')) lines = lines.replace('getDoorBlockPos', util.getmap('VillageDoorInfo.getDoorBlockPos')) lines = lines.replace('getIntegratedServer', util.getmap('Minecraft.getIntegratedServer')) lines = lines.replace('getMinecraft', util.getmap('Minecraft.getMinecraft')) lines = lines.replace('getCenter', util.getmap('Village.getCenter')) lines = lines.replace('getX', util.getmap('Vec3i.getX')) lines = lines.replace('getY', util.getmap('Vec3i.getY')) lines = lines.replace('getZ', util.getmap('Vec3i.getZ')) lines = lines.split('\1') util.write2mod('ASMVillageMarker',lines) print 'Injecting render call...' lines = util.readj('EntityRenderer') pos = 0 while True: pos = findOps(lines,pos+1,[['ldc','culling']]) if pos is None: break pos = findOps(lines,pos+1,[['dload'],['dload'],['dload']]) playerX = endw(lines[pos-2],1) playerY = endw(lines[pos-1],1) playerZ = endw(lines[pos ],1) pos = findOps(lines,pos+1,[['ldc','aboveClouds']]) pos = goBackTo(lines,pos,['invokevirtual']) lines.insert(pos+1,'dload '+playerX+'\n') lines.insert(pos+2,'dload '+playerY+'\n') lines.insert(pos+3,'dload '+playerZ+'\n') lines.insert(pos+4,'invokestatic Method '+util.getmap('ASMVillageMarker')+' render (DDD)V\n') util.write2mod('EntityRenderer',lines)
gpl-3.0
1,263,633,857,324,166,000
1,336,959,689,255,112,400
52.717172
136
0.668672
false
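The script above leans on helpers from asmutils (findOps, endw, betweenr, goBackTo) whose source isn't included in this record. A rough sketch of what two of them appear to do, judging only from call sites such as betweenr(lines[pos], ')L', ';') and endw(lines[pos], 3); these are guesses for illustration, not the real implementations:

def betweenr(line, left, right):
    # Assumed: substring between the last occurrence of `left`
    # and the next occurrence of `right` after it.
    start = line.rfind(left) + len(left)
    return line[start:line.index(right, start)]

def endw(line, n):
    # Assumed: the n-th whitespace-separated word (1-based) on the line.
    return line.split()[n - 1]

line = '.method public getCenter ()Lnet/minecraft/BlockPos;'
print(betweenr(line, ')L', ';'))  # net/minecraft/BlockPos
print(endw(line, 3))              # getCenter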
shadowmint/nwidget
lib/cocos2d-0.5.5/test/test_menu_items.py
1
2268
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, q"
tags = "menu items, ToggleMenuItem, MultipleMenuItem, MenuItem, EntryMenuItem, ImageMenuItem, ColorMenuItem"

import pyglet
from pyglet import image
from pyglet.gl import *
from pyglet import font

from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *


def printf(*args):
    sys.stdout.write(''.join([str(x) for x in args]) + '\n')


class MainMenu(Menu):

    def __init__(self):
        super(MainMenu, self).__init__("Test Menu Items")

        # then add the items
        item1 = ToggleMenuItem('ToggleMenuItem: ', self.on_toggle_callback, True)

        resolutions = ['320x200', '640x480', '800x600', '1024x768', '1200x1024']
        item2 = MultipleMenuItem('MultipleMenuItem: ',
                                 self.on_multiple_callback,
                                 resolutions)

        item3 = MenuItem('MenuItem', self.on_callback)

        item4 = EntryMenuItem('EntryMenuItem:', self.on_entry_callback, 'value',
                              max_length=8)

        item5 = ImageMenuItem('imagemenuitem.png', self.on_image_callback)

        colors = [(255, 255, 255), (129, 255, 100), (50, 50, 100), (255, 200, 150)]
        item6 = ColorMenuItem('ColorMenuItem:', self.on_color_callback, colors)

        self.create_menu([item1, item2, item3, item4, item5, item6])

    def on_quit(self):
        pyglet.app.exit()

    def on_multiple_callback(self, idx):
        print 'multiple item callback', idx

    def on_toggle_callback(self, b):
        print 'toggle item callback', b

    def on_callback(self):
        print 'item callback'

    def on_entry_callback(self, value):
        print 'entry item callback', value

    def on_image_callback(self):
        print 'image item callback'

    def on_color_callback(self, value):
        print 'color item callback:', value


def main():
    pyglet.font.add_directory('.')
    director.init(resizable=True)
    director.run(Scene(MainMenu()))

if __name__ == '__main__':
    main()
apache-2.0
-1,557,945,847,988,106,800
-3,045,366,066,173,539,000
28.648649
108
0.609788
false
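MultipleMenuItem in the test above hands its callback the index of the newly selected entry. A dependency-free stand-in illustrating that contract (FakeMultipleItem is invented for illustration; the real cocos.menu item also handles key events and rendering):

class FakeMultipleItem(object):
    """Cycles through `items` and reports the new index, the way
    cocos.menu.MultipleMenuItem does on LEFT/RIGHT key presses."""

    def __init__(self, label, callback, items, default_item=0):
        self.label = label
        self.callback = callback
        self.items = items
        self.idx = default_item

    def select_next(self):
        self.idx = (self.idx + 1) % len(self.items)
        self.callback(self.idx)

def on_multiple(idx):
    print('multiple item callback %d' % idx)

item = FakeMultipleItem('MultipleMenuItem: ', on_multiple,
                        ['320x200', '640x480', '800x600'])
item.select_next()  # multiple item callback 1
item.select_next()  # multiple item callback 2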
ianyh/heroku-buildpack-python-opencv
vendor/.heroku/lib/python2.7/test/test_socket.py
9
60282
#!/usr/bin/env python

import unittest
from test import test_support

import errno
import socket
import select
import _testcapi
import time
import traceback
import Queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math

def try_address(host, port=0, family=socket.AF_INET):
    """Try to bind a socket on the given host:port and return True
    if that has been possible."""
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind((host, port))
    except (socket.error, socket.gaierror):
        return False
    else:
        sock.close()
        return True

HOST = test_support.HOST
MSG = b'Michael Gilfix was here\n'
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)

try:
    import thread
    import threading
except ImportError:
    thread = None
    threading = None

class SocketTCPTest(unittest.TestCase):

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = test_support.bind_port(self.serv)
        self.serv.listen(1)

    def tearDown(self):
        self.serv.close()
        self.serv = None

class SocketUDPTest(unittest.TestCase):

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = test_support.bind_port(self.serv)

    def tearDown(self):
        self.serv.close()
        self.serv = None

class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
    """

    def __init__(self):
        # Swap the real setup functions for the wrapped versions below.
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        self.queue = Queue.Queue(1)

        # Do some munging to start the client test.
methodname = self.id() i = methodname.rfind('.') methodname = methodname[i+1:] test_method = getattr(self, '_' + methodname) self.client_thread = thread.start_new_thread( self.clientRun, (test_method,)) self.__setUp() if not self.server_ready.is_set(): self.server_ready.set() self.client_ready.wait() def _tearDown(self): self.__tearDown() self.done.wait() if not self.queue.empty(): msg = self.queue.get() self.fail(msg) def clientRun(self, test_func): self.server_ready.wait() self.clientSetUp() self.client_ready.set() if not callable(test_func): raise TypeError("test_func must be a callable function.") try: test_func() except Exception, strerror: self.queue.put(strerror) self.clientTearDown() def clientSetUp(self): raise NotImplementedError("clientSetUp must be implemented.") def clientTearDown(self): self.done.set() thread.exit() class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketTCPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketUDPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) class SocketConnectedTest(ThreadedTCPSocketTest): def __init__(self, methodName='runTest'): ThreadedTCPSocketTest.__init__(self, methodName=methodName) def setUp(self): ThreadedTCPSocketTest.setUp(self) # Indicate explicitly we're ready for the client thread to # proceed and then perform the blocking call to accept self.serverExplicitReady() conn, addr = self.serv.accept() self.cli_conn = conn def tearDown(self): self.cli_conn.close() self.cli_conn = None ThreadedTCPSocketTest.tearDown(self) def clientSetUp(self): ThreadedTCPSocketTest.clientSetUp(self) self.cli.connect((HOST, self.port)) self.serv_conn = self.cli def clientTearDown(self): self.serv_conn.close() self.serv_conn = None ThreadedTCPSocketTest.clientTearDown(self) class SocketPairTest(unittest.TestCase, ThreadableTest): def __init__(self, methodName='runTest'): unittest.TestCase.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def setUp(self): self.serv, self.cli = socket.socketpair() def tearDown(self): self.serv.close() self.serv = None def clientSetUp(self): pass def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) ####################################################################### ## Begin Tests class GeneralModuleTests(unittest.TestCase): def test_weakref(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) p = proxy(s) self.assertEqual(p.fileno(), s.fileno()) s.close() s = None try: p.fileno() except ReferenceError: pass else: self.fail('Socket proxy still exists') def testSocketError(self): # Testing socket module exceptions def raise_error(*args, **kwargs): raise socket.error def raise_herror(*args, **kwargs): raise socket.herror def raise_gaierror(*args, **kwargs): raise socket.gaierror self.assertRaises(socket.error, raise_error, "Error raising socket exception.") self.assertRaises(socket.error, raise_herror, "Error raising socket exception.") self.assertRaises(socket.error, raise_gaierror, "Error raising 
socket exception.") def testSendtoErrors(self): # Testing that sendto doens't masks failures. See #10169. s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.addCleanup(s.close) s.bind(('', 0)) sockname = s.getsockname() # 2 args with self.assertRaises(UnicodeEncodeError): s.sendto(u'\u2620', sockname) with self.assertRaises(TypeError) as cm: s.sendto(5j, sockname) self.assertIn('not complex', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto('foo', None) self.assertIn('not NoneType', str(cm.exception)) # 3 args with self.assertRaises(UnicodeEncodeError): s.sendto(u'\u2620', 0, sockname) with self.assertRaises(TypeError) as cm: s.sendto(5j, 0, sockname) self.assertIn('not complex', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto('foo', 0, None) self.assertIn('not NoneType', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto('foo', 'bar', sockname) self.assertIn('an integer is required', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto('foo', None, None) self.assertIn('an integer is required', str(cm.exception)) # wrong number of args with self.assertRaises(TypeError) as cm: s.sendto('foo') self.assertIn('(1 given)', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto('foo', 0, sockname, 4) self.assertIn('(4 given)', str(cm.exception)) def testCrucialConstants(self): # Testing for mission critical constants socket.AF_INET socket.SOCK_STREAM socket.SOCK_DGRAM socket.SOCK_RAW socket.SOCK_RDM socket.SOCK_SEQPACKET socket.SOL_SOCKET socket.SO_REUSEADDR def testHostnameRes(self): # Testing hostname resolution mechanisms hostname = socket.gethostname() try: ip = socket.gethostbyname(hostname) except socket.error: # Probably name lookup wasn't set up right; skip this test return self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.") try: hname, aliases, ipaddrs = socket.gethostbyaddr(ip) except socket.error: # Probably a similar problem as above; skip this test return all_host_names = [hostname, hname] + aliases fqhn = socket.getfqdn(ip) if not fqhn in all_host_names: self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names))) def testRefCountGetNameInfo(self): # Testing reference count for getnameinfo if hasattr(sys, "getrefcount"): try: # On some versions, this loses a reference orig = sys.getrefcount(__name__) socket.getnameinfo(__name__,0) except TypeError: self.assertEqual(sys.getrefcount(__name__), orig, "socket.getnameinfo loses a reference") def testInterpreterCrash(self): # Making sure getnameinfo doesn't crash the interpreter try: # On some versions, this crashes the interpreter. socket.getnameinfo(('x', 0, 0, 0), 0) except socket.error: pass def testNtoH(self): # This just checks that htons etc. are their own inverse, # when looking at the lower 16 or 32 bits. 
sizes = {socket.htonl: 32, socket.ntohl: 32, socket.htons: 16, socket.ntohs: 16} for func, size in sizes.items(): mask = (1L<<size) - 1 for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210): self.assertEqual(i & mask, func(func(i&mask)) & mask) swapped = func(mask) self.assertEqual(swapped & mask, mask) self.assertRaises(OverflowError, func, 1L<<34) def testNtoHErrors(self): good_values = [ 1, 2, 3, 1L, 2L, 3L ] bad_values = [ -1, -2, -3, -1L, -2L, -3L ] for k in good_values: socket.ntohl(k) socket.ntohs(k) socket.htonl(k) socket.htons(k) for k in bad_values: self.assertRaises(OverflowError, socket.ntohl, k) self.assertRaises(OverflowError, socket.ntohs, k) self.assertRaises(OverflowError, socket.htonl, k) self.assertRaises(OverflowError, socket.htons, k) def testGetServBy(self): eq = self.assertEqual # Find one service that exists, then check all the related interfaces. # I've ordered this by protocols that have both a tcp and udp # protocol, at least for modern Linuxes. if (sys.platform.startswith('linux') or sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd') or sys.platform == 'darwin'): # avoid the 'echo' service on this platform, as there is an # assumption breaking non-standard port/protocol entry services = ('daytime', 'qotd', 'domain') else: services = ('echo', 'daytime', 'domain') for service in services: try: port = socket.getservbyname(service, 'tcp') break except socket.error: pass else: raise socket.error # Try same call with optional protocol omitted port2 = socket.getservbyname(service) eq(port, port2) # Try udp, but don't barf if it doesn't exist try: udpport = socket.getservbyname(service, 'udp') except socket.error: udpport = None else: eq(udpport, port) # Now make sure the lookup by port returns the same service name eq(socket.getservbyport(port2), service) eq(socket.getservbyport(port, 'tcp'), service) if udpport is not None: eq(socket.getservbyport(udpport, 'udp'), service) # Make sure getservbyport does not accept out of range ports. self.assertRaises(OverflowError, socket.getservbyport, -1) self.assertRaises(OverflowError, socket.getservbyport, 65536) def testDefaultTimeout(self): # Testing default timeout # The default timeout should initially be None self.assertEqual(socket.getdefaulttimeout(), None) s = socket.socket() self.assertEqual(s.gettimeout(), None) s.close() # Set the default timeout to 10, and see if it propagates socket.setdefaulttimeout(10) self.assertEqual(socket.getdefaulttimeout(), 10) s = socket.socket() self.assertEqual(s.gettimeout(), 10) s.close() # Reset the default timeout to None, and see if it propagates socket.setdefaulttimeout(None) self.assertEqual(socket.getdefaulttimeout(), None) s = socket.socket() self.assertEqual(s.gettimeout(), None) s.close() # Check that setting it to an invalid value raises ValueError self.assertRaises(ValueError, socket.setdefaulttimeout, -1) # Check that setting it to an invalid type raises TypeError self.assertRaises(TypeError, socket.setdefaulttimeout, "spam") def testIPv4_inet_aton_fourbytes(self): if not hasattr(socket, 'inet_aton'): return # No inet_aton, nothing to check # Test that issue1008086 and issue767150 are fixed. # It must return 4 bytes. 
self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0')) self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255')) def testIPv4toString(self): if not hasattr(socket, 'inet_pton'): return # No inet_pton() on this platform from socket import inet_aton as f, inet_pton, AF_INET g = lambda a: inet_pton(AF_INET, a) self.assertEqual('\x00\x00\x00\x00', f('0.0.0.0')) self.assertEqual('\xff\x00\xff\x00', f('255.0.255.0')) self.assertEqual('\xaa\xaa\xaa\xaa', f('170.170.170.170')) self.assertEqual('\x01\x02\x03\x04', f('1.2.3.4')) self.assertEqual('\xff\xff\xff\xff', f('255.255.255.255')) self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0')) self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0')) self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170')) self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255')) def testIPv6toString(self): if not hasattr(socket, 'inet_pton'): return # No inet_pton() on this platform try: from socket import inet_pton, AF_INET6, has_ipv6 if not has_ipv6: return except ImportError: return f = lambda a: inet_pton(AF_INET6, a) self.assertEqual('\x00' * 16, f('::')) self.assertEqual('\x00' * 16, f('0::0')) self.assertEqual('\x00\x01' + '\x00' * 14, f('1::')) self.assertEqual( '\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae', f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae') ) def testStringToIPv4(self): if not hasattr(socket, 'inet_ntop'): return # No inet_ntop() on this platform from socket import inet_ntoa as f, inet_ntop, AF_INET g = lambda a: inet_ntop(AF_INET, a) self.assertEqual('1.0.1.0', f('\x01\x00\x01\x00')) self.assertEqual('170.85.170.85', f('\xaa\x55\xaa\x55')) self.assertEqual('255.255.255.255', f('\xff\xff\xff\xff')) self.assertEqual('1.2.3.4', f('\x01\x02\x03\x04')) self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00')) self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55')) self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff')) def testStringToIPv6(self): if not hasattr(socket, 'inet_ntop'): return # No inet_ntop() on this platform try: from socket import inet_ntop, AF_INET6, has_ipv6 if not has_ipv6: return except ImportError: return f = lambda a: inet_ntop(AF_INET6, a) self.assertEqual('::', f('\x00' * 16)) self.assertEqual('::1', f('\x00' * 15 + '\x01')) self.assertEqual( 'aef:b01:506:1001:ffff:9997:55:170', f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70') ) # XXX The following don't test module-level functionality... def _get_unused_port(self, bind_address='0.0.0.0'): """Use a temporary socket to elicit an unused ephemeral port. Args: bind_address: Hostname or IP address to search for a port on. Returns: A most likely to be unused port. """ tempsock = socket.socket() tempsock.bind((bind_address, 0)) host, port = tempsock.getsockname() tempsock.close() return port def testSockName(self): # Testing getsockname() port = self._get_unused_port() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(sock.close) sock.bind(("0.0.0.0", port)) name = sock.getsockname() # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate # it reasonable to get the host's addr in addition to 0.0.0.0. # At least for eCos. This is required for the S/390 to pass. 
try: my_ip_addr = socket.gethostbyname(socket.gethostname()) except socket.error: # Probably name lookup wasn't set up right; skip this test return self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0]) self.assertEqual(name[1], port) def testGetSockOpt(self): # Testing getsockopt() # We know a socket should start without reuse==0 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(sock.close) reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) self.assertFalse(reuse != 0, "initial mode is reuse") def testSetSockOpt(self): # Testing setsockopt() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(sock.close) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) self.assertFalse(reuse == 0, "failed to set reuse mode") def testSendAfterClose(self): # testing send() after close() with timeout sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(1) sock.close() self.assertRaises(socket.error, sock.send, "spam") def testNewAttributes(self): # testing .family, .type and .protocol sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.assertEqual(sock.family, socket.AF_INET) self.assertEqual(sock.type, socket.SOCK_STREAM) self.assertEqual(sock.proto, 0) sock.close() def test_getsockaddrarg(self): host = '0.0.0.0' port = self._get_unused_port(bind_address=host) big_port = port + 65536 neg_port = port - 65536 sock = socket.socket() try: self.assertRaises(OverflowError, sock.bind, (host, big_port)) self.assertRaises(OverflowError, sock.bind, (host, neg_port)) sock.bind((host, port)) finally: sock.close() @unittest.skipUnless(os.name == "nt", "Windows specific") def test_sock_ioctl(self): self.assertTrue(hasattr(socket.socket, 'ioctl')) self.assertTrue(hasattr(socket, 'SIO_RCVALL')) self.assertTrue(hasattr(socket, 'RCVALL_ON')) self.assertTrue(hasattr(socket, 'RCVALL_OFF')) self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS')) s = socket.socket() self.addCleanup(s.close) self.assertRaises(ValueError, s.ioctl, -1, None) s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100)) def testGetaddrinfo(self): try: socket.getaddrinfo('localhost', 80) except socket.gaierror as err: if err.errno == socket.EAI_SERVICE: # see http://bugs.python.org/issue1282647 self.skipTest("buggy libc version") raise # len of every sequence is supposed to be == 5 for info in socket.getaddrinfo(HOST, None): self.assertEqual(len(info), 5) # host can be a domain name, a string representation of an # IPv4/v6 address or None socket.getaddrinfo('localhost', 80) socket.getaddrinfo('127.0.0.1', 80) socket.getaddrinfo(None, 80) if SUPPORTS_IPV6: socket.getaddrinfo('::1', 80) # port can be a string service name such as "http", a numeric # port number (int or long), or None socket.getaddrinfo(HOST, "http") socket.getaddrinfo(HOST, 80) socket.getaddrinfo(HOST, 80L) socket.getaddrinfo(HOST, None) # test family and socktype filters infos = socket.getaddrinfo(HOST, None, socket.AF_INET) for family, _, _, _, _ in infos: self.assertEqual(family, socket.AF_INET) infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM) for _, socktype, _, _, _ in infos: self.assertEqual(socktype, socket.SOCK_STREAM) # test proto and flags arguments socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP) socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE) # a server willing to support both IPv4 and IPv6 will # usually do this socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 
socket.AI_PASSIVE)
        # Issue 17269
        if hasattr(socket, 'AI_NUMERICSERV'):
            socket.getaddrinfo("localhost", None, 0, 0, 0, socket.AI_NUMERICSERV)

    def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * test_support.SOCK_MAX_SIZE)
            if with_timeout:
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(socket.timeout, c.sendall,
                                  b"x" * test_support.SOCK_MAX_SIZE)
        finally:
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()

    def test_sendall_interrupted(self):
        self.check_sendall_interrupted(False)

    def test_sendall_interrupted_with_timeout(self):
        self.check_sendall_interrupted(True)

    def test_listen_backlog(self):
        for backlog in 0, -1:
            srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            srv.bind((HOST, 0))
            srv.listen(backlog)
            srv.close()

        # Issue 15989
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
        srv.close()

    @unittest.skipUnless(SUPPORTS_IPV6, 'IPv6 required for this test.')
    def test_flowinfo(self):
        self.assertRaises(OverflowError, socket.getnameinfo,
                          ('::1', 0, 0xffffffff), 0)
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        try:
            self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
        finally:
            s.close()


@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG) - 3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = ''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, 'f' * 2048)

    def _testSendAll(self):
        big_chunk = 'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        if not hasattr(socket, "fromfd"):
            return  # On Windows, this doesn't exist
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg,
MSG) def _testFromFd(self): self.serv_conn.send(MSG) def testDup(self): # Testing dup() sock = self.cli_conn.dup() self.addCleanup(sock.close) msg = sock.recv(1024) self.assertEqual(msg, MSG) def _testDup(self): self.serv_conn.send(MSG) def testShutdown(self): # Testing shutdown() msg = self.cli_conn.recv(1024) self.assertEqual(msg, MSG) # wait for _testShutdown to finish: on OS X, when the server # closes the connection the client also becomes disconnected, # and the client's shutdown call will fail. (Issue #4397.) self.done.wait() def _testShutdown(self): self.serv_conn.send(MSG) # Issue 15989 self.assertRaises(OverflowError, self.serv_conn.shutdown, _testcapi.INT_MAX + 1) self.assertRaises(OverflowError, self.serv_conn.shutdown, 2 + (_testcapi.UINT_MAX + 1)) self.serv_conn.shutdown(2) @unittest.skipUnless(thread, 'Threading required for this test.') class BasicUDPTest(ThreadedUDPSocketTest): def __init__(self, methodName='runTest'): ThreadedUDPSocketTest.__init__(self, methodName=methodName) def testSendtoAndRecv(self): # Testing sendto() and Recv() over UDP msg = self.serv.recv(len(MSG)) self.assertEqual(msg, MSG) def _testSendtoAndRecv(self): self.cli.sendto(MSG, 0, (HOST, self.port)) def testRecvFrom(self): # Testing recvfrom() over UDP msg, addr = self.serv.recvfrom(len(MSG)) self.assertEqual(msg, MSG) def _testRecvFrom(self): self.cli.sendto(MSG, 0, (HOST, self.port)) def testRecvFromNegative(self): # Negative lengths passed to recvfrom should give ValueError. self.assertRaises(ValueError, self.serv.recvfrom, -1) def _testRecvFromNegative(self): self.cli.sendto(MSG, 0, (HOST, self.port)) @unittest.skipUnless(thread, 'Threading required for this test.') class TCPCloserTest(ThreadedTCPSocketTest): def testClose(self): conn, addr = self.serv.accept() conn.close() sd = self.cli read, write, err = select.select([sd], [], [], 1.0) self.assertEqual(read, [sd]) self.assertEqual(sd.recv(1), '') def _testClose(self): self.cli.connect((HOST, self.port)) time.sleep(1.0) @unittest.skipUnless(thread, 'Threading required for this test.') class BasicSocketPairTest(SocketPairTest): def __init__(self, methodName='runTest'): SocketPairTest.__init__(self, methodName=methodName) def testRecv(self): msg = self.serv.recv(1024) self.assertEqual(msg, MSG) def _testRecv(self): self.cli.send(MSG) def testSend(self): self.serv.send(MSG) def _testSend(self): msg = self.cli.recv(1024) self.assertEqual(msg, MSG) @unittest.skipUnless(thread, 'Threading required for this test.') class NonBlockingTCPTests(ThreadedTCPSocketTest): def __init__(self, methodName='runTest'): ThreadedTCPSocketTest.__init__(self, methodName=methodName) def testSetBlocking(self): # Testing whether set blocking works self.serv.setblocking(True) self.assertIsNone(self.serv.gettimeout()) self.serv.setblocking(False) self.assertEqual(self.serv.gettimeout(), 0.0) start = time.time() try: self.serv.accept() except socket.error: pass end = time.time() self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.") # Issue 15989 if _testcapi.UINT_MAX < _testcapi.ULONG_MAX: self.serv.setblocking(_testcapi.UINT_MAX + 1) self.assertIsNone(self.serv.gettimeout()) def _testSetBlocking(self): pass def testAccept(self): # Testing non-blocking accept self.serv.setblocking(0) try: conn, addr = self.serv.accept() except socket.error: pass else: self.fail("Error trying to do non-blocking accept.") read, write, err = select.select([self.serv], [], []) if self.serv in read: conn, addr = self.serv.accept() conn.close() else: self.fail("Error trying to 
do accept after select.") def _testAccept(self): time.sleep(0.1) self.cli.connect((HOST, self.port)) def testConnect(self): # Testing non-blocking connect conn, addr = self.serv.accept() conn.close() def _testConnect(self): self.cli.settimeout(10) self.cli.connect((HOST, self.port)) def testRecv(self): # Testing non-blocking recv conn, addr = self.serv.accept() conn.setblocking(0) try: msg = conn.recv(len(MSG)) except socket.error: pass else: self.fail("Error trying to do non-blocking recv.") read, write, err = select.select([conn], [], []) if conn in read: msg = conn.recv(len(MSG)) conn.close() self.assertEqual(msg, MSG) else: self.fail("Error during select call to non-blocking socket.") def _testRecv(self): self.cli.connect((HOST, self.port)) time.sleep(0.1) self.cli.send(MSG) @unittest.skipUnless(thread, 'Threading required for this test.') class FileObjectClassTestCase(SocketConnectedTest): bufsize = -1 # Use default buffer size def __init__(self, methodName='runTest'): SocketConnectedTest.__init__(self, methodName=methodName) def setUp(self): SocketConnectedTest.setUp(self) self.serv_file = self.cli_conn.makefile('rb', self.bufsize) def tearDown(self): self.serv_file.close() self.assertTrue(self.serv_file.closed) SocketConnectedTest.tearDown(self) self.serv_file = None def clientSetUp(self): SocketConnectedTest.clientSetUp(self) self.cli_file = self.serv_conn.makefile('wb') def clientTearDown(self): self.cli_file.close() self.assertTrue(self.cli_file.closed) self.cli_file = None SocketConnectedTest.clientTearDown(self) def testSmallRead(self): # Performing small file read test first_seg = self.serv_file.read(len(MSG)-3) second_seg = self.serv_file.read(3) msg = first_seg + second_seg self.assertEqual(msg, MSG) def _testSmallRead(self): self.cli_file.write(MSG) self.cli_file.flush() def testFullRead(self): # read until EOF msg = self.serv_file.read() self.assertEqual(msg, MSG) def _testFullRead(self): self.cli_file.write(MSG) self.cli_file.close() def testUnbufferedRead(self): # Performing unbuffered file read test buf = '' while 1: char = self.serv_file.read(1) if not char: break buf += char self.assertEqual(buf, MSG) def _testUnbufferedRead(self): self.cli_file.write(MSG) self.cli_file.flush() def testReadline(self): # Performing file readline test line = self.serv_file.readline() self.assertEqual(line, MSG) def _testReadline(self): self.cli_file.write(MSG) self.cli_file.flush() def testReadlineAfterRead(self): a_baloo_is = self.serv_file.read(len("A baloo is")) self.assertEqual("A baloo is", a_baloo_is) _a_bear = self.serv_file.read(len(" a bear")) self.assertEqual(" a bear", _a_bear) line = self.serv_file.readline() self.assertEqual("\n", line) line = self.serv_file.readline() self.assertEqual("A BALOO IS A BEAR.\n", line) line = self.serv_file.readline() self.assertEqual(MSG, line) def _testReadlineAfterRead(self): self.cli_file.write("A baloo is a bear\n") self.cli_file.write("A BALOO IS A BEAR.\n") self.cli_file.write(MSG) self.cli_file.flush() def testReadlineAfterReadNoNewline(self): end_of_ = self.serv_file.read(len("End Of ")) self.assertEqual("End Of ", end_of_) line = self.serv_file.readline() self.assertEqual("Line", line) def _testReadlineAfterReadNoNewline(self): self.cli_file.write("End Of Line") def testClosedAttr(self): self.assertTrue(not self.serv_file.closed) def _testClosedAttr(self): self.assertTrue(not self.cli_file.closed) class FileObjectInterruptedTestCase(unittest.TestCase): """Test that the file object correctly handles EINTR internally.""" class 
MockSocket(object): def __init__(self, recv_funcs=()): # A generator that returns callables that we'll call for each # call to recv(). self._recv_step = iter(recv_funcs) def recv(self, size): return self._recv_step.next()() @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) def _test_readline(self, size=-1, **kwargs): mock_sock = self.MockSocket(recv_funcs=[ lambda : "This is the first line\nAnd the sec", self._raise_eintr, lambda : "ond line is here\n", lambda : "", ]) fo = socket._fileobject(mock_sock, **kwargs) self.assertEqual(fo.readline(size), "This is the first line\n") self.assertEqual(fo.readline(size), "And the second line is here\n") def _test_read(self, size=-1, **kwargs): mock_sock = self.MockSocket(recv_funcs=[ lambda : "This is the first line\nAnd the sec", self._raise_eintr, lambda : "ond line is here\n", lambda : "", ]) fo = socket._fileobject(mock_sock, **kwargs) self.assertEqual(fo.read(size), "This is the first line\n" "And the second line is here\n") def test_default(self): self._test_readline() self._test_readline(size=100) self._test_read() self._test_read(size=100) def test_with_1k_buffer(self): self._test_readline(bufsize=1024) self._test_readline(size=100, bufsize=1024) self._test_read(bufsize=1024) self._test_read(size=100, bufsize=1024) def _test_readline_no_buffer(self, size=-1): mock_sock = self.MockSocket(recv_funcs=[ lambda : "aa", lambda : "\n", lambda : "BB", self._raise_eintr, lambda : "bb", lambda : "", ]) fo = socket._fileobject(mock_sock, bufsize=0) self.assertEqual(fo.readline(size), "aa\n") self.assertEqual(fo.readline(size), "BBbb") def test_no_buffer(self): self._test_readline_no_buffer() self._test_readline_no_buffer(size=4) self._test_read(bufsize=0) self._test_read(size=100, bufsize=0) class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase): """Repeat the tests from FileObjectClassTestCase with bufsize==0. In this case (and in this case only), it should be possible to create a file object, read a line from it, create another file object, read another line from it, without loss of data in the first file object's buffer. Note that httplib relies on this when reading multiple requests from the same socket.""" bufsize = 0 # Use unbuffered mode def testUnbufferedReadline(self): # Read a line, create a new file object, read another line with it line = self.serv_file.readline() # first line self.assertEqual(line, "A. " + MSG) # first line self.serv_file = self.cli_conn.makefile('rb', 0) line = self.serv_file.readline() # second line self.assertEqual(line, "B. " + MSG) # second line def _testUnbufferedReadline(self): self.cli_file.write("A. " + MSG) self.cli_file.write("B. 
" + MSG) self.cli_file.flush() class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase): bufsize = 1 # Default-buffered for reading; line-buffered for writing class SocketMemo(object): """A wrapper to keep track of sent data, needed to examine write behaviour""" def __init__(self, sock): self._sock = sock self.sent = [] def send(self, data, flags=0): n = self._sock.send(data, flags) self.sent.append(data[:n]) return n def sendall(self, data, flags=0): self._sock.sendall(data, flags) self.sent.append(data) def __getattr__(self, attr): return getattr(self._sock, attr) def getsent(self): return [e.tobytes() if isinstance(e, memoryview) else e for e in self.sent] def setUp(self): FileObjectClassTestCase.setUp(self) self.serv_file._sock = self.SocketMemo(self.serv_file._sock) def testLinebufferedWrite(self): # Write two lines, in small chunks msg = MSG.strip() print >> self.serv_file, msg, print >> self.serv_file, msg # second line: print >> self.serv_file, msg, print >> self.serv_file, msg, print >> self.serv_file, msg # third line print >> self.serv_file, '' self.serv_file.flush() msg1 = "%s %s\n"%(msg, msg) msg2 = "%s %s %s\n"%(msg, msg, msg) msg3 = "\n" self.assertEqual(self.serv_file._sock.getsent(), [msg1, msg2, msg3]) def _testLinebufferedWrite(self): msg = MSG.strip() msg1 = "%s %s\n"%(msg, msg) msg2 = "%s %s %s\n"%(msg, msg, msg) msg3 = "\n" l1 = self.cli_file.readline() self.assertEqual(l1, msg1) l2 = self.cli_file.readline() self.assertEqual(l2, msg2) l3 = self.cli_file.readline() self.assertEqual(l3, msg3) class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase): bufsize = 2 # Exercise the buffering code class NetworkConnectionTest(object): """Prove network connection.""" def clientSetUp(self): # We're inherited below by BasicTCPTest2, which also inherits # BasicTCPTest, which defines self.port referenced below. self.cli = socket.create_connection((HOST, self.port)) self.serv_conn = self.cli class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest): """Tests that NetworkConnection does not break existing TCP functionality. """ class NetworkConnectionNoServer(unittest.TestCase): class MockSocket(socket.socket): def connect(self, *args): raise socket.timeout('timed out') @contextlib.contextmanager def mocked_socket_module(self): """Return a socket which times out on connect""" old_socket = socket.socket socket.socket = self.MockSocket try: yield finally: socket.socket = old_socket def test_connect(self): port = test_support.find_unused_port() cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(cli.close) with self.assertRaises(socket.error) as cm: cli.connect((HOST, port)) self.assertEqual(cm.exception.errno, errno.ECONNREFUSED) def test_create_connection(self): # Issue #9792: errors raised by create_connection() should have # a proper errno attribute. port = test_support.find_unused_port() with self.assertRaises(socket.error) as cm: socket.create_connection((HOST, port)) # Issue #16257: create_connection() calls getaddrinfo() against # 'localhost'. This may result in an IPV6 addr being returned # as well as an IPV4 one: # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM) # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)), # (26, 2, 0, '', ('::1', 41230, 0, 0))] # # create_connection() enumerates through all the addresses returned # and if it doesn't successfully bind to any of them, it propagates # the last exception it encountered. # # On Solaris, ENETUNREACH is returned in this circumstance instead # of ECONNREFUSED. 
So, if that errno exists, add it to our list of # expected errnos. expected_errnos = [ errno.ECONNREFUSED, ] if hasattr(errno, 'ENETUNREACH'): expected_errnos.append(errno.ENETUNREACH) self.assertIn(cm.exception.errno, expected_errnos) def test_create_connection_timeout(self): # Issue #9792: create_connection() should not recast timeout errors # as generic socket errors. with self.mocked_socket_module(): with self.assertRaises(socket.timeout): socket.create_connection((HOST, 1234)) @unittest.skipUnless(thread, 'Threading required for this test.') class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketTCPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.source_port = test_support.find_unused_port() def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) def _justAccept(self): conn, addr = self.serv.accept() conn.close() testFamily = _justAccept def _testFamily(self): self.cli = socket.create_connection((HOST, self.port), timeout=30) self.addCleanup(self.cli.close) self.assertEqual(self.cli.family, 2) testSourceAddress = _justAccept def _testSourceAddress(self): self.cli = socket.create_connection((HOST, self.port), timeout=30, source_address=('', self.source_port)) self.addCleanup(self.cli.close) self.assertEqual(self.cli.getsockname()[1], self.source_port) # The port number being used is sufficient to show that the bind() # call happened. testTimeoutDefault = _justAccept def _testTimeoutDefault(self): # passing no explicit timeout uses socket's global default self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(42) try: self.cli = socket.create_connection((HOST, self.port)) self.addCleanup(self.cli.close) finally: socket.setdefaulttimeout(None) self.assertEqual(self.cli.gettimeout(), 42) testTimeoutNone = _justAccept def _testTimeoutNone(self): # None timeout means the same as sock.settimeout(None) self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(30) try: self.cli = socket.create_connection((HOST, self.port), timeout=None) self.addCleanup(self.cli.close) finally: socket.setdefaulttimeout(None) self.assertEqual(self.cli.gettimeout(), None) testTimeoutValueNamed = _justAccept def _testTimeoutValueNamed(self): self.cli = socket.create_connection((HOST, self.port), timeout=30) self.assertEqual(self.cli.gettimeout(), 30) testTimeoutValueNonamed = _justAccept def _testTimeoutValueNonamed(self): self.cli = socket.create_connection((HOST, self.port), 30) self.addCleanup(self.cli.close) self.assertEqual(self.cli.gettimeout(), 30) @unittest.skipUnless(thread, 'Threading required for this test.') class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketTCPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): pass def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) def testInsideTimeout(self): conn, addr = self.serv.accept() self.addCleanup(conn.close) time.sleep(3) conn.send("done!") testOutsideTimeout = testInsideTimeout def _testInsideTimeout(self): self.cli = sock = socket.create_connection((HOST, self.port)) data = sock.recv(5) self.assertEqual(data, "done!") def _testOutsideTimeout(self): self.cli = sock = socket.create_connection((HOST, self.port), timeout=1) self.assertRaises(socket.timeout, lambda: sock.recv(5)) class 
Urllib2FileobjectTest(unittest.TestCase): # urllib2.HTTPHandler has "borrowed" socket._fileobject, and requires that # it close the socket if the close c'tor argument is true def testClose(self): class MockSocket: closed = False def flush(self): pass def close(self): self.closed = True # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until # the _socketobject that created the _fileobject is closed s = MockSocket() f = socket._fileobject(s) f.close() self.assertTrue(not s.closed) s = MockSocket() f = socket._fileobject(s, close=True) f.close() self.assertTrue(s.closed) class TCPTimeoutTest(SocketTCPTest): def testTCPTimeout(self): def raise_timeout(*args, **kwargs): self.serv.settimeout(1.0) self.serv.accept() self.assertRaises(socket.timeout, raise_timeout, "Error generating a timeout exception (TCP)") def testTimeoutZero(self): ok = False try: self.serv.settimeout(0.0) foo = self.serv.accept() except socket.timeout: self.fail("caught timeout instead of error (TCP)") except socket.error: ok = True except: self.fail("caught unexpected exception (TCP)") if not ok: self.fail("accept() returned success when we did not expect it") def testInterruptedTimeout(self): # XXX I don't know how to do this test on MSWindows or any other # platform that doesn't support signal.alarm() or os.kill(), though # the bug should have existed on all platforms. if not hasattr(signal, "alarm"): return # can only test on *nix self.serv.settimeout(5.0) # must be longer than alarm class Alarm(Exception): pass def alarm_handler(signal, frame): raise Alarm old_alarm = signal.signal(signal.SIGALRM, alarm_handler) try: signal.alarm(2) # POSIX allows alarm to be up to 1 second early try: foo = self.serv.accept() except socket.timeout: self.fail("caught timeout instead of Alarm") except Alarm: pass except: self.fail("caught other exception instead of Alarm:" " %s(%s):\n%s" % (sys.exc_info()[:2] + (traceback.format_exc(),))) else: self.fail("nothing caught") finally: signal.alarm(0) # shut off alarm except Alarm: self.fail("got Alarm in wrong place") finally: # no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm) class UDPTimeoutTest(SocketUDPTest): def testUDPTimeout(self): def raise_timeout(*args, **kwargs): self.serv.settimeout(1.0) self.serv.recv(1024) self.assertRaises(socket.timeout, raise_timeout, "Error generating a timeout exception (UDP)") def testTimeoutZero(self): ok = False try: self.serv.settimeout(0.0) foo = self.serv.recv(1024) except socket.timeout: self.fail("caught timeout instead of error (UDP)") except socket.error: ok = True except: self.fail("caught unexpected exception (UDP)") if not ok: self.fail("recv() returned success when we did not expect it") class TestExceptions(unittest.TestCase): def testExceptionTree(self): self.assertTrue(issubclass(socket.error, Exception)) self.assertTrue(issubclass(socket.herror, socket.error)) self.assertTrue(issubclass(socket.gaierror, socket.error)) self.assertTrue(issubclass(socket.timeout, socket.error)) class TestLinuxAbstractNamespace(unittest.TestCase): UNIX_PATH_MAX = 108 def testLinuxAbstractNamespace(self): address = "\x00python-test-hello\x00\xff" s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s1.bind(address) s1.listen(1) s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s2.connect(s1.getsockname()) s1.accept() self.assertEqual(s1.getsockname(), address) self.assertEqual(s2.getpeername(), address) def testMaxName(self): address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1) s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.bind(address) self.assertEqual(s.getsockname(), address) def testNameOverflow(self): address = "\x00" + "h" * self.UNIX_PATH_MAX s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.assertRaises(socket.error, s.bind, address) @unittest.skipUnless(thread, 'Threading required for this test.') class BufferIOTest(SocketConnectedTest): """ Test the buffer versions of socket.recv() and socket.send(). 
""" def __init__(self, methodName='runTest'): SocketConnectedTest.__init__(self, methodName=methodName) def testRecvIntoArray(self): buf = array.array('c', ' '*1024) nbytes = self.cli_conn.recv_into(buf) self.assertEqual(nbytes, len(MSG)) msg = buf.tostring()[:len(MSG)] self.assertEqual(msg, MSG) def _testRecvIntoArray(self): with test_support.check_py3k_warnings(): buf = buffer(MSG) self.serv_conn.send(buf) def testRecvIntoBytearray(self): buf = bytearray(1024) nbytes = self.cli_conn.recv_into(buf) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvIntoBytearray = _testRecvIntoArray def testRecvIntoMemoryview(self): buf = bytearray(1024) nbytes = self.cli_conn.recv_into(memoryview(buf)) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvIntoMemoryview = _testRecvIntoArray def testRecvFromIntoArray(self): buf = array.array('c', ' '*1024) nbytes, addr = self.cli_conn.recvfrom_into(buf) self.assertEqual(nbytes, len(MSG)) msg = buf.tostring()[:len(MSG)] self.assertEqual(msg, MSG) def _testRecvFromIntoArray(self): with test_support.check_py3k_warnings(): buf = buffer(MSG) self.serv_conn.send(buf) def testRecvFromIntoBytearray(self): buf = bytearray(1024) nbytes, addr = self.cli_conn.recvfrom_into(buf) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvFromIntoBytearray = _testRecvFromIntoArray def testRecvFromIntoMemoryview(self): buf = bytearray(1024) nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf)) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvFromIntoMemoryview = _testRecvFromIntoArray TIPC_STYPE = 2000 TIPC_LOWER = 200 TIPC_UPPER = 210 def isTipcAvailable(): """Check if the TIPC module is loaded The TIPC module is not loaded automatically on Ubuntu and probably other Linux distros. 
""" if not hasattr(socket, "AF_TIPC"): return False if not os.path.isfile("/proc/modules"): return False with open("/proc/modules") as f: for line in f: if line.startswith("tipc "): return True if test_support.verbose: print "TIPC module is not loaded, please 'sudo modprobe tipc'" return False class TIPCTest (unittest.TestCase): def testRDM(self): srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM) cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE, TIPC_LOWER, TIPC_UPPER) srv.bind(srvaddr) sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE, TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0) cli.sendto(MSG, sendaddr) msg, recvaddr = srv.recvfrom(1024) self.assertEqual(cli.getsockname(), recvaddr) self.assertEqual(msg, MSG) class TIPCThreadableTest (unittest.TestCase, ThreadableTest): def __init__(self, methodName = 'runTest'): unittest.TestCase.__init__(self, methodName = methodName) ThreadableTest.__init__(self) def setUp(self): self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM) self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE, TIPC_LOWER, TIPC_UPPER) self.srv.bind(srvaddr) self.srv.listen(5) self.serverExplicitReady() self.conn, self.connaddr = self.srv.accept() def clientSetUp(self): # The is a hittable race between serverExplicitReady() and the # accept() call; sleep a little while to avoid it, otherwise # we could get an exception time.sleep(0.1) self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM) addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE, TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0) self.cli.connect(addr) self.cliaddr = self.cli.getsockname() def testStream(self): msg = self.conn.recv(1024) self.assertEqual(msg, MSG) self.assertEqual(self.cliaddr, self.connaddr) def _testStream(self): self.cli.send(MSG) self.cli.close() def test_main(): tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest, TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ] tests.extend([ NonBlockingTCPTests, FileObjectClassTestCase, FileObjectInterruptedTestCase, UnbufferedFileObjectClassTestCase, LineBufferedFileObjectClassTestCase, SmallBufferedFileObjectClassTestCase, Urllib2FileobjectTest, NetworkConnectionNoServer, NetworkConnectionAttributesTest, NetworkConnectionBehaviourTest, ]) if hasattr(socket, "socketpair"): tests.append(BasicSocketPairTest) if sys.platform == 'linux2': tests.append(TestLinuxAbstractNamespace) if isTipcAvailable(): tests.append(TIPCTest) tests.append(TIPCThreadableTest) thread_info = test_support.threading_setup() test_support.run_unittest(*tests) test_support.threading_cleanup(*thread_info) if __name__ == "__main__": test_main()
mit
368,015,928,744,204,000
3,247,014,733,152,422,400
33.885417
117
0.607445
false
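The MockSocket tests above pin down one specific behaviour: socket._fileobject must transparently retry recv() calls that are interrupted by a signal (EINTR) instead of surfacing the error. A minimal standalone sketch of that retry pattern (recv_retrying is an illustrative name, not something the test suite defines):

import errno
import socket

def recv_retrying(sock, size):
    # Retry recv() whenever it fails with EINTR, mirroring what
    # socket._fileobject.read()/readline() do internally.
    while True:
        try:
            return sock.recv(size)
        except socket.error as e:
            if e.args[0] != errno.EINTR:
                raise
            # interrupted by a signal: simply issue the same recv() again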
tunneln/CarnotKE
jyhton/Lib/test/test_StringIO_jy.py
9
1994
import unittest import cStringIO from test import test_support class TestUnicodeInput(unittest.TestCase): def test_differences_handling_unicode(self): # Test for the "feature" described on #1089. # # Basically, StringIO returns unicode objects if you feed it unicode, # but cStringIO doesn't. This should change in future versions of # CPython and Jython. self.assertEqual(u'foo', cStringIO.StringIO(u'foo').read()) self.assertEqual('foo', cStringIO.StringIO(u'foo').read()) class TestWrite(unittest.TestCase): def test_write_seek_write(self): f = cStringIO.StringIO() f.write('hello') f.seek(2) f.write('hi') self.assertEquals(f.getvalue(), 'hehio') #XXX: this should get pushed to CPython's test_StringIO def test_write_past_end(self): f = cStringIO.StringIO() f.write("abcdef") f.seek(10) f.write("uvwxyz") self.assertEqual(f.getvalue(), 'abcdef\x00\x00\x00\x00uvwxyz') def test_write_seek_back_then_write(self): # http://bugs.jython.org/issue2324 s = "abcdef" for i in xrange(len(s)): f = cStringIO.StringIO() f.write(s) f.seek(i) f.write("x" * 47) self.assertEqual(f.getvalue(), s[:i] + ("x" * 47)) class TestGetValueAfterClose(unittest.TestCase): # This test, or something like it, should really be pushed upstream def test_getvalue_after_close(self): f = cStringIO.StringIO('hello') f.getvalue() f.close() try: f.getvalue() except ValueError: pass else: self.fail("cStringIO.StringIO: getvalue() after close() should have raised ValueError") def test_main(): test_support.run_unittest(TestUnicodeInput) test_support.run_unittest(TestWrite) test_support.run_unittest(TestGetValueAfterClose) if __name__ == '__main__': test_main()
apache-2.0
4,773,065,512,641,525,000
-5,580,555,642,475,370,000
31.16129
99
0.613842
false
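A quick interactive illustration of the two behaviours those tests lock in: writing past the end of a cStringIO buffer NUL-fills the gap, and getvalue() raises ValueError once the object is closed. A sketch mirroring the assertions above:

import cStringIO

f = cStringIO.StringIO()
f.write("abcdef")
f.seek(10)                # seek past the current end of the buffer ...
f.write("uvwxyz")         # ... and the gap is filled with NUL bytes
assert f.getvalue() == "abcdef\x00\x00\x00\x00uvwxyz"

f.close()
try:
    f.getvalue()          # must raise after close()
except ValueError:
    pass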
jpwhite3/python-whirlwind-tour
examples/lab4.py
1
1539
from __future__ import print_function import sys import re import glob import argparse def eprint(*args, **kwargs): # Print to STDERR instead of STDOUT print(*args, file=sys.stderr, **kwargs) def grep(expression, filepath, ignorecase=False, invert=False): # Use the pattern as a real regular expression, as the CLI help promises raw_expression = expression with open(filepath) as file: for line in file: # Enable case matching? if ignorecase: matches = re.search(raw_expression, line, re.I) else: matches = re.search(raw_expression, line) # Invert matches if need be and print if matches and not invert: print(line) elif invert and not matches: print(line) def main(): parser = argparse.ArgumentParser(description='This is a pure Python based clone of the GREP command') parser.add_argument('expression', action="store", type=str, help="Regular expression to match against") parser.add_argument('filepath', action="store", type=str, help="Path to file to search in. Supports wildcard globs") parser.add_argument('-i', action="store_true", default=False, dest="ignorecase", help="Ignore case") parser.add_argument('-v', action="store_true", default=False, dest="invert", help="Show lines that don't match") args = parser.parse_args() file_list = glob.glob(args.filepath) for f in file_list: if len(file_list) > 1: eprint("\nResults for file: %s" % f) eprint("-"*(len(f)+18)) grep(args.expression, f, ignorecase=args.ignorecase, invert=args.invert) if __name__ == '__main__': main()
cc0-1.0
-6,719,891,253,829,459,000
9,093,899,611,862,266,000
30.744681
117
0.680962
false
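Typical use of the clone, assuming the script is importable as lab4 (the file names are illustrative). From the shell: python lab4.py -i "connection reset" "logs/*.log". Calling the search function directly:

from lab4 import grep

# case-insensitive search of a single file, printing matching lines
grep("connection reset", "logs/app.log", ignorecase=True)

# inverted search: print only the lines that do NOT match
grep("DEBUG", "logs/app.log", invert=True)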
Nexenta/cinder
cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py
5
6841
# Copyright (c) 2016 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure from cinder import test from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as vnx_ex from cinder.tests.unit.volume.drivers.emc.vnx import res_mock import cinder.volume.drivers.emc.vnx.taskflows as vnx_taskflow class TestTaskflow(test.TestCase): def setUp(self): super(TestTaskflow, self).setUp() self.work_flow = linear_flow.Flow('test_task') @res_mock.patch_client def test_copy_snapshot_task(self, client, mocked): store_spec = {'client': client, 'snap_name': 'original_name', 'new_snap_name': 'new_name' } self.work_flow.add(vnx_taskflow.CopySnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_copy_snapshot_task_revert(self, client, mocked): store_spec = {'client': client, 'snap_name': 'original_name', 'new_snap_name': 'new_name' } self.work_flow.add(vnx_taskflow.CopySnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXSnapError, engine.run) @res_mock.patch_client def test_create_smp_task(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'mount_point_name', 'base_lun_name': 'base_name' } self.work_flow.add(vnx_taskflow.CreateSMPTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() smp_id = engine.storage.fetch('smp_id') self.assertEqual(15, smp_id) @res_mock.patch_client def test_create_smp_task_revert(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'mount_point_name', 'base_lun_name': 'base_name' } self.work_flow.add(vnx_taskflow.CreateSMPTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXCreateLunError, engine.run) smp_id = engine.storage.fetch('smp_id') self.assertIsInstance(smp_id, failure.Failure) @res_mock.patch_client def test_attach_snap_task(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'mount_point_name', 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.AttachSnapTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_attach_snap_task_revert(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'mount_point_name', 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.AttachSnapTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXAttachSnapError, engine.run) @res_mock.patch_client def test_create_snapshot_task(self, client, mocked): store_spec = { 'client': client, 'lun_id': 12, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.CreateSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_create_snapshot_task_revert(self, client, mocked): store_spec = { 'client': client, 'lun_id': 13, 
'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.CreateSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXCreateSnapError, engine.run) @res_mock.patch_client def test_allow_read_write_task(self, client, mocked): store_spec = { 'client': client, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.AllowReadWriteTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_allow_read_write_task_revert(self, client, mocked): store_spec = { 'client': client, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.AllowReadWriteTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXSnapError, engine.run) @res_mock.patch_client def test_create_cg_snapshot_task(self, client, mocked): store_spec = { 'client': client, 'cg_name': 'test_cg', 'cg_snap_name': 'my_snap_name' } self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() snap_name = engine.storage.fetch('new_cg_snap_name') self.assertIsInstance(snap_name, res_mock.StorageObjectMock) @res_mock.patch_client def test_create_cg_snapshot_task_revert(self, client, mocked): store_spec = { 'client': client, 'cg_name': 'test_cg', 'cg_snap_name': 'my_snap_name' } self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXCreateSnapError, engine.run)
apache-2.0
-8,461,004,095,106,041,000
-1,433,600,651,647,853,800
36.79558
77
0.562637
false
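Every test above follows the same shape: build a linear_flow, load it into an engine together with a store dict whose keys match the tasks' argument names, then run it (revert() fires when a later task fails). A minimal self-contained sketch of that pattern using a toy task instead of the VNX ones:

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow

class EchoTask(task.Task):
    def execute(self, message):
        # 'message' is injected by name from the engine's store dict
        print(message)

    def revert(self, message, **kwargs):
        # called if a later task in the flow raises
        print("undoing: %s" % message)

flow = linear_flow.Flow('demo').add(EchoTask())
engine = taskflow.engines.load(flow, store={'message': 'hello'})
engine.run()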
lifeinoppo/littlefishlet-scode
RES/REF/python_sourcecode/ipython-master/IPython/utils/process.py
17
2937
# encoding: utf-8 """ Utilities for working with external processes. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import os import sys if sys.platform == 'win32': from ._process_win32 import system, getoutput, arg_split, check_pid elif sys.platform == 'cli': from ._process_cli import system, getoutput, arg_split, check_pid else: from ._process_posix import system, getoutput, arg_split, check_pid from ._process_common import getoutputerror, get_output_error_code, process_handler from . import py3compat class FindCmdError(Exception): pass def find_cmd(cmd): """Find absolute path to executable cmd in a cross platform manner. This function tries to determine the full path to a command line program using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the time it will use the version that is first on the users `PATH`. Warning, don't use this to find IPython command line programs as there is a risk you will find the wrong one. Instead find those using the following code and looking for the application itself:: from IPython.utils.path import get_ipython_module_path from IPython.utils.process import pycmd2argv argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp')) Parameters ---------- cmd : str The command line program to look for. """ path = py3compat.which(cmd) if path is None: raise FindCmdError('command could not be found: %s' % cmd) return path def is_cmd_found(cmd): """Check whether executable `cmd` exists or not and return a bool.""" try: find_cmd(cmd) return True except FindCmdError: return False def pycmd2argv(cmd): r"""Take the path of a python command and return a list (argv-style). This only works on Python based command line programs and will find the location of the ``python`` executable using ``sys.executable`` to make sure the right version is used. For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe, .com or .bat, and [sys.executable, cmd] otherwise. Parameters ---------- cmd : string The path of the command. Returns ------- argv-style list. """ ext = os.path.splitext(cmd)[1] if ext in ['.exe', '.com', '.bat']: return [cmd] else: return [sys.executable, cmd] def abbrev_cwd(): """ Return abbreviated version of cwd, e.g. d:mydir """ cwd = py3compat.getcwd().replace('\\','/') drivepart = '' tail = cwd if sys.platform == 'win32': if len(cwd) < 4: return cwd drivepart,tail = os.path.splitdrive(cwd) parts = tail.split('/') if len(parts) > 2: tail = '/'.join(parts[-2:]) return (drivepart + ( cwd == '/' and '/' or tail))
gpl-2.0
2,139,538,226,965,875,000
2,791,030,442,492,846,000
26.707547
83
0.645216
false
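How the helpers compose in practice; a short sketch ('git' is only an example command):

from IPython.utils.process import find_cmd, is_cmd_found, getoutput, pycmd2argv

if is_cmd_found('git'):
    print(find_cmd('git'))             # absolute path; raises FindCmdError otherwise
    print(getoutput('git --version'))  # run the command and capture its output

# build an argv list that runs a .py file under the current interpreter
argv = pycmd2argv('/path/to/script.py')  # -> [sys.executable, '/path/to/script.py']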
galfaroi/trading-with-python
lib/extra.py
77
2540
''' Created on Apr 28, 2013 Copyright: Jev Kuznetsov License: BSD ''' from __future__ import print_function import sys import urllib import os import xlrd # module for excel file reading import pandas as pd class ProgressBar: def __init__(self, iterations): self.iterations = iterations self.prog_bar = '[]' self.fill_char = '*' self.width = 50 self.__update_amount(0) def animate(self, iteration): print('\r', self, end='') sys.stdout.flush() self.update_iteration(iteration + 1) def update_iteration(self, elapsed_iter): self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0) self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations) def __update_amount(self, new_amount): percent_done = int(round((new_amount / 100.0) * 100.0)) all_full = self.width - 2 num_hashes = int(round((percent_done / 100.0) * all_full)) self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']' pct_place = (len(self.prog_bar) // 2) - len(str(percent_done)) pct_string = '%d%%' % percent_done self.prog_bar = self.prog_bar[0:pct_place] + \ (pct_string + self.prog_bar[pct_place + len(pct_string):]) def __str__(self): return str(self.prog_bar) def getSpyHoldings(dataDir): ''' get SPY holdings from the net, uses temp data storage to save xls file ''' dest = os.path.join(dataDir,"spy_holdings.xls") if os.path.exists(dest): print('File found, skipping download') else: print('saving to', dest) urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700", dest) # download xls file and save it to data directory # parse wb = xlrd.open_workbook(dest) # open xls file, create a workbook sh = wb.sheet_by_index(0) # select first sheet data = {'name':[], 'symbol':[], 'weight':[],'sector':[]} for rowNr in range(5,505): # cycle through the rows v = sh.row_values(rowNr) # get all row values data['name'].append(v[0]) data['symbol'].append(v[1]) # symbol is in the second column, append it to the list data['weight'].append(float(v[2])) data['sector'].append(v[3]) return pd.DataFrame(data)
bsd-3-clause
8,088,655,905,664,437,000
4,103,761,116,623,232,500
34.811594
152
0.579528
false
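Driving ProgressBar from a loop, assuming the module above is importable as extra; the sleep stands in for real work:

import time
from extra import ProgressBar

bar = ProgressBar(100)      # total number of iterations
for i in range(100):
    time.sleep(0.01)        # do some work
    bar.animate(i)          # redraws e.g. '[*****     ] 50% 50 of 100 complete'
print('')                   # step off the carriage-return line when done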
ayoubg/gem5-graphics
gem5-gpu/tests/quick/se_gpu/10.backprop/test.py
1
1654
# Copyright (c) 2006 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Joel Hestness options.clusters = 4 options.cmd = 'gem5_gpu_backprop' options.options = '256'
bsd-3-clause
-4,246,125,919,563,267,000
9,147,308,229,938,351,000
52.354839
72
0.792019
false
bcarr092/pyCovertAudio
src/pyCovertAudio/BFSKModulator.py
1
2146
from pyCovertAudio_lib import * from BaseModulator import BaseModulator from SignalFunctions import SignalFunctions class BFSKModulator(BaseModulator): def __init__( self, bitsPerSymbol, sampleRate, samplesPerSymbol, symbolExpansionFactor, separationIntervals, configuration ): BaseModulator.__init__( self, bitsPerSymbol, sampleRate, samplesPerSymbol, symbolExpansionFactor, separationIntervals, configuration ) ( self.symbol0Frequency, self.symbol1Frequency, self.deltaFrequency, self.bandwidth ) = \ python_BFSK_determine_frequencies( self.samplesPerSymbol, self.sampleRate, self.carrierFrequency, self.separationIntervals ) def modulate(self, symbolSequence, signal, sentinel=None): symbolSignalLength = self.samplesPerSymbol * self.symbolExpansionFactor for symbol in symbolSequence: symbolFrequency = self.carrierFrequency if(symbol == 1): symbolFrequency += self.symbol1Frequency else: symbolFrequency += self.symbol0Frequency x = \ SignalFunctions.modulateFSK( symbolSignalLength, self.sampleRate, [symbolFrequency] ) signal.extend(x[: self.samplesPerSymbol]) signal.extend( [0.0 for i in range( (self.symbolExpansionFactor - 1) * self.samplesPerSymbol)] ) def toString(self): return ( "Modulator:\n\tAlgorithm:\t\t\tBFSK\n\tSymbol 0 frequency:\t\t" "%.02f\n\tSymbol 1 frequency:\t\t%.02f\n\tMin frequency" " separation:\t%.02f\n\tBandwidth:\t\t\t%.02f\n%s" % ( self.symbol0Frequency, self.symbol1Frequency, self.deltaFrequency, self.bandwidth, BaseModulator.toString(self) ) )
apache-2.0
-1,715,937,445,694,750,700
1,148,634,791,537,840,100
29.225352
79
0.55685
false
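The core idea is classic binary FSK: each bit selects one of two tones spaced around the carrier, one burst per symbol. A library-free numpy sketch of that idea (the frequencies and rates are illustrative; the real class derives its tone spacing via python_BFSK_determine_frequencies and pads according to the symbol expansion factor):

import numpy as np

def bfsk_modulate(bits, f0, f1, sample_rate, samples_per_symbol):
    # one sinusoid burst per bit: f0 encodes a 0 bit, f1 encodes a 1 bit
    t = np.arange(samples_per_symbol) / float(sample_rate)
    tones = {0: np.sin(2 * np.pi * f0 * t), 1: np.sin(2 * np.pi * f1 * t)}
    return np.concatenate([tones[b] for b in bits])

signal = bfsk_modulate([1, 0, 1, 1], f0=1200.0, f1=2200.0,
                       sample_rate=48000, samples_per_symbol=480)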
sebrandon1/nova
nova/virt/libvirt/designer.py
5
5322
# Copyright (C) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Policy based configuration of libvirt objects This module provides helper APIs for populating the config.py classes based on common operational needs / policies """ import six from nova.pci import utils as pci_utils def set_vif_guest_frontend_config(conf, mac, model, driver, queues=None): """Populate a LibvirtConfigGuestInterface instance with guest frontend details. """ conf.mac_addr = mac if model is not None: conf.model = model if driver is not None: conf.driver_name = driver if queues is not None: conf.vhost_queues = queues def set_vif_host_backend_bridge_config(conf, brname, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for a software bridge. """ conf.net_type = "bridge" conf.source_dev = brname if tapname: conf.target_dev = tapname def set_vif_host_backend_ethernet_config(conf, tapname): """Populate a LibvirtConfigGuestInterface instance with host backend details for an externally configured host device. NB use of this configuration is discouraged by libvirt project and will mark domains as 'tainted'. """ conf.net_type = "ethernet" conf.target_dev = tapname conf.script = "" def set_vif_host_backend_802qbg_config(conf, devname, managerid, typeid, typeidversion, instanceid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an 802.1qbg device. """ conf.net_type = "direct" conf.source_dev = devname conf.source_mode = "vepa" conf.vporttype = "802.1Qbg" conf.add_vport_param("managerid", managerid) conf.add_vport_param("typeid", typeid) conf.add_vport_param("typeidversion", typeidversion) conf.add_vport_param("instanceid", instanceid) if tapname: conf.target_dev = tapname def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an 802.1qbh device. """ conf.net_type = net_type if net_type == 'direct': conf.source_mode = 'passthrough' conf.source_dev = pci_utils.get_ifname_by_pci_address(devname) conf.driver_name = 'vhost' else: conf.source_dev = devname conf.model = None conf.vporttype = "802.1Qbh" conf.add_vport_param("profileid", profileid) if tapname: conf.target_dev = tapname def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for a device that supports hardware virtual ethernet bridge.
""" conf.net_type = net_type if net_type == 'direct': conf.source_mode = 'passthrough' conf.source_dev = pci_utils.get_ifname_by_pci_address(devname) conf.driver_name = 'vhost' else: conf.source_dev = devname conf.model = None conf.vlan = vlan if tapname: conf.target_dev = tapname def set_vif_host_backend_hostdev_pci_config(conf, pci_slot): """Populate a LibvirtConfigGuestHostdev instance with pci address data.""" conf.domain, conf.bus, conf.slot, conf.function = ( pci_utils.get_pci_address_fields(pci_slot)) def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"): """Populate a LibvirtConfigGuestInterface instance with direct Interface. """ conf.net_type = "direct" conf.source_mode = mode conf.source_dev = devname conf.model = "virtio" def set_vif_host_backend_vhostuser_config(conf, mode, path): """Populate a LibvirtConfigGuestInterface instance with host backend details for vhostuser socket. """ conf.net_type = "vhostuser" conf.vhostuser_type = "unix" conf.vhostuser_mode = mode conf.vhostuser_path = path def set_vif_bandwidth_config(conf, inst_type): """Config vif inbound/outbound bandwidth limit. parameters are set in instance_type_extra_specs table, key is in the format quota:vif_inbound_average. """ bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak', 'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak', 'vif_outbound_burst'] for key, value in six.iteritems(inst_type.get('extra_specs', {})): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in bandwidth_items: setattr(conf, scope[1], value)
apache-2.0
-2,112,114,188,505,080,600
-8,399,510,755,286,488,000
31.45122
78
0.661405
false
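set_vif_bandwidth_config() only honours extra_specs keys of the form quota:<limit-name>. A tiny demo of that key parsing in isolation (Conf is a stand-in for the libvirt config object):

class Conf(object):
    pass

conf = Conf()
extra_specs = {'quota:vif_inbound_average': '1024',  # picked up: 'quota' scope
               'hw:cpu_policy': 'dedicated'}         # ignored: wrong scope

for key, value in extra_specs.items():
    scope = key.split(':')
    if len(scope) > 1 and scope[0] == 'quota':
        setattr(conf, scope[1], value)

assert conf.vif_inbound_average == '1024'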
nirmeshk/oh-mainline
vendor/packages/html5lib/html5lib/tests/test_serializer.py
72
7831
import os import unittest from support import html5lib_test_files try: import json except ImportError: import simplejson as json import html5lib from html5lib import html5parser, serializer, constants from html5lib.treewalkers._base import TreeWalker optionals_loaded = [] try: from lxml import etree optionals_loaded.append("lxml") except ImportError: pass default_namespace = constants.namespaces["html"] class JsonWalker(TreeWalker): def __iter__(self): for token in self.tree: type = token[0] if type == "StartTag": if len(token) == 4: namespace, name, attrib = token[1:4] else: namespace = default_namespace name, attrib = token[1:3] yield self.startTag(namespace, name, self._convertAttrib(attrib)) elif type == "EndTag": if len(token) == 3: namespace, name = token[1:3] else: namespace = default_namespace name = token[1] yield self.endTag(namespace, name) elif type == "EmptyTag": if len(token) == 4: namespace, name, attrib = token[1:] else: namespace = default_namespace name, attrib = token[1:] for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)): yield token elif type == "Comment": yield self.comment(token[1]) elif type in ("Characters", "SpaceCharacters"): for token in self.text(token[1]): yield token elif type == "Doctype": if len(token) == 4: yield self.doctype(token[1], token[2], token[3]) elif len(token) == 3: yield self.doctype(token[1], token[2]) else: yield self.doctype(token[1]) else: raise ValueError("Unknown token type: " + type) def _convertAttrib(self, attribs): """html5lib tree-walkers use a dict of (namespace, name): value for attributes, but JSON cannot represent this. Convert from the format in the serializer tests (a list of dicts with "namespace", "name", and "value" as keys) to html5lib's tree-walker format.""" attrs = {} for attrib in attribs: name = (attrib["namespace"], attrib["name"]) assert(name not in attrs) attrs[name] = attrib["value"] return attrs def serialize_html(input, options): options = dict([(str(k),v) for k,v in options.iteritems()]) return serializer.HTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None)) def serialize_xhtml(input, options): options = dict([(str(k),v) for k,v in options.iteritems()]) return serializer.XHTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None)) def make_test(input, expected, xhtml, options): result = serialize_html(input, options) if len(expected) == 1: assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:False\n%s"%(expected[0], result, str(options)) elif result not in expected: assert False, "Expected: %s, Received: %s" % (expected, result) if not xhtml: return result = serialize_xhtml(input, options) if len(xhtml) == 1: assert xhtml[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:True\n%s"%(xhtml[0], result, str(options)) elif result not in xhtml: assert False, "Expected: %s, Received: %s" % (xhtml, result) class EncodingTestCase(unittest.TestCase): def throwsWithLatin1(self, input): self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"}) def testDoctypeName(self): self.throwsWithLatin1([["Doctype", u"\u0101"]]) def testDoctypePublicId(self): self.throwsWithLatin1([["Doctype", u"potato", u"\u0101"]]) def testDoctypeSystemId(self): self.throwsWithLatin1([["Doctype", u"potato", u"potato", u"\u0101"]]) def testCdataCharacters(self): self.assertEquals("<style>&amacr;", serialize_html([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}], ["Characters", u"\u0101"]], {"encoding": 
"iso-8859-1"})) def testCharacters(self): self.assertEquals("&amacr;", serialize_html([["Characters", u"\u0101"]], {"encoding": "iso-8859-1"})) def testStartTagName(self): self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]]) def testEmptyTagName(self): self.throwsWithLatin1([["EmptyTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]]) def testAttributeName(self): self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"span", [{"namespace": None, "name": u"\u0101", "value": u"potato"}]]]) def testAttributeValue(self): self.assertEquals("<span potato=&amacr;>", serialize_html([["StartTag", u"http://www.w3.org/1999/xhtml", u"span", [{"namespace": None, "name": u"potato", "value": u"\u0101"}]]], {"encoding": "iso-8859-1"})) def testEndTagName(self): self.throwsWithLatin1([["EndTag", u"http://www.w3.org/1999/xhtml", u"\u0101"]]) def testComment(self): self.throwsWithLatin1([["Comment", u"\u0101"]]) if "lxml" in optionals_loaded: class LxmlTestCase(unittest.TestCase): def setUp(self): self.parser = etree.XMLParser(resolve_entities=False) self.treewalker = html5lib.getTreeWalker("lxml") self.serializer = serializer.HTMLSerializer() def testEntityReplacement(self): doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>""" tree = etree.fromstring(doc, parser = self.parser).getroottree() result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False) self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result) def testEntityXML(self): doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&gt;</html>""" tree = etree.fromstring(doc, parser = self.parser).getroottree() result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False) self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&gt;</html>""", result) def testEntityNoResolve(self): doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>""" tree = etree.fromstring(doc, parser = self.parser).getroottree() result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False, resolve_entities=False) self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>""", result) def test_serializer(): for filename in html5lib_test_files('serializer', '*.test'): tests = json.load(file(filename)) test_name = os.path.basename(filename).replace('.test','') for index, test in enumerate(tests['tests']): xhtml = test.get("xhtml", test["expected"]) if test_name == 'optionaltags': xhtml = None yield make_test, test["input"], test["expected"], xhtml, test.get("options", {})
agpl-3.0
-8,932,499,410,858,593,000
-2,662,605,769,737,592,000
42.505556
149
0.571702
false
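The JsonWalker above lets the tests drive the serializer from a hand-written token stream instead of a parsed tree. With the module's definitions in scope, an input looks like this (note the default HTMLSerializer omits optional end tags, so the output may drop </p>):

tokens = [
    ["StartTag", "http://www.w3.org/1999/xhtml", "p", []],
    ["Characters", u"hello"],
    ["EndTag", "http://www.w3.org/1999/xhtml", "p"],
]
print(serialize_html(tokens, {}))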
davidwaroquiers/pymatgen
pymatgen/io/wannier90.py
5
6189
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Modules for working with wannier90 input and output. """ from typing import Sequence import numpy as np from scipy.io import FortranEOFError, FortranFile __author__ = "Mark Turiansky" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __status__ = "Production" __date__ = "Jun 04, 2020" class Unk: """ Object representing the data in a UNK file. .. attribute:: ik int index of kpoint for this file .. attribute:: data numpy.ndarray that contains the wavefunction data in the UNK file. The shape should be (nbnd, ngx, ngy, ngz) for regular calculations and (nbnd, 2, ngx, ngy, ngz) for noncollinear calculations. .. attribute:: is_noncollinear bool that specifies if data is from a noncollinear calculation .. attribute:: nbnd int number of bands in data .. attribute:: ng sequence of three integers that correspond to the grid size of the given data. The definition is ng = (ngx, ngy, ngz). """ ik: int is_noncollinear: bool nbnd: int ng: Sequence[int] def __init__(self, ik: int, data: np.ndarray) -> None: """ Initialize Unk class. Args: ik (int): index of the kpoint UNK file is for data (np.ndarray): data from the UNK file that has shape (nbnd, ngx, ngy, ngz) or (nbnd, 2, ngx, ngy, ngz) if noncollinear """ self.ik = ik self.data = data @property def data(self) -> np.ndarray: """ np.ndarray: contains the wavefunction data in the UNK file. The shape should be (nbnd, ngx, ngy, ngz) for regular calculations and (nbnd, 2, ngx, ngy, ngz) for noncollinear calculations. """ return self._data @data.setter def data(self, value: np.ndarray) -> None: """ Sets the value of data. Args: value (np.ndarray): data to replace stored data, must have shape (nbnd, ngx, ngy, ngz) or (nbnd, 2, ngx, ngy, ngz) if noncollinear calculation """ temp_val = np.array(value, dtype=np.complex128) if len(temp_val.shape) not in [4, 5]: raise ValueError( "invalid data shape, must be (nbnd, ngx, ngy, ngz" ") or (nbnd, 2, ngx, ngy, ngz) for noncollinear " f"data, given {temp_val.shape}" ) if len(temp_val.shape) == 5 and temp_val.shape[1] != 2: raise ValueError( "invalid noncollinear data, shape should be (nbnd" f", 2, ngx, ngy, ngz), given {temp_val.shape}" ) self._data = temp_val # derived properties self.is_noncollinear = len(self.data.shape) == 5 self.nbnd = self.data.shape[0] self.ng = self.data.shape[-3:] @staticmethod def from_file(filename: str) -> object: """ Reads the UNK data from file. Args: filename (str): path to UNK file to read Returns: Unk object """ input_data = [] with FortranFile(filename, "r") as f: *ng, ik, nbnd = f.read_ints() for _ in range(nbnd): input_data.append( # when reshaping need to specify ordering as fortran f.read_record(np.complex128).reshape(ng, order="F") ) try: for _ in range(nbnd): input_data.append(f.read_record(np.complex128).reshape(ng, order="F")) is_noncollinear = True except FortranEOFError: is_noncollinear = False # mypy made me create an extra variable here >:( data = np.array(input_data, dtype=np.complex128) # spinors are interwoven, need to separate them if is_noncollinear: temp_data = np.empty((nbnd, 2, *ng), dtype=np.complex128) temp_data[:, 0, :, :, :] = data[::2, :, :, :] temp_data[:, 1, :, :, :] = data[1::2, :, :, :] return Unk(ik, temp_data) return Unk(ik, data) def write_file(self, filename: str) -> None: """ Write the UNK file.
Args: filename (str): path to UNK file to write, the name should have the form 'UNKXXXXX.YY' where XXXXX is the kpoint index (Unk.ik) and YY is 1 or 2 for the spin index or NC if noncollinear """ with FortranFile(filename, "w") as f: f.write_record(np.array([*self.ng, self.ik, self.nbnd], dtype=np.int32)) for ib in range(self.nbnd): if self.is_noncollinear: f.write_record(self.data[ib, 0].flatten("F")) f.write_record(self.data[ib, 1].flatten("F")) else: f.write_record(self.data[ib].flatten("F")) def __repr__(self) -> str: return ( f"<UNK ik={self.ik} nbnd={self.nbnd} ncl={self.is_noncollinear}" + f" ngx={self.ng[0]} ngy={self.ng[1]} ngz={self.ng[2]}>" ) def __eq__(self, other: object) -> bool: if not isinstance(other, Unk): return NotImplemented if not np.allclose(self.ng, other.ng): return False if self.ik != other.ik: return False if self.is_noncollinear != other.is_noncollinear: return False if self.nbnd != other.nbnd: return False for ib in range(self.nbnd): if self.is_noncollinear: if not ( np.allclose(self.data[ib, 0], other.data[ib, 0], atol=1e-4) and np.allclose(self.data[ib, 1], other.data[ib, 1], atol=1e-4) ): return False else: if not np.allclose(self.data[ib], other.data[ib], atol=1e-4): return False return True
mit
-6,046,016,428,954,525,000
3,416,918,695,524,368,400
31.067358
113
0.539182
false
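A round-trip sketch for Unk with random data; the file name follows the UNKXXXXX.YY convention that write_file documents:

import numpy as np
from pymatgen.io.wannier90 import Unk

nbnd, ng = 4, (6, 6, 6)
data = np.random.rand(nbnd, *ng) + 1j * np.random.rand(nbnd, *ng)

unk = Unk(1, data)            # kpoint index 1, shape (nbnd, ngx, ngy, ngz)
unk.write_file('UNK00001.1')  # spin channel 1

assert Unk.from_file('UNK00001.1') == unk  # __eq__ compares bands to ~1e-4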
bkj/ernest
enrich/modules/enrich_terminal_nodes.py
2
6773
#!/usr/bin/env python ''' Add single neighbor tag for owners and issuers to ownership index; tag enables hiding terminal nodes in front end ** Note ** This runs prospectively using the --most-recent argument ''' import argparse import json import logging from elasticsearch import Elasticsearch from elasticsearch.helpers import parallel_bulk, scan class ENRICH_TERMINAL_NODES: def __init__(self, args, parent_logger): self.args = args self.logger = logging.getLogger(parent_logger + ".terminal_nodes") with open(args.config_path, 'r') as inf: config = json.load(inf) self.config = config self.client = Elasticsearch([{ 'host': config['es']['host'], 'port': config['es']['port']} ]) self.match_all = { "query": { "match_all": {} } } def raw_dict(self, x, dict_type): if dict_type == 'issuer': key = x['issuerCik'] val = x['ownerCik'] elif dict_type == 'owner': key = x['ownerCik'] val = x['issuerCik'] return { "key": key, "value": val } def build_query(self, val): val = '__meta__.' + val + '_has_one_neighbor' query = { "query": { "bool": { "should": [ { "filtered": { "filter": { "missing": { "field": val } } } }, { "match": { val: True } } ], "minimum_should_match": 1 } } } return query def get_terminal_nodes(self, search_type): temp_dict = {} for a in scan(self.client, index=self.config['ownership']['index'], query=self.match_all): x = self.raw_dict(a['_source'], search_type) if x["key"] in temp_dict: if temp_dict[x["key"]]["terminal"] is True: if x["value"] != temp_dict[x["key"]]["value"]: temp_dict[x["key"]]["terminal"] = False else: pass else: pass else: temp_dict[x["key"]] = { "value": x["value"], "terminal": True } return [key for key in temp_dict if temp_dict[key]['terminal'] is True] def get_update_nodes(self, query_type): gtn = self.get_terminal_nodes(query_type) if self.args.from_scratch: query = self.build_query(query_type) else: query = {"query": { "bool": { "must_not": { "match": { "__meta__." + query_type + "_has_one_neighbor": True } }, "must": { "terms": { } } } }} return query, gtn def main(self, query_type): actions = [] query, t_nodes = self.get_update_nodes(query_type) i = 0 tn = [t_nodes[j: j + 1024] for j in range(0, len(t_nodes), 1024)] for p in tn: query["query"]["bool"]["must"]["terms"][query_type + "Cik"] = p for person in scan(self.client, index=self.config['ownership']['index'], query=query): actions.append({ "_op_type": "update", "_index": self.config['ownership']['index'], "_id": person['_id'], "_type": person['_type'], "doc": { "__meta__": { query_type + "_has_one_neighbor": True } } }) i += 1 if i > 500: for success, info in parallel_bulk(self.client, actions, chunk_size=510): if not success: self.logger.error('[RESPONSE]|{}'.format(info)) else: self.logger.info('[RESPONSE]|{}'.format(info)) actions = [] i = 0 for success, info in parallel_bulk(self.client, actions, chunk_size=510): if not success: self.logger.error('[RESPONSE]|{}'.format(info)) else: self.logger.info('[RESPONSE]|{}'.format(info)) f_query = { "query": { "bool": { "must_not": { "terms": { query_type + 'Cik': t_nodes } }, "must": { "match": { "__meta__." 
+ query_type + "_has_one_neighbor": True } } } } } for a in scan(self.client, index=self.config['ownership']['index'], query=f_query): a['_source']['__meta__'][query_type + '_has_one_neighbor'] = False res = self.client.index( index=self.config['ownership']['index'], doc_type=a['_type'], body=a['_source'], id=a['_id'] ) self.logger.info(res) if __name__ == "__main__": parser = argparse.ArgumentParser(description='add single neighbor tags') parser.add_argument('--from-scratch', dest='from_scratch', action="store_true") parser.add_argument('--most-recent', dest='most_recent', action="store_true") parser.add_argument('--config-path', type=str, action='store', default='../config.json') args = parser.parse_args()
apache-2.0
-5,975,908,906,172,110,000
-7,246,327,913,641,830,000
31.878641
80
0.367193
false
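Stripped to its essentials, the script's update pass is the scan-then-parallel_bulk pattern from elasticsearch-py. A distilled sketch (host, index and field names are illustrative):

from elasticsearch import Elasticsearch
from elasticsearch.helpers import parallel_bulk, scan

client = Elasticsearch([{'host': 'localhost', 'port': 9200}])

actions = ({'_op_type': 'update',
            '_index': 'ownership',
            '_type': hit['_type'],
            '_id': hit['_id'],
            'doc': {'__meta__': {'owner_has_one_neighbor': True}}}
           for hit in scan(client, index='ownership',
                           query={'query': {'match_all': {}}}))

for success, info in parallel_bulk(client, actions, chunk_size=500):
    if not success:
        print('update failed: %r' % (info,))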
lberruti/ansible-modules-extras
notification/irc.py
41
6075
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Jan-Piet Mens <jpmens () gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: irc version_added: "1.2" short_description: Send a message to an IRC channel description: - Send a message to an IRC channel. This is a very simplistic implementation. options: server: description: - IRC server name/address required: false default: localhost port: description: - IRC server port number required: false default: 6667 nick: description: - Nickname. May be shortened, depending on server's NICKLEN setting. required: false default: ansible msg: description: - The message body. required: true default: null color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). required: false default: "none" choices: [ "none", "yellow", "red", "green", "blue", "black" ] channel: description: - Channel name required: true key: description: - Channel key required: false version_added: 1.7 passwd: description: - Server password required: false timeout: description: - Timeout to use while waiting for successful registration and join messages, this is to prevent an endless loop default: 30 version_added: 1.5 use_ssl: description: - Designates whether TLS/SSL should be used when connecting to the IRC server default: False version_added: 1.8 # informational: requirements for nodes requirements: [ socket ] author: Jan-Piet Mens, Matt Martz ''' EXAMPLES = ''' - irc: server=irc.example.net channel="#t1" msg="Hello world" - local_action: irc port=6669 channel="#t1" msg="All finished at {{ ansible_date_time.iso8601 }}" color=red nick=ansibleIRC ''' # =========================================== # IRC module support methods. # import re import socket import ssl import time from time import sleep def send_msg(channel, msg, server='localhost', port='6667', key=None, nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): '''send message to IRC''' colornumbers = { 'black': "01", 'red': "04", 'green': "09", 'yellow': "08", 'blue': "12", } try: colornumber = colornumbers[color] colortext = "\x03" + colornumber except: colortext = "" message = colortext + msg irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if use_ssl: irc = ssl.wrap_socket(irc) irc.connect((server, int(port))) if passwd: irc.send('PASS %s\r\n' % passwd) irc.send('NICK %s\r\n' % nick) irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)) motd = '' start = time.time() while 1: motd += irc.recv(1024) # The server might send back a shorter nick than we specified (due to NICKLEN), # so grab that and use it from now on (assuming we find the 00[1-4] response).
match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M) if match: nick = match.group('nick') break elif time.time() - start > timeout: raise Exception('Timeout waiting for IRC server welcome response') sleep(0.5) if key: irc.send('JOIN %s %s\r\n' % (channel, key)) else: irc.send('JOIN %s\r\n' % channel) join = '' start = time.time() while 1: join += irc.recv(1024) if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M): break elif time.time() - start > timeout: raise Exception('Timeout waiting for IRC JOIN response') sleep(0.5) irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) irc.send('PART %s\r\n' % channel) irc.send('QUIT\r\n') sleep(1) irc.close() # =========================================== # Main # def main(): module = AnsibleModule( argument_spec=dict( server=dict(default='localhost'), port=dict(default=6667), nick=dict(default='ansible'), msg=dict(required=True), color=dict(default="none", choices=["yellow", "red", "green", "blue", "black", "none"]), channel=dict(required=True), key=dict(), passwd=dict(), timeout=dict(type='int', default=30), use_ssl=dict(type='bool', default=False) ), supports_check_mode=True ) server = module.params["server"] port = module.params["port"] nick = module.params["nick"] msg = module.params["msg"] color = module.params["color"] channel = module.params["channel"] key = module.params["key"] passwd = module.params["passwd"] timeout = module.params["timeout"] use_ssl = module.params["use_ssl"] try: send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) module.exit_json(changed=False, channel=channel, nick=nick, msg=msg) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
-2,971,269,054,332,439,600
-6,767,242,307,440,768,000
27.255814
138
0.59144
false
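With the module above in scope, the helper can also be called directly outside Ansible (server and channel values are placeholders):

send_msg('#t1', 'All builds finished',
         server='irc.example.net', port='6667',
         nick='ansibleIRC', color='green', timeout=30)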
bev-a-tron/pledgeservice
testlib/waitress/trigger.py
31
7964
############################################################################## # # Copyright (c) 2001-2005 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import asyncore import os import socket import errno from waitress.compat import thread # Wake up a call to select() running in the main thread. # # This is useful in a context where you are using Medusa's I/O # subsystem to deliver data, but the data is generated by another # thread. Normally, if Medusa is in the middle of a call to # select(), new output data generated by another thread will have # to sit until the call to select() either times out or returns. # If the trigger is 'pulled' by another thread, it should immediately # generate a READ event on the trigger object, which will force the # select() invocation to return. # # A common use for this facility: letting Medusa manage I/O for a # large number of connections; but routing each request through a # thread chosen from a fixed-size thread pool. When a thread is # acquired, a transaction is performed, but output data is # accumulated into buffers that will be emptied more efficiently # by Medusa. [picture a server that can process database queries # rapidly, but doesn't want to tie up threads waiting to send data # to low-bandwidth connections] # # The other major feature provided by this class is the ability to # move work back into the main thread: if you call pull_trigger() # with a thunk argument, when select() wakes up and receives the # event it will call your thunk from within that thread. The main # purpose of this is to remove the need to wrap thread locks around # Medusa's data structures, which normally do not need them. [To see # why this is true, imagine this scenario: A thread tries to push some # new data onto a channel's outgoing data queue at the same time that # the main thread is trying to remove some] class _triggerbase(object): """OS-independent base class for OS-dependent trigger class.""" kind = None # subclass must set to "pipe" or "loopback"; used by repr def __init__(self): self._closed = False # `lock` protects the `thunks` list from being traversed and # appended to simultaneously. self.lock = thread.allocate_lock() # List of no-argument callbacks to invoke when the trigger is # pulled. These run in the thread running the asyncore mainloop, # regardless of which thread pulls the trigger. self.thunks = [] def readable(self): return True def writable(self): return False def handle_connect(self): pass def handle_close(self): self.close() # Override the asyncore close() method, because it doesn't know about # (so can't close) all the gimmicks we have open. Subclass must # supply a _close() method to do platform-specific closing work. _close() # will be called iff we're not already closed. 
def close(self): if not self._closed: self._closed = True self.del_channel() self._close() # subclass does OS-specific stuff def pull_trigger(self, thunk=None): if thunk: self.lock.acquire() try: self.thunks.append(thunk) finally: self.lock.release() self._physical_pull() def handle_read(self): try: self.recv(8192) except (OSError, socket.error): return self.lock.acquire() try: for thunk in self.thunks: try: thunk() except: nil, t, v, tbinfo = asyncore.compact_traceback() self.log_info( 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)) self.thunks = [] finally: self.lock.release() if os.name == 'posix': class trigger(_triggerbase, asyncore.file_dispatcher): kind = "pipe" def __init__(self, map): _triggerbase.__init__(self) r, self.trigger = self._fds = os.pipe() asyncore.file_dispatcher.__init__(self, r, map=map) def _close(self): for fd in self._fds: os.close(fd) self._fds = [] def _physical_pull(self): os.write(self.trigger, b'x') else: # pragma: no cover # Windows version; uses just sockets, because a pipe isn't select'able # on Windows. class trigger(_triggerbase, asyncore.dispatcher): kind = "loopback" def __init__(self, map): _triggerbase.__init__(self) # Get a pair of connected sockets. The trigger is the 'w' # end of the pair, which is connected to 'r'. 'r' is put # in the asyncore socket map. "pulling the trigger" then # means writing something on w, which will wake up r. w = socket.socket() # Disable buffering -- pulling the trigger sends 1 byte, # and we want that sent immediately, to wake up asyncore's # select() ASAP. w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) count = 0 while True: count += 1 # Bind to a local port; for efficiency, let the OS pick # a free port for us. # Unfortunately, stress tests showed that we may not # be able to connect to that port ("Address already in # use") despite that the OS picked it. This appears # to be a race bug in the Windows socket implementation. # So we loop until a connect() succeeds (almost always # on the first try). See the long thread at # http://mail.zope.org/pipermail/zope/2005-July/160433.html # for hideous details. a = socket.socket() a.bind(("127.0.0.1", 0)) connect_address = a.getsockname() # assigned (host, port) pair a.listen(1) try: w.connect(connect_address) break # success except socket.error as detail: if detail[0] != errno.WSAEADDRINUSE: # "Address already in use" is the only error # I've seen on two WinXP Pro SP2 boxes, under # Pythons 2.3.5 and 2.4.1. raise # (10048, 'Address already in use') # assert count <= 2 # never triggered in Tim's tests if count >= 10: # I've never seen it go above 2 a.close() w.close() raise RuntimeError("Cannot bind trigger!") # Close `a` and try again. Note: I originally put a short # sleep() here, but it didn't appear to help or hurt. a.close() r, addr = a.accept() # r becomes asyncore's (self.)socket a.close() self.trigger = w asyncore.dispatcher.__init__(self, r, map=map) def _close(self): # self.socket is r, and self.trigger is w, from __init__ self.socket.close() self.trigger.close() def _physical_pull(self): self.trigger.send(b'x')
agpl-3.0
8,805,151,411,484,474,000
6,377,403,132,997,173,000
37.84878
79
0.575339
false
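The long comment block above is describing the classic "self-pipe trick". A minimal standalone illustration of the mechanism (POSIX only; the one-second delay is arbitrary):

import os
import select
import threading
import time

r, w = os.pipe()

def pull_trigger():
    time.sleep(1)
    os.write(w, b'x')  # wakes the select() below immediately

threading.Thread(target=pull_trigger).start()
ready, _, _ = select.select([r], [], [], 30)  # returns after ~1s, not 30s
if ready:
    os.read(r, 8192)  # drain the wake-up byte, as handle_read() does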
epssy/hue
desktop/core/ext-py/lxml/benchmark/bench_etree.py
30
10920
import sys, copy from itertools import * from StringIO import StringIO import benchbase from benchbase import (with_attributes, with_text, onlylib, serialized, children, nochange) TEXT = "some ASCII text" UTEXT = u"some klingon: \F8D2" ############################################################ # Benchmarks ############################################################ class BenchMark(benchbase.TreeBenchMark): @nochange def bench_iter_children(self, root): for child in root: pass @nochange def bench_iter_children_reversed(self, root): for child in reversed(root): pass @nochange def bench_first_child(self, root): for i in self.repeat1000: child = root[0] @nochange def bench_last_child(self, root): for i in self.repeat1000: child = root[-1] @nochange def bench_middle_child(self, root): pos = len(root) / 2 for i in self.repeat1000: child = root[pos] @nochange @with_attributes(False) @with_text(text=True) @onlylib('lxe', 'ET') def bench_tostring_text_ascii(self, root): self.etree.tostring(root, method="text") @nochange @with_attributes(False) @with_text(text=True, utext=True) @onlylib('lxe') def bench_tostring_text_unicode(self, root): self.etree.tostring(root, method="text", encoding=unicode) @nochange @with_attributes(False) @with_text(text=True, utext=True) @onlylib('lxe', 'ET') def bench_tostring_text_utf16(self, root): self.etree.tostring(root, method="text", encoding='UTF-16') @nochange @with_attributes(False) @with_text(text=True, utext=True) @onlylib('lxe') @children def bench_tostring_text_utf8_with_tail(self, children): for child in children: self.etree.tostring(child, method="text", encoding='UTF-8', with_tail=True) @nochange @with_attributes(True, False) @with_text(text=True, utext=True) def bench_tostring_utf8(self, root): self.etree.tostring(root, encoding='UTF-8') @nochange @with_attributes(True, False) @with_text(text=True, utext=True) def bench_tostring_utf16(self, root): self.etree.tostring(root, encoding='UTF-16') @nochange @with_attributes(True, False) @with_text(text=True, utext=True) def bench_tostring_utf8_unicode_XML(self, root): xml = unicode(self.etree.tostring(root, encoding='UTF-8'), 'UTF-8') self.etree.XML(xml) @nochange @with_attributes(True, False) @with_text(text=True, utext=True) def bench_write_utf8_parse_stringIO(self, root): f = StringIO() self.etree.ElementTree(root).write(f, encoding='UTF-8') f.seek(0) self.etree.parse(f) @with_attributes(True, False) @with_text(text=True, utext=True) @serialized def bench_parse_stringIO(self, root_xml): f = StringIO(root_xml) self.etree.parse(f) @with_attributes(True, False) @with_text(text=True, utext=True) @serialized def bench_XML(self, root_xml): self.etree.XML(root_xml) @with_attributes(True, False) @with_text(text=True, utext=True) @serialized def bench_iterparse_stringIO(self, root_xml): f = StringIO(root_xml) for event, element in self.etree.iterparse(f): pass @with_attributes(True, False) @with_text(text=True, utext=True) @serialized def bench_iterparse_stringIO_clear(self, root_xml): f = StringIO(root_xml) for event, element in self.etree.iterparse(f): element.clear() def bench_append_from_document(self, root1, root2): # == "1,2 2,3 1,3 3,1 3,2 2,1" # trees 1 and 2, or 2 and 3, or ... 
for el in root2: root1.append(el) def bench_insert_from_document(self, root1, root2): pos = len(root1)/2 for el in root2: root1.insert(pos, el) pos = pos + 1 def bench_rotate_children(self, root): # == "1 2 3" # runs on any single tree independently for i in range(100): el = root[0] del root[0] root.append(el) def bench_reorder(self, root): for i in range(1,len(root)/2): el = root[0] del root[0] root[-i:-i] = [ el ] def bench_reorder_slice(self, root): for i in range(1,len(root)/2): els = root[0:1] del root[0] root[-i:-i] = els def bench_clear(self, root): root.clear() @nochange @children def bench_has_children(self, children): for child in children: if child and child and child and child and child: pass @nochange @children def bench_len(self, children): for child in children: map(len, repeat(child, 20)) @children def bench_create_subelements(self, children): SubElement = self.etree.SubElement for child in children: SubElement(child, '{test}test') def bench_append_elements(self, root): Element = self.etree.Element for child in root: el = Element('{test}test') child.append(el) @nochange @children def bench_makeelement(self, children): empty_attrib = {} for child in children: child.makeelement('{test}test', empty_attrib) @nochange @children def bench_create_elements(self, children): Element = self.etree.Element for child in children: Element('{test}test') @children def bench_replace_children_element(self, children): Element = self.etree.Element for child in children: el = Element('{test}test') child[:] = [el] @children def bench_replace_children(self, children): els = [ self.etree.Element("newchild") ] for child in children: child[:] = els def bench_remove_children(self, root): for child in root: root.remove(child) def bench_remove_children_reversed(self, root): for child in reversed(root): root.remove(child) @children def bench_set_attributes(self, children): for child in children: child.set('a', 'bla') @with_attributes(True) @children @nochange def bench_get_attributes(self, children): for child in children: child.get('bla1') child.get('{attr}test1') @children def bench_setget_attributes(self, children): for child in children: child.set('a', 'bla') for child in children: child.get('a') @nochange def bench_root_getchildren(self, root): root.getchildren() @nochange def bench_root_list_children(self, root): list(root) @nochange @children def bench_getchildren(self, children): for child in children: child.getchildren() @nochange @children def bench_get_children_slice(self, children): for child in children: child[:] @nochange @children def bench_get_children_slice_2x(self, children): for child in children: child[:] child[:] @nochange @children @with_attributes(True, False) @with_text(utext=True, text=True, no_text=True) def bench_deepcopy(self, children): for child in children: copy.deepcopy(child) @nochange @with_attributes(True, False) @with_text(utext=True, text=True, no_text=True) def bench_deepcopy_all(self, root): copy.deepcopy(root) @nochange @children def bench_tag(self, children): for child in children: child.tag @nochange @children def bench_tag_repeat(self, children): for child in children: for i in self.repeat100: child.tag @nochange @with_text(utext=True, text=True, no_text=True) @children def bench_text(self, children): for child in children: child.text @nochange @with_text(utext=True, text=True, no_text=True) @children def bench_text_repeat(self, children): for child in children: for i in self.repeat500: child.text @children def bench_set_text(self, children): text = TEXT for 
child in children: child.text = text @children def bench_set_utext(self, children): text = UTEXT for child in children: child.text = text @nochange @onlylib('lxe') def bench_index(self, root): for child in root: root.index(child) @nochange @onlylib('lxe') def bench_index_slice(self, root): for child in root[5:100]: root.index(child, 5, 100) @nochange @onlylib('lxe') def bench_index_slice_neg(self, root): for child in root[-100:-5]: root.index(child, start=-100, stop=-5) @nochange def bench_getiterator_all(self, root): list(root.getiterator()) @nochange def bench_getiterator_islice(self, root): list(islice(root.getiterator(), 10, 110)) @nochange def bench_getiterator_tag(self, root): list(islice(root.getiterator(self.SEARCH_TAG), 3, 10)) @nochange def bench_getiterator_tag_all(self, root): list(root.getiterator(self.SEARCH_TAG)) @nochange def bench_getiterator_tag_none(self, root): list(root.getiterator("{ThisShould}NeverExist")) @nochange def bench_getiterator_tag_text(self, root): [ e.text for e in root.getiterator(self.SEARCH_TAG) ] @nochange def bench_findall(self, root): root.findall(".//*") @nochange def bench_findall_child(self, root): root.findall(".//*/" + self.SEARCH_TAG) @nochange def bench_findall_tag(self, root): root.findall(".//" + self.SEARCH_TAG) @nochange def bench_findall_path(self, root): root.findall(".//*[%s]/./%s/./*" % (self.SEARCH_TAG, self.SEARCH_TAG)) @nochange @onlylib('lxe') def bench_xpath_path(self, root): ns, tag = self.SEARCH_TAG[1:].split('}') root.xpath(".//*[p:%s]/./p:%s/./*" % (tag,tag), namespaces = {'p':ns}) @nochange @onlylib('lxe') def bench_iterfind(self, root): list(root.iterfind(".//*")) @nochange @onlylib('lxe') def bench_iterfind_tag(self, root): list(root.iterfind(".//" + self.SEARCH_TAG)) @nochange @onlylib('lxe') def bench_iterfind_islice(self, root): list(islice(root.iterfind(".//*"), 10, 110)) if __name__ == '__main__': benchbase.main(BenchMark)
apache-2.0
2,227,882,698,743,588,400
-463,207,850,312,624,600
25.896552
78
0.578571
false
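Each bench_* method above is driven by the benchbase harness across several etree implementations; outside the harness, one of the cheap operations can be timed in isolation roughly like this (assumes lxml is installed; the tree size and iteration counts are arbitrary):

import timeit
from lxml import etree

root = etree.Element('root')
for i in range(500):
    etree.SubElement(root, 'child')

# Approximately what bench_root_list_children measures, minus the harness bookkeeping.
print(timeit.timeit(lambda: list(root), number=1000))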
modera/mcloud
mcloud/plugins/monitor.py
1
1101
import inject
from mcloud.application import ApplicationController
from mcloud.events import EventBus
from mcloud.plugin import IMcloudPlugin
from mcloud.plugins import Plugin
from mcloud.txdocker import IDockerClient
from twisted.internet import reactor
from twisted.python import log
from zope.interface import implements


class DockerMonitorPlugin(Plugin):
    """
    Monitors docker events and emits "containers.updated" event when
    non-internal containers change their state.
    """
    implements(IMcloudPlugin)

    client = inject.attr(IDockerClient)
    event_bus = inject.attr(EventBus)
    app_controller = inject.attr(ApplicationController)

    def setup(self):
        # reactor.callLater(0, self.attach_to_events)
        pass

    def on_event(self, event):
        if not self.app_controller.is_internal(event['id']):
            log.msg('New docker event: %s' % event)
            self.event_bus.fire_event('containers.updated', event)

    def attach_to_events(self, *args):
        log.msg('Start monitoring docker events')
        return self.client.events(self.on_event)
apache-2.0
-8,437,274,882,306,374,000
3,621,962,753,105,252,000
31.382353
82
0.722071
false
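EventBus and ApplicationController above are mcloud internals injected at runtime; the toy bus below is a hypothetical stand-in (not the real mcloud API) showing the fire_event contract the plugin relies on:

import sys

class TinyEventBus(object):
    def __init__(self):
        self.listeners = {}

    def on(self, name, callback):
        self.listeners.setdefault(name, []).append(callback)

    def fire_event(self, name, event):
        # deliver the event to every registered listener, in order
        for callback in self.listeners.get(name, []):
            callback(event)

bus = TinyEventBus()
bus.on('containers.updated', lambda event: sys.stdout.write('%r\n' % (event,)))
bus.fire_event('containers.updated', {'id': 'abc123', 'status': 'start'})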
romain-li/edx-platform
common/lib/xmodule/xmodule/textannotation_module.py
3
6835
"""Text annotation module""" from lxml import etree from pkg_resources import resource_string from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor from xblock.core import Scope, String from xmodule.annotator_mixin import get_instructions from xmodule.annotator_token import retrieve_token from xblock.fragment import Fragment import textwrap # Make '_' a no-op so we can scrape strings. Using lambda instead of # `django.utils.translation.ugettext_noop` because Django cannot be imported in this file _ = lambda text: text class AnnotatableFields(object): """Fields for `TextModule` and `TextDescriptor`.""" data = String( help=_("XML data for the annotation"), scope=Scope.content, default=textwrap.dedent("""\ <annotatable> <instructions> <p> Add the instructions to the assignment here. </p> </instructions> <p> Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse. </p> </annotatable> """)) display_name = String( display_name=_("Display Name"), help=_("The display name for this component."), scope=Scope.settings, default=_('Text Annotation'), ) instructor_tags = String( display_name=_("Tags for Assignments"), help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"), scope=Scope.settings, default='imagery:red,parallelism:blue', ) source = String( display_name=_("Source/Citation"), help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"), scope=Scope.settings, default='None', ) diacritics = String( display_name=_("Diacritic Marks"), help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. 
markname;urltomark;baseline,markname2;urltomark2;baseline2"), scope=Scope.settings, default='', ) annotation_storage_url = String( help=_("Location of Annotation backend"), scope=Scope.settings, default="http://your_annotation_storage.com", display_name=_("Url for Annotation Storage") ) annotation_token_secret = String( help=_("Secret string for annotation storage"), scope=Scope.settings, default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", display_name=_("Secret Token String for Annotation") ) default_tab = String( display_name=_("Default Annotations Tab"), help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."), scope=Scope.settings, default="myNotes", ) # currently only supports one instructor, will build functionality for multiple later instructor_email = String( display_name=_("Email for 'Instructor' Annotations"), help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."), scope=Scope.settings, default="", ) annotation_mode = String( display_name=_("Mode for Annotation Tool"), help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"), scope=Scope.settings, default="everyone", ) class TextAnnotationModule(AnnotatableFields, XModule): ''' Text Annotation Module ''' js = {'coffee': [], 'js': []} css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]} icon_class = 'textannotation' def __init__(self, *args, **kwargs): super(TextAnnotationModule, self).__init__(*args, **kwargs) xmltree = etree.fromstring(self.data) self.instructions = self._extract_instructions(xmltree) self.content = etree.tostring(xmltree, encoding='unicode') self.user_email = "" self.is_course_staff = False if self.runtime.get_user_role() in ['instructor', 'staff']: self.is_course_staff = True if self.runtime.get_real_user is not None: try: self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email except Exception: # pylint: disable=broad-except self.user_email = _("No email address found.") def _extract_instructions(self, xmltree): """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """ return get_instructions(xmltree) def student_view(self, context): """ Renders parameters to template. 
""" context = { 'course_key': self.runtime.course_id, 'display_name': self.display_name_with_default_escaped, 'tag': self.instructor_tags, 'source': self.source, 'instructions_html': self.instructions, 'content_html': self.content, 'token': retrieve_token(self.user_email, self.annotation_token_secret), 'diacritic_marks': self.diacritics, 'annotation_storage': self.annotation_storage_url, 'default_tab': self.default_tab, 'instructor_email': self.instructor_email, 'annotation_mode': self.annotation_mode, 'is_course_staff': self.is_course_staff, } fragment = Fragment(self.system.render_template('textannotation.html', context)) # TinyMCE already exists in Studio so we should not load the files again # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids if self.runtime.get_real_user is not None: fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js") fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js") return fragment class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor): ''' Text Annotation Descriptor ''' module_class = TextAnnotationModule resources_dir = None mako_template = "widgets/raw-edit.html" @property def non_editable_metadata_fields(self): non_editable_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields non_editable_fields.extend([ TextAnnotationDescriptor.annotation_storage_url, TextAnnotationDescriptor.annotation_token_secret, ]) return non_editable_fields
agpl-3.0
-7,104,925,396,849,668,000
6,471,196,971,572,202,000
41.71875
252
0.646525
false
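get_instructions (imported from annotator_mixin) is what pulls the <instructions> node out of the tree above; a simplified sketch of that extraction with plain lxml (assumed behaviour, not the real helper):

from lxml import etree

xmltree = etree.fromstring(
    '<annotatable><instructions><p>Read closely.</p></instructions>'
    '<p>Lorem ipsum dolor sit amet.</p></annotatable>')
instructions = xmltree.find('instructions')
if instructions is not None:
    xmltree.remove(instructions)  # the body keeps only the annotatable text
    html = ''.join(etree.tostring(child, encoding='unicode')
                   for child in instructions)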
dhhagan/PAM
Python/PAM.py
1
5037
#PAM.py

import re
import glob, os, time
from numpy import *
from pylab import *

def analyzeFile(fileName,delim):
    cols = {}
    indexToName = {}
    lineNum = 0
    goodLines = 0
    shortLines = 0
    FILE = open(fileName,'r')
    for line in FILE:
        line = line.strip()
        if lineNum < 1:
            lineNum += 1
            continue
        elif lineNum == 1:
            headings = line.split(delim)
            i = 0
            for heading in headings:
                heading = heading.strip()
                cols[heading] = []
                indexToName[i] = heading
                i += 1
            lineNum += 1
            lineLength = len(cols)
        else:
            data = line.split(delim)
            if len(data) == lineLength:
                goodLines += 1
                i = 0
                for point in data:
                    point = point.strip()
                    cols[indexToName[i]] += [point]
                    i += 1
                lineNum += 1
            else:
                shortLines += 1
                lineNum += 1
                continue
    FILE.close()
    return cols, indexToName, lineNum, shortLines

def numericalSort(value):
    numbers = re.compile(r'(\d+)')
    parts = numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts

def popDate(fileName):
    run = fileName.split('.')[0]
    runNo = run.split('_')[-1]
    return runNo

def getFile(date,regex):#Works
    files = []
    files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
    if date.lower() == 'last':
        files = files.pop()
    else:
        files = [item for item in files if re.search(date,item)]
    return files

def plotConc(data,ozone,times):
    # This function plots data versus time
    import datetime as dt
    from matplotlib import pyplot as plt
    from matplotlib.dates import date2num

    #time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
    time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
    x = date2num(time)
    legend1 = []
    legend2 = []

    fig = plt.figure('Gas Concentration Readings for East St.Louis')
    ax1 = fig.add_subplot(111)
    ax2 = twinx()
    for key,value in data.items():
        ax1.plot_date(x,data[key],'-',xdate=True)
        legend1.append(key)
    for key, value in ozone.items():
        ax2.plot_date(x,ozone[key],'-.',xdate=True)
        legend2.append(key)

    title('Gas Concentrations for East St. Louis', fontsize = 12)
    ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
    ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
    xlabel(r"$Time \, Stamp$", fontsize = 12)
    ax1.legend(legend1,loc='upper right')
    ax2.legend(legend2,loc='lower right')
    grid(True)
    return

def plotBankRelays(data,relays,times):
    # This function plots data versus time
    import datetime as dt
    from matplotlib import pyplot as plt
    from matplotlib.dates import date2num

    time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
    x = date2num(time)
    #x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
    legend1 = []
    legend2 = []
    #plt.locator_params(axis='x', nbins=4)
    fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
    ax1 = fig.add_subplot(111)
    ax2 = twinx()
    for key,value in data.items():
        ax1.plot_date(x,data[key],'-',xdate=True)
        legend1.append(key)
    for key,value in relays.items():
        ax2.plot_date(x,relays[key],'--',xdate=True)
        legend2.append(key)

    title('VAPS Temperatures: Chart 2', fontsize = 12)
    ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
    ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
    ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
    #print [num2date(item) for item in ax1.get_xticks()]
    #ax1.set_xticks(x)
    #ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
    #ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
    ax1.legend(legend1,loc='upper right')
    ax2.legend(legend2,loc='lower right')
    #ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
    plt.subplots_adjust(bottom=0.15)
    grid(True)
    return

def goodFiles(files,goodHeaders,delim): # Good
    irregFiles = 0
    goodFiles = []
    for file in files:
        lineNo = 0
        falseCount = 0
        FILE = open(file,'r')
        for line in FILE:
            line = line.strip()
            if lineNo == 5: # Check all the headings to make sure the file is good
                head = line.split(delim)
                for item in head:
                    if item in goodHeaders:
                        continue
                    else:
                        falseCount += 1
                if falseCount == 0:
                    goodFiles.append(file)
                else:
                    irregFiles += 1
                lineNo += 1
            else:
                lineNo += 1
                continue
        FILE.close()
    return goodFiles, irregFiles
mit
3,632,515,333,932,245,500
-4,308,288,288,667,135,500
27.297753
97
0.561842
false
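analyzeFile above skips the first line of the input and treats the second as column headings, so a minimal well-formed file looks like the one written below (hypothetical data and headings; Python 2, like the module itself):

import os
import tempfile

fd, path = tempfile.mkstemp()
os.write(fd, 'run started 01/01/2013\ntime,NO2,O3\n1,12,30\n2,14,28\n')
os.close(fd)

cols, indexToName, lineNum, shortLines = analyzeFile(path, ',')
assert cols['NO2'] == ['12', '14']  # values stay strings until plotted
os.remove(path)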
pkainz/pylearn2
pylearn2/scripts/datasets/make_cifar100_patches_8x8.py
41
2282
""" This script makes a dataset of two million approximately whitened patches, extracted at random uniformly from the CIFAR-100 train dataset. This script is intended to reproduce the preprocessing used by Adam Coates et. al. in their work from the first half of 2011 on the CIFAR-10 and STL-10 datasets. """ from __future__ import print_function from pylearn2.utils import serial from pylearn2.datasets import preprocessing from pylearn2.datasets.cifar100 import CIFAR100 from pylearn2.utils import string data_dir = string.preprocess('${PYLEARN2_DATA_PATH}') print('Loading CIFAR-100 train dataset...') data = CIFAR100(which_set='train') print("Preparing output directory...") patch_dir = data_dir + '/cifar100/cifar100_patches_8x8' serial.mkdir(patch_dir) README = open(patch_dir + '/README', 'w') README.write(""" The .pkl files in this directory may be opened in python using cPickle, pickle, or pylearn2.serial.load. data.pkl contains a pylearn2 Dataset object defining an unlabeled dataset of 2 million 8x8 approximately whitened, contrast-normalized patches drawn uniformly at random from the CIFAR-100 train set. preprocessor.pkl contains a pylearn2 Pipeline object that was used to extract the patches and approximately whiten / contrast normalize them. This object is necessary when extracting features for supervised learning or test set classification, because the extracted features must be computed using inputs that have been whitened with the ZCA matrix learned and stored by this Pipeline. They were created with the pylearn2 script make_cifar100_patches.py. All other files in this directory, including this README, were created by the same script and are necessary for the other files to function correctly. """) README.close() print("Preprocessing the data...") pipeline = preprocessing.Pipeline() pipeline.items.append( preprocessing.ExtractPatches(patch_shape=(8, 8), num_patches=2*1000*1000)) pipeline.items.append( preprocessing.GlobalContrastNormalization(sqrt_bias=10., use_std=True)) pipeline.items.append(preprocessing.ZCA()) data.apply_preprocessor(preprocessor=pipeline, can_fit=True) data.use_design_loc(patch_dir + '/data.npy') serial.save(patch_dir + '/data.pkl', data) serial.save(patch_dir + '/preprocessor.pkl', pipeline)
bsd-3-clause
-7,435,166,557,441,024,000
-3,868,593,645,395,604,000
35.222222
78
0.786591
false
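A numpy-only sketch of the ZCA whitening step at the end of the pipeline above (random stand-in data; pylearn2's preprocessing.ZCA carries more options, this is just the core linear algebra):

import numpy as np

X = np.random.randn(1000, 64)  # 1000 flattened 8x8 patches
X = X - X.mean(axis=0)
cov = np.dot(X.T, X) / X.shape[0]
U, S, _ = np.linalg.svd(cov)
zca = np.dot(U * (1.0 / np.sqrt(S + 1e-2)), U.T)  # small bias keeps the inverse stable
X_white = np.dot(X, zca)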
MRCSDZ/subtitols
includes/fckeditor/editor/filemanager/browser/default/connectors/py/connector.py
11
22691
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2007 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Connector for Python. Tested With: Standard: Python 2.3.3 Zope: Zope Version: (Zope 2.8.1-final, python 2.3.5, linux2) Python Version: 2.3.5 (#4, Mar 10 2005, 01:40:25) [GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)] System Platform: linux2 """ """ Author Notes (04 December 2005): This module has gone through quite a few phases of change. Obviously, I am only supporting that part of the code that I use. Initially I had the upload directory as a part of zope (ie. uploading files directly into Zope), before realising that there were too many complex intricacies within Zope to deal with. Zope is one ugly piece of code. So I decided to complement Zope by an Apache server (which I had running anyway, and doing nothing). So I mapped all uploads from an arbitrary server directory to an arbitrary web directory. All the FCKeditor uploading occurred this way, and I didn't have to stuff around with fiddling with Zope objects and the like (which are terribly complex and something you don't want to do - trust me). Maybe a Zope expert can touch up the Zope components. In the end, I had FCKeditor loaded in Zope (probably a bad idea as well), and I replaced the connector.py with an alias to a server module. Right now, all Zope components will simple remain as is because I've had enough of Zope. See notes right at the end of this file for how I aliased out of Zope. Anyway, most of you probably wont use Zope, so things are pretty simple in that regard. Typically, SERVER_DIR is the root of WEB_DIR (not necessarily). Most definitely, SERVER_USERFILES_DIR points to WEB_USERFILES_DIR. """ import cgi import re import os import string """ escape Converts the special characters '<', '>', and '&'. RFC 1866 specifies that these characters be represented in HTML as &lt; &gt; and &amp; respectively. In Python 1.5 we use the new string.replace() function for speed. """ def escape(text, replace=string.replace): text = replace(text, '&', '&amp;') # must be done 1st text = replace(text, '<', '&lt;') text = replace(text, '>', '&gt;') text = replace(text, '"', '&quot;') return text """ getFCKeditorConnector Creates a new instance of an FCKeditorConnector, and runs it """ def getFCKeditorConnector(context=None): # Called from Zope. 
Passes the context through connector = FCKeditorConnector(context=context) return connector.run() """ FCKeditorRequest A wrapper around the request object Can handle normal CGI request, or a Zope request Extend as required """ class FCKeditorRequest(object): def __init__(self, context=None): if (context is not None): r = context.REQUEST else: r = cgi.FieldStorage() self.context = context self.request = r def isZope(self): if (self.context is not None): return True return False def has_key(self, key): return self.request.has_key(key) def get(self, key, default=None): value = None if (self.isZope()): value = self.request.get(key, default) else: if key in self.request.keys(): value = self.request[key].value else: value = default return value """ FCKeditorConnector The connector class """ class FCKeditorConnector(object): # Configuration for FCKEditor # can point to another server here, if linked correctly #WEB_HOST = "http://127.0.0.1/" WEB_HOST = "" SERVER_DIR = "/var/www/html/" WEB_USERFILES_FOLDER = WEB_HOST + "upload/" SERVER_USERFILES_FOLDER = SERVER_DIR + "upload/" # Allow access (Zope) __allow_access_to_unprotected_subobjects__ = 1 # Class Attributes parentFolderRe = re.compile("[\/][^\/]+[\/]?$") """ Constructor """ def __init__(self, context=None): # The given root path will NOT be shown to the user # Only the userFilesPath will be shown # Instance Attributes self.context = context self.request = FCKeditorRequest(context=context) self.rootPath = self.SERVER_DIR self.userFilesFolder = self.SERVER_USERFILES_FOLDER self.webUserFilesFolder = self.WEB_USERFILES_FOLDER # Enables / Disables the connector self.enabled = False # Set to True to enable this connector # These are instance variables self.zopeRootContext = None self.zopeUploadContext = None # Copied from php module =) self.allowedExtensions = { "File": None, "Image": None, "Flash": None, "Media": None } self.deniedExtensions = { "File": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ], "Image": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ], "Flash": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ], "Media": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ] } """ Zope specific functions """ def isZope(self): # The context object is the zope object if (self.context is not None): return True return False def getZopeRootContext(self): if self.zopeRootContext is None: self.zopeRootContext = self.context.getPhysicalRoot() return self.zopeRootContext def getZopeUploadContext(self): if self.zopeUploadContext is None: folderNames = self.userFilesFolder.split("/") c = self.getZopeRootContext() for folderName in folderNames: if (folderName <> ""): c = c[folderName] self.zopeUploadContext = c return self.zopeUploadContext """ Generic manipulation functions """ def getUserFilesFolder(self): return self.userFilesFolder def getWebUserFilesFolder(self): return self.webUserFilesFolder def getAllowedExtensions(self, resourceType): return self.allowedExtensions[resourceType] def getDeniedExtensions(self, resourceType): return 
self.deniedExtensions[resourceType] def removeFromStart(self, string, char): return string.lstrip(char) def removeFromEnd(self, string, char): return string.rstrip(char) def convertToXmlAttribute(self, value): if (value is None): value = "" return escape(value) def convertToPath(self, path): if (path[-1] <> "/"): return path + "/" else: return path def getUrlFromPath(self, resourceType, path): if (resourceType is None) or (resourceType == ''): url = "%s%s" % ( self.removeFromEnd(self.getUserFilesFolder(), '/'), path ) else: url = "%s%s%s" % ( self.getUserFilesFolder(), resourceType, path ) return url def getWebUrlFromPath(self, resourceType, path): if (resourceType is None) or (resourceType == ''): url = "%s%s" % ( self.removeFromEnd(self.getWebUserFilesFolder(), '/'), path ) else: url = "%s%s%s" % ( self.getWebUserFilesFolder(), resourceType, path ) return url def removeExtension(self, fileName): index = fileName.rindex(".") newFileName = fileName[0:index] return newFileName def getExtension(self, fileName): index = fileName.rindex(".") + 1 fileExtension = fileName[index:] return fileExtension def getParentFolder(self, folderPath): parentFolderPath = self.parentFolderRe.sub('', folderPath) return parentFolderPath """ serverMapFolder Purpose: works out the folder map on the server """ def serverMapFolder(self, resourceType, folderPath): # Get the resource type directory resourceTypeFolder = "%s%s/" % ( self.getUserFilesFolder(), resourceType ) # Ensure that the directory exists self.createServerFolder(resourceTypeFolder) # Return the resource type directory combined with the # required path return "%s%s" % ( resourceTypeFolder, self.removeFromStart(folderPath, '/') ) """ createServerFolder Purpose: physically creates a folder on the server """ def createServerFolder(self, folderPath): # Check if the parent exists parentFolderPath = self.getParentFolder(folderPath) if not(os.path.exists(parentFolderPath)): errorMsg = self.createServerFolder(parentFolderPath) if errorMsg is not None: return errorMsg # Check if this exists if not(os.path.exists(folderPath)): os.mkdir(folderPath) os.chmod(folderPath, 0755) errorMsg = None else: if os.path.isdir(folderPath): errorMsg = None else: raise "createServerFolder: Non-folder of same name already exists" return errorMsg """ getRootPath Purpose: returns the root path on the server """ def getRootPath(self): return self.rootPath """ setXmlHeaders Purpose: to prepare the headers for the xml to return """ def setXmlHeaders(self): #now = self.context.BS_get_now() #yesterday = now - 1 self.setHeader("Content-Type", "text/xml") #self.setHeader("Expires", yesterday) #self.setHeader("Last-Modified", now) #self.setHeader("Cache-Control", "no-store, no-cache, must-revalidate") self.printHeaders() return def setHeader(self, key, value): if (self.isZope()): self.context.REQUEST.RESPONSE.setHeader(key, value) else: print "%s: %s" % (key, value) return def printHeaders(self): # For non-Zope requests, we need to print an empty line # to denote the end of headers if (not(self.isZope())): print "" """ createXmlFooter Purpose: returns the xml header """ def createXmlHeader(self, command, resourceType, currentFolder): self.setXmlHeaders() s = "" # Create the XML document header s += """<?xml version="1.0" encoding="utf-8" ?>""" # Create the main connector node s += """<Connector command="%s" resourceType="%s">""" % ( command, resourceType ) # Add the current folder node s += """<CurrentFolder path="%s" url="%s" />""" % ( self.convertToXmlAttribute(currentFolder), 
self.convertToXmlAttribute( self.getWebUrlFromPath( resourceType, currentFolder ) ), ) return s """ createXmlFooter Purpose: returns the xml footer """ def createXmlFooter(self): s = """</Connector>""" return s """ sendError Purpose: in the event of an error, return an xml based error """ def sendError(self, number, text): self.setXmlHeaders() s = "" # Create the XML document header s += """<?xml version="1.0" encoding="utf-8" ?>""" s += """<Connector>""" s += """<Error number="%s" text="%s" />""" % (number, text) s += """</Connector>""" return s """ getFolders Purpose: command to recieve a list of folders """ def getFolders(self, resourceType, currentFolder): if (self.isZope()): return self.getZopeFolders(resourceType, currentFolder) else: return self.getNonZopeFolders(resourceType, currentFolder) def getZopeFolders(self, resourceType, currentFolder): # Open the folders node s = "" s += """<Folders>""" zopeFolder = self.findZopeFolder(resourceType, currentFolder) for (name, o) in zopeFolder.objectItems(["Folder"]): s += """<Folder name="%s" />""" % ( self.convertToXmlAttribute(name) ) # Close the folders node s += """</Folders>""" return s def getNonZopeFolders(self, resourceType, currentFolder): # Map the virtual path to our local server serverPath = self.serverMapFolder(resourceType, currentFolder) # Open the folders node s = "" s += """<Folders>""" for someObject in os.listdir(serverPath): someObjectPath = os.path.join(serverPath, someObject) if os.path.isdir(someObjectPath): s += """<Folder name="%s" />""" % ( self.convertToXmlAttribute(someObject) ) # Close the folders node s += """</Folders>""" return s """ getFoldersAndFiles Purpose: command to recieve a list of folders and files """ def getFoldersAndFiles(self, resourceType, currentFolder): if (self.isZope()): return self.getZopeFoldersAndFiles(resourceType, currentFolder) else: return self.getNonZopeFoldersAndFiles(resourceType, currentFolder) def getNonZopeFoldersAndFiles(self, resourceType, currentFolder): # Map the virtual path to our local server serverPath = self.serverMapFolder(resourceType, currentFolder) # Open the folders / files node folders = """<Folders>""" files = """<Files>""" for someObject in os.listdir(serverPath): someObjectPath = os.path.join(serverPath, someObject) if os.path.isdir(someObjectPath): folders += """<Folder name="%s" />""" % ( self.convertToXmlAttribute(someObject) ) elif os.path.isfile(someObjectPath): size = os.path.getsize(someObjectPath) files += """<File name="%s" size="%s" />""" % ( self.convertToXmlAttribute(someObject), os.path.getsize(someObjectPath) ) # Close the folders / files node folders += """</Folders>""" files += """</Files>""" # Return it s = folders + files return s def getZopeFoldersAndFiles(self, resourceType, currentFolder): folders = self.getZopeFolders(resourceType, currentFolder) files = self.getZopeFiles(resourceType, currentFolder) s = folders + files return s def getZopeFiles(self, resourceType, currentFolder): # Open the files node s = "" s += """<Files>""" zopeFolder = self.findZopeFolder(resourceType, currentFolder) for (name, o) in zopeFolder.objectItems(["File","Image"]): s += """<File name="%s" size="%s" />""" % ( self.convertToXmlAttribute(name), ((o.get_size() / 1024) + 1) ) # Close the files node s += """</Files>""" return s def findZopeFolder(self, resourceType, folderName): # returns the context of the resource / folder zopeFolder = self.getZopeUploadContext() folderName = self.removeFromStart(folderName, "/") folderName = self.removeFromEnd(folderName, "/") 
if (resourceType <> ""): try: zopeFolder = zopeFolder[resourceType] except: zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType) zopeFolder = zopeFolder[resourceType] if (folderName <> ""): folderNames = folderName.split("/") for folderName in folderNames: zopeFolder = zopeFolder[folderName] return zopeFolder """ createFolder Purpose: command to create a new folder """ def createFolder(self, resourceType, currentFolder): if (self.isZope()): return self.createZopeFolder(resourceType, currentFolder) else: return self.createNonZopeFolder(resourceType, currentFolder) def createZopeFolder(self, resourceType, currentFolder): # Find out where we are zopeFolder = self.findZopeFolder(resourceType, currentFolder) errorNo = 0 errorMsg = "" if self.request.has_key("NewFolderName"): newFolder = self.request.get("NewFolderName", None) zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder) else: errorNo = 102 error = """<Error number="%s" originalDescription="%s" />""" % ( errorNo, self.convertToXmlAttribute(errorMsg) ) return error def createNonZopeFolder(self, resourceType, currentFolder): errorNo = 0 errorMsg = "" if self.request.has_key("NewFolderName"): newFolder = self.request.get("NewFolderName", None) currentFolderPath = self.serverMapFolder( resourceType, currentFolder ) try: newFolderPath = currentFolderPath + newFolder errorMsg = self.createServerFolder(newFolderPath) if (errorMsg is not None): errorNo = 110 except: errorNo = 103 else: errorNo = 102 error = """<Error number="%s" originalDescription="%s" />""" % ( errorNo, self.convertToXmlAttribute(errorMsg) ) return error """ getFileName Purpose: helper function to extrapolate the filename """ def getFileName(self, filename): for splitChar in ["/", "\\"]: array = filename.split(splitChar) if (len(array) > 1): filename = array[-1] return filename """ fileUpload Purpose: command to upload files to server """ def fileUpload(self, resourceType, currentFolder): if (self.isZope()): return self.zopeFileUpload(resourceType, currentFolder) else: return self.nonZopeFileUpload(resourceType, currentFolder) def zopeFileUpload(self, resourceType, currentFolder, count=None): zopeFolder = self.findZopeFolder(resourceType, currentFolder) file = self.request.get("NewFile", None) fileName = self.getFileName(file.filename) fileNameOnly = self.removeExtension(fileName) fileExtension = self.getExtension(fileName).lower() if (count): nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension) else: nid = fileName title = nid try: zopeFolder.manage_addProduct['OFSP'].manage_addFile( id=nid, title=title, file=file.read() ) except: if (count): count += 1 else: count = 1 self.zopeFileUpload(resourceType, currentFolder, count) return def nonZopeFileUpload(self, resourceType, currentFolder): errorNo = 0 errorMsg = "" if self.request.has_key("NewFile"): # newFile has all the contents we need newFile = self.request.get("NewFile", "") # Get the file name newFileName = newFile.filename newFileNameOnly = self.removeExtension(newFileName) newFileExtension = self.getExtension(newFileName).lower() allowedExtensions = self.getAllowedExtensions(resourceType) deniedExtensions = self.getDeniedExtensions(resourceType) if (allowedExtensions is not None): # Check for allowed isAllowed = False if (newFileExtension in allowedExtensions): isAllowed = True elif (deniedExtensions is not None): # Check for denied isAllowed = True if (newFileExtension in deniedExtensions): isAllowed = False else: # No extension limitations 
isAllowed = True if (isAllowed): if (self.isZope()): # Upload into zope self.zopeFileUpload(resourceType, currentFolder) else: # Upload to operating system # Map the virtual path to the local server path currentFolderPath = self.serverMapFolder( resourceType, currentFolder ) i = 0 while (True): newFilePath = "%s%s" % ( currentFolderPath, newFileName ) if os.path.exists(newFilePath): i += 1 newFilePath = "%s%s(%s).%s" % ( currentFolderPath, newFileNameOnly, i, newFileExtension ) errorNo = 201 break else: fileHandle = open(newFilePath,'w') linecount = 0 while (1): #line = newFile.file.readline() line = newFile.readline() if not line: break fileHandle.write("%s" % line) linecount += 1 os.chmod(newFilePath, 0777) break else: newFileName = "Extension not allowed" errorNo = 203 else: newFileName = "No File" errorNo = 202 string = """ <script type="text/javascript"> window.parent.frames["frmUpload"].OnUploadCompleted(%s,"%s"); </script> """ % ( errorNo, newFileName.replace('"',"'") ) return string def run(self): s = "" try: # Check if this is disabled if not(self.enabled): return self.sendError(1, "This connector is disabled. Please check the connector configurations and try again") # Make sure we have valid inputs if not( (self.request.has_key("Command")) and (self.request.has_key("Type")) and (self.request.has_key("CurrentFolder")) ): return # Get command command = self.request.get("Command", None) # Get resource type resourceType = self.request.get("Type", None) # folder syntax must start and end with "/" currentFolder = self.request.get("CurrentFolder", None) if (currentFolder[-1] <> "/"): currentFolder += "/" if (currentFolder[0] <> "/"): currentFolder = "/" + currentFolder # Check for invalid paths if (".." in currentFolder): return self.sendError(102, "") # File upload doesn't have to return XML, so intercept # her:e if (command == "FileUpload"): return self.fileUpload(resourceType, currentFolder) # Begin XML s += self.createXmlHeader(command, resourceType, currentFolder) # Execute the command if (command == "GetFolders"): f = self.getFolders elif (command == "GetFoldersAndFiles"): f = self.getFoldersAndFiles elif (command == "CreateFolder"): f = self.createFolder else: f = None if (f is not None): s += f(resourceType, currentFolder) s += self.createXmlFooter() except Exception, e: s = "ERROR: %s" % e return s # Running from command line if __name__ == '__main__': # To test the output, uncomment the standard headers #print "Content-Type: text/html" #print "" print getFCKeditorConnector() """ Running from zope, you will need to modify this connector. If you have uploaded the FCKeditor into Zope (like me), you need to move this connector out of Zope, and replace the "connector" with an alias as below. The key to it is to pass the Zope context in, as we then have a like to the Zope context. ## Script (Python) "connector.py" ##bind container=container ##bind context=context ##bind namespace= ##bind script=script ##bind subpath=traverse_subpath ##parameters=*args, **kws ##title=ALIAS ## import Products.connector as connector return connector.getFCKeditorConnector(context=context).run() """
gpl-3.0
-5,616,444,477,575,959,000
-5,517,581,160,454,060,000
26.905732
193
0.655326
false
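getNonZopeFoldersAndFiles above answers the GetFoldersAndFiles command; a condensed sketch of the response shape it builds (names here are not XML-escaped, unlike the convertToXmlAttribute escaping in the real connector):

import os

def folder_listing_xml(server_path):
    folders, files = [], []
    for name in sorted(os.listdir(server_path)):
        full = os.path.join(server_path, name)
        if os.path.isdir(full):
            folders.append('<Folder name="%s" />' % name)
        elif os.path.isfile(full):
            files.append('<File name="%s" size="%s" />' % (name, os.path.getsize(full)))
    return '<Folders>%s</Folders><Files>%s</Files>' % (''.join(folders), ''.join(files))

print(folder_listing_xml('.'))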
sguazt/prometheus
tools/giws/datatypes/stringDataGiws.py
1
10567
#!/usr/bin/python -u # Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU # # Sylvestre LEDRU - <sylvestre.ledru@inria.fr> <sylvestre@ledru.info> # # This software is a computer program whose purpose is to generate C++ wrapper # for Java objects/methods. # # This software is governed by the CeCILL license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL license and that you accept its terms. # # For more information, see the file COPYING from datatypes.dataGiws import dataGiws from configGiws import configGiws from JNIFrameWork import JNIFrameWork class stringDataGiws(dataGiws): nativeType="char *" callMethod="CallObjectMethod" callStaticMethod="CallStaticObjectMethod" temporaryVariableName="myStringBuffer" def getTypeSignature(self): return "Ljava/lang/String;" def getJavaTypeSyntax(self): if self.isArray(): return "jobjectArray" else: return "jstring" def getRealJavaType(self): return "java.lang.String" def getDescription(self): return "Java String" def getNativeType(self, ForceNotArray=False, UseConst=False): if self.isArray(): if UseConst: pointer = " const*" else: pointer = "*" return ("char" + pointer) + pointer * self.getDimensionArray() else: if UseConst: pointer = " const*" else: pointer = "*" return "char" + pointer def __errorMemoryString(self, detachThread): # Management of the error when not enought memory to create the string if configGiws().getThrowsException(): errorMgntMemBis="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName()) else: errorMgntMemBis="""std::cerr << "Could not convert C string to Java UTF string, memory full." << std::endl;%s exit(EXIT_FAILURE);"""%(detachThread) return errorMgntMemBis def specificPreProcessing(self, parameter, detachThread): """ Overrides the preprocessing of the array """ name=parameter.getName() # Management of the error when not enought memory to create the string if configGiws().getThrowsException(): errorMgntMem="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName()) else: errorMgntMem="""std::cerr << "Could not allocate Java string array, memory full." 
<< std::endl;%s exit(EXIT_FAILURE);"""%(detachThread) errorMgntMemBis = self.__errorMemoryString(detachThread) if self.isArray(): if self.getDimensionArray() == 1: return """ // create java array of strings. jobjectArray %s_ = curEnv->NewObjectArray( %sSize, stringArrayClass, NULL); if (%s_ == NULL) { %s } // convert each char * to java strings and fill the java array. for ( int i = 0; i < %sSize; i++) { jstring TempString = curEnv->NewStringUTF( %s[i] ); if (TempString == NULL) { %s } curEnv->SetObjectArrayElement( %s_, i, TempString); // avoid keeping reference on too many strings curEnv->DeleteLocalRef(TempString); }"""%(name,name,name,errorMgntMem,name,name,errorMgntMemBis,name) else: return """ // create java array of array of strings. jobjectArray %s_ = curEnv->NewObjectArray( %sSize, curEnv->FindClass("[Ljava/lang/String;"), NULL); if (%s_ == NULL) { %s } for ( int i = 0; i < %sSize; i++) { jobjectArray %sLocal = curEnv->NewObjectArray( %sSizeCol, stringArrayClass, NULL); // convert each char * to java strings and fill the java array. for ( int j = 0; j < %sSizeCol; j++) { jstring TempString = curEnv->NewStringUTF( %s[i][j] ); if (TempString == NULL) { %s } curEnv->SetObjectArrayElement( %sLocal, j, TempString); // avoid keeping reference on too many strings curEnv->DeleteLocalRef(TempString); } curEnv->SetObjectArrayElement(%s_, i, %sLocal); curEnv->DeleteLocalRef(%sLocal); }"""%(name,name,name,errorMgntMem,name,name,name,name,name,errorMgntMemBis,name,name,name,name) else: # Need to store is for the post processing (delete) self.parameterName=name tempName=name+"_" return """ jstring %s = curEnv->NewStringUTF( %s ); if (%s != NULL && %s == NULL) { %s } """%(tempName,name,name,tempName,errorMgntMemBis) def specificPostProcessing(self, detachThread): """ Called when we are returning a string or an array of string """ # We are doing an exception check here JUST in this case because # in methodGiws::__createMethodBody we usually do it at the end # of the method just after deleting the variable # but when dealing with string, in this method, we are calling some # methods which override the "exception engine" which drive the JNI # engine crazy. 
str=JNIFrameWork().getExceptionCheckProfile(detachThread) str=str+"if (res != NULL) { " if self.isArray(): strCommon="" strDeclaration="" if configGiws().getDisableReturnSize()==True: strCommon+="int lenRow;" else: # The size of the array is returned as output argument of the function strDeclaration="*" strCommon+=""" %s lenRow = curEnv->GetArrayLength(res); """%(strDeclaration) self.temporaryVariableName="arrayOfString" if self.getDimensionArray() == 1: str+=strCommon+""" char **arrayOfString; arrayOfString = new char *[%slenRow]; for (jsize i = 0; i < %slenRow; i++){ jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(res, i)); const char *tempString = curEnv->GetStringUTFChars(resString, 0); arrayOfString[i] = new char[strlen(tempString) + 1]; strcpy(arrayOfString[i], tempString); curEnv->ReleaseStringUTFChars(resString, tempString); curEnv->DeleteLocalRef(resString); } """%(strDeclaration, strDeclaration) return str else: if configGiws().getDisableReturnSize()==True: str+="int lenCol;" str+=strCommon+""" char ***arrayOfString; arrayOfString = new char **[%slenRow]; for (jsize i = 0; i < %slenRow; i++){ /* Line of the array */ jobjectArray resStringLine = reinterpret_cast<jobjectArray>(curEnv->GetObjectArrayElement(res, i)); %slenCol = curEnv->GetArrayLength(resStringLine); arrayOfString[i]=new char*[%slenCol]; for (jsize j = 0; j < %slenCol; j++){ jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(resStringLine, j)); const char *tempString = curEnv->GetStringUTFChars(resString, 0); arrayOfString[i][j] = new char[strlen(tempString) + 1]; strcpy(arrayOfString[i][j], tempString); curEnv->ReleaseStringUTFChars(resString, tempString); curEnv->DeleteLocalRef(resString); } curEnv->DeleteLocalRef(resStringLine); } """%(strDeclaration, strDeclaration, strDeclaration, strDeclaration, strDeclaration) return str else: if hasattr(self,"parameterName"): str+="""curEnv->DeleteLocalRef(%s);"""%(self.parameterName+"_") str=str+""" const char *tempString = curEnv->GetStringUTFChars(res, 0); char * %s = new char[strlen(tempString) + 1]; strcpy(%s, tempString); curEnv->ReleaseStringUTFChars(res, tempString); curEnv->DeleteLocalRef(res); """%(self.temporaryVariableName, self.temporaryVariableName) return str def getReturnSyntax(self): str="" if self.isArray(): str = str + """ curEnv->DeleteLocalRef(res); return arrayOfString; """ else: str = str + """ return %s; """%(self.temporaryVariableName) str = str + """ } else { curEnv->DeleteLocalRef(res); return NULL; }""" return str
apache-2.0
-1,128,846,055,101,684,600
3,910,947,141,404,864,500
39.48659
126
0.578972
false
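stringDataGiws is a code generator: its methods splice parameter names into C++/JNI source through % formatting. A stripped-down sketch of that pattern (the template text is abbreviated and hypothetical, not giws' real output):

TEMPLATE = ('jstring %(tmp)s = curEnv->NewStringUTF(%(name)s);\n'
            'if (%(name)s != NULL && %(tmp)s == NULL) { /* out of memory */ }')

def emit_string_preprocessing(name):
    # mirror the name/name_ convention used by specificPreProcessing above
    return TEMPLATE % {'name': name, 'tmp': name + '_'}

print(emit_string_preprocessing('myArg'))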
vitan/django
tests/template_tests/syntax_tests/test_invalid_string.py
46
2257
from django.test import SimpleTestCase from ..utils import setup class InvalidStringTests(SimpleTestCase): @setup({'invalidstr01': '{{ var|default:"Foo" }}'}) def test_invalidstr01(self): output = self.engine.render_to_string('invalidstr01') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, 'Foo') @setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'}) def test_invalidstr02(self): output = self.engine.render_to_string('invalidstr02') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'}) def test_invalidstr03(self): output = self.engine.render_to_string('invalidstr03') self.assertEqual(output, '') @setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'}) def test_invalidstr04(self): output = self.engine.render_to_string('invalidstr04') self.assertEqual(output, 'No') @setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'}) def test_invalidstr04_2(self): output = self.engine.render_to_string('invalidstr04_2') self.assertEqual(output, 'Yes') @setup({'invalidstr05': '{{ var }}'}) def test_invalidstr05(self): output = self.engine.render_to_string('invalidstr05') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'invalidstr06': '{{ var.prop }}'}) def test_invalidstr06(self): output = self.engine.render_to_string('invalidstr06') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'}) def test_invalidstr07(self): output = self.engine.render_to_string('invalidstr07') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '')
bsd-3-clause
-5,320,674,666,403,778,000
-7,674,253,460,274,479,000
36
91
0.603013
false
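The branches above hinge on whether the engine was configured with string_if_invalid; the same behaviour is visible outside the test harness (standalone django.template usage, Django 1.8+ assumed):

from django.template import Context, Engine

engine = Engine(string_if_invalid='INVALID')
template = engine.from_string('{{ var|default:"Foo" }}')
# The missing variable is replaced before |default can run, hence 'INVALID'.
print(template.render(Context({})))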
ypid/series60-remote
pc/lib/log.py
1
1490
# -*- coding: utf-8 -*- # Copyright (c) 2008 - 2009 Lukas Hetzenecker <LuHe@gmx.at> from PyQt4.QtCore import * from PyQt4.QtGui import * import logging class QtStreamHandler(logging.Handler): def __init__(self, parent, main): logging.Handler.__init__(self) self.parent = parent self.main = main self.textWidget = parent self.formater = logging.Formatter("%(message)s") def setFormatter(self, format): self.formater = format def createLock(self): self.mutex = QMutex() def acquire(self): self.mutex.lock() def release(self): self.mutex.unlock() def emit(self,record): self.textWidget.appendPlainText(self.formater.format(record)) self.textWidget.moveCursor(QTextCursor.StartOfLine) self.textWidget.ensureCursorVisible() class QtOutput(object): def __init__(self, parent, out=None, color=None): self.textWidget = parent self.out = out self.color = color def write(self, m): self.textWidget.moveCursor(QTextCursor.End) if self.color: tc = self.textWidget.textColor() self.textWidget.setTextColor(self.color) self.textWidget.insertPlainText( m ) if self.color: self.textWidget.setTextColor(tc) if self.out: if isinstance(m, unicode): self.out.write(m.encode("utf8")) else: self.out.write(m)
gpl-2.0
1,034,282,174,071,800,600
-3,847,416,188,419,577,300
25.140351
69
0.606711
false
hmen89/odoo
addons/gamification/models/goal.py
24
25742
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF from openerp.tools.safe_eval import safe_eval from openerp.tools.translate import _ import logging import time from datetime import date, datetime, timedelta _logger = logging.getLogger(__name__) class gamification_goal_definition(osv.Model): """Goal definition A goal definition contains the way to evaluate an objective Each module wanting to be able to set goals to the users needs to create a new gamification_goal_definition """ _name = 'gamification.goal.definition' _description = 'Gamification goal definition' def _get_suffix(self, cr, uid, ids, field_name, arg, context=None): res = dict.fromkeys(ids, '') for goal in self.browse(cr, uid, ids, context=context): if goal.suffix and not goal.monetary: res[goal.id] = goal.suffix elif goal.monetary: # use the current user's company currency user = self.pool.get('res.users').browse(cr, uid, uid, context) if goal.suffix: res[goal.id] = "%s %s" % (user.company_id.currency_id.symbol, goal.suffix) else: res[goal.id] = user.company_id.currency_id.symbol else: res[goal.id] = "" return res _columns = { 'name': fields.char('Goal Definition', required=True, translate=True), 'description': fields.text('Goal Description'), 'monetary': fields.boolean('Monetary Value', help="The target and current value are defined in the company currency."), 'suffix': fields.char('Suffix', help="The unit of the target and current values", translate=True), 'full_suffix': fields.function(_get_suffix, type="char", string="Full Suffix", help="The currency and suffix field"), 'computation_mode': fields.selection([ ('manually', 'Recorded manually'), ('count', 'Automatic: number of records'), ('sum', 'Automatic: sum on a field'), ('python', 'Automatic: execute a specific Python code'), ], string="Computation Mode", help="Defined how will be computed the goals. The result of the operation will be stored in the field 'Current'.", required=True), 'display_mode': fields.selection([ ('progress', 'Progressive (using numerical values)'), ('boolean', 'Exclusive (done or not-done)'), ], string="Displayed as", required=True), 'model_id': fields.many2one('ir.model', string='Model', help='The model object for the field to evaluate'), 'field_id': fields.many2one('ir.model.fields', string='Field to Sum', help='The field containing the value to evaluate'), 'field_date_id': fields.many2one('ir.model.fields', string='Date Field', help='The date to use for the time period evaluated'), 'domain': fields.char("Filter Domain", help="Domain for filtering records. 
General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.", required=True),
        'batch_mode': fields.boolean('Batch Mode', help="Evaluate the expression in batch instead of once for each user"),
        'batch_distinctive_field': fields.many2one('ir.model.fields',
            string="Distinctive field for batch user",
            help="In batch mode, this indicates which field distinguishes one user from the other, e.g. user_id, partner_id..."),
        'batch_user_expression': fields.char("Evaluated expression for batch mode",
            help="The value to compare with the distinctive field. The expression can contain reference to 'user' which is a browse record of the current user, e.g. user.id, user.partner_id.id..."),
        'compute_code': fields.text('Python Code',
            help="Python code to be executed for each user. 'result' should contain the new current value. The evaluated user can be accessed through object.user_id."),
        'condition': fields.selection([
                ('higher', 'The higher the better'),
                ('lower', 'The lower the better')
            ],
            string='Goal Performance',
            help='A goal is considered as completed when the current value is compared to the value to reach',
            required=True),
        'action_id': fields.many2one('ir.actions.act_window', string="Action",
            help="The action that will be called to update the goal value."),
        'res_id_field': fields.char("ID Field of user",
            help="The field name on the user profile (res.users) containing the value for res_id for action."),
    }

    _defaults = {
        'condition': 'higher',
        'computation_mode': 'manually',
        'domain': "[]",
        'monetary': False,
        'display_mode': 'progress',
    }

    def number_following(self, cr, uid, model_name="mail.thread", context=None):
        """Return the number of 'model_name' objects the user is following

        The model specified in 'model_name' must inherit from mail.thread
        """
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context)

    def _check_domain_validity(self, cr, uid, ids, context=None):
        # take admin as should always be present
        superuser = self.pool['res.users'].browse(cr, uid, SUPERUSER_ID, context=context)
        for definition in self.browse(cr, uid, ids, context=context):
            if definition.computation_mode not in ('count', 'sum'):
                continue

            obj = self.pool[definition.model_id.model]
            try:
                domain = safe_eval(definition.domain, {'user': superuser})
                # dummy search to make sure the domain is valid
                obj.search(cr, uid, domain, context=context, count=True)
            except (ValueError, SyntaxError), e:
                msg = e.message or (e.msg + '\n' + e.text)
                raise osv.except_osv(_('Error!'), _("The domain for the definition %s seems incorrect, please check it.\n\n%s" % (definition.name, msg)))
        return True

    def create(self, cr, uid, vals, context=None):
        res_id = super(gamification_goal_definition, self).create(cr, uid, vals, context=context)
        if vals.get('computation_mode') in ('count', 'sum'):
            self._check_domain_validity(cr, uid, [res_id], context=context)

        return res_id

    def write(self, cr, uid, ids, vals, context=None):
        res = super(gamification_goal_definition, self).write(cr, uid, ids, vals, context=context)
        if vals.get('computation_mode', 'count') in ('count', 'sum') and (vals.get('domain') or vals.get('model_id')):
            self._check_domain_validity(cr, uid, ids, context=context)

        return res


class gamification_goal(osv.Model):
    """Goal instance for a user

    An individual goal for a user on a specified
time period"""
    _name = 'gamification.goal'
    _description = 'Gamification goal instance'

    def _get_completion(self, cr, uid, ids, field_name, arg, context=None):
        """Return the percentage of completeness of the goal, between 0 and 100"""
        res = dict.fromkeys(ids, 0.0)
        for goal in self.browse(cr, uid, ids, context=context):
            if goal.definition_condition == 'higher':
                if goal.current >= goal.target_goal:
                    res[goal.id] = 100.0
                else:
                    res[goal.id] = round(100.0 * goal.current / goal.target_goal, 2)
            elif goal.current < goal.target_goal:
                # a goal 'lower than' has only two values possible: 0 or 100%
                res[goal.id] = 100.0
            else:
                res[goal.id] = 0.0
        return res

    def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
        goal_definition = self.pool.get('gamification.goal.definition')
        if not definition_id:
            return {'value': {'definition_id': False}}
        goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
        return {'value': {'computation_mode': goal_definition.computation_mode, 'definition_condition': goal_definition.condition}}

    _columns = {
        'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"),
        'user_id': fields.many2one('res.users', string='User', required=True),
        'line_id': fields.many2one('gamification.challenge.line', string='Challenge Line', ondelete="cascade"),
        'challenge_id': fields.related('line_id', 'challenge_id',
            string="Challenge",
            type='many2one',
            relation='gamification.challenge',
            store=True, readonly=True,
            help="Challenge that generated the goal, assign challenge to users to generate goals with a value in this field."),
        'start_date': fields.date('Start Date'),
        'end_date': fields.date('End Date'),  # no start and end = always active
        'target_goal': fields.float('To Reach',
            required=True,
            track_visibility='always'),  # no goal = global index
        'current': fields.float('Current Value', required=True, track_visibility='always'),
        'completeness': fields.function(_get_completion, type='float', string='Completeness'),
        'state': fields.selection([
                ('draft', 'Draft'),
                ('inprogress', 'In progress'),
                ('reached', 'Reached'),
                ('failed', 'Failed'),
                ('canceled', 'Canceled'),
            ],
            string='State',
            required=True, track_visibility='always'),
        'to_update': fields.boolean('To update'),
        'closed': fields.boolean('Closed goal', help="These goals will not be recomputed."),

        'computation_mode': fields.related('definition_id', 'computation_mode', type='char', string="Computation mode"),
        'remind_update_delay': fields.integer('Remind delay',
            help="The number of days after which the user assigned to a manual goal will be reminded. Never reminded if no value is specified."),
        'last_update': fields.date('Last Update',
            help="In case of manual goal, reminders are sent if the goal has not been updated for a while (defined in challenge).
Ignored in case of non-manual goal or goal not linked to a challenge."),

        'definition_description': fields.related('definition_id', 'description', type='char', string='Definition Description', readonly=True),
        'definition_condition': fields.related('definition_id', 'condition', type='char', string='Definition Condition', readonly=True),
        'definition_suffix': fields.related('definition_id', 'full_suffix', type="char", string="Suffix", readonly=True),
        'definition_display': fields.related('definition_id', 'display_mode', type="char", string="Display Mode", readonly=True),
    }

    _defaults = {
        'current': 0,
        'state': 'draft',
        'start_date': fields.date.today,
    }

    _order = 'create_date desc, end_date desc, definition_id, id'

    def _check_remind_delay(self, cr, uid, goal, context=None):
        """Verify if a goal has not been updated for some time and send a
        reminder message if needed.

        :return: data to write on the goal object
        """
        if goal.remind_update_delay and goal.last_update:
            delta_max = timedelta(days=goal.remind_update_delay)
            last_update = datetime.strptime(goal.last_update, DF).date()
            if date.today() - last_update > delta_max:
                # generate a remind report
                temp_obj = self.pool.get('email.template')
                template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_goal_reminder', context)
                body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.goal', goal.id, context=context)
                self.pool['mail.thread'].message_post(cr, uid, 0, body=body_html, partner_ids=[goal.user_id.partner_id.id], context=context, subtype='mail.mt_comment')
                return {'to_update': True}
        return {}

    def update(self, cr, uid, ids, context=None):
        """Update the goals to recompute values and change states.

        If a manual goal is not updated for enough time, the user will be
        reminded to do so (done only once, in 'inprogress' state).
        If a goal reaches the target value, the status is set to reached.
        If the end date is passed (at least +1 day, time not considered) without
        the target value being reached, the goal is set as failed."""
        if context is None:
            context = {}
        commit = context.get('commit_gamification', False)

        goals_by_definition = {}
        all_goals = {}
        for goal in self.browse(cr, uid, ids, context=context):
            if goal.state in ('draft', 'canceled'):
                # draft or canceled goals should not be recomputed
                continue

            goals_by_definition.setdefault(goal.definition_id, []).append(goal)
            all_goals[goal.id] = goal

        for definition, goals in goals_by_definition.items():
            goals_to_write = dict((goal.id, {}) for goal in goals)
            if definition.computation_mode == 'manually':
                for goal in goals:
                    goals_to_write[goal.id].update(self._check_remind_delay(cr, uid, goal, context))
            elif definition.computation_mode == 'python':
                # TODO batch execution
                for goal in goals:
                    # execute the chosen method
                    cxt = {
                        'self': self.pool.get('gamification.goal'),
                        'object': goal,
                        'pool': self.pool,
                        'cr': cr,
                        'context': dict(context),  # copy context to prevent side-effects of eval
                        'uid': uid,
                        'date': date, 'datetime': datetime, 'timedelta': timedelta, 'time': time
                    }
                    code = definition.compute_code.strip()
                    safe_eval(code, cxt, mode="exec", nocopy=True)
                    # the result of the evaluated code is put in the 'result' local variable, propagated to the context
                    result = cxt.get('result')
                    if result is not None and type(result) in (float, int, long):
                        if result != goal.current:
                            goals_to_write[goal.id]['current'] = result
                    else:
                        _logger.exception(_('Invalid return content from the evaluation of code for definition %s' % definition.name))

            else:  # count or sum
                obj = self.pool.get(definition.model_id.model)
                field_date_name = definition.field_date_id and definition.field_date_id.name or False

                if definition.computation_mode == 'count' and definition.batch_mode:
                    # batch mode, trying to do as much as possible in one request
                    general_domain = safe_eval(definition.domain)
                    field_name = definition.batch_distinctive_field.name
                    subqueries = {}
                    for goal in goals:
                        start_date = field_date_name and goal.start_date or False
                        end_date = field_date_name and goal.end_date or False
                        subqueries.setdefault((start_date, end_date), {}).update({goal.id: safe_eval(definition.batch_user_expression, {'user': goal.user_id})})

                    # the global query should be split by time periods (especially for recurrent goals)
                    for (start_date, end_date), query_goals in subqueries.items():
                        subquery_domain = list(general_domain)
                        subquery_domain.append((field_name, 'in', list(set(query_goals.values()))))
                        if start_date:
                            subquery_domain.append((field_date_name, '>=', start_date))
                        if end_date:
                            subquery_domain.append((field_date_name, '<=', end_date))

                        if field_name == 'id':
                            # grouping on id does not work and is similar to search anyway
                            user_ids = obj.search(cr, uid, subquery_domain, context=context)
                            user_values = [{'id': user_id, 'id_count': 1} for user_id in user_ids]
                        else:
                            user_values = obj.read_group(cr, uid, subquery_domain, fields=[field_name], groupby=[field_name], context=context)
                        # user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3}, ...]
for goal in [g for g in goals if g.id in query_goals.keys()]: for user_value in user_values: queried_value = field_name in user_value and user_value[field_name] or False if isinstance(queried_value, tuple) and len(queried_value) == 2 and isinstance(queried_value[0], (int, long)): queried_value = queried_value[0] if queried_value == query_goals[goal.id]: new_value = user_value.get(field_name+'_count', goal.current) if new_value != goal.current: goals_to_write[goal.id]['current'] = new_value else: for goal in goals: # eval the domain with user replaced by goal user object domain = safe_eval(definition.domain, {'user': goal.user_id}) # add temporal clause(s) to the domain if fields are filled on the goal if goal.start_date and field_date_name: domain.append((field_date_name, '>=', goal.start_date)) if goal.end_date and field_date_name: domain.append((field_date_name, '<=', goal.end_date)) if definition.computation_mode == 'sum': field_name = definition.field_id.name # TODO for master: group on user field in batch mode res = obj.read_group(cr, uid, domain, [field_name], [], context=context) new_value = res and res[0][field_name] or 0.0 else: # computation mode = count new_value = obj.search(cr, uid, domain, context=context, count=True) # avoid useless write if the new value is the same as the old one if new_value != goal.current: goals_to_write[goal.id]['current'] = new_value for goal_id, value in goals_to_write.items(): if not value: continue goal = all_goals[goal_id] # check goal target reached if (goal.definition_id.condition == 'higher' and value.get('current', goal.current) >= goal.target_goal) \ or (goal.definition_id.condition == 'lower' and value.get('current', goal.current) <= goal.target_goal): value['state'] = 'reached' # check goal failure elif goal.end_date and fields.date.today() > goal.end_date: value['state'] = 'failed' value['closed'] = True if value: self.write(cr, uid, [goal.id], value, context=context) if commit: cr.commit() return True def action_start(self, cr, uid, ids, context=None): """Mark a goal as started. This should only be used when creating goals manually (in draft state)""" self.write(cr, uid, ids, {'state': 'inprogress'}, context=context) return self.update(cr, uid, ids, context=context) def action_reach(self, cr, uid, ids, context=None): """Mark a goal as reached. If the target goal condition is not met, the state will be reset to In Progress at the next goal update until the end date.""" return self.write(cr, uid, ids, {'state': 'reached'}, context=context) def action_fail(self, cr, uid, ids, context=None): """Set the state of the goal to failed. A failed goal will be ignored in future checks.""" return self.write(cr, uid, ids, {'state': 'failed'}, context=context) def action_cancel(self, cr, uid, ids, context=None): """Reset the completion after setting a goal as reached or failed. 
        This is only the current state; if the date and/or target criteria match
        the conditions for a change of state, this will be applied at the next
        goal update."""
        return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)

    def create(self, cr, uid, vals, context=None):
        """Override the create method to set a 'no_remind_goal' key in the context"""
        context = dict(context or {})
        context['no_remind_goal'] = True
        return super(gamification_goal, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Override the write method to update the last_update field to today

        If the current value is changed and the report frequency is set to On
        change, a report is generated
        """
        if context is None:
            context = {}
        vals['last_update'] = fields.date.today()
        result = super(gamification_goal, self).write(cr, uid, ids, vals, context=context)
        for goal in self.browse(cr, uid, ids, context=context):
            if goal.state != "draft" and ('definition_id' in vals or 'user_id' in vals):
                # avoid drag&drop in kanban view
                raise osv.except_osv(_('Error!'), _('Can not modify the configuration of a started goal'))

            if vals.get('current'):
                if 'no_remind_goal' in context:
                    # new goals should not be reported
                    continue

                if goal.challenge_id and goal.challenge_id.report_message_frequency == 'onchange':
                    self.pool.get('gamification.challenge').report_progress(cr, SUPERUSER_ID, goal.challenge_id, users=[goal.user_id], context=context)
        return result

    def get_action(self, cr, uid, goal_id, context=None):
        """Get the ir.action related to updating the goal

        In case of a manual goal, should return a wizard to update the value
        :return: action description in a dictionary
        """
        goal = self.browse(cr, uid, goal_id, context=context)

        if goal.definition_id.action_id:
            # open the action linked to the goal
            action = goal.definition_id.action_id.read()[0]

            if goal.definition_id.res_id_field:
                current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
                action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user})

                # if one element to display, should see it in form mode if possible
                action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views']
            return action

        if goal.computation_mode == 'manually':
            # open a wizard window to update the value manually
            action = {
                'name': _("Update %s") % goal.definition_id.name,
                'id': goal_id,
                'type': 'ir.actions.act_window',
                'views': [[False, 'form']],
                'target': 'new',
                'context': {'default_goal_id': goal_id, 'default_current': goal.current},
                'res_model': 'gamification.goal.wizard'
            }
            return action

        return False
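# A sketch of the kind of snippet a 'python' computation-mode definition can
# store in compute_code: update() evaluates it with safe_eval, exposing the
# goal as 'object' (plus pool/cr/uid and date helpers) and reading the new
# value back from a local named 'result'. The model queried below is
# hypothetical.
#
#   result = pool.get('res.partner').search(
#       cr, uid, [('user_id', '=', object.user_id.id)], count=True)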
agpl-3.0
-8,952,250,249,933,449,000
1,648,172,572,663,019,800
51.75
228
0.578316
false
superdesk/superdesk-core
apps/ldap/users_service.py
2
1273
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license import logging from eve.utils import config from superdesk.users import UsersService, UsersResource, is_admin # NOQA logger = logging.getLogger(__name__) class ADUsersService(UsersService): """ Service class for UsersResource and should be used when AD is active. """ readonly_fields = ["email", "first_name", "last_name"] def on_fetched(self, doc): super().on_fetched(doc) for document in doc["_items"]: self.set_defaults(document) def on_fetched_item(self, doc): super().on_fetched_item(doc) self.set_defaults(doc) def set_defaults(self, doc): """Set the readonly fields for LDAP user. :param dict doc: user """ readonly = {} user_attributes = config.LDAP_USER_ATTRIBUTES for value in user_attributes.values(): if value in self.readonly_fields: readonly[value] = True doc["_readonly"] = readonly
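# For illustration only: set_defaults() expects config.LDAP_USER_ATTRIBUTES to
# map LDAP attribute names to superdesk user fields, e.g. a (hypothetical)
# mapping like {'givenName': 'first_name', 'sn': 'last_name', 'mail': 'email'};
# every mapped field that also appears in readonly_fields ends up flagged in
# the '_readonly' dict of the fetched user document.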
agpl-3.0
6,408,156,241,233,689,000
7,494,771,954,923,490,000
26.673913
73
0.646504
false
vlachoudis/sl4a
python/src/Lib/plat-irix5/IN.py
66
3097
# Generated by h2py from /usr/include/netinet/in.h from warnings import warnpy3k warnpy3k("the IN module has been removed in Python 3.0", stacklevel=2) del warnpy3k # Included from sys/endian.h LITTLE_ENDIAN = 1234 BIG_ENDIAN = 4321 PDP_ENDIAN = 3412 BYTE_ORDER = BIG_ENDIAN BYTE_ORDER = LITTLE_ENDIAN def ntohl(x): return (x) def ntohs(x): return (x) def htonl(x): return (x) def htons(x): return (x) def htonl(x): return ntohl(x) def htons(x): return ntohs(x) # Included from sys/bsd_types.h # Included from sys/mkdev.h ONBITSMAJOR = 7 ONBITSMINOR = 8 OMAXMAJ = 0x7f OMAXMIN = 0xff NBITSMAJOR = 14 NBITSMINOR = 18 MAXMAJ = 0x1ff MAXMIN = 0x3ffff OLDDEV = 0 NEWDEV = 1 MKDEV_VER = NEWDEV def major(dev): return __major(MKDEV_VER, dev) def minor(dev): return __minor(MKDEV_VER, dev) # Included from sys/select.h FD_SETSIZE = 1024 NBBY = 8 IPPROTO_IP = 0 IPPROTO_ICMP = 1 IPPROTO_IGMP = 2 IPPROTO_GGP = 3 IPPROTO_ENCAP = 4 IPPROTO_TCP = 6 IPPROTO_EGP = 8 IPPROTO_PUP = 12 IPPROTO_UDP = 17 IPPROTO_IDP = 22 IPPROTO_TP = 29 IPPROTO_XTP = 36 IPPROTO_HELLO = 63 IPPROTO_ND = 77 IPPROTO_EON = 80 IPPROTO_RAW = 255 IPPROTO_MAX = 256 IPPORT_RESERVED = 1024 IPPORT_USERRESERVED = 5000 IPPORT_MAXPORT = 65535 def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0) IN_CLASSA_NET = 0xff000000 IN_CLASSA_NSHIFT = 24 IN_CLASSA_HOST = 0x00ffffff IN_CLASSA_MAX = 128 def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000) IN_CLASSB_NET = 0xffff0000 IN_CLASSB_NSHIFT = 16 IN_CLASSB_HOST = 0x0000ffff IN_CLASSB_MAX = 65536 def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000) IN_CLASSC_NET = 0xffffff00 IN_CLASSC_NSHIFT = 8 IN_CLASSC_HOST = 0x000000ff def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000) IN_CLASSD_NET = 0xf0000000 IN_CLASSD_NSHIFT = 28 IN_CLASSD_HOST = 0x0fffffff def IN_MULTICAST(i): return IN_CLASSD(i) def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000) def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000) INADDR_ANY = 0x00000000 INADDR_BROADCAST = 0xffffffff INADDR_LOOPBACK = 0x7F000001 INADDR_UNSPEC_GROUP = 0xe0000000 INADDR_ALLHOSTS_GROUP = 0xe0000001 INADDR_MAX_LOCAL_GROUP = 0xe00000ff INADDR_NONE = 0xffffffff IN_LOOPBACKNET = 127 IP_OPTIONS = 1 IP_MULTICAST_IF = 2 IP_MULTICAST_TTL = 3 IP_MULTICAST_LOOP = 4 IP_ADD_MEMBERSHIP = 5 IP_DROP_MEMBERSHIP = 6 IP_HDRINCL = 7 IP_TOS = 8 IP_TTL = 9 IP_RECVOPTS = 10 IP_RECVRETOPTS = 11 IP_RECVDSTADDR = 12 IP_RETOPTS = 13 IP_OPTIONS = 1 IP_HDRINCL = 2 IP_TOS = 3 IP_TTL = 4 IP_RECVOPTS = 5 IP_RECVRETOPTS = 6 IP_RECVDSTADDR = 7 IP_RETOPTS = 8 IP_MULTICAST_IF = 20 IP_MULTICAST_TTL = 21 IP_MULTICAST_LOOP = 22 IP_ADD_MEMBERSHIP = 23 IP_DROP_MEMBERSHIP = 24 IRIX4_IP_OPTIONS = 1 IRIX4_IP_MULTICAST_IF = 2 IRIX4_IP_MULTICAST_TTL = 3 IRIX4_IP_MULTICAST_LOOP = 4 IRIX4_IP_ADD_MEMBERSHIP = 5 IRIX4_IP_DROP_MEMBERSHIP = 6 IRIX4_IP_HDRINCL = 7 IRIX4_IP_TOS = 8 IRIX4_IP_TTL = 9 IRIX4_IP_RECVOPTS = 10 IRIX4_IP_RECVRETOPTS = 11 IRIX4_IP_RECVDSTADDR = 12 IRIX4_IP_RETOPTS = 13 IP_DEFAULT_MULTICAST_TTL = 1 IP_DEFAULT_MULTICAST_LOOP = 1 IP_MAX_MEMBERSHIPS = 20
apache-2.0
-1,894,795,743,841,337,900
-4,562,558,420,246,764,000
20.506944
71
0.720375
false
l2isbad/netdata
collectors/python.d.plugin/python_modules/pyyaml2/composer.py
4
4952
# SPDX-License-Identifier: MIT

__all__ = ['Composer', 'ComposerError']

from error import MarkedYAMLError
from events import *
from nodes import *

class ComposerError(MarkedYAMLError):
    pass

class Composer(object):

    def __init__(self):
        self.anchors = {}

    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()

        # Are there more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        # Drop the STREAM-START event.
        self.get_event()

        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)

        # Drop the STREAM-END event.
        self.get_event()

        return document

    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        if self.check_event(AliasEvent):
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor.encode('utf-8'), event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurrence"
                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
                        "second occurrence", event.start_mark)
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping",
            #            start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
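# For context, PyYAML combines this mixin with the other pipeline stages in
# its loader classes, roughly along these lines:
#
#   class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
#       def __init__(self, stream):
#           Reader.__init__(self, stream)
#           Scanner.__init__(self)
#           Parser.__init__(self)
#           Composer.__init__(self)
#           Constructor.__init__(self)
#           Resolver.__init__(self)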
gpl-3.0
3,689,163,470,374,846,500
5,222,179,736,983,390,000
34.371429
89
0.582795
false
usc-isi/essex-baremetal-support
nova/tests/notifier/test_list_notifier.py
5
3462
# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import nova from nova import log as logging import nova.notifier.api import nova.notifier.log_notifier import nova.notifier.no_op_notifier from nova.notifier import list_notifier from nova import test class NotifierListTestCase(test.TestCase): """Test case for notifications""" def setUp(self): super(NotifierListTestCase, self).setUp() list_notifier._reset_drivers() # Mock log to add one to exception_count when log.exception is called def mock_exception(cls, *args): self.exception_count += 1 self.exception_count = 0 list_notifier_log = logging.getLogger('nova.notifier.list_notifier') self.stubs.Set(list_notifier_log, "exception", mock_exception) # Mock no_op notifier to add one to notify_count when called. def mock_notify(cls, *args): self.notify_count += 1 self.notify_count = 0 self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify) # Mock log_notifier to raise RuntimeError when called. def mock_notify2(cls, *args): raise RuntimeError("Bad notifier.") self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) def tearDown(self): list_notifier._reset_drivers() super(NotifierListTestCase, self).tearDown() def test_send_notifications_successfully(self): self.flags(notification_driver='nova.notifier.list_notifier', list_notifier_drivers=['nova.notifier.no_op_notifier', 'nova.notifier.no_op_notifier']) nova.notifier.api.notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 2) self.assertEqual(self.exception_count, 0) def test_send_notifications_with_errors(self): self.flags(notification_driver='nova.notifier.list_notifier', list_notifier_drivers=['nova.notifier.no_op_notifier', 'nova.notifier.log_notifier']) nova.notifier.api.notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 1) self.assertEqual(self.exception_count, 1) def test_when_driver_fails_to_import(self): self.flags(notification_driver='nova.notifier.list_notifier', list_notifier_drivers=['nova.notifier.no_op_notifier', 'nova.notifier.logo_notifier', 'fdsjgsdfhjkhgsfkj']) nova.notifier.api.notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.exception_count, 2) self.assertEqual(self.notify_count, 1)
apache-2.0
-1,393,897,615,586,246,000
-2,722,301,875,023,570,400
40.214286
78
0.642403
false
virt-who/virt-who
virtwho/manager/subscriptionmanager/subscriptionmanager.py
1
16260
# -*- coding: utf-8 -*- from __future__ import print_function """ Module for communication with subscription-manager, part of virt-who Copyright (C) 2011 Radek Novacek <rnovacek@redhat.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ import os import json from six.moves.http_client import BadStatusLine from six import string_types import rhsm.connection as rhsm_connection import rhsm.certificate as rhsm_certificate import rhsm.config as rhsm_config from virtwho.config import NotSetSentinel from virtwho.manager import Manager, ManagerError, ManagerFatalError, ManagerThrottleError from virtwho.virt import AbstractVirtReport from virtwho.util import generate_correlation_id class SubscriptionManagerError(ManagerError): pass class SubscriptionManagerUnregisteredError(ManagerFatalError): pass # Mapping between strings returned from getJob and report statuses STATE_MAPPING = { 'FINISHED': AbstractVirtReport.STATE_FINISHED, 'CANCELED': AbstractVirtReport.STATE_CANCELED, 'FAILED': AbstractVirtReport.STATE_FAILED, 'RUNNING': AbstractVirtReport.STATE_PROCESSING, 'WAITING': AbstractVirtReport.STATE_PROCESSING, 'CREATED': AbstractVirtReport.STATE_PROCESSING, } class NamedOptions(object): """ Object used for compatibility with RHSM """ pass class SubscriptionManager(Manager): sm_type = "sam" """ Class for interacting subscription-manager. """ def __init__(self, logger, options): self.logger = logger self.options = options self.cert_uuid = None self.rhsm_config = None self.cert_file = None self.key_file = None self.readConfig() self.connection = None self.correlation_id = generate_correlation_id() def readConfig(self): """ Parse rhsm.conf in order to obtain consumer certificate and key paths. """ self.rhsm_config = rhsm_config.initConfig( rhsm_config.DEFAULT_CONFIG_PATH) consumer_cert_dir = self.rhsm_config.get("rhsm", "consumerCertDir") cert = 'cert.pem' key = 'key.pem' self.cert_file = os.path.join(consumer_cert_dir, cert) self.key_file = os.path.join(consumer_cert_dir, key) def _check_owner_lib(self, kwargs, config): """ Try to check values of env and owner. These values has to be equal to values obtained from Satellite server. 
        :param kwargs: dictionary possibly containing valid username and
            password used for connection to rhsm
        :param config: Configuration of virt-who
        :return: None
        """

        if config is None:
            return

        # Check 'owner' and 'env' only in situation, when these values
        # are set and rhsm_username and rhsm_password are not set
        if 'username' not in kwargs and 'password' not in kwargs and \
                'owner' in config.keys() and 'env' in config.keys():
            pass
        else:
            return

        uuid = self.uuid()
        consumer = self.connection.getConsumer(uuid)
        if 'environment' in consumer:
            environment = consumer['environment']
        else:
            return

        if environment:
            environment_name = environment['name']
            owner = self.connection.getOwner(uuid)
            owner_id = owner['key']
            if config['owner'] != owner_id:
                raise ManagerError(
                    "Cannot send data to: %s, because owner from configuration: %s is different" %
                    (owner_id, config['owner'])
                )
            if config['env'] != environment_name:
                raise ManagerError(
                    "Cannot send data to: %s, because Satellite env: %s differs from configuration: %s" %
                    (owner_id, environment_name, config['env'])
                )

    def _connect(self, config=None):
        """ Connect to the subscription-manager. """

        kwargs = {
            'host': self.rhsm_config.get('server', 'hostname'),
            'ssl_port': int(self.rhsm_config.get('server', 'port')),
            'handler': self.rhsm_config.get('server', 'prefix'),
            'proxy_hostname': self.rhsm_config.get('server', 'proxy_hostname'),
            'proxy_port': self.rhsm_config.get('server', 'proxy_port'),
            'proxy_user': self.rhsm_config.get('server', 'proxy_user'),
            'proxy_password': self.rhsm_config.get('server', 'proxy_password'),
            'insecure': self.rhsm_config.get('server', 'insecure')
        }

        kwargs_to_config = {
            'host': 'rhsm_hostname',
            'ssl_port': 'rhsm_port',
            'handler': 'rhsm_prefix',
            'proxy_hostname': 'rhsm_proxy_hostname',
            'proxy_port': 'rhsm_proxy_port',
            'proxy_user': 'rhsm_proxy_user',
            'proxy_password': 'rhsm_proxy_password',
            'insecure': 'rhsm_insecure'
        }

        rhsm_username = None
        rhsm_password = None

        if config:
            try:
                rhsm_username = config['rhsm_username']
                rhsm_password = config['rhsm_password']
            except KeyError:
                pass

            if rhsm_username == NotSetSentinel:
                rhsm_username = None
            if rhsm_password == NotSetSentinel:
                rhsm_password = None

            # Testing for None is necessary, it might be an empty string
            for key, value in kwargs.items():
                try:
                    from_config = config[kwargs_to_config[key]]
                    if from_config is not NotSetSentinel and from_config is \
                            not None:
                        if key == 'ssl_port':
                            from_config = int(from_config)
                        kwargs[key] = from_config
                except KeyError:
                    continue

        if rhsm_username and rhsm_password:
            self.logger.debug("Authenticating with RHSM username %s", rhsm_username)
            kwargs['username'] = rhsm_username
            kwargs['password'] = rhsm_password
        else:
            self.logger.debug("Authenticating with certificate: %s", self.cert_file)
            if not os.access(self.cert_file, os.R_OK):
                raise SubscriptionManagerUnregisteredError(
                    "Unable to read certificate, system is not registered or you are not root")
            kwargs['cert_file'] = self.cert_file
            kwargs['key_file'] = self.key_file

        self.logger.info("X-Correlation-ID: %s", self.correlation_id)
        if self.correlation_id:
            kwargs['correlation_id'] = self.correlation_id

        self.connection = rhsm_connection.UEPConnection(**kwargs)
        try:
            if not self.connection.ping()['result']:
                raise SubscriptionManagerError(
                    "Unable to obtain status from server, UEPConnection is likely not usable."
) except rhsm_connection.RateLimitExceededException as e: raise ManagerThrottleError(e.retry_after) except BadStatusLine: raise ManagerError("Communication with subscription manager interrupted") self._check_owner_lib(kwargs, config) return self.connection def sendVirtGuests(self, report, options=None): """ Update consumer facts with info about virtual guests. `guests` is a list of `Guest` instances (or it children). """ guests = report.guests self._connect() # Sort the list guests.sort(key=lambda item: item.uuid) serialized_guests = [guest.toDict() for guest in guests] self.logger.info('Sending update in guests lists for config ' '"%s": %d guests found', report.config.name, len(guests)) self.logger.debug("Domain info: %s", json.dumps(serialized_guests, indent=4)) # Send list of guest uuids to the server try: self.connection.updateConsumer(self.uuid(), guest_uuids=serialized_guests, hypervisor_id=report.hypervisor_id) except rhsm_connection.GoneException: raise ManagerError("Communication with subscription manager failed: consumer no longer exists") except rhsm_connection.RateLimitExceededException as e: raise ManagerThrottleError(e.retry_after) report.state = AbstractVirtReport.STATE_FINISHED def hypervisorCheckIn(self, report, options=None): """ Send hosts to guests mapping to subscription manager. """ connection = self._connect(report.config) is_async = self._is_rhsm_server_async(report, connection) serialized_mapping = self._hypervisor_mapping(report, is_async, connection) self.logger.debug("Host-to-guest mapping being sent to '{owner}': {mapping}".format( owner=report.config['owner'], mapping=json.dumps(serialized_mapping, indent=4))) # All subclasses of ConfigSection use dictionary like notation, # but RHSM uses attribute like notation if options: named_options = NamedOptions() for key, value in options['global'].items(): setattr(named_options, key, value) else: named_options = None try: try: result = self.connection.hypervisorCheckIn( report.config['owner'], report.config['env'], serialized_mapping, options=named_options) # pylint:disable=unexpected-keyword-arg except TypeError: # This is temporary workaround until the options parameter gets implemented # in python-rhsm self.logger.debug( "hypervisorCheckIn method in python-rhsm doesn't understand options parameter, ignoring" ) result = self.connection.hypervisorCheckIn(report.config['owner'], report.config['env'], serialized_mapping) except BadStatusLine: raise ManagerError("Communication with subscription manager interrupted") except rhsm_connection.RateLimitExceededException as e: raise ManagerThrottleError(e.retry_after) except rhsm_connection.GoneException: raise ManagerError("Communication with subscription manager failed: consumer no longer exists") except rhsm_connection.ConnectionException as e: if hasattr(e, 'code'): raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e))) raise ManagerError("Communication with subscription manager failed: %s" % str(e)) if is_async is True: report.state = AbstractVirtReport.STATE_CREATED report.job_id = result['id'] else: report.state = AbstractVirtReport.STATE_FINISHED return result def _is_rhsm_server_async(self, report, connection=None): """ Check if server has capability 'hypervisor_async'. 
""" if connection is None: self._connect(report.config) self.logger.debug("Checking if server has capability 'hypervisor_async'") is_async = hasattr(self.connection, 'has_capability') and self.connection.has_capability('hypervisors_async') if is_async: self.logger.debug("Server has capability 'hypervisors_async'") else: self.logger.debug("Server does not have 'hypervisors_async' capability") return is_async def _hypervisor_mapping(self, report, is_async, connection=None): """ Return mapping of hypervisor """ if connection is None: self._connect(report.config) mapping = report.association serialized_mapping = {} ids_seen = [] if is_async: hosts = [] # Transform the mapping into the async version for hypervisor in mapping['hypervisors']: if hypervisor.hypervisorId in ids_seen: self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. " "Only one will be recorded at the server." % hypervisor.hypervisorId) hosts.append(hypervisor.toDict()) ids_seen.append(hypervisor.hypervisorId) serialized_mapping = {'hypervisors': hosts} else: # Reformat the data from the mapping to make it fit with # the old api. for hypervisor in mapping['hypervisors']: if hypervisor.hypervisorId in ids_seen: self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. " "Only one will be recorded at the server." % hypervisor.hypervisorId) guests = [g.toDict() for g in hypervisor.guestIds] serialized_mapping[hypervisor.hypervisorId] = guests ids_seen.append(hypervisor.hypervisorId) return serialized_mapping def check_report_state(self, report): # BZ 1554228 job_id = str(report.job_id) self._connect(report.config) self.logger.debug('Checking status of job %s', job_id) try: result = self.connection.getJob(job_id) except BadStatusLine: raise ManagerError("Communication with subscription manager interrupted") except rhsm_connection.RateLimitExceededException as e: raise ManagerThrottleError(e.retry_after) except rhsm_connection.ConnectionException as e: if hasattr(e, 'code'): raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e))) raise ManagerError("Communication with subscription manager failed: %s" % str(e)) state = STATE_MAPPING.get(result['state'], AbstractVirtReport.STATE_FAILED) report.state = state if state not in (AbstractVirtReport.STATE_FINISHED, AbstractVirtReport.STATE_CANCELED, AbstractVirtReport.STATE_FAILED): self.logger.debug('Job %s not finished', job_id) else: # log completed job status result_data = result.get('resultData', {}) if not result_data: self.logger.warning("Job status report without resultData: %s", result) return if isinstance(result_data, string_types): self.logger.warning("Job status report encountered the following error: %s", result_data) return for fail in result_data.get('failedUpdate', []): self.logger.error("Error during update list of guests: %s", str(fail)) self.logger.debug("Number of mappings unchanged: %d", len(result_data.get('unchanged', []))) self.logger.info("Mapping for config \"%s\" updated", report.config.name) def uuid(self): """ Read consumer certificate and get consumer UUID from it. """ if not self.cert_uuid: try: certificate = rhsm_certificate.create_from_file(self.cert_file) self.cert_uuid = certificate.subject["CN"] except Exception as e: raise SubscriptionManagerError("Unable to open certificate %s (%s):" % (self.cert_file, str(e))) return self.cert_uuid
gpl-2.0
7,533,400,177,034,917,000
3,927,710,464,754,566,700
40.692308
124
0.610701
false
sestrella/ansible
lib/ansible/modules/network/ios/ios_facts.py
12
7398
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: ios_facts version_added: "2.2" author: - "Peter Sprygada (@privateip)" - "Sumit Jaiswal (@justjais)" short_description: Collect facts from remote devices running Cisco IOS description: - Collects a base set of device facts from a remote device that is running IOS. This module prepends all of the base network fact keys with C(ansible_net_<fact>). The facts module will always collect a base set of facts from the device and can enable or disable collection of additional facts. extends_documentation_fragment: ios notes: - Tested against IOS 15.6 options: gather_subset: description: - When supplied, this argument restricts the facts collected to a given subset. - Possible values for this argument include C(all), C(min), C(hardware), C(config), and C(interfaces). - Specify a list of values to include a larger subset. - Use a value with an initial C(!) to collect all facts except that subset. required: false default: '!config' gather_network_resources: description: - When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all and the resources like interfaces, vlans etc. Can specify a list of values to include a larger subset. Values can also be used with an initial C(M(!)) to specify that a specific subset should not be collected. Valid subsets are 'all', 'interfaces', 'l2_interfaces', 'vlans', 'lag_interfaces', 'lacp', 'lacp_interfaces', 'lldp_global', 'lldp_interfaces', 'l3_interfaces'. 
version_added: "2.9" """ EXAMPLES = """ - name: Gather all legacy facts ios_facts: gather_subset: all - name: Gather only the config and default facts ios_facts: gather_subset: - config - name: Do not gather hardware facts ios_facts: gather_subset: - "!hardware" - name: Gather legacy and resource facts ios_facts: gather_subset: all gather_network_resources: all - name: Gather only the interfaces resource facts and no legacy facts ios_facts: gather_subset: - '!all' - '!min' gather_network_resources: - interfaces - name: Gather interfaces resource and minimal legacy facts ios_facts: gather_subset: min gather_network_resources: interfaces - name: Gather L2 interfaces resource and minimal legacy facts ios_facts: gather_subset: min gather_network_resources: l2_interfaces - name: Gather L3 interfaces resource and minimal legacy facts ios_facts: gather_subset: min gather_network_resources: l3_interfaces """ RETURN = """ ansible_net_gather_subset: description: The list of fact subsets collected from the device returned: always type: list ansible_net_gather_network_resources: description: The list of fact for network resource subsets collected from the device returned: when the resource is configured type: list # default ansible_net_model: description: The model name returned from the device returned: always type: str ansible_net_serialnum: description: The serial number of the remote device returned: always type: str ansible_net_version: description: The operating system version running on the remote device returned: always type: str ansible_net_iostype: description: The operating system type (IOS or IOS-XE) running on the remote device returned: always type: str ansible_net_hostname: description: The configured hostname of the device returned: always type: str ansible_net_image: description: The image file the device is running returned: always type: str ansible_net_stacked_models: description: The model names of each device in the stack returned: when multiple devices are configured in a stack type: list ansible_net_stacked_serialnums: description: The serial numbers of each device in the stack returned: when multiple devices are configured in a stack type: list ansible_net_api: description: The name of the transport returned: always type: str ansible_net_python_version: description: The Python version Ansible controller is using returned: always type: str # hardware ansible_net_filesystems: description: All file system names available on the device returned: when hardware is configured type: list ansible_net_filesystems_info: description: A hash of all file systems containing info about each file system (e.g. 
free and total space) returned: when hardware is configured type: dict ansible_net_memfree_mb: description: The available free memory on the remote device in Mb returned: when hardware is configured type: int ansible_net_memtotal_mb: description: The total memory on the remote device in Mb returned: when hardware is configured type: int # config ansible_net_config: description: The current active config from the device returned: when config is configured type: str # interfaces ansible_net_all_ipv4_addresses: description: All IPv4 addresses configured on the device returned: when interfaces is configured type: list ansible_net_all_ipv6_addresses: description: All IPv6 addresses configured on the device returned: when interfaces is configured type: list ansible_net_interfaces: description: A hash of all interfaces running on the system returned: when interfaces is configured type: dict ansible_net_neighbors: description: - The list of CDP and LLDP neighbors from the remote device. If both, CDP and LLDP neighbor data is present on one port, CDP is preferred. returned: when interfaces is configured type: dict """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.ios.argspec.facts.facts import FactsArgs from ansible.module_utils.network.ios.facts.facts import Facts from ansible.module_utils.network.ios.ios import ios_argument_spec def main(): """ Main entry point for AnsibleModule """ argument_spec = FactsArgs.argument_spec argument_spec.update(ios_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = ['default value for `gather_subset` ' 'will be changed to `min` from `!config` v2.11 onwards'] result = Facts(module).get_facts() ansible_facts, additional_warnings = result warnings.extend(additional_warnings) module.exit_json(ansible_facts=ansible_facts, warnings=warnings) if __name__ == '__main__': main()
gpl-3.0
-4,559,564,579,752,026,000
6,655,834,033,632,513,000
30.480851
108
0.726007
false
naiquevin/jinger
jinger/test/test_site.py
1
1107
# import unittest
import os

from jinger.site import create_empty_site, createdir
from jinger.test import DIR_PLAYGROUND, JingerPlaygroundTest


class SiteTest(JingerPlaygroundTest):

    def test_create_dir(self):
        mysite = createdir(DIR_PLAYGROUND, 'mysite')
        self.assertTrue(os.path.exists(mysite))
        # TODO: check that if the dir already exists, it raises an Exception

    def test_create_empty_site(self):
        create_empty_site('mysite', DIR_PLAYGROUND)
        newsite = os.path.join(DIR_PLAYGROUND, 'mysite')
        self.assertTrue(os.path.exists(newsite))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'templates')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'public')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'config.json')))

        create_empty_site('myothersite', DIR_PLAYGROUND, '_source', 'www')
        newsite = os.path.join(DIR_PLAYGROUND, 'myothersite')
        self.assertTrue(os.path.exists(newsite))
        self.assertTrue(os.path.exists(os.path.join(newsite, '_source')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'www')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'config.json')))
mit
3,914,855,785,401,083,000
-9,140,641,838,131,633,000
34.709677
74
0.661247
false
Blitzen/oauthlib
oauthlib/oauth1/rfc5849/endpoints/resource.py
42
7083
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals

import logging

from .base import BaseEndpoint
from .. import errors

log = logging.getLogger(__name__)


class ResourceEndpoint(BaseEndpoint):
    """An endpoint responsible for protecting resources.

    Typical use is to instantiate with a request validator and invoke the
    ``validate_protected_resource_request`` in a decorator around a view
    function. If the request is valid, invoke and return the response of the
    view. If invalid, create and return an error response directly from the
    decorator.

    See :doc:`/oauth1/validator` for details on which validator methods to implement
    for this endpoint.

    An example decorator::

        from functools import wraps
        from your_validator import your_validator
        from oauthlib.oauth1 import ResourceEndpoint
        endpoint = ResourceEndpoint(your_validator)

        def require_oauth(realms=None):
            def decorator(f):
                @wraps(f)
                def wrapper(request, *args, **kwargs):
                    v, r = endpoint.validate_protected_resource_request(
                            request.url,
                            http_method=request.method,
                            body=request.data,
                            headers=request.headers,
                            realms=realms or [])
                    if v:
                        return f(*args, **kwargs)
                    else:
                        return abort(403)
    """

    def validate_protected_resource_request(self, uri, http_method='GET',
                                            body=None, headers=None, realms=None):
        """Create a request token response, with a new request token if valid.

        :param uri: The full URI of the token request.
        :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
        :param body: The request body as a string.
        :param headers: The request headers as a dict.
        :param realms: A list of realms the resource is protected under.
                       This will be supplied to the ``validate_realms``
                       method of the request validator.
        :returns: A tuple of 2 elements.
                  1. True if valid, False otherwise.
                  2. An oauthlib.common.Request object.
        """
        try:
            request = self._create_request(uri, http_method, body, headers)
        except errors.OAuth1Error:
            return False, None

        try:
            self._check_transport_security(request)
            self._check_mandatory_parameters(request)
        except errors.OAuth1Error:
            return False, request

        if not request.resource_owner_key:
            return False, request

        if not self.request_validator.check_access_token(
                request.resource_owner_key):
            return False, request

        if not self.request_validator.validate_timestamp_and_nonce(
                request.client_key, request.timestamp, request.nonce, request,
                access_token=request.resource_owner_key):
            return False, request

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid client credentials.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy client is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable client enumeration
        valid_client = self.request_validator.validate_client_key(
            request.client_key, request)
        if not valid_client:
            request.client_key = self.request_validator.dummy_client

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid or expired token.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy token is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable resource owner enumeration
        valid_resource_owner = self.request_validator.validate_access_token(
            request.client_key, request.resource_owner_key, request)
        if not valid_resource_owner:
            request.resource_owner_key = self.request_validator.dummy_access_token

        # Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
        # However they could be seen as a scope or realm to which the
        # client has access and as such every client should be checked
        # to ensure it is authorized access to that scope or realm.
        # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
        #
        # Note that early exit would enable client realm access enumeration.
        #
        # The require_realm indicates this is the first step in the OAuth
        # workflow where a client requests access to a specific realm.
        # This first step (obtaining request token) need not require a realm
        # and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
        #
        # Clients obtaining an access token will not supply a realm and it will
        # not be checked. Instead the previously requested realm should be
        # transferred from the request token to the access token.
        #
        # Access to protected resources will always validate the realm but note
        # that the realm is now tied to the access token and not provided by
        # the client.
        valid_realm = self.request_validator.validate_realms(request.client_key,
                request.resource_owner_key, request, uri=request.uri,
                realms=realms)

        valid_signature = self._check_signature(request)

        # We delay checking validity until the very end, using dummy values for
        # calculations and fetching secrets/keys to ensure the flow of every
        # request remains almost identical regardless of whether valid values
        # have been supplied. This ensures near constant time execution and
        # prevents malicious users from guessing sensitive information
        v = all((valid_client, valid_resource_owner, valid_realm,
                 valid_signature))
        if not v:
            log.info("[Failure] request verification failed.")
            log.info("Valid client: %s", valid_client)
            log.info("Valid token: %s", valid_resource_owner)
            log.info("Valid realm: %s", valid_realm)
            log.info("Valid signature: %s", valid_signature)
        return v, request
bsd-3-clause
8,441,079,162,012,733,000
6,483,965,690,396,756,000
43.829114
114
0.619794
false
sunlightlabs/openstates
scrapers/md/events.py
2
4320
import pytz
import dateutil.parser
import datetime
from urllib.parse import urlsplit, parse_qs
from utils import LXMLMixin

from openstates.scrape import Scraper, Event


class MDEventScraper(Scraper, LXMLMixin):
    _TZ = pytz.timezone("US/Eastern")
    chambers = {"upper": "Senate", "lower": ""}
    date_format = "%B %d, %Y"

    def scrape(self, chamber=None, start=None, end=None):
        if start is None:
            start_date = datetime.datetime.now().strftime(self.date_format)
        else:
            start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
            start_date = start_date.strftime(self.date_format)

        # default to 30 days if no end
        if end is None:
            dtdelta = datetime.timedelta(days=30)
            end_date = datetime.datetime.now() + dtdelta
            end_date = end_date.strftime(self.date_format)
        else:
            end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
            end_date = end_date.strftime(self.date_format)

        url = "http://mgaleg.maryland.gov/webmga/frmHearingSchedule.aspx?&range={} - {}"
        url = url.format(start_date, end_date)

        page = self.lxmlize(url)

        if chamber is None:
            yield from self.scrape_chamber(page, "upper")
            yield from self.scrape_chamber(page, "lower")
        else:
            yield from self.scrape_chamber(page, chamber)

    def scrape_chamber(self, page, chamber):
        xpath = '//div[@id="ContentPlaceHolder1_div{}SingleColumn"]/div'.format(
            self.chambers[chamber]
        )
        com = None
        rows = page.xpath(xpath)
        for row in rows:
            css = row.xpath("@class")[0]
            if "CommitteeBanner" in css:
                com = row.xpath("string(.//h3/a[1])").strip()
            elif "CmteInfo" in css or "DayPanelSingleColumn" in css:
                yield from self.parse_div(row, chamber, com)

    def parse_div(self, row, chamber, com):
        cal_link = row.xpath('.//a[.//span[@id="calendarmarker"]]/@href')[0]
        # event_date = row.xpath('string(.//div[contains(@class,"ItemDate")])').strip()
        title, location, start_date, end_date = self.parse_gcal(cal_link)

        event = Event(
            start_date=start_date, end_date=end_date, name=title, location_name=location
        )

        event.add_source("http://mgaleg.maryland.gov/webmga/frmHearingSchedule.aspx")

        for item in row.xpath('.//div[@class="col-xs-12a Item"]'):
            description = item.xpath("string(.)").strip()
            agenda = event.add_agenda_item(description=description)

        for item in row.xpath('.//div[contains(@class,"ItemContainer")]/a'):
            description = item.xpath("string(.)").strip()
            agenda = event.add_agenda_item(description=description)

            event.add_document(
                description,
                item.xpath("@href")[0],
                media_type="application/pdf",
                on_duplicate="ignore",
            )

        for item in row.xpath(
            './/div[contains(@class,"ItemContainer")]' '[./div[@class="col-xs-1 Item"]]'
        ):
            description = item.xpath("string(.)").strip()
            agenda = event.add_agenda_item(description=description)

            bill = item.xpath('.//div[@class="col-xs-1 Item"]/a/text()')[0].strip()
            agenda.add_bill(bill)

        video = row.xpath('.//a[./span[@class="OnDemand"]]')
        if video:
            event.add_media_link(
                "Video of Hearing", video[0].xpath("@href")[0], "text/html"
            )

        if "subcommittee" in title.lower():
            subcom = title.split("-")[0].strip()
            event.add_participant(subcom, type="committee", note="host")
        else:
            event.add_participant(com, type="committee", note="host")
        yield event

    # Due to the convoluted HTML, it's easier just to parse the google cal links
    def parse_gcal(self, url):
        query = urlsplit(url).query
        params = parse_qs(query)

        dates = params["dates"][0].split("/")
        start_date = self._TZ.localize(dateutil.parser.parse(dates[0]))
        end_date = self._TZ.localize(dateutil.parser.parse(dates[1]))

        return params["text"][0], params["location"][0], start_date, end_date
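The `parse_gcal` trick above is easiest to see on a concrete URL. A standalone sketch; the sample calendar link is a fabricated illustration of the shape Google Calendar links take, not a real MGA URL:

from urllib.parse import urlsplit, parse_qs

# Hypothetical Google Calendar link of the form parse_gcal expects.
url = ('https://www.google.com/calendar/event?action=TEMPLATE'
       '&text=Budget+and+Taxation'
       '&dates=20200305T130000/20200305T150000'
       '&location=3+West+Miller+Senate+Office+Building')

params = parse_qs(urlsplit(url).query)
print(params['text'][0])              # 'Budget and Taxation'
print(params['dates'][0].split('/'))  # ['20200305T130000', '20200305T150000']
print(params['location'][0])          # '3 West Miller Senate Office Building'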
gpl-3.0
-5,492,322,645,800,202,000
6,550,939,836,687,325,000
36.894737
88
0.577315
false
sodafree/backend
build/ipython/build/lib.linux-i686-2.7/IPython/core/tests/test_oinspect.py
3
8346
"""Tests for the object inspection functionality. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team. # # Distributed under the terms of the BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Stdlib imports import os import re # Third-party imports import nose.tools as nt # Our own imports from .. import oinspect from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic, register_line_magic, register_cell_magic, register_line_cell_magic) from IPython.external.decorator import decorator from IPython.utils import py3compat #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- inspector = oinspect.Inspector() ip = get_ipython() #----------------------------------------------------------------------------- # Local utilities #----------------------------------------------------------------------------- # WARNING: since this test checks the line number where a function is # defined, if any code is inserted above, the following line will need to be # updated. Do NOT insert any whitespace between the next line and the function # definition below. THIS_LINE_NUMBER = 48 # Put here the actual number of this line def test_find_source_lines(): nt.assert_equal(oinspect.find_source_lines(test_find_source_lines), THIS_LINE_NUMBER+1) # A couple of utilities to ensure these tests work the same from a source or a # binary install def pyfile(fname): return os.path.normcase(re.sub('.py[co]$', '.py', fname)) def match_pyfiles(f1, f2): nt.assert_equal(pyfile(f1), pyfile(f2)) def test_find_file(): match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__)) def test_find_file_decorated1(): @decorator def noop1(f): def wrapper(): return f(*a, **kw) return wrapper @noop1 def f(x): "My docstring" match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__)) nt.assert_equal(f.__doc__, "My docstring") def test_find_file_decorated2(): @decorator def noop2(f, *a, **kw): return f(*a, **kw) @noop2 def f(x): "My docstring 2" match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__)) nt.assert_equal(f.__doc__, "My docstring 2") def test_find_file_magic(): run = ip.find_line_magic('run') nt.assert_not_equal(oinspect.find_file(run), None) # A few generic objects we can then inspect in the tests below class Call(object): """This is the class docstring.""" def __init__(self, x, y=1): """This is the constructor docstring.""" def __call__(self, *a, **kw): """This is the call docstring.""" def method(self, x, z=2): """Some method's docstring""" class OldStyle: """An old-style class for testing.""" pass def f(x, y=2, *a, **kw): """A simple function.""" def g(y, z=3, *a, **kw): pass # no docstring @register_line_magic def lmagic(line): "A line magic" @register_cell_magic def cmagic(line, cell): "A cell magic" @register_line_cell_magic def lcmagic(line, cell=None): "A line/cell magic" @magics_class class SimpleMagics(Magics): @line_magic def Clmagic(self, cline): "A class-based line magic" @cell_magic def Ccmagic(self, cline, ccell): "A 
class-based cell magic" @line_cell_magic def Clcmagic(self, cline, ccell=None): "A class-based line/cell magic" def check_calltip(obj, name, call, docstring): """Generic check pattern all calltip tests will use""" info = inspector.info(obj, name) call_line, ds = oinspect.call_tip(info) nt.assert_equal(call_line, call) nt.assert_equal(ds, docstring) #----------------------------------------------------------------------------- # Tests #----------------------------------------------------------------------------- def test_calltip_class(): check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__) def test_calltip_instance(): c = Call(1) check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__) def test_calltip_method(): c = Call(1) check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__) def test_calltip_function(): check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__) def test_calltip_function2(): check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>') def test_calltip_builtin(): check_calltip(sum, 'sum', None, sum.__doc__) def test_calltip_line_magic(): check_calltip(lmagic, 'lmagic', 'lmagic(line)', "A line magic") def test_calltip_cell_magic(): check_calltip(cmagic, 'cmagic', 'cmagic(line, cell)', "A cell magic") def test_calltip_line_magic(): check_calltip(lcmagic, 'lcmagic', 'lcmagic(line, cell=None)', "A line/cell magic") def test_class_magics(): cm = SimpleMagics(ip) ip.register_magics(cm) check_calltip(cm.Clmagic, 'Clmagic', 'Clmagic(cline)', "A class-based line magic") check_calltip(cm.Ccmagic, 'Ccmagic', 'Ccmagic(cline, ccell)', "A class-based cell magic") check_calltip(cm.Clcmagic, 'Clcmagic', 'Clcmagic(cline, ccell=None)', "A class-based line/cell magic") def test_info(): "Check that Inspector.info fills out various fields as expected." i = inspector.info(Call, oname='Call') nt.assert_equal(i['type_name'], 'type') expted_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'> nt.assert_equal(i['base_class'], expted_class) nt.assert_equal(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'>") fname = __file__ if fname.endswith(".pyc"): fname = fname[:-1] # case-insensitive comparison needed on some filesystems # e.g. Windows: nt.assert_equal(i['file'].lower(), fname.lower()) nt.assert_equal(i['definition'], 'Call(self, *a, **kw)\n') nt.assert_equal(i['docstring'], Call.__doc__) nt.assert_equal(i['source'], None) nt.assert_true(i['isclass']) nt.assert_equal(i['init_definition'], "Call(self, x, y=1)\n") nt.assert_equal(i['init_docstring'], Call.__init__.__doc__) i = inspector.info(Call, detail_level=1) nt.assert_not_equal(i['source'], None) nt.assert_equal(i['docstring'], None) c = Call(1) c.__doc__ = "Modified instance docstring" i = inspector.info(c) nt.assert_equal(i['type_name'], 'Call') nt.assert_equal(i['docstring'], "Modified instance docstring") nt.assert_equal(i['class_docstring'], Call.__doc__) nt.assert_equal(i['init_docstring'], Call.__init__.__doc__) nt.assert_equal(i['call_docstring'], c.__call__.__doc__) # Test old-style classes, which for example may not have an __init__ method. 
if not py3compat.PY3: i = inspector.info(OldStyle) nt.assert_equal(i['type_name'], 'classobj') i = inspector.info(OldStyle()) nt.assert_equal(i['type_name'], 'instance') nt.assert_equal(i['docstring'], OldStyle.__doc__) def test_getdoc(): class A(object): """standard docstring""" pass class B(object): """standard docstring""" def getdoc(self): return "custom docstring" class C(object): """standard docstring""" def getdoc(self): return None a = A() b = B() c = C() nt.assert_equal(oinspect.getdoc(a), "standard docstring") nt.assert_equal(oinspect.getdoc(b), "custom docstring") nt.assert_equal(oinspect.getdoc(c), "standard docstring") def test_pdef(): # See gh-1914 def foo(): pass inspector.pdef(foo, 'foo')
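The calltip machinery exercised by these tests can also be driven directly. A small sketch using the same `Inspector`/`call_tip` API the tests use; unlike the test module, this sketch does not need a running IPython session, only the `oinspect` module:

from IPython.core import oinspect

inspector = oinspect.Inspector()

def add(x, y=1):
    "Add two numbers."
    return x + y

info = inspector.info(add, 'add')
call_line, docstring = oinspect.call_tip(info)
print(call_line)   # 'add(x, y=1)'
print(docstring)   # 'Add two numbers.'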
bsd-3-clause
-8,864,505,154,621,943,000
-1,610,682,169,522,475,300
27.77931
88
0.556913
false
beagles/neutron_hacking
neutron/services/firewall/agents/varmour/varmour_api.py
20
4931
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013 vArmour Networks Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Gary Duan, gduan@varmour.com, vArmour Networks import base64 import httplib2 from oslo.config import cfg from neutron.openstack.common import jsonutils as json from neutron.openstack.common import log as logging from neutron.services.firewall.agents.varmour import varmour_utils as va_utils OPTS = [ cfg.StrOpt('director', default='localhost', help=_("vArmour director ip")), cfg.StrOpt('director_port', default='443', help=_("vArmour director port")), cfg.StrOpt('username', default='varmour', help=_("vArmour director username")), cfg.StrOpt('password', default='varmour', secret=True, help=_("vArmour director password")), ] cfg.CONF.register_opts(OPTS, "vArmour") LOG = logging.getLogger(__name__) REST_URL_PREFIX = '/api/v1.0' class vArmourAPIException(Exception): message = _("An unknown exception.") def __init__(self, **kwargs): try: self.err = self.message % kwargs except Exception: self.err = self.message def __str__(self): return self.err class AuthenticationFailure(vArmourAPIException): message = _("Invalid login credential.") class vArmourRestAPI(object): def __init__(self): LOG.debug(_('vArmourRestAPI: started')) self.user = cfg.CONF.vArmour.username self.passwd = cfg.CONF.vArmour.password self.server = cfg.CONF.vArmour.director self.port = cfg.CONF.vArmour.director_port self.timeout = 3 self.key = '' def auth(self): headers = {} enc = base64.b64encode(self.user + ':' + self.passwd) headers['Authorization'] = 'Basic ' + enc resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers) if resp and resp['status'] == 200: self.key = resp['body']['auth'] return True else: raise AuthenticationFailure() def commit(self): self.rest_api('POST', va_utils.REST_URL_COMMIT) def rest_api(self, method, url, body=None, headers=None): url = REST_URL_PREFIX + url if body: body_data = json.dumps(body) else: body_data = '' if not headers: headers = {} enc = base64.b64encode('%s:%s' % (self.user, self.key)) headers['Authorization'] = 'Basic ' + enc LOG.debug(_("vArmourRestAPI: %(server)s %(port)s"), {'server': self.server, 'port': self.port}) try: action = "https://" + self.server + ":" + self.port + url LOG.debug(_("vArmourRestAPI Sending: " "%(method)s %(action)s %(headers)s %(body_data)s"), {'method': method, 'action': action, 'headers': headers, 'body_data': body_data}) h = httplib2.Http(timeout=3, disable_ssl_certificate_validation=True) resp, resp_str = h.request(action, method, body=body_data, headers=headers) LOG.debug(_("vArmourRestAPI Response: %(status)s %(resp_str)s"), {'status': resp.status, 'resp_str': resp_str}) if resp.status == 200: return {'status': resp.status, 'reason': resp.reason, 'body': json.loads(resp_str)} except Exception: LOG.error(_('vArmourRestAPI: Could not establish HTTP connection')) def del_cfg_objs(self, url, prefix): resp = self.rest_api('GET', url) if resp and resp['status'] == 200: olist = resp['body']['response'] 
if not olist: return for o in olist: if o.startswith(prefix): self.rest_api('DELETE', url + '/"name:%s"' % o) self.commit() def count_cfg_objs(self, url, prefix): count = 0 resp = self.rest_api('GET', url) if resp and resp['status'] == 200: for o in resp['body']['response']: if o.startswith(prefix): count += 1 return count
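The authentication scheme above is plain HTTP Basic with two different payloads: `username:password` for the initial `auth()` call, then `username:session_key` for every later request. A standalone sketch of the header construction (Python 2, matching the module; the credentials and session key are placeholders):

import base64

user, passwd = 'varmour', 'varmour'   # placeholder credentials
session_key = 'abc123'                # as returned in resp['body']['auth']

# Header built by auth():
login_header = 'Basic ' + base64.b64encode(user + ':' + passwd)
# Header built by rest_api() for every subsequent call:
api_header = 'Basic ' + base64.b64encode('%s:%s' % (user, session_key))
print login_header
print api_header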
apache-2.0
-7,851,507,980,123,569,000
7,138,566,916,817,698,000
32.544218
79
0.566011
false
eerwitt/tensorflow
tensorflow/python/saved_model/main_op_impl.py
25
2164
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel main op implementation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops as tf_data_flow_ops
from tensorflow.python.ops import variables


def main_op():
  """Returns a main op to init variables and tables.

  Returns the main op including the group of ops that initializes all
  variables, initializes local variables and initializes all tables.

  Returns:
    The set of ops to be run as part of the main op upon the load operation.
  """
  init = variables.global_variables_initializer()
  init_local = variables.local_variables_initializer()
  init_tables = tf_data_flow_ops.tables_initializer()
  return control_flow_ops.group(init, init_local, init_tables)


def main_op_with_restore(restore_op_name):
  """Returns a main op to init variables, tables and restore the graph.

  Returns the main op including the group of ops that initializes all
  variables, initializes local variables, initializes all tables and runs the
  given restore op.

  Args:
    restore_op_name: Name of the op to use to restore the graph.

  Returns:
    The set of ops to be run as part of the main op upon the load operation.
  """
  with ops.control_dependencies([main_op()]):
    main_op_with_restore = control_flow_ops.group(restore_op_name)
  return main_op_with_restore
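A hedged sketch of where `main_op()` typically plugs in, using the TF1-era `SavedModelBuilder` API as I understand it (the export directory is a placeholder; verify the `main_op` keyword against your TensorFlow version before relying on it):

import tensorflow as tf
from tensorflow.python.saved_model import main_op_impl

export_dir = '/tmp/my_saved_model'  # placeholder path
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  builder.add_meta_graph_and_variables(
      sess, [tf.saved_model.tag_constants.SERVING],
      main_op=main_op_impl.main_op())  # run by the loader at load time
builder.save()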
apache-2.0
-4,516,581,380,765,146,600
2,595,223,688,558,493,000
36.964912
80
0.726895
false
DMSC-Instrument-Data/lewis
setup.py
2
2713
#!/usr/bin/env python # -*- coding: utf-8 -*- # ********************************************************************* # lewis - a library for creating hardware device simulators # Copyright (C) 2016-2017 European Spallation Source ERIC # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ********************************************************************* from setuptools import setup, find_packages # as suggested on http://python-packaging.readthedocs.io/en/latest/metadata.html def readme(): with open('README.rst') as f: return f.read() setup( name='lewis', version='1.2.0', description='LeWIS - Let\'s Write Intricate Simulators!', long_description=readme(), url='https://github.com/DMSC-Instrument-Data/lewis', author='Michael Hart, Michael Wedel, Owen Arnold', author_email='michael.hart@stfc.ac.uk', license='GPL v3', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering', 'Topic :: Software Development :: Libraries', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', ], keywords='hardware simulation controls', packages=find_packages(where='src'), package_dir={'': 'src'}, install_requires=['six', 'pyzmq', 'json-rpc', 'semantic_version', 'PyYAML', 'scanf>=1.4.1'], extras_require={ 'epics': ['pcaspy'], 'dev': ['flake8', 'mock>=1.0.1', 'sphinx>=1.4.5', 'sphinx_rtd_theme', 'pytest', 'pytest-cov', 'coverage', 'tox'], }, entry_points={ 'console_scripts': [ 'lewis=lewis.scripts.run:run_simulation', 'lewis-control=lewis.scripts.control:control_simulation' ], }, )
gpl-3.0
2,774,889,334,649,595,000
-5,196,409,367,252,223,000
36.680556
85
0.609657
false
kata198/usrsvc
usrsvcmod/Monitoring/ActivityFile.py
1
3670
'''
    Copyright (c) 2016 Tim Savannah All Rights Reserved.

    This software is licensed under the terms of the GPLv3.
    This may change at my discretion, retroactively, and without notice.

    You should have received a copy of this with the source distribution as a file titled, LICENSE.

    The most current license can be found at:
    https://github.com/kata198/usrsvc/LICENSE

    This location may need to be changed at some point in the future, in which case
    you may email Tim Savannah <kata198 at gmail dot com>, or find them on the
    current website intended for distribution of usrsvc.

    ActivityFileMonitor - Asserts that a specific file or directory should be modified within a certain threshold
'''
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :

import os
import time

from func_timeout import FunctionTimedOut

from . import MonitoringBase

from ..logging import logMsg, logErr

# TODO: We need to implement the check here as launching and joining on a thread,
#   so that we don't lock up all monitoring if someone uses an NFS file on a
#   disconnected device or anything else that will result in an indefinite
#   uninterruptable ("D") state.

class ActivityFileMonitor(MonitoringBase):
    '''
        ActivityFileMonitor - Class for doing activity file monitoring
    '''

    def __init__(self, programName, activityFile, activityFileLimit):
        MonitoringBase.__init__(self)

        self.programName = programName
        self.activityFile = activityFile
        self.activityFileLimit = activityFileLimit

    @classmethod
    def createFromConfig(cls, programConfig):
        if not programConfig.Monitoring.activityfile:
            return None
        return cls(programConfig.name, programConfig.Monitoring.activityfile, programConfig.Monitoring.activityfile_limit)

    def shouldRestart(self, program=None):
        '''
            Returns True if activity file has not been modified within the threshold
            specified by activityfile_limit (should restart), otherwise False.

            @param program - unused.
        '''
        activityFile = self.activityFile
        activityFileLimit = self.activityFileLimit
        programName = self.programName

        if not activityFile:
            # Yes this is checked twice if created through createFromConfig, but it may be called otherwise so better safe.
            return False

        try:
            # If activity file is not present, this is a fail and we restart.
            if not os.path.exists(activityFile):
                self.setReason('Restarting %s because activity file ( %s ) does not exist\n' %(programName, activityFile,))
                return True

            # Gather the mtime and see if we are past the threshold
            lastModified = os.stat(activityFile).st_mtime
            now = time.time()

            threshold = float(now - activityFileLimit)
            if lastModified < threshold:
                self.setReason('Restarting %s because it has not modified activity file ( %s ) in %.4f seconds. Limit is %d seconds.\n' %(programName, activityFile, float(now - lastModified), activityFileLimit) )
                return True
        except FunctionTimedOut:
            logErr('MONITOR: ActivityFile timed out on %s\n' %(programName,))
            raise
        except Exception as e:
            # If we got an exception, just log and try again next round.
            logErr('Got an exception in activity file monitoring. Not restarting program. Program="%s" activityfile="%s" exception="%s"\nlocals: %s\n' %(programName, activityFile, str(e), str(locals())))

        return False

# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
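The mtime check at the heart of `shouldRestart` is easy to exercise standalone. A small sketch (the file path is a placeholder) showing when the threshold trips:

import os
import time

activity_file = '/tmp/app.activity'   # placeholder path
limit_seconds = 2

open(activity_file, 'w').close()      # "touch" the activity file
time.sleep(3)                         # let it go stale past the limit

last_modified = os.stat(activity_file).st_mtime
stale = last_modified < (time.time() - limit_seconds)
print(stale)  # True: not modified within limit_seconds, so a restart would trigger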
lgpl-2.1
7,796,439,558,679,271,000
-847,885,575,162,544,800
40.704545
212
0.687193
false
johnsensible/django-sendfile
examples/protected_downloads/settings.py
4
2706
# Django settings for protected_downloads project. import os.path DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@domain.com'), ) MANAGERS = ADMINS PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(PROJECT_ROOT, 'download.db'), } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '' # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: "http://foo.com/media/", "/media/". ADMIN_MEDIA_PREFIX = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'n309^dwk=@+g72ko--8vjyz&1v0u%xf#*0=wzr=2n#f3hb0a=l' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', # 'django.template.loaders.eggs.load_template_source', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) ROOT_URLCONF = 'protected_downloads.urls' TEMPLATE_DIRS = ( os.path.join(PROJECT_ROOT, 'templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.admin', 'download', 'sendfile', ) # SENDFILE settings SENDFILE_BACKEND = 'sendfile.backends.development' #SENDFILE_BACKEND = 'sendfile.backends.xsendfile' #SENDFILE_BACKEND = 'sendfile.backends.nginx' SENDFILE_ROOT = os.path.join(PROJECT_ROOT, 'protected') SENDFILE_URL = '/protected'
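These settings only select a backend and the protected root; serving a file still goes through a view. A hedged sketch of such a view (the URL layout and filename argument are hypothetical; `sendfile()` is the documented entry point of django-sendfile, which picks the configured backend):

import os.path

from django.conf import settings
from django.contrib.auth.decorators import login_required
from sendfile import sendfile

@login_required
def download(request, filename):
    # Resolve the file inside SENDFILE_ROOT; the chosen backend decides
    # whether Django streams the file itself (development backend) or
    # delegates to X-Sendfile / X-Accel-Redirect.
    path = os.path.join(settings.SENDFILE_ROOT, filename)
    return sendfile(request, path, attachment=True)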
bsd-3-clause
-6,755,379,974,023,639,000
4,783,738,473,275,215,000
28.413043
78
0.719882
false
stefanoteso/musm-adt17
musm/pc.py
1
4018
import numpy as np import gurobipy as gurobi from .problem import Problem class PC(Problem): _ATTRIBUTES = [ ('cpu', 37), ('hd', 10), ('manufacturer', 8), ('ram', 10), ('monitor', 8), ('pctype', 3), ] _ATTR_TO_COSTS = { 'pctype': [50, 0, 80], 'manufacturer': [100, 0, 100, 50, 0, 0, 50, 50], 'cpu' : [ 1.4*100, 1.4*130, 1.1*70, 1.1*90, 1.2*80, 1.2*50, 1.2*60, 1.2*80, 1.2*90, 1.2*100, 1.2*110, 1.2*120, 1.2*130, 1.2*140, 1.2*170, 1.5*50, 1.5*60, 1.5*80, 1.5*90, 1.5*100, 1.5*110, 1.5*130, 1.5*150, 1.5*160, 1.5*170, 1.5*180, 1.5*220, 1.4*27, 1.4*30, 1.4*40, 1.4*45, 1.4*50, 1.4*55, 1.4*60, 1.4*70, 1.6*70, 1.6*73, ], 'monitor': [ 0.6*100, 0.6*104, 0.6*120, 0.6*133, 0.6*140, 0.6*150, 0.6*170, 0.6*210 ], 'ram': [ 0.8*64, 0.8*128, 0.8*160, 0.8*192, 0.8*256, 0.8*320, 0.8*384, 0.8*512, 0.8*1024, 0.8*2048 ], 'hd': [ 4*8, 4*10, 4*12, 4*15, 4*20, 4*30, 4*40, 4*60, 4*80, 4*120 ], } def __init__(self, **kwargs): super().__init__(sum(attr[1] for attr in self._ATTRIBUTES)) self.cost_matrix = np.hstack([ np.array(self._ATTR_TO_COSTS[attr], dtype=float) for attr, _ in self._ATTRIBUTES ]).reshape((1, -1)) / 2754.4 def _add_constraints(self, model, x): base, offs = 0, {} for attr, size in self._ATTRIBUTES: offs[attr] = base x_attr = [x[z] for z in range(base, base + size)] model.addConstr(gurobi.quicksum(x_attr) == 1) base += size def implies(head, body): # NOTE here we subtract 1 from head and body bits because the bit # numbers in the constraints were computed starting from one, to # work in MiniZinc, while Gurobi expects them to start from zero head = 1 - x[head - 1] body = gurobi.quicksum([x[i - 1] for i in body]) return model.addConstr(head + body >= 1) # Manufacturer -> Type implies(offs['manufacturer'] + 2, [offs['pctype'] + i for i in [1, 2]]) implies(offs['manufacturer'] + 4, [offs['pctype'] + 1]) implies(offs['manufacturer'] + 6, [offs['pctype'] + 2]) implies(offs['manufacturer'] + 7, [offs['pctype'] + i for i in [1, 3]]) # Manufacturer -> CPU implies(offs['manufacturer'] + 1, [offs['cpu'] + i for i in range(28, 37+1)]) implies(offs['manufacturer'] + 2, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))]) implies(offs['manufacturer'] + 7, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))]) implies(offs['manufacturer'] + 4, [offs['cpu'] + i for i in range(5, 27+1)]) implies(offs['manufacturer'] + 3, [offs['cpu'] + i for i in range(6, 27+1)]) implies(offs['manufacturer'] + 5, [offs['cpu'] + i for i in range(6, 27+1)]) implies(offs['manufacturer'] + 8, [offs['cpu'] + i for i in range(6, 27+1)]) implies(offs['manufacturer'] + 6, [offs['cpu'] + i for i in range(16, 27+1)]) # Type -> RAM implies(offs['pctype'] + 1, [offs['ram'] + i for i in range(1, 9+1)]) implies(offs['pctype'] + 2, [offs['ram'] + i for i in [2, 5, 8, 9]]) implies(offs['pctype'] + 3, [offs['ram'] + i for i in [5, 8, 9, 10]]) # Type -> HD implies(offs['pctype'] + 1, [offs['hd'] + i for i in range(1, 6+1)]) implies(offs['pctype'] + 2, [offs['hd'] + i for i in range(5, 10+1)]) implies(offs['pctype'] + 3, [offs['hd'] + i for i in range(5, 10+1)]) # Type -> Monitor implies(offs['pctype'] + 1, [offs['monitor'] + i for i in range(1, 6+1)]) implies(offs['pctype'] + 2, [offs['monitor'] + i for i in range(6, 8+1)]) implies(offs['pctype'] + 3, [offs['monitor'] + i for i in range(6, 8+1)])
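The `implies(head, body)` helper encodes the logical constraint head -> OR(body) over 0/1 variables as the linear inequality (1 - x_head) + sum(x_body) >= 1. A tiny standalone check of that encoding against the boolean truth table (no Gurobi required):

from itertools import product

def implication_holds(head, body):
    # head -> (b1 or b2 or ...) over 0/1 variables, linearized
    return (1 - head) + sum(body) >= 1

for head, b1, b2 in product([0, 1], repeat=3):
    assert implication_holds(head, [b1, b2]) == (not head or b1 or b2)
print("linear encoding matches the boolean implication")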
mit
-2,355,501,571,492,825,000
5,620,960,172,553,019,000
43.153846
112
0.498507
false
procangroup/edx-platform
openedx/core/djangoapps/programs/signals.py
11
1873
""" This module contains signals / handlers related to programs. """ import logging from django.dispatch import receiver from openedx.core.djangoapps.signals.signals import COURSE_CERT_AWARDED LOGGER = logging.getLogger(__name__) @receiver(COURSE_CERT_AWARDED) def handle_course_cert_awarded(sender, user, course_key, mode, status, **kwargs): # pylint: disable=unused-argument """ If programs is enabled and a learner is awarded a course certificate, schedule a celery task to process any programs certificates for which the learner may now be eligible. Args: sender: class of the object instance that sent this signal user: django.contrib.auth.User - the user to whom a cert was awarded course_key: refers to the course run for which the cert was awarded mode: mode / certificate type, e.g. "verified" status: either "downloadable" or "generating" Returns: None """ # Import here instead of top of file since this module gets imported before # the credentials app is loaded, resulting in a Django deprecation warning. from openedx.core.djangoapps.credentials.models import CredentialsApiConfig # Avoid scheduling new tasks if certification is disabled. if not CredentialsApiConfig.current().is_learner_issuance_enabled: return # schedule background task to process LOGGER.debug( 'handling COURSE_CERT_AWARDED: username=%s, course_key=%s, mode=%s, status=%s', user, course_key, mode, status, ) # import here, because signal is registered at startup, but items in tasks are not yet able to be loaded from openedx.core.djangoapps.programs.tasks.v1.tasks import award_program_certificates award_program_certificates.delay(user.username)
agpl-3.0
-6,590,187,093,345,358,000
-8,476,246,005,034,675,000
33.685185
116
0.691938
false
hbrunn/OpenUpgrade
addons/hr_holidays/tests/test_holidays_flow.py
44
10276
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import datetime from dateutil.relativedelta import relativedelta from openerp.addons.hr_holidays.tests.common import TestHrHolidaysBase from openerp.exceptions import AccessError from openerp.osv.orm import except_orm from openerp.tools import mute_logger class TestHolidaysFlow(TestHrHolidaysBase): @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm') def test_00_leave_request_flow(self): """ Testing leave request flow """ cr, uid = self.cr, self.uid def _check_holidays_status(holiday_status, ml, lt, rl, vrl): self.assertEqual(holiday_status.max_leaves, ml, 'hr_holidays: wrong type days computation') self.assertEqual(holiday_status.leaves_taken, lt, 'hr_holidays: wrong type days computation') self.assertEqual(holiday_status.remaining_leaves, rl, 'hr_holidays: wrong type days computation') self.assertEqual(holiday_status.virtual_remaining_leaves, vrl, 'hr_holidays: wrong type days computation') # HrUser creates some holiday statuses -> crash because only HrManagers should do this with self.assertRaises(AccessError): self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, { 'name': 'UserCheats', 'limit': True, }) # HrManager creates some holiday statuses self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, { 'name': 'WithMeetingType', 'limit': True, 'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}), }) self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, { 'name': 'NotLimited', 'limit': True, }) self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, { 'name': 'Limited', 'limit': False, 'double_validation': True, }) # -------------------------------------------------- # Case1: unlimited type of leave request # -------------------------------------------------- # Employee creates a leave request for another employee -> should crash with self.assertRaises(except_orm): self.hr_holidays.create(cr, self.user_employee_id, { 'name': 'Hol10', 'employee_id': self.employee_hruser_id, 'holiday_status_id': self.holidays_status_1, 'date_from': (datetime.today() - relativedelta(days=1)), 'date_to': datetime.today(), 'number_of_days_temp': 1, }) # Employee creates a leave request in a no-limit category hol1_id = self.hr_holidays.create(cr, self.user_employee_id, { 'name': 'Hol11', 'employee_id': self.employee_emp_id, 'holiday_status_id': self.holidays_status_1, 'date_from': (datetime.today() - relativedelta(days=1)), 'date_to': datetime.today(), 
            'number_of_days_temp': 1,
        })
        hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
        self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')

        # Employee validates its leave request -> should not work
        self.hr_holidays.signal_validate(cr, self.user_employee_id, [hol1_id])
        hol1.refresh()
        self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')

        # HrUser validates the employee leave request
        self.hr_holidays.signal_validate(cr, self.user_hrmanager_id, [hol1_id])
        hol1.refresh()
        self.assertEqual(hol1.state, 'validate', 'hr_holidays: validated leave request should be in validate state')

        # --------------------------------------------------
        # Case2: limited type of leave request
        # --------------------------------------------------
        # Employee creates a new leave request at the same time -> crash, avoid overlapping
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol21',
                'employee_id': self.employee_emp_id,
                'holiday_status_id': self.holidays_status_1,
                'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
                'date_to': datetime.today(),
                'number_of_days_temp': 1,
            })

        # Employee creates a leave request in a limited category -> crash, not enough days left
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol22',
                'employee_id': self.employee_emp_id,
                'holiday_status_id': self.holidays_status_2,
                'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
                'date_to': (datetime.today() + relativedelta(days=1)),
                'number_of_days_temp': 1,
            })

        # Clean transaction
        self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))

        # HrUser allocates some leaves to the employee
        aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
            'name': 'Days for limited category',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_2,
            'type': 'add',
            'number_of_days_temp': 2,
        })
        # HrUser validates the allocation request
        self.hr_holidays.signal_validate(cr, self.user_hruser_id, [aloc1_id])
        self.hr_holidays.signal_second_validate(cr, self.user_hruser_id, [aloc1_id])
        # Checks Employee has effectively some days left
        hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)

        # Employee creates a leave request in the limited category, now that he has some days left
        hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
            'name': 'Hol22',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_2,
            'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
            'date_to': (datetime.today() + relativedelta(days=3)),
            'number_of_days_temp': 1,
        })
        hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)

        # Check left days: - 1 virtual remaining day
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)

        # HrUser validates the first step
        self.hr_holidays.signal_validate(cr, self.user_hruser_id, [hol2_id])
        hol2.refresh()
        self.assertEqual(hol2.state, 'validate1', 'hr_holidays: first validation should lead to validate1 state')

        # HrUser validates the second step
        self.hr_holidays.signal_second_validate(cr, self.user_hruser_id, [hol2_id])
        hol2.refresh()
        self.assertEqual(hol2.state, 'validate', 'hr_holidays: second validation should lead to validate state')

        # Check left days: - 1 day taken
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)

        # HrManager finds an error: he refuses the leave request
        self.hr_holidays.signal_refuse(cr, self.user_hrmanager_id, [hol2_id])
        hol2.refresh()
        self.assertEqual(hol2.state, 'refuse', 'hr_holidays: refuse should lead to refuse state')

        # Check left days: 2 days left again
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)

        # Annoyed, HrUser tries to fix his error and tries to reset the leave request -> does not work, only HrManager
        self.hr_holidays.signal_reset(cr, self.user_hruser_id, [hol2_id])
        self.assertEqual(hol2.state, 'refuse', 'hr_holidays: hr_user should not be able to reset a refused leave request')

        # HrManager resets the request
        self.hr_holidays.signal_reset(cr, self.user_hrmanager_id, [hol2_id])
        hol2.refresh()
        self.assertEqual(hol2.state, 'draft', 'hr_holidays: resetting should lead to draft state')

        # HrManager changes the date and puts too many days -> crash when confirming
        self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
            'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
            'date_to': (datetime.today() + relativedelta(days=7)),
            'number_of_days_temp': 4,
        })
        with self.assertRaises(except_orm):
            self.hr_holidays.signal_confirm(cr, self.user_hrmanager_id, [hol2_id])
agpl-3.0
-6,335,564,382,337,338,000
5,461,784,881,950,436,000
48.403846
131
0.586123
false
dpiers/coderang-meteor
public/jsrepl/extern/python/unclosured/lib/python2.7/glob.py
173
2249
"""Filename globbing utility.""" import sys import os import re import fnmatch __all__ = ["glob", "iglob"] def glob(pathname): """Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. """ return list(iglob(pathname)) def iglob(pathname): """Return an iterator which yields the paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. """ if not has_magic(pathname): if os.path.lexists(pathname): yield pathname return dirname, basename = os.path.split(pathname) if not dirname: for name in glob1(os.curdir, basename): yield name return if has_magic(dirname): dirs = iglob(dirname) else: dirs = [dirname] if has_magic(basename): glob_in_dir = glob1 else: glob_in_dir = glob0 for dirname in dirs: for name in glob_in_dir(dirname, basename): yield os.path.join(dirname, name) # These 2 helper functions non-recursively glob inside a literal directory. # They return a list of basenames. `glob1` accepts a pattern while `glob0` # takes a literal basename (so it only has to check for its existence). def glob1(dirname, pattern): if not dirname: dirname = os.curdir if isinstance(pattern, unicode) and not isinstance(dirname, unicode): dirname = unicode(dirname, sys.getfilesystemencoding() or sys.getdefaultencoding()) try: names = os.listdir(dirname) except os.error: return [] if pattern[0] != '.': names = filter(lambda x: x[0] != '.', names) return fnmatch.filter(names, pattern) def glob0(dirname, basename): if basename == '': # `os.path.split()` returns an empty basename for paths ending with a # directory separator. 'q*x/' should match only directories. if os.path.isdir(dirname): return [basename] else: if os.path.lexists(os.path.join(dirname, basename)): return [basename] return [] magic_check = re.compile('[*?[]') def has_magic(s): return magic_check.search(s) is not None
mit
8,884,900,517,094,692,000
3,094,950,720,485,212,000
27.833333
77
0.62739
false
hikelee/launcher
launcher/templatetags/helpers.py
1
6201
""" sentry.templatetags.sentry_helpers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import functools import os.path from collections import namedtuple from datetime import timedelta import pytz import six from django import template from django.conf import settings from django.template.defaultfilters import stringfilter from django.utils import timezone from django.utils.html import escape from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from six.moves import range from six.moves.urllib.parse import quote from launcher.utils.strings import soft_break as _soft_break,soft_hyphenate,to_unicode,truncatechars SentryVersion=namedtuple('SentryVersion',[ 'current', 'latest', 'update_available', 'build', ]) register=template.Library() truncatechars=register.filter(stringfilter(truncatechars)) truncatechars.is_safe=True @register.filter def multiply(x,y): def coerce(value): if isinstance(value,(six.integer_types,float)): return value try: return int(value) except ValueError: return float(value) return coerce(x)*coerce(y) @register.filter def pprint(value,break_after=10): """ break_after is used to define how often a <span> is inserted (for soft wrapping). """ value=to_unicode(value) return mark_safe( u'<span></span>'. join([escape(value[i:(i+break_after)]) for i in range(0,len(value),break_after)]) ) @register.filter def is_url(value): if not isinstance(value,six.string_types): return False if not value.startswith(('http://','https://')): return False if ' ' in value: return False return True # seriously Django? @register.filter def subtract(value,amount): return int(value)-int(amount) @register.filter def absolute_value(value): return abs(int(value) if isinstance(value,six.integer_types) else float(value)) @register.filter def has_charts(group): from launcher.utils.db import has_charts if hasattr(group,'_state'): db=group._state.db or 'default' else: db='default' return has_charts(db) @register.filter def as_sorted(value): return sorted(value) @register.filter def small_count(v,precision=1): if not v: return 0 z=[ (1000000000,_('b')), (1000000,_('m')), (1000,_('k')), ] v=int(v) for x,y in z: o,p=divmod(v,x) if o: if len(six.text_type(o))>2 or not p: return '%d%s'%(o,y) return ('%.{}f%s'.format(precision))%(v/float(x),y) return v @register.filter def num_digits(value): return len(six.text_type(value)) @register.filter def to_str(data): return six.text_type(data) @register.filter def is_none(value): return value is None @register.filter def timesince(value,now=None): from django.template.defaultfilters import timesince if now is None: now=timezone.now() if not value: return _('never') if value<(now-timedelta(days=5)): return value.date() value=(' '.join(timesince(value,now).split(' ')[0:2])).strip(',') if value==_('0 minutes'): return _('just now') if value==_('1 day'): return _('yesterday') return value+_(' ago') @register.filter def duration(value): if not value: return '0s' # value is assumed to be in ms value=value/1000.0 hours,minutes,seconds=0,0,0 if value>3600: hours=value/3600 value=value%3600 if value>60: minutes=value/60 value=value%60 seconds=value output=[] if hours: output.append('%dh'%hours) if minutes: output.append('%dm'%minutes) if seconds>1: output.append('%0.2fs'%seconds) elif seconds: output.append('%dms'%(seconds*1000)) return ''.join(output) @register.filter def date(dt,arg=None): 
from django.template.defaultfilters import date if not timezone.is_aware(dt): dt=dt.replace(tzinfo=timezone.utc) return date(dt,arg) @register.filter def trim_schema(value): return value.split('//',1)[-1] @register.filter def with_metadata(group_list,request): group_list=list(group_list) if request.user.is_authenticated() and group_list: project=group_list[0].project bookmarks=set( project.bookmark_set.filter( user=request.user, group__in=group_list, ).values_list('group_id',flat=True) ) else: bookmarks=set() # TODO(dcramer): this is obsolete and needs to pull from the tsdb backend historical_data={} for g in group_list: yield g,{ 'is_bookmarked':g.pk in bookmarks, 'historical_data':','.join(six.text_type(x[1]) for x in historical_data.get(g.id,[])), } @register.simple_tag def percent(value,total,format=None): if not (value and total): result=0 else: result=int(value)/float(total)*100 if format is None: return int(result) else: return ('%%%s'%format)%result @register.filter def titlize(value): return value.replace('_',' ').title() @register.filter def split(value,delim=''): return value.split(delim) @register.inclusion_tag('sentry/partial/github_button.html') def github_button(user,repo): return { 'user':user, 'repo':repo, } @register.filter def urlquote(value,safe=''): return quote(value.encode('utf8'),safe) @register.filter def basename(value): return os.path.basename(value) @register.filter def user_display_name(user): return user.name or user.username @register.simple_tag(takes_context=True) def localized_datetime(context,dt,format='DATETIME_FORMAT'): request=context['request'] timezone=getattr(request,'timezone',None) if not timezone: timezone=pytz.timezone(settings.SENTRY_DEFAULT_TIME_ZONE) dt=dt.astimezone(timezone) return date(dt,format) @register.filter def format_userinfo(user): parts=user.username.split('@') if len(parts)==1: username=user.username else: username=parts[0].lower() return mark_safe('<span title="%s">%s</span>'%(escape(user.username),escape(username),)) @register.filter def soft_break(value,length): return _soft_break( value, length, functools.partial(soft_hyphenate,length=max(length//10,10)), )
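The numeric filters above are easiest to sanity-check outside a template. A standalone rendition of the `duration` filter's arithmetic, stripped of the Django plumbing so it runs anywhere; the traced outputs match the filter as written above:

def duration(ms):
    # mirrors the template filter: input is milliseconds
    if not ms:
        return '0s'
    value = ms / 1000.0
    hours = minutes = 0
    if value > 3600:
        hours, value = value / 3600, value % 3600
    if value > 60:
        minutes, value = value / 60, value % 60
    seconds = value
    out = []
    if hours:
        out.append('%dh' % hours)
    if minutes:
        out.append('%dm' % minutes)
    if seconds > 1:
        out.append('%0.2fs' % seconds)
    elif seconds:
        out.append('%dms' % (seconds * 1000))
    return ''.join(out)

print(duration(500))      # '500ms'
print(duration(1500))     # '1.50s'
print(duration(3723000))  # '1h2m3.00s'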
mit
-6,437,698,877,217,264,000
-3,480,042,049,001,698,000
22.13806
100
0.687631
false
zubron/servo
components/script/dom/bindings/codegen/BindingGen.py
150
1729
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import sys import os sys.path.append(os.path.join(".", "parser")) sys.path.append(os.path.join(".", "ply")) import cPickle from Configuration import Configuration from CodegenRust import CGBindingRoot, replaceFileIfChanged def generate_binding_rs(config, outputprefix, webidlfile): """ |config| Is the configuration object. |outputprefix| is a prefix to use for the header guards and filename. """ filename = outputprefix + ".rs" module = CGBindingRoot(config, outputprefix, webidlfile).define() if not module: print "Skipping empty module: %s" % (filename) elif replaceFileIfChanged(filename, module): print "Generating binding implementation: %s" % (filename) def main(): # Parse arguments. from optparse import OptionParser usagestring = "usage: %prog configFile outputdir outputPrefix webIDLFile" o = OptionParser(usage=usagestring) (options, args) = o.parse_args() if len(args) != 4: o.error(usagestring) configFile = os.path.normpath(args[0]) outputdir = args[1] outputPrefix = args[2] webIDLFile = os.path.normpath(args[3]) # Load the parsing results resultsPath = os.path.join(outputdir, 'ParserResults.pkl') with open(resultsPath, 'rb') as f: parserData = cPickle.load(f) # Create the configuration data. config = Configuration(configFile, parserData) # Generate the prototype classes. generate_binding_rs(config, outputPrefix, webIDLFile) if __name__ == '__main__': main()
mpl-2.0
8,815,844,273,950,509,000
-297,061,629,144,578,940
31.018519
77
0.692308
false
louyihua/edx-platform
lms/djangoapps/survey/migrations/0001_initial.py
50
2289
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone from django.conf import settings import model_utils.fields import xmodule_django.models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='SurveyAnswer', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)), ('field_name', models.CharField(max_length=255, db_index=True)), ('field_value', models.CharField(max_length=1024)), ('course_key', xmodule_django.models.CourseKeyField(max_length=255, null=True, db_index=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='SurveyForm', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)), ('name', models.CharField(unique=True, max_length=255, db_index=True)), ('form', models.TextField()), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='surveyanswer', name='form', field=models.ForeignKey(to='survey.SurveyForm'), ), migrations.AddField( model_name='surveyanswer', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL), ), ]
agpl-3.0
7,188,046,760,319,521,000
7,518,009,846,284,546,000
40.618182
147
0.596767
false
kevclarx/ansible
lib/ansible/template/vars.py
35
3911
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from jinja2.utils import missing

from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native


__all__ = ['AnsibleJ2Vars']


class AnsibleJ2Vars:
    '''
    Helper class to template all variable content before jinja2 sees it. This is
    done by hijacking the variable storage that jinja2 uses, and overriding __contains__
    and __getitem__ to look like a dict. An added bonus is that this avoids duplicating
    the large hashes that inject tends to create.

    To facilitate using builtin jinja2 things like range, globals are also handled here.
    '''

    def __init__(self, templar, globals, locals=None, *extras):
        '''
        Initializes this object with a valid Templar() object, as well as
        several dictionaries of variables representing different scopes (in jinja2
        terminology).
        '''

        self._templar = templar
        self._globals = globals
        self._extras = extras
        self._locals = dict()
        if isinstance(locals, dict):
            for key, val in iteritems(locals):
                if val is not missing:
                    if key[:2] == 'l_':
                        self._locals[key[2:]] = val
                    elif key not in ('context', 'environment', 'template'):
                        self._locals[key] = val

    def __contains__(self, k):
        if k in self._templar._available_variables:
            return True
        if k in self._locals:
            return True
        for i in self._extras:
            if k in i:
                return True
        if k in self._globals:
            return True
        return False

    def __getitem__(self, varname):
        if varname not in self._templar._available_variables:
            if varname in self._locals:
                return self._locals[varname]
            for i in self._extras:
                if varname in i:
                    return i[varname]
            if varname in self._globals:
                return self._globals[varname]
            else:
                raise KeyError("undefined variable: %s" % varname)

        variable = self._templar._available_variables[varname]

        # HostVars is special, return it as-is, as is the special variable
        # 'vars', which contains the vars structure
        from ansible.vars.hostvars import HostVars
        if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
            return variable
        else:
            value = None
            try:
                value = self._templar.template(variable)
            except Exception as e:
                raise type(e)(to_native(variable) + ': ' + to_native(e))
            return value

    def add_locals(self, locals):
        '''
        If locals are provided, create a copy of self containing those
        locals in addition to what is already in this variable proxy.
        '''
        if locals is None:
            return self
        return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras)
gpl-3.0
7,030,718,408,254,209,000
4,038,283,947,495,098,000
35.896226
129
0.617489
false
admcrae/tensorflow
tensorflow/contrib/keras/python/keras/__init__.py
29
1864
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Keras API. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.keras.python.keras import activations from tensorflow.contrib.keras.python.keras import applications from tensorflow.contrib.keras.python.keras import backend from tensorflow.contrib.keras.python.keras import callbacks from tensorflow.contrib.keras.python.keras import constraints from tensorflow.contrib.keras.python.keras import datasets from tensorflow.contrib.keras.python.keras import engine from tensorflow.contrib.keras.python.keras import initializers from tensorflow.contrib.keras.python.keras import layers from tensorflow.contrib.keras.python.keras import losses from tensorflow.contrib.keras.python.keras import metrics from tensorflow.contrib.keras.python.keras import models from tensorflow.contrib.keras.python.keras import optimizers from tensorflow.contrib.keras.python.keras import preprocessing from tensorflow.contrib.keras.python.keras import regularizers from tensorflow.contrib.keras.python.keras import utils from tensorflow.contrib.keras.python.keras import wrappers __version__ = '2.0.2-tf'
apache-2.0
314,703,654,721,426,800
2,996,802,772,046,880,000
45.6
80
0.77897
false
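The __init__.py above only re-exports submodules to build a flat keras namespace. Assuming a TensorFlow 1.x installation where tensorflow.contrib.keras is present, typical use of that namespace would look roughly like this untested sketch; the layer sizes, optimizer, and loss are arbitrary.

# Hypothetical usage of the namespace assembled above (requires TF 1.x).
from tensorflow.contrib.keras.python.keras import layers, models

model = models.Sequential()
model.add(layers.Dense(8, activation='relu', input_shape=(4,)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='sgd', loss='binary_crossentropy')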
sasukeh/acos-client
acos_client/v21/device_info.py
4
1044
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base class DeviceInfo(base.BaseV21): def get(self, **kwargs): return self._get('system.device_info.get', **kwargs) def cpu_current_usage(self, **kwargs): return self._get('system.device_info.cpu.current_usage.get', **kwargs) def cpu_historical_usage(self, **kwargs): return self._get('system.device_info.cpu.historical_usage.get', **kwargs)
apache-2.0
1,515,229,668,619,326,000
-8,058,914,553,048,894,000
37.666667
78
0.645594
false
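DeviceInfo above is a thin dispatcher: every method forwards an aXAPI v2.1 method string to the _get helper inherited from base.BaseV21. The stand-in base class below is invented so the pattern can run without acos_client or a real A10 device; a real client would issue an HTTP request inside _get.

class FakeBaseV21(object):
    def _get(self, action, **kwargs):
        # Stand-in for the HTTP call the real BaseV21 would make.
        return {'action': action, 'params': kwargs}

class FakeDeviceInfo(FakeBaseV21):
    def get(self, **kwargs):
        return self._get('system.device_info.get', **kwargs)

print(FakeDeviceInfo().get(timeout=5))
# -> {'action': 'system.device_info.get', 'params': {'timeout': 5}}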
vigneshkarthi/satireguru
satire-bot.py
1
3178
import twitter
import yaml
import time
import pickle
import re
import random

msg = ''
keyword = ''
# RegEx for parsing a twitter handle from a retrieved tweet (currently unused)
#UTF_CHARS = ur'a-z0-9_\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u00ff'
#TAG_EXP = ur'(^|[^0-9A-Z&/]+)(#|\uff03)([0-9A-Z_]*[A-Z_]+[%s]*)' % UTF_CHARS
#TAG_REGEX = re.compile(TAG_EXP, re.UNICODE | re.IGNORECASE)

# Performs OAuth authentication; place all the necessary keys in access.yaml
def authenticate():
    global api
    data = yaml.load(open("access.yaml"))
    api = twitter.Api(consumer_key=data['consumer-key'],consumer_secret=data['consumer-secret'],access_token_key=data['access-key'],access_token_secret=data['access-secret'])

# Parses response.yaml and picks a random reply for the matched keyword;
# fill in your responses in response.yaml
def choose_reply():
    global match, msg
    comments = yaml.load(open("response.yaml"))
    for name in comments['name']:
        if name['keyword'] == match:
            msg = random.choice(name['response'])

# Checks for mentions and replies to the mentioner and the person mentioned;
# the current version supports only one mentioned person
def get_and_post_replies(old):
    cache_msg_to_post = ' '
    global match, api
    while 1:
        try:
            i = 0
            repl = api.GetMentions()
            total = len(repl)
            newID = int(repl[i].id)
            while newID != old:
                print repl[i].text+", by @"+repl[i].user.screen_name
                if "pm" in repl[i].text.lower():
                    match = 'PM'
                    print "Match is", match
                    choose_reply()
                    msg_to_post = "@"+repl[i].user.screen_name+" "+msg
                    if msg_to_post == cache_msg_to_post:
                        # str() is required: concatenating str and int raises TypeError
                        msg_to_post = msg_to_post + " " + str(random.randint(0, 1000))
                    cache_msg_to_post = msg_to_post
                    try:
                        api.PostUpdate(msg_to_post, in_reply_to_status_id=repl[i].id)
                        print "Msg posted is", msg_to_post
                        i = i+1
                        if total == i:
                            break
                        newID = int(repl[i].id)
                    except twitter.TwitterError:
                        print "Something happened.. Saving ID's to file.. Not to worry"
                        fileObj = open("idstore", 'r+')
                        old = repl[0].id
                        fileObj.seek(0)
                        fileObj.write(str(old))
                        fileObj.close()
                        return
                else:
                    i = i + 1
                    if total == i:
                        break
                    newID = int(repl[i].id)  # was misspelled as newId, leaving the loop condition stale
            old = int(repl[0].id)
            print "No New Tweets !!"
            print "Gonna sleep for a minute :)"
            time.sleep(60)
        except KeyboardInterrupt:
            fileObj = open("idstore", 'r+')
            fileObj.seek(0)
            fileObj.write(str(old))
            fileObj.close()
            print "Saving ID's to file.. Exiting!!"
            return

authenticate()
fileObj = open("idstore", 'r+')
old = fileObj.read()
fileObj.close()
old = int(old)
get_and_post_replies(old)
gpl-2.0
7,980,854,328,738,724,000
1,498,281,968,267,098,600
35.113636
174
0.538704
false
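The bot's restart safety rests on persisting the newest mention ID to the idstore file and reading it back on startup. A compact sketch of that checkpoint pattern, keeping the same file name but simplifying the error handling; it runs on Python 2 or 3.

def load_last_id(path='idstore'):
    # Returns 0 when the file is missing or holds no usable integer.
    try:
        with open(path) as f:
            return int(f.read().strip() or 0)
    except (IOError, ValueError):
        return 0

def save_last_id(last_id, path='idstore'):
    with open(path, 'w') as f:
        f.write(str(last_id))

save_last_id(42)
assert load_last_id() == 42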
9miao/Firefly
gfirefly/server/server.py
6
4947
#coding:utf8
'''
Created on 2013-8-2

@author: lan (www.9miao.com)
'''
from gfirefly.netconnect.protoc import LiberateFactory
from flask import Flask
from gfirefly.distributed.root import PBRoot,BilateralFactory
from gfirefly.distributed.node import RemoteObject
from gfirefly.dbentrust.dbpool import dbpool
from gfirefly.dbentrust.memclient import mclient
from gfirefly.server.logobj import loogoo
from gfirefly.server.globalobject import GlobalObject
from gtwisted.utils import log
from gtwisted.core import reactor
from gfirefly.utils import services
import os,sys,affinity

reactor = reactor

def serverStop():
    """Stop the server process.
    """
    log.msg('stop')
    if GlobalObject().stophandler:
        GlobalObject().stophandler()
    reactor.callLater(0.5,reactor.stop)
    return True

class FFServer:
    """An abstraction of a single server process.
    """

    def __init__(self):
        '''
        '''
        self.netfactory = None  # net frontend
        self.root = None  # distributed root node
        self.webroot = None  # http service
        self.remote = {}  # remote nodes
        self.master_remote = None
        self.db = None
        self.mem = None
        self.servername = None
        self.remoteportlist = []

    def config(self, config, servername=None, dbconfig=None, memconfig=None, masterconf=None):
        '''Configure the server.
        '''
        GlobalObject().json_config = config
        netport = config.get('netport')  # client connection port
        webport = config.get('webport')  # http port
        rootport = config.get('rootport')  # root node port
        self.remoteportlist = config.get('remoteport',[])  # list of remote node configs
        if not servername:
            servername = config.get('name')  # server name
        logpath = config.get('log')  # log path
        hasdb = config.get('db')  # database connection flag
        hasmem = config.get('mem')  # memcached connection flag
        app = config.get('app')  # entry module name
        cpuid = config.get('cpu')  # cpu affinity mask
        mreload = config.get('reload')  # name of the module to reload
        self.servername = servername

        if netport:
            self.netfactory = LiberateFactory()
            netservice = services.CommandService("netservice")
            self.netfactory.addServiceChannel(netservice)
            reactor.listenTCP(netport,self.netfactory)

        if webport:
            self.webroot = Flask("servername")
            GlobalObject().webroot = self.webroot
            reactor.listenWSGI(webport, self.webroot)

        if rootport:
            self.root = PBRoot()
            rootservice = services.Service("rootservice")
            self.root.addServiceChannel(rootservice)
            reactor.listenTCP(rootport, BilateralFactory(self.root))

        for cnf in self.remoteportlist:
            rname = cnf.get('rootname')
            self.remote[rname] = RemoteObject(self.servername)

        if hasdb and dbconfig:
            log.msg(str(dbconfig))
            dbpool.initPool(**dbconfig)

        if hasmem and memconfig:
            urls = memconfig.get('urls')
            hostname = str(memconfig.get('hostname'))
            mclient.connect(urls, hostname)

        if logpath:
            log.addObserver(loogoo(logpath))  # log handling
        log.startLogging(sys.stdout)

        if cpuid:
            affinity.set_process_affinity_mask(os.getpid(), cpuid)

        GlobalObject().config(netfactory = self.netfactory, root=self.root,
                    remote = self.remote)

        if masterconf:
            masterport = masterconf.get('rootport')
            masterhost = masterconf.get('roothost')
            self.master_remote = RemoteObject(servername)
            addr = ('localhost',masterport) if not masterhost else (masterhost,masterport)
            self.master_remote.connect(addr)
            GlobalObject().masterremote = self.master_remote

        import admin

        if app:
            __import__(app)

        if mreload:
            _path_list = mreload.split(".")
            GlobalObject().reloadmodule = __import__(mreload,fromlist=_path_list[:1])

        GlobalObject().remote_connect = self.remote_connect

    def remote_connect(self, rname, rhost):
        """Establish an RPC connection to a remote root node.
        """
        for cnf in self.remoteportlist:
            _rname = cnf.get('rootname')
            if rname == _rname:
                rport = cnf.get('rootport')
                if not rhost:
                    addr = ('localhost',rport)
                else:
                    addr = (rhost,rport)
                self.remote[rname].connect(addr)
                break

    def start(self):
        '''Start the server.
        '''
        log.msg('[%s] started...'%self.servername)
        log.msg('[%s] pid: %s'%(self.servername,os.getpid()))
        reactor.run()
mit
2,703,505,276,081,141,000
-418,179,113,790,861,600
32.293706
90
0.57992
false
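FFServer.config() above is driven entirely by a plain dict. The hypothetical configuration below names the keys the method reads; every port number and module name is made up, and in a real deployment 'app' must be an importable module.

# Illustrative config for FFServer.config(); all values are invented.
server_config = {
    'name': 'gate-server',
    'netport': 10001,    # client-facing TCP port
    'webport': 10002,    # HTTP (Flask) port
    'rootport': 10003,   # distributed root node port
    'remoteport': [{'rootname': 'dbfront', 'rootport': 10004}],
    'log': './gate.log',
    'app': 'app.gateserver',  # entry module, imported on startup
}
# server = FFServer()
# server.config(server_config)
# server.start()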
RafaelTorrealba/odoo
openerp/addons/test_new_api/models.py
79
9125
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import datetime

from openerp.exceptions import AccessError

##############################################################################
#
# OLD API
#
##############################################################################

from openerp.osv import osv, fields


class res_partner(osv.Model):
    _inherit = 'res.partner'

    #
    # add related fields to test them
    #
    _columns = {
        # a regular one
        'related_company_partner_id': fields.related(
            'company_id', 'partner_id', type='many2one', obj='res.partner'),
        # a related field with a single field
        'single_related_company_id': fields.related(
            'company_id', type='many2one', obj='res.company'),
        # a related field with a single field that is also a related field!
        'related_related_company_id': fields.related(
            'single_related_company_id', type='many2one', obj='res.company'),
    }


class TestFunctionCounter(osv.Model):
    _name = 'test_old_api.function_counter'

    def _compute_cnt(self, cr, uid, ids, fname, arg, context=None):
        res = {}
        for cnt in self.browse(cr, uid, ids, context=context):
            res[cnt.id] = cnt.access and cnt.cnt + 1 or 0
        return res

    _columns = {
        'access': fields.datetime('Datetime Field'),
        'cnt': fields.function(
            _compute_cnt, type='integer', string='Function Field', store=True),
    }


class TestFunctionNoInfiniteRecursion(osv.Model):
    _name = 'test_old_api.function_noinfiniterecursion'

    def _compute_f1(self, cr, uid, ids, fname, arg, context=None):
        res = {}
        for tf in self.browse(cr, uid, ids, context=context):
            res[tf.id] = 'create' in tf.f0 and 'create' or 'write'
        cntobj = self.pool['test_old_api.function_counter']
        cnt_id = self.pool['ir.model.data'].xmlid_to_res_id(
            cr, uid, 'test_new_api.c1')
        cntobj.write(
            cr, uid, cnt_id, {'access': datetime.datetime.now()}, context=context)
        return res

    _columns = {
        'f0': fields.char('Char Field'),
        'f1': fields.function(
            _compute_f1, type='char', string='Function Field', store=True),
    }

##############################################################################
#
# NEW API
#
##############################################################################

from openerp import models, fields, api, _


class Category(models.Model):
    _name = 'test_new_api.category'

    name = fields.Char(required=True)
    parent = fields.Many2one('test_new_api.category')
    display_name = fields.Char(compute='_compute_display_name', inverse='_inverse_display_name')
    discussions = fields.Many2many('test_new_api.discussion',
        'test_new_api_discussion_category', 'category', 'discussion')

    @api.one
    @api.depends('name', 'parent.display_name')    # this definition is recursive
    def _compute_display_name(self):
        if self.parent:
            self.display_name = self.parent.display_name + ' / ' + self.name
        else:
            self.display_name = self.name

    @api.one
    def _inverse_display_name(self):
        names = self.display_name.split('/')
        # determine sequence of categories
        categories = []
        for name in names[:-1]:
            category = self.search([('name', 'ilike', name.strip())])
            categories.append(category[0])
        categories.append(self)
        # assign parents following sequence
        for parent, child in zip(categories, categories[1:]):
            if parent and child:
                child.parent = parent
        # assign name of last category, and reassign display_name (to normalize it)
        self.name = names[-1].strip()

    def read(self, fields=None, load='_classic_read'):
        if self.search_count([('id', 'in', self._ids), ('name', '=', 'NOACCESS')]):
            raise AccessError('Sorry')
        return super(Category, self).read(fields, load)


class Discussion(models.Model):
    _name = 'test_new_api.discussion'

    name = fields.Char(string='Title', required=True,
        help="General description of what this discussion is about.")
    moderator = fields.Many2one('res.users')
    categories = fields.Many2many('test_new_api.category',
        'test_new_api_discussion_category', 'discussion', 'category')
    participants = fields.Many2many('res.users')
    messages = fields.One2many('test_new_api.message', 'discussion')
    message_changes = fields.Integer(string='Message changes')

    @api.onchange('moderator')
    def _onchange_moderator(self):
        self.participants |= self.moderator

    @api.onchange('messages')
    def _onchange_messages(self):
        self.message_changes = len(self.messages)


class Message(models.Model):
    _name = 'test_new_api.message'

    discussion = fields.Many2one('test_new_api.discussion', ondelete='cascade')
    body = fields.Text()
    author = fields.Many2one('res.users', default=lambda self: self.env.user)
    name = fields.Char(string='Title', compute='_compute_name', store=True)
    display_name = fields.Char(string='Abstract', compute='_compute_display_name')
    size = fields.Integer(compute='_compute_size', search='_search_size')
    double_size = fields.Integer(compute='_compute_double_size')
    discussion_name = fields.Char(related='discussion.name')

    @api.one
    @api.constrains('author', 'discussion')
    def _check_author(self):
        if self.discussion and self.author not in self.discussion.participants:
            raise ValueError(_("Author must be among the discussion participants."))

    @api.one
    @api.depends('author.name', 'discussion.name')
    def _compute_name(self):
        self.name = "[%s] %s" % (self.discussion.name or '', self.author.name or '')

    @api.one
    @api.depends('author.name', 'discussion.name', 'body')
    def _compute_display_name(self):
        stuff = "[%s] %s: %s" % (self.author.name, self.discussion.name or '', self.body or '')
        self.display_name = stuff[:80]

    @api.one
    @api.depends('body')
    def _compute_size(self):
        self.size = len(self.body or '')

    def _search_size(self, operator, value):
        if operator not in ('=', '!=', '<', '<=', '>', '>=', 'in', 'not in'):
            return []
        # retrieve all the messages that match with a specific SQL query
        query = """SELECT id FROM "%s" WHERE char_length("body") %s %%s""" % \
                (self._table, operator)
        self.env.cr.execute(query, (value,))
        ids = [t[0] for t in self.env.cr.fetchall()]
        return [('id', 'in', ids)]

    @api.one
    @api.depends('size')
    def _compute_double_size(self):
        # This illustrates a subtle situation: self.double_size depends on
        # self.size. When size is computed, self.size is assigned, which should
        # normally invalidate self.double_size. However, this may not happen
        # while self.double_size is being computed: the last statement below
        # would fail, because self.double_size would be undefined.
self.double_size = 0 size = self.size self.double_size = self.double_size + size class MixedModel(models.Model): _name = 'test_new_api.mixed' number = fields.Float(digits=(10, 2), default=3.14) date = fields.Date() now = fields.Datetime(compute='_compute_now') lang = fields.Selection(string='Language', selection='_get_lang') reference = fields.Reference(string='Related Document', selection='_reference_models') @api.one def _compute_now(self): # this is a non-stored computed field without dependencies self.now = fields.Datetime.now() @api.model def _get_lang(self): langs = self.env['res.lang'].search([]) return [(lang.code, lang.name) for lang in langs] @api.model def _reference_models(self): models = self.env['ir.model'].search([('state', '!=', 'manual')]) return [(model.model, model.name) for model in models if not model.model.startswith('ir.')]
agpl-3.0
7,675,432,398,538,383,000
7,868,349,209,634,469,000
37.340336
97
0.592767
false
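Category.display_name above pairs a compute method with an inverse: reading derives the value from the parent chain, writing parses it back. Below is a framework-free sketch of that pairing, using an invented Node class with no Odoo dependency and omitting the parent-reassignment logic of the real inverse.

class Node(object):
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

    @property
    def display_name(self):
        # compute: derive the value from the parent chain
        if self.parent:
            return self.parent.display_name + ' / ' + self.name
        return self.name

    @display_name.setter
    def display_name(self, value):
        # inverse (simplified): keep only the last path segment as the name
        self.name = value.split('/')[-1].strip()

root = Node('All')
child = Node('Books', parent=root)
assert child.display_name == 'All / Books'
child.display_name = 'All / Paperbacks'
assert child.name == 'Paperbacks'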