Dataset schema (column name, type, length range or number of classes):
    repo_name : stringlengths (5 to 100)
    path      : stringlengths (4 to 251)
    copies    : stringclasses (990 values)
    size      : stringlengths (4 to 7)
    content   : stringlengths (499 to 1.05M)
    license   : stringclasses (15 values)

Sample rows follow, one field per line in the order above, with each file's content inlined.
avengerpenguin/rdflib-django
src/rdflib_django/fields.py
1
3342
""" Custom fields for storing RDF primitives. Based on http://blog.elsdoerfer.name/2008/01/08/fuzzydates-or-one-django-model-field-multiple-database-columns/ """ from django.db import models from rdflib.graph import Graph from rdflib.term import BNode, URIRef, Literal class LiteralField(models.TextField): """ Custom field for storing literals. """ __metaclass__ = models.SubfieldBase description = "Field for storing Literals, including their type and language" def to_python(self, value): if not value: return None if isinstance(value, Literal): return value parts = value.split('^^') if len(parts) != 3: raise ValueError("Wrong value: {0}".format(value)) return Literal(parts[0], parts[1] or None, parts[2] or None) def value_to_string(self, obj): value = self._get_val_from_obj(obj) return self.get_prep_value(value) def get_prep_value(self, value): if not isinstance(value, Literal): raise TypeError("Value {0} has the wrong type: {1}".format(value, value.__class__)) return unicode(value) + "^^" + (value.language or '') + "^^" + (value.datatype or '') def deserialize_uri(value): """ Deserialize a representation of a BNode or URIRef. """ if isinstance(value, BNode): return value if isinstance(value, URIRef): return value if not value: return None if not isinstance(value, basestring): raise ValueError("Cannot create URI from {0} of type {1}".format(value, value.__class__)) if value.startswith("_:"): return BNode(value[2:]) return URIRef(value) def serialize_uri(value): """ Serialize a BNode or URIRef. """ if isinstance(value, BNode): return value.n3() if isinstance(value, URIRef): return unicode(value) raise ValueError("Cannot get prepvalue for {0} of type {1}".format(value, value.__class__)) class URIField(models.CharField): """ Custom field for storing URIRefs and BNodes. URIRefs are stored as themselves; BNodes are stored in their Notation3 serialization. """ __metaclass__ = models.SubfieldBase description = "Field for storing URIRefs and BNodes." def __init__(self, *args, **kwargs): if 'max_length' not in kwargs: kwargs['max_length'] = 500 super(URIField, self).__init__(*args, **kwargs) def to_python(self, value): return deserialize_uri(value) def value_to_string(self, obj): return serialize_uri(self._get_val_from_obj(obj)) def get_prep_value(self, value): return serialize_uri(value) class GraphReferenceField(models.CharField): """ Custom field for storing graph references. """ __metaclass__ = models.SubfieldBase description = "Field for storing references to Graphs" def to_python(self, value): if isinstance(value, Graph): return value.identifier return deserialize_uri(value) def value_to_string(self, obj): value = self._get_val_from_obj(obj) return self.get_prep_value(value) def get_prep_value(self, value): if isinstance(value, Graph): return serialize_uri(value.identifier) return serialize_uri(value)
mit
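A brief usage sketch for the helpers in the file above, not part of the original repo: it assumes the module is importable as rdflib_django.fields, that rdflib is installed, and that it runs on the Python 2 / old-Django stack the module targets (get_prep_value relies on unicode).

# Hypothetical round-trip sketch for the serializers above.
from rdflib.term import BNode, Literal, URIRef
from rdflib_django.fields import LiteralField, deserialize_uri, serialize_uri

# URIRefs pass through unchanged; BNodes round-trip via their "_:" N3 form.
assert serialize_uri(BNode("abc")) == "_:abc"
assert deserialize_uri("_:abc") == BNode("abc")
assert deserialize_uri("http://example.org/x") == URIRef("http://example.org/x")

# LiteralField stores a Literal as "value^^language^^datatype".
field = LiteralField()
stored = field.get_prep_value(Literal("hello", lang="en"))   # u"hello^^en^^"
assert field.to_python(stored) == Literal("hello", lang="en")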
abn/coreemu
daemon/core/netns/vnet.py
10
20423
# # CORE # Copyright (c)2010-2012 the Boeing Company. # See the LICENSE file included in this distribution. # # authors: Tom Goff <thomas.goff@boeing.com> # Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com> # ''' vnet.py: PyCoreNet and LxBrNet classes that implement virtual networks using Linux Ethernet bridging and ebtables rules. ''' import os, sys, threading, time, subprocess from core.api import coreapi from core.misc.utils import * from core.constants import * from core.coreobj import PyCoreNet, PyCoreObj from core.netns.vif import VEth, GreTap checkexec([BRCTL_BIN, IP_BIN, EBTABLES_BIN, TC_BIN]) ebtables_lock = threading.Lock() class EbtablesQueue(object): ''' Helper class for queuing up ebtables commands into rate-limited atomic commits. This improves performance and reliability when there are many WLAN link updates. ''' # update rate is every 300ms rate = 0.3 # ebtables atomic_file = "/tmp/pycore.ebtables.atomic" def __init__(self): ''' Initialize the helper class, but don't start the update thread until a WLAN is instantiated. ''' self.doupdateloop = False self.updatethread = None # this lock protects cmds and updates lists self.updatelock = threading.Lock() # list of pending ebtables commands self.cmds = [] # list of WLANs requiring update self.updates = [] # timestamps of last WLAN update; this keeps track of WLANs that are # using this queue self.last_update_time = {} def startupdateloop(self, wlan): ''' Kick off the update loop; only needs to be invoked once. ''' self.updatelock.acquire() self.last_update_time[wlan] = time.time() self.updatelock.release() if self.doupdateloop: return self.doupdateloop = True self.updatethread = threading.Thread(target = self.updateloop) self.updatethread.daemon = True self.updatethread.start() def stopupdateloop(self, wlan): ''' Kill the update loop thread if there are no more WLANs using it. ''' self.updatelock.acquire() try: del self.last_update_time[wlan] except KeyError: pass self.updatelock.release() if len(self.last_update_time) > 0: return self.doupdateloop = False if self.updatethread: self.updatethread.join() self.updatethread = None def ebatomiccmd(self, cmd): ''' Helper for building ebtables atomic file command list. ''' r = [EBTABLES_BIN, "--atomic-file", self.atomic_file] if cmd: r.extend(cmd) return r def lastupdate(self, wlan): ''' Return the time elapsed since this WLAN was last updated. ''' try: elapsed = time.time() - self.last_update_time[wlan] except KeyError: self.last_update_time[wlan] = time.time() elapsed = 0.0 return elapsed def updated(self, wlan): ''' Keep track of when this WLAN was last updated. ''' self.last_update_time[wlan] = time.time() self.updates.remove(wlan) def updateloop(self): ''' Thread target that looks for WLANs needing update, and rate limits the amount of ebtables activity. Only one userspace program should use ebtables at any given time, or results can be unpredictable. ''' while self.doupdateloop: self.updatelock.acquire() for wlan in self.updates: if self.lastupdate(wlan) > self.rate: self.buildcmds(wlan) #print "ebtables commit %d rules" % len(self.cmds) self.ebcommit(wlan) self.updated(wlan) self.updatelock.release() time.sleep(self.rate) def ebcommit(self, wlan): ''' Perform ebtables atomic commit using commands built in the self.cmds list. 
''' # save kernel ebtables snapshot to a file cmd = self.ebatomiccmd(["--atomic-save",]) try: check_call(cmd) except Exception, e: self.eberror(wlan, "atomic-save (%s)" % cmd, e) # no atomic file, exit return # modify the table file using queued ebtables commands for c in self.cmds: cmd = self.ebatomiccmd(c) try: check_call(cmd) except Exception, e: self.eberror(wlan, "cmd=%s" % cmd, e) pass self.cmds = [] # commit the table file to the kernel cmd = self.ebatomiccmd(["--atomic-commit",]) try: check_call(cmd) os.unlink(self.atomic_file) except Exception, e: self.eberror(wlan, "atomic-commit (%s)" % cmd, e) def ebchange(self, wlan): ''' Flag a change to the given WLAN's _linked dict, so the ebtables chain will be rebuilt at the next interval. ''' self.updatelock.acquire() if wlan not in self.updates: self.updates.append(wlan) self.updatelock.release() def buildcmds(self, wlan): ''' Inspect a _linked dict from a wlan, and rebuild the ebtables chain for that WLAN. ''' wlan._linked_lock.acquire() # flush the chain self.cmds.extend([["-F", wlan.brname],]) # rebuild the chain for (netif1, v) in wlan._linked.items(): for (netif2, linked) in v.items(): if wlan.policy == "DROP" and linked: self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname, "-o", netif2.localname, "-j", "ACCEPT"], ["-A", wlan.brname, "-o", netif1.localname, "-i", netif2.localname, "-j", "ACCEPT"]]) elif wlan.policy == "ACCEPT" and not linked: self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname, "-o", netif2.localname, "-j", "DROP"], ["-A", wlan.brname, "-o", netif1.localname, "-i", netif2.localname, "-j", "DROP"]]) wlan._linked_lock.release() def eberror(self, wlan, source, error): ''' Log an ebtables command error and send an exception. ''' if not wlan: return wlan.exception(coreapi.CORE_EXCP_LEVEL_ERROR, wlan.brname, "ebtables command error: %s\n%s\n" % (source, error)) # a global object because all WLANs share the same queue # cannot have multiple threads invoking the ebtables commnd ebq = EbtablesQueue() def ebtablescmds(call, cmds): ebtables_lock.acquire() try: for cmd in cmds: call(cmd) finally: ebtables_lock.release() class LxBrNet(PyCoreNet): policy = "DROP" def __init__(self, session, objid = None, name = None, verbose = False, start = True, policy = None): PyCoreNet.__init__(self, session, objid, name, verbose, start) if name is None: name = str(self.objid) if policy is not None: self.policy = policy self.name = name sessionid = self.session.shortsessionid() self.brname = "b.%s.%s" % (str(self.objid), sessionid) self.up = False if start: self.startup() ebq.startupdateloop(self) def startup(self): try: check_call([BRCTL_BIN, "addbr", self.brname]) except Exception, e: self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname, "Error adding bridge: %s" % e) try: # turn off spanning tree protocol and forwarding delay check_call([BRCTL_BIN, "stp", self.brname, "off"]) check_call([BRCTL_BIN, "setfd", self.brname, "0"]) check_call([IP_BIN, "link", "set", self.brname, "up"]) # create a new ebtables chain for this bridge ebtablescmds(check_call, [ [EBTABLES_BIN, "-N", self.brname, "-P", self.policy], [EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.brname, "-j", self.brname]]) # turn off multicast snooping so mcast forwarding occurs w/o IGMP joins snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % \ self.brname if os.path.exists(snoop): open(snoop, "w").write('0') except Exception, e: self.exception(coreapi.CORE_EXCP_LEVEL_WARNING, self.brname, "Error setting bridge parameters: %s" % e) 
self.up = True def shutdown(self): if not self.up: return ebq.stopupdateloop(self) mutecall([IP_BIN, "link", "set", self.brname, "down"]) mutecall([BRCTL_BIN, "delbr", self.brname]) ebtablescmds(mutecall, [ [EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.brname, "-j", self.brname], [EBTABLES_BIN, "-X", self.brname]]) for netif in self.netifs(): # removes veth pairs used for bridge-to-bridge connections netif.shutdown() self._netif.clear() self._linked.clear() del self.session self.up = False def attach(self, netif): if self.up: try: check_call([BRCTL_BIN, "addif", self.brname, netif.localname]) check_call([IP_BIN, "link", "set", netif.localname, "up"]) except Exception, e: self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, "Error joining interface %s to bridge %s: %s" % \ (netif.localname, self.brname, e)) return PyCoreNet.attach(self, netif) def detach(self, netif): if self.up: try: check_call([BRCTL_BIN, "delif", self.brname, netif.localname]) except Exception, e: self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, "Error removing interface %s from bridge %s: %s" % \ (netif.localname, self.brname, e)) return PyCoreNet.detach(self, netif) def linked(self, netif1, netif2): # check if the network interfaces are attached to this network if self._netif[netif1.netifi] != netif1: raise ValueError, "inconsistency for netif %s" % netif1.name if self._netif[netif2.netifi] != netif2: raise ValueError, "inconsistency for netif %s" % netif2.name try: linked = self._linked[netif1][netif2] except KeyError: if self.policy == "ACCEPT": linked = True elif self.policy == "DROP": linked = False else: raise Exception, "unknown policy: %s" % self.policy self._linked[netif1][netif2] = linked return linked def unlink(self, netif1, netif2): ''' Unlink two PyCoreNetIfs, resulting in adding or removing ebtables filtering rules. ''' self._linked_lock.acquire() if not self.linked(netif1, netif2): self._linked_lock.release() return self._linked[netif1][netif2] = False self._linked_lock.release() ebq.ebchange(self) def link(self, netif1, netif2): ''' Link two PyCoreNetIfs together, resulting in adding or removing ebtables filtering rules. ''' self._linked_lock.acquire() if self.linked(netif1, netif2): self._linked_lock.release() return self._linked[netif1][netif2] = True self._linked_lock.release() ebq.ebchange(self) def linkconfig(self, netif, bw = None, delay = None, loss = None, duplicate = None, jitter = None, netif2 = None, devname = None): ''' Configure link parameters by applying tc queuing disciplines on the interface. 
''' if devname is None: devname = netif.localname tc = [TC_BIN, "qdisc", "replace", "dev", devname] parent = ["root"] changed = False if netif.setparam('bw', bw): # from tc-tbf(8): minimum value for burst is rate / kernel_hz if bw is not None: burst = max(2 * netif.mtu, bw / 1000) limit = 0xffff # max IP payload tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)] if bw > 0: if self.up: if (self.verbose): self.info("linkconfig: %s" % \ ([tc + parent + ["handle", "1:"] + tbf],)) check_call(tc + parent + ["handle", "1:"] + tbf) netif.setparam('has_tbf', True) changed = True elif netif.getparam('has_tbf') and bw <= 0: tcd = [] + tc tcd[2] = "delete" if self.up: check_call(tcd + parent) netif.setparam('has_tbf', False) # removing the parent removes the child netif.setparam('has_netem', False) changed = True if netif.getparam('has_tbf'): parent = ["parent", "1:1"] netem = ["netem"] changed = max(changed, netif.setparam('delay', delay)) if loss is not None: loss = float(loss) changed = max(changed, netif.setparam('loss', loss)) if duplicate is not None: duplicate = float(duplicate) changed = max(changed, netif.setparam('duplicate', duplicate)) changed = max(changed, netif.setparam('jitter', jitter)) if not changed: return # jitter and delay use the same delay statement if delay is not None: netem += ["delay", "%sus" % delay] if jitter is not None: if delay is None: netem += ["delay", "0us", "%sus" % jitter, "25%"] else: netem += ["%sus" % jitter, "25%"] if loss is not None: netem += ["loss", "%s%%" % min(loss, 100)] if duplicate is not None: netem += ["duplicate", "%s%%" % min(duplicate, 100)] if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0: # possibly remove netem if it exists and parent queue wasn't removed if not netif.getparam('has_netem'): return tc[2] = "delete" if self.up: if self.verbose: self.info("linkconfig: %s" % \ ([tc + parent + ["handle", "10:"]],)) check_call(tc + parent + ["handle", "10:"]) netif.setparam('has_netem', False) elif len(netem) > 1: if self.up: if self.verbose: self.info("linkconfig: %s" % \ ([tc + parent + ["handle", "10:"] + netem],)) check_call(tc + parent + ["handle", "10:"] + netem) netif.setparam('has_netem', True) def linknet(self, net): ''' Link this bridge with another by creating a veth pair and installing each device into each bridge. ''' sessionid = self.session.shortsessionid() try: self_objid = '%x' % self.objid except TypeError: self_objid = '%s' % self.objid try: net_objid = '%x' % net.objid except TypeError: net_objid = '%s' % net.objid localname = 'veth%s.%s.%s' % (self_objid, net_objid, sessionid) if len(localname) >= 16: raise ValueError, "interface local name '%s' too long" % \ localname name = 'veth%s.%s.%s' % (net_objid, self_objid, sessionid) if len(name) >= 16: raise ValueError, "interface name '%s' too long" % name netif = VEth(node = None, name = name, localname = localname, mtu = 1500, net = self, start = self.up) self.attach(netif) if net.up: # this is similar to net.attach() but uses netif.name instead # of localname check_call([BRCTL_BIN, "addif", net.brname, netif.name]) check_call([IP_BIN, "link", "set", netif.name, "up"]) i = net.newifindex() net._netif[i] = netif with net._linked_lock: net._linked[netif] = {} netif.net = self netif.othernet = net return netif def getlinknetif(self, net): ''' Return the interface of that links this net with another net (that were linked using linknet()). 
''' for netif in self.netifs(): if hasattr(netif, 'othernet') and netif.othernet == net: return netif return None def addrconfig(self, addrlist): ''' Set addresses on the bridge. ''' if not self.up: return for addr in addrlist: try: check_call([IP_BIN, "addr", "add", str(addr), "dev", self.brname]) except Exception, e: self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, "Error adding IP address: %s" % e) class GreTapBridge(LxBrNet): ''' A network consisting of a bridge with a gretap device for tunneling to another system. ''' def __init__(self, session, remoteip = None, objid = None, name = None, policy = "ACCEPT", localip = None, ttl = 255, key = None, verbose = False, start = True): LxBrNet.__init__(self, session = session, objid = objid, name = name, verbose = verbose, policy = policy, start = False) self.grekey = key if self.grekey is None: self.grekey = self.session.sessionid ^ self.objid self.localnum = None self.remotenum = None self.remoteip = remoteip self.localip = localip self.ttl = ttl if remoteip is None: self.gretap = None else: self.gretap = GreTap(node = self, name = None, session = session, remoteip = remoteip, objid = None, localip = localip, ttl = ttl, key = self.grekey) if start: self.startup() def startup(self): ''' Creates a bridge and adds the gretap device to it. ''' LxBrNet.startup(self) if self.gretap: self.attach(self.gretap) def shutdown(self): ''' Detach the gretap device and remove the bridge. ''' if self.gretap: self.detach(self.gretap) self.gretap.shutdown() self.gretap = None LxBrNet.shutdown(self) def addrconfig(self, addrlist): ''' Set the remote tunnel endpoint. This is a one-time method for creating the GreTap device, which requires the remoteip at startup. The 1st address in the provided list is remoteip, 2nd optionally specifies localip. ''' if self.gretap: raise ValueError, "gretap already exists for %s" % self.name remoteip = addrlist[0].split('/')[0] localip = None if len(addrlist) > 1: localip = addrlist[1].split('/')[0] self.gretap = GreTap(session = self.session, remoteip = remoteip, objid = None, name = None, localip = localip, ttl = self.ttl, key = self.grekey) self.attach(self.gretap) def setkey(self, key): ''' Set the GRE key used for the GreTap device. This needs to be set prior to instantiating the GreTap device (before addrconfig). ''' self.grekey = key
bsd-2-clause
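To make the shaping logic of linkconfig() above easier to follow, here is a small hypothetical sketch of the tc command lists it assembles; the binary path, device name, and parameter values are assumptions, and nothing is executed.

# Hypothetical sketch of the tc qdisc commands linkconfig() builds: a tbf root
# qdisc for bandwidth, then a netem child for delay/jitter/loss.
TC_BIN = "/sbin/tc"                      # assumed path; CORE reads it from core.constants
devname = "veth2.3.1a2b"                 # assumed interface name
bw, delay, jitter, loss = 512000, 20000, 5000, 1.0   # bps, us, us, percent
mtu = 1500

tbf = ["tbf", "rate", str(bw), "burst", str(max(2 * mtu, bw // 1000)),
       "limit", str(0xffff)]
root_cmd = [TC_BIN, "qdisc", "replace", "dev", devname, "root", "handle", "1:"] + tbf

netem = ["netem", "delay", "%sus" % delay, "%sus" % jitter, "25%",
         "loss", "%s%%" % min(loss, 100)]
child_cmd = [TC_BIN, "qdisc", "replace", "dev", devname,
             "parent", "1:1", "handle", "10:"] + netem

print(root_cmd)
print(child_cmd)
# linkconfig() passes lists like these to check_call() only when the bridge is up.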
Unidata/netcdf4-python
test/tst_endian.py
1
6046
import netCDF4 import numpy as np import unittest, os, tempfile from numpy.testing import assert_array_equal, assert_array_almost_equal data = np.arange(12,dtype='f4').reshape(3,4) FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name def create_file(file,format,data): import warnings dataset = netCDF4.Dataset(file,'w',format=format) dataset.createDimension('time', None) dataset.createDimension('space', 4) dims = ('time', 'space') little = data.astype('<f4') big = data.astype('>f4') warnings.simplefilter('ignore') # ignore UserWarnings generated below ll = dataset.createVariable('little-little', '<f4', dims) lb = dataset.createVariable('little-big', '<f4', dims) bl = dataset.createVariable('big-little', '>f4', dims) bb = dataset.createVariable('big-big', '>f4', dims) ll[:] = little lb[:] = big bl[:] = little bb[:] = big dataset.close() def check_byteswap(file, data): # byteswapping is done internally to native endian format # when numpy array has non-native byte order. The byteswap was # initially done in place, which caused the numpy array to # be modified in the calling program. Pull request #555 # changed the byteswap to a copy, and this test checks # to make sure the input numpy array is not modified. dataset = netCDF4.Dataset(file,'w') dataset.createDimension('time', None) dataset.createDimension('space', 4) dims = ('time', 'space') bl = dataset.createVariable('big-little', np.float32, dims, endian='big') data2 = data.copy() bl[:] = data dataset.close() f = netCDF4.Dataset(file) bl = f.variables['big-little'][:] # check data. assert_array_almost_equal(data, data2) assert_array_almost_equal(bl, data) f.close() def check_data(file, data): f = netCDF4.Dataset(file) ll = f.variables['little-little'][:] lb = f.variables['little-big'][:] bb = f.variables['big-big'][:] bl = f.variables['big-little'][:] # check data. assert_array_almost_equal(ll, data) assert_array_almost_equal(lb, data) assert_array_almost_equal(bl, data) assert_array_almost_equal(bb, data) f.close() def issue310(file): mval = 999.; fval = -999 nc = netCDF4.Dataset(file, "w") nc.createDimension('obs', 10) if netCDF4.is_native_little: endian='big' elif netCDF4.is_native_big: endian='little' else: raise ValueError('cannot determine native endianness') var_big_endian = nc.createVariable(\ 'obs_big_endian', '>f8', ('obs', ),\ endian=endian,fill_value=fval) # use default _FillValue var_big_endian2 = nc.createVariable(\ 'obs_big_endian2', '>f8', ('obs', ),\ endian=endian) # NOTE: missing_value be written in same byte order # as variable, or masked array won't be masked correctly # when data is read in. 
var_big_endian.missing_value = mval var_big_endian[0]=np.pi var_big_endian[1]=mval var_big_endian2.missing_value = mval var_big_endian2[0]=np.pi var_big_endian2[1]=mval var_native_endian = nc.createVariable(\ 'obs_native_endian', '<f8', ('obs', ),\ endian='native',fill_value=fval) var_native_endian.missing_value = mval var_native_endian[0]=np.pi var_native_endian[1]=mval assert_array_almost_equal(var_native_endian[:].filled(), var_big_endian[:].filled()) assert_array_almost_equal(var_big_endian[:].filled(), var_big_endian2[:].filled()) nc.close() def issue346(file): # create a big and a little endian variable xb = np.arange(10, dtype='>i4') xl = np.arange(xb.size, dtype='<i4') nc = netCDF4.Dataset(file, mode='w') nc.createDimension('x', size=xb.size) vb=nc.createVariable('xb', xb.dtype, ('x'), endian='big') vl=nc.createVariable('xl', xl.dtype, ('x'), endian='little') nc.variables['xb'][:] = xb nc.variables['xl'][:] = xl nc.close() nc = netCDF4.Dataset(file) datab = nc.variables['xb'][:] datal = nc.variables['xl'][:] assert_array_equal(datab,xb) assert_array_equal(datal,xl) nc.close() def issue930(file): # make sure view to unsigned data type (triggered # by _Unsigned attribute being set) is correct when # data byte order is non-native. nc = netCDF4.Dataset(file,'w') d = nc.createDimension('x',2) v1 = nc.createVariable('v1','i2','x',endian='big') v2 = nc.createVariable('v2','i2','x',endian='little') v1[0] = 255; v1[1] = 1 v2[0] = 255; v2[1] = 1 v1._Unsigned="TRUE"; v1.missing_value=np.int16(1) v2._Unsigned="TRUE"; v2.missing_value=np.int16(1) nc.close() nc = netCDF4.Dataset(file) assert_array_equal(nc['v1'][:],np.ma.masked_array([255,1],mask=[False,True])) assert_array_equal(nc['v2'][:],np.ma.masked_array([255,1],mask=[False,True])) nc.set_auto_mask(False) assert_array_equal(nc['v1'][:],np.array([255,1])) assert_array_equal(nc['v2'][:],np.array([255,1])) nc.close() class EndianTestCase(unittest.TestCase): def setUp(self): create_file(FILE_NAME,'NETCDF4_CLASSIC',data); self.file=FILE_NAME create_file(FILE_NAME2,'NETCDF3_CLASSIC',data); self.file2=FILE_NAME2 self.file3 = FILE_NAME3 def tearDown(self): # Remove the temporary files os.remove(self.file) os.remove(self.file2) os.remove(self.file3) def runTest(self): """testing endian conversion capability""" check_data(self.file, data) check_data(self.file2, data) check_byteswap(self.file3, data) issue310(self.file) issue346(self.file2) issue930(self.file2) if __name__ == '__main__': unittest.main()
mit
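The check_byteswap test above guards against the in-place byteswap regression described in its comment (pull request #555); a minimal numpy sketch of the copy-versus-in-place distinction:

# byteswap() returns a swapped copy by default, while byteswap(inplace=True)
# mutates the array the caller handed in.
import numpy as np

x = np.arange(3, dtype='>f4')      # non-native byte order on little-endian hosts
y = x.byteswap()                   # copy; x keeps its original bytes
z = x.copy()
z.byteswap(inplace=True)           # mutates z itself
assert (x == np.arange(3, dtype='>f4')).all()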
wakermahmud/sync-engine
inbox/models/contact.py
4
3350
from sqlalchemy import Column, Integer, String, Enum, ForeignKey, Text, Index from sqlalchemy.orm import relationship, backref, validates from sqlalchemy.schema import UniqueConstraint from inbox.sqlalchemy_ext.util import MAX_TEXT_LENGTH from inbox.models.mixins import HasPublicID, HasEmailAddress, HasRevisions from inbox.models.base import MailSyncBase from inbox.models.message import Message from inbox.models.namespace import Namespace class Contact(MailSyncBase, HasRevisions, HasPublicID, HasEmailAddress): """Data for a user's contact.""" API_OBJECT_NAME = 'contact' namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'), nullable=False) namespace = relationship(Namespace, load_on_pending=True) # A server-provided unique ID. uid = Column(String(64), nullable=False) # A constant, unique identifier for the remote backend this contact came # from. E.g., 'google', 'eas', 'inbox' provider_name = Column(String(64)) name = Column(Text) # phone_number = Column(String(64)) raw_data = Column(Text) # A score to use for ranking contact search results. This should be # precomputed to facilitate performant search. score = Column(Integer) # Flag to set if the contact is deleted in a remote backend. # (This is an unmapped attribute, i.e., it does not correspond to a # database column.) deleted = False __table_args__ = (UniqueConstraint('uid', 'namespace_id', 'provider_name'), Index('ix_contact_ns_uid_provider_name', 'namespace_id', 'uid', 'provider_name')) @validates('raw_data') def validate_length(self, key, value): return value if value is None else value[:MAX_TEXT_LENGTH] def merge_from(self, new_contact): # This must be updated when new fields are added to the class. merge_attrs = ['name', 'email_address', 'raw_data'] for attr in merge_attrs: if getattr(self, attr) != getattr(new_contact, attr): setattr(self, attr, getattr(new_contact, attr)) class MessageContactAssociation(MailSyncBase): """Association table between messages and contacts. Examples -------- If m is a message, get the contacts in the to: field with [assoc.contact for assoc in m.contacts if assoc.field == 'to_addr'] If c is a contact, get messages sent to contact c with [assoc.message for assoc in c.message_associations if assoc.field == ... 'to_addr'] """ contact_id = Column(Integer, ForeignKey(Contact.id, ondelete='CASCADE'), primary_key=True) message_id = Column(Integer, ForeignKey(Message.id, ondelete='CASCADE'), primary_key=True) field = Column(Enum('from_addr', 'to_addr', 'cc_addr', 'bcc_addr', 'reply_to')) # Note: The `cascade` properties need to be a parameter of the backref # here, and not of the relationship. Otherwise a sqlalchemy error is thrown # when you try to delete a message or a contact. contact = relationship( Contact, backref=backref('message_associations', cascade='all, delete-orphan')) message = relationship( Message, backref=backref('contacts', cascade='all, delete-orphan'))
agpl-3.0
laurentb/weboob
modules/zerobin/crypto.py
2
3421
# -*- coding: utf-8 -*-

# Copyright(C) 2016 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.

from base64 import b64decode, b64encode
from collections import OrderedDict
import math
from os import urandom

try:
    from Cryptodome.Cipher import AES
    from Cryptodome.Protocol.KDF import PBKDF2
    from Cryptodome.Hash import SHA256
    from Cryptodome.Hash import HMAC
except ImportError:
    from Crypto.Cipher import AES
    from Crypto.Protocol.KDF import PBKDF2
    from Crypto.Hash import SHA256
    from Crypto.Hash import HMAC


def log2(n):
    return math.log(n, 2)


def encrypt(plaintext):
    iv = urandom(16)
    salt = urandom(8)
    iterations = 1000
    ks = 128
    ts = 64

    hash_func = lambda k, s: HMAC.new(k, s, SHA256).digest()
    password = b64encode(urandom(32))
    key = PBKDF2(password, salt=salt, count=iterations, prf=hash_func)

    smalliv = trunc_iv(iv, plaintext, 0)

    cipher = AES.new(key, mode=AES.MODE_CCM, nonce=smalliv, mac_len=ts // 8)
    ciphertext = b''.join(cipher.encrypt_and_digest(plaintext))

    # OrderedDict because 0bin is a piece of shit requiring "iv" as the first key
    return password.decode('ascii'), OrderedDict([
        ('iv', b64encode(iv).decode('ascii')),
        ('v', 1),
        ('iter', iterations),
        ('ks', ks),
        ('ts', ts),
        ('mode', 'ccm'),
        ('adata', ''),
        ('cipher', 'aes'),
        ('salt', b64encode(salt).decode('ascii')),
        ('ct', b64encode(ciphertext).decode('ascii')),
    ])


def trunc_iv(iv, t, tl):
    ol = len(t) - tl
    if ol <= 0:
        oll = 2
    else:
        oll = int(max(2, math.ceil(log2(ol) / 8.)))
    assert oll <= 4
    if oll < 15 - len(iv):
        ivl = len(iv)
    else:
        ivl = 15 - oll
    iv = iv[:ivl]
    return iv


def decrypt(secretkey, params):
    iv = b64decode(params['iv'])
    salt = b64decode(params['salt'])
    #~ keylen = params.get('ks', 128) // 8  # FIXME use somewhere?
    taglen = params.get('ts', 64) // 8
    iterations = params.get('iter', 1000)
    data = b64decode(params['ct'])
    ciphertext = data[:-taglen]
    tag = data[-taglen:]

    if params.get('adata'):
        raise NotImplementedError('authenticated data support is not implemented')

    iv = trunc_iv(iv, ciphertext, taglen)

    hash_func = lambda k, s: HMAC.new(k, s, SHA256).digest()
    key = PBKDF2(secretkey, salt=salt, count=iterations, prf=hash_func)

    mode_str = params.get('mode', 'ccm')
    mode = dict(ccm=AES.MODE_CCM)[mode_str]
    if mode_str == 'ccm':
        cipher = AES.new(key, mode=AES.MODE_CCM, nonce=iv, mac_len=taglen)
    else:
        cipher = AES.new(key, mode=mode, iv=iv)
    decrypted = cipher.decrypt_and_verify(ciphertext, tag)
    return decrypted
lgpl-3.0
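A minimal round-trip sketch for the module above, with encrypt() and decrypt() as defined in that file; it assumes pycryptodome (or a pycrypto build with CCM support) is installed.

# Hypothetical round trip through the SJCL-style container encrypt() produces.
secret, params = encrypt(b'attack at dawn')
print(sorted(params))                 # keys: adata, cipher, ct, iter, iv, ks, mode, salt, ts, v
assert decrypt(secret, params) == b'attack at dawn'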
wlamond/scikit-learn
sklearn/manifold/mds.py
20
16712
""" Multi-dimensional Scaling (MDS) """ # author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # License: BSD import numpy as np import warnings from ..base import BaseEstimator from ..metrics import euclidean_distances from ..utils import check_random_state, check_array, check_symmetric from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..isotonic import IsotonicRegression def _smacof_single(dissimilarities, metric=True, n_components=2, init=None, max_iter=300, verbose=0, eps=1e-3, random_state=None): """Computes multidimensional scaling using SMACOF algorithm Parameters ---------- dissimilarities : ndarray, shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : boolean, optional, default: True Compute metric or nonmetric SMACOF algorithm. n_components : int, optional, default: 2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray, shape (n_samples, n_components), optional, default: None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, optional, default: 0 Level of verbosity. eps : float, optional, default: 1e-3 Relative tolerance with respect to stress at which to declare convergence. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : ndarray, shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). n_iter : int The number of iterations corresponding to the best stress. 
""" dissimilarities = check_symmetric(dissimilarities, raise_exception=True) n_samples = dissimilarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: # Randomly choose initial configuration X = random_state.rand(n_samples * n_components) X = X.reshape((n_samples, n_components)) else: # overrides the parameter p n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError("init matrix should be of shape (%d, %d)" % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): # Compute distance and monotonic regression dis = euclidean_distances(X) if metric: disparities = dissimilarities else: dis_flat = dis.ravel() # dissimilarities with 0 are considered as missing values dis_flat_w = dis_flat[sim_flat != 0] # Compute the disparities using a monotonic regression disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) / (disparities ** 2).sum()) # Compute stress stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 # Update X using the Guttman transform dis[dis == 0] = 1e-5 ratio = disparities / dis B = - ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1. / n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if(old_stress - stress / dis) < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis return X, stress, it + 1 def smacof(dissimilarities, metric=True, n_components=2, init=None, n_init=8, n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None, return_n_iter=False): """Computes multidimensional scaling using the SMACOF algorithm. The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a multidimensional scaling algorithm which minimizes an objective function (the *stress*) using a majorization technique. Stress majorization, also known as the Guttman Transform, guarantees a monotone convergence of stress, and is more powerful than traditional techniques such as gradient descent. The SMACOF algorithm for metric MDS can summarized by the following steps: 1. Set an initial start configuration, randomly or not. 2. Compute the stress 3. Compute the Guttman Transform 4. Iterate 2 and 3 until convergence. The nonmetric algorithm adds a monotonic regression step before computing the stress. Parameters ---------- dissimilarities : ndarray, shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : boolean, optional, default: True Compute metric or nonmetric SMACOF algorithm. n_components : int, optional, default: 2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray, shape (n_samples, n_components), optional, default: None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. n_init : int, optional, default: 8 Number of times the SMACOF algorithm will be run with different initializations. 
The final results will be the best output of the runs, determined by the run with the smallest final stress. If ``init`` is provided, this option is overridden and a single run is performed. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If multiple initializations are used (``n_init``), each run of the algorithm is computed in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For ``n_jobs`` below -1, (``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs but one are used. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, optional, default: 0 Level of verbosity. eps : float, optional, default: 1e-3 Relative tolerance with respect to stress at which to declare convergence. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_n_iter : bool, optional, default: False Whether or not to return the number of iterations. Returns ------- X : ndarray, shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). n_iter : int The number of iterations corresponding to the best stress. Returned only if ``return_n_iter`` is set to ``True``. Notes ----- "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) """ dissimilarities = check_array(dissimilarities) random_state = check_random_state(random_state) if hasattr(init, '__array__'): init = np.asarray(init).copy() if not n_init == 1: warnings.warn( 'Explicit initial positions passed: ' 'performing only one init of the MDS instead of %d' % n_init) n_init = 1 best_pos, best_stress = None, None if n_jobs == 1: for it in range(n_init): pos, stress, n_iter_ = _smacof_single( dissimilarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=random_state) if best_stress is None or stress < best_stress: best_stress = stress best_pos = pos.copy() best_iter = n_iter_ else: seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))( delayed(_smacof_single)( dissimilarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=seed) for seed in seeds) positions, stress, n_iters = zip(*results) best = np.argmin(stress) best_stress = stress[best] best_pos = positions[best] best_iter = n_iters[best] if return_n_iter: return best_pos, best_stress, best_iter else: return best_pos, best_stress class MDS(BaseEstimator): """Multidimensional scaling Read more in the :ref:`User Guide <multidimensional_scaling>`. Parameters ---------- n_components : int, optional, default: 2 Number of dimensions in which to immerse the dissimilarities. 
metric : boolean, optional, default: True If ``True``, perform metric MDS; otherwise, perform nonmetric MDS. n_init : int, optional, default: 4 Number of times the SMACOF algorithm will be run with different initializations. The final results will be the best output of the runs, determined by the run with the smallest final stress. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, optional, default: 0 Level of verbosity. eps : float, optional, default: 1e-3 Relative tolerance with respect to stress at which to declare convergence. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If multiple initializations are used (``n_init``), each run of the algorithm is computed in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For ``n_jobs`` below -1, (``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs but one are used. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. dissimilarity : 'euclidean' | 'precomputed', optional, default: 'euclidean' Dissimilarity measure to use: - 'euclidean': Pairwise Euclidean distances between points in the dataset. - 'precomputed': Pre-computed dissimilarities are passed directly to ``fit`` and ``fit_transform``. Attributes ---------- embedding_ : array-like, shape (n_components, n_samples) Stores the position of the dataset in the embedding space. stress_ : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). References ---------- "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) """ def __init__(self, n_components=2, metric=True, n_init=4, max_iter=300, verbose=0, eps=1e-3, n_jobs=1, random_state=None, dissimilarity="euclidean"): self.n_components = n_components self.dissimilarity = dissimilarity self.metric = metric self.n_init = n_init self.max_iter = max_iter self.eps = eps self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state @property def _pairwise(self): return self.kernel == "precomputed" def fit(self, X, y=None, init=None): """ Computes the position of the points in the embedding space Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) Input data. If ``dissimilarity=='precomputed'``, the input should be the dissimilarity matrix. init : ndarray, shape (n_samples,), optional, default: None Starting configuration of the embedding to initialize the SMACOF algorithm. By default, the algorithm is initialized with a randomly chosen array. """ self.fit_transform(X, init=init) return self def fit_transform(self, X, y=None, init=None): """ Fit the data from X, and returns the embedded coordinates Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) Input data. 
If ``dissimilarity=='precomputed'``, the input should be the dissimilarity matrix. init : ndarray, shape (n_samples,), optional, default: None Starting configuration of the embedding to initialize the SMACOF algorithm. By default, the algorithm is initialized with a randomly chosen array. """ X = check_array(X) if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed": warnings.warn("The MDS API has changed. ``fit`` now constructs an" " dissimilarity matrix from data. To use a custom " "dissimilarity matrix, set " "``dissimilarity='precomputed'``.") if self.dissimilarity == "precomputed": self.dissimilarity_matrix_ = X elif self.dissimilarity == "euclidean": self.dissimilarity_matrix_ = euclidean_distances(X) else: raise ValueError("Proximity must be 'precomputed' or 'euclidean'." " Got %s instead" % str(self.dissimilarity)) self.embedding_, self.stress_, self.n_iter_ = smacof( self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, return_n_iter=True) return self.embedding_
bsd-3-clause
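A short usage sketch for the estimator above (hypothetical data; assumes a scikit-learn of this vintage is installed):

# Embed 10 random 5-D points into 2-D, with both the default Euclidean mode
# and a precomputed dissimilarity matrix.
import numpy as np
from sklearn.manifold import MDS
from sklearn.metrics import euclidean_distances

X = np.random.RandomState(0).rand(10, 5)
mds = MDS(n_components=2, random_state=0)
X2 = mds.fit_transform(X)                 # shape (10, 2); mds.stress_ holds the final stress

D = euclidean_distances(X)
X2p = MDS(n_components=2, dissimilarity='precomputed',
          random_state=0).fit_transform(D)
print(X2.shape, X2p.shape, mds.stress_)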
nathanlnw/TJ_FlySwing_combineALL
tools/keil.py
20
6753
import os import sys import string import xml.etree.ElementTree as etree from xml.etree.ElementTree import SubElement from utils import _make_path_relative from utils import xml_indent fs_encoding = sys.getfilesystemencoding() def _get_filetype(fn): if fn.rfind('.c') != -1 or fn.rfind('.C') != -1 or fn.rfind('.cpp') != -1: return 1 # assemble file type if fn.rfind('.s') != -1 or fn.rfind('.S') != -1: return 2 # header type if fn.rfind('.h') != -1: return 5 # other filetype return 5 def MDK4AddGroup(ProjectFiles, parent, name, files, project_path): group = SubElement(parent, 'Group') group_name = SubElement(group, 'GroupName') group_name.text = name for f in files: fn = f.rfile() name = fn.name path = os.path.dirname(fn.abspath) basename = os.path.basename(path) path = _make_path_relative(project_path, path) path = os.path.join(path, name) files = SubElement(group, 'Files') file = SubElement(files, 'File') file_name = SubElement(file, 'FileName') name = os.path.basename(path) if ProjectFiles.count(name): name = basename + '_' + name ProjectFiles.append(name) file_name.text = name.decode(fs_encoding) file_type = SubElement(file, 'FileType') file_type.text = '%d' % _get_filetype(name) file_path = SubElement(file, 'FilePath') file_path.text = path.decode(fs_encoding) def MDK4Project(target, script): project_path = os.path.dirname(os.path.abspath(target)) tree = etree.parse('template.uvproj') root = tree.getroot() out = file(target, 'wb') out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n') CPPPATH = [] CPPDEFINES = [] LINKFLAGS = '' CCFLAGS = '' ProjectFiles = [] # add group groups = tree.find('Targets/Target/Groups') if groups is None: groups = SubElement(tree.find('Targets/Target'), 'Groups') for group in script: group_xml = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path) # get each include path if group.has_key('CPPPATH') and group['CPPPATH']: if CPPPATH: CPPPATH += group['CPPPATH'] else: CPPPATH += group['CPPPATH'] # get each group's definitions if group.has_key('CPPDEFINES') and group['CPPDEFINES']: if CPPDEFINES: CPPDEFINES += group['CPPDEFINES'] else: CPPDEFINES += group['CPPDEFINES'] # get each group's link flags if group.has_key('LINKFLAGS') and group['LINKFLAGS']: if LINKFLAGS: LINKFLAGS += ' ' + group['LINKFLAGS'] else: LINKFLAGS += group['LINKFLAGS'] # remove repeat path paths = set() for path in CPPPATH: inc = _make_path_relative(project_path, os.path.normpath(path)) paths.add(inc) #.replace('\\', '/') paths = [i for i in paths] paths.sort() CPPPATH = string.join(paths, ';') definitions = [i for i in set(CPPDEFINES)] CPPDEFINES = string.join(definitions, ', ') # write include path, definitions and link flags IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath') IncludePath.text = CPPPATH Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define') Define.text = CPPDEFINES Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc') Misc.text = LINKFLAGS xml_indent(root) out.write(etree.tostring(root, encoding='utf-8')) out.close() def MDKProject(target, script): template = file('template.Uv2', "rb") lines = template.readlines() project = file(target, "wb") project_path = os.path.dirname(os.path.abspath(target)) line_index = 5 # write group for group in script: lines.insert(line_index, 'Group (%s)\r\n' % group['name']) line_index += 1 lines.insert(line_index, '\r\n') line_index += 1 # write file ProjectFiles = [] CPPPATH = [] CPPDEFINES = [] 
LINKFLAGS = '' CCFLAGS = '' # number of groups group_index = 1 for group in script: # print group['name'] # get each include path if group.has_key('CPPPATH') and group['CPPPATH']: if CPPPATH: CPPPATH += group['CPPPATH'] else: CPPPATH += group['CPPPATH'] # get each group's definitions if group.has_key('CPPDEFINES') and group['CPPDEFINES']: if CPPDEFINES: CPPDEFINES += ';' + group['CPPDEFINES'] else: CPPDEFINES += group['CPPDEFINES'] # get each group's link flags if group.has_key('LINKFLAGS') and group['LINKFLAGS']: if LINKFLAGS: LINKFLAGS += ' ' + group['LINKFLAGS'] else: LINKFLAGS += group['LINKFLAGS'] # generate file items for node in group['src']: fn = node.rfile() name = fn.name path = os.path.dirname(fn.abspath) basename = os.path.basename(path) path = _make_path_relative(project_path, path) path = os.path.join(path, name) if ProjectFiles.count(name): name = basename + '_' + name ProjectFiles.append(name) lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n' % (group_index, _get_filetype(name), path, name)) line_index += 1 group_index = group_index + 1 lines.insert(line_index, '\r\n') line_index += 1 # remove repeat path paths = set() for path in CPPPATH: inc = _make_path_relative(project_path, os.path.normpath(path)) paths.add(inc) #.replace('\\', '/') paths = [i for i in paths] CPPPATH = string.join(paths, ';') definitions = [i for i in set(CPPDEFINES)] CPPDEFINES = string.join(definitions, ', ') while line_index < len(lines): if lines[line_index].startswith(' ADSCINCD '): lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n' if lines[line_index].startswith(' ADSLDMC ('): lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n' if lines[line_index].startswith(' ADSCDEFN ('): lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n' line_index += 1 # write project for line in lines: project.write(line) project.close()
gpl-2.0
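Both generators above walk a `script` list produced by RT-Thread's SCons build scripts; the following is a hypothetical, self-contained sketch of that structure, where FakeNode stands in for the few SCons File-node attributes keil.py actually touches (rfile(), name, abspath).

# Hypothetical sketch of the group list MDK4Project()/MDKProject() expect.
import os

class FakeNode(object):
    """Stand-in for a SCons File node, for illustration only."""
    def __init__(self, path):
        self.abspath = os.path.abspath(path)
        self.name = os.path.basename(path)
    def rfile(self):
        return self

script = [{
    'name': 'Applications',
    'src': [FakeNode('applications/main.c')],
    'CPPPATH': ['applications'],
    'CPPDEFINES': ['USE_STDPERIPH_DRIVER'],
    'LINKFLAGS': '',
}]
# MDK4Project('project.uvproj', script) would then fill template.uvproj with this group.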
stackforge/nova-powervm
nova_powervm/virt/powervm/vif.py
2
36148
# Copyright 2016, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from nova import context as ctx from nova import exception from nova import network as net_api from nova.network import model as network_model from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import importutils import pypowervm.const as pvm_c from pypowervm import exceptions as pvm_ex from pypowervm.tasks import cna as pvm_cna from pypowervm.tasks import partition as pvm_par from pypowervm.tasks import sriov as sriovtask from pypowervm import util as pvm_util from pypowervm.wrappers import event as pvm_evt from pypowervm.wrappers import iocard as pvm_card from pypowervm.wrappers import logical_partition as pvm_lpar from pypowervm.wrappers import managed_system as pvm_ms from pypowervm.wrappers import network as pvm_net from nova_powervm.virt.powervm.i18n import _ from nova_powervm.virt.powervm import vm LOG = log.getLogger(__name__) SECURE_RMC_VSWITCH = 'MGMTSWITCH' SECURE_RMC_VLAN = 4094 # Provider tag for custom events from this module EVENT_PROVIDER_ID = 'NOVA_PVM_VIF' VIF_TYPE_PVM_SEA = 'pvm_sea' VIF_TYPE_PVM_OVS = 'ovs' VIF_TYPE_PVM_SRIOV = 'pvm_sriov' VIF_MAPPING = {VIF_TYPE_PVM_SEA: 'nova_powervm.virt.powervm.vif.PvmSeaVifDriver', VIF_TYPE_PVM_OVS: 'nova_powervm.virt.powervm.vif.PvmOvsVifDriver', VIF_TYPE_PVM_SRIOV: 'nova_powervm.virt.powervm.vif.PvmVnicSriovVifDriver'} # NOTE(svenkat): Manually adjust CNA child ordering to workaround bug 1731657 # TODO(svenkat) Remove workaround when pypowervm is fixed child_order = list(pvm_net.CNA._child_order) child_order.remove('VirtualNetworks') child_order.append('VirtualNetworks') pvm_net.CNA._child_order = tuple(child_order) CONF = cfg.CONF def _build_vif_driver(adapter, host_uuid, instance, vif): """Returns the appropriate VIF Driver for the given VIF. :param adapter: The pypowervm adapter API interface. :param host_uuid: The host system UUID. :param instance: The nova instance. :param vif: The virtual interface to from Nova. :return: The appropriate PvmVifDriver for the VIF. """ if vif.get('type') is None: raise exception.VirtualInterfacePlugException( _("vif_type parameter must be present for this vif_driver " "implementation")) # Check the type to the implementations if VIF_MAPPING.get(vif['type']): return importutils.import_object( VIF_MAPPING.get(vif['type']), adapter, host_uuid, instance) # No matching implementation, raise error. raise exception.VirtualInterfacePlugException( _("Unable to find appropriate PowerVM VIF Driver for VIF type " "%(vif_type)s on instance %(instance)s") % {'vif_type': vif['type'], 'instance': instance.name}) def _push_vif_event(adapter, action, vif_w, instance, vif_type): """Push a custom event to the REST server for a vif action (plug/unplug). This event prompts the neutron agent to mark the port up or down. :param adapter: The pypowervm adapter. 
:param action: The action taken on the vif - either 'plug' or 'unplug' :param vif_w: The pypowervm wrapper of the affected vif (CNA, VNIC, etc.) :param instance: The nova instance for the event :param vif_type: The type of event source (pvm_sea, ovs, bridge, pvm_sriov etc) """ data = vif_w.href detail = jsonutils.dumps(dict(provider=EVENT_PROVIDER_ID, action=action, mac=vif_w.mac, type=vif_type)) event = pvm_evt.Event.bld(adapter, data, detail) try: event = event.create() LOG.debug('Pushed custom event for consumption by neutron agent: %s', str(event), instance=instance) except Exception: with excutils.save_and_reraise_exception(logger=LOG): LOG.exception('Custom VIF event push failed. %s', str(event), instance=instance) def plug(adapter, host_uuid, instance, vif, slot_mgr, new_vif=True): """Plugs a virtual interface (network) into a VM. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface to plug into the instance. :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client slots used when a VIF is attached to the VM. :param new_vif: (Optional, Default: True) If set, indicates that it is a brand new VIF. If False, it indicates that the VIF is already on the client but should be treated on the bridge. :return: The wrapper (CNA or VNIC) representing the plugged virtual network. None if the vnet was not created. """ vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) # Get the slot number to use for the VIF creation. May be None # indicating usage of the next highest available. slot_num = slot_mgr.build_map.get_vnet_slot(vif['address']) # Invoke the plug try: vnet_w = vif_drv.plug(vif, slot_num, new_vif=new_vif) except pvm_ex.HttpError as he: # Log the message constructed by HttpError LOG.exception("HttpError during vif plug operation.", instance=instance) raise exception.VirtualInterfacePlugException(message=he.args[0]) # Other exceptions are (hopefully) custom VirtualInterfacePlugException # generated lower in the call stack. # If the slot number hadn't been provided initially, save it for the # next rebuild if not slot_num and new_vif: slot_mgr.register_vnet(vnet_w) # Push a custom event if we really plugged the vif if vnet_w is not None: _push_vif_event(adapter, 'plug', vnet_w, instance, vif['type']) return vnet_w def unplug(adapter, host_uuid, instance, vif, slot_mgr, cna_w_list=None): """Unplugs a virtual interface (network) from a VM. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface to plug into the instance. :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client slots used when a VIF is detached from the VM. :param cna_w_list: (Optional, Default: None) The list of Client Network Adapters from pypowervm. Providing this input allows for an improvement in operation speed. 
""" vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) try: vnet_w = vif_drv.unplug(vif, cna_w_list=cna_w_list) # Push a custom event, but only if the vif existed in the first place if vnet_w: _push_vif_event(adapter, 'unplug', vnet_w, instance, vif['type']) except pvm_ex.HttpError as he: # Log the message constructed by HttpError LOG.exception("HttpError during vif unplug operation.", instance=instance) raise exception.VirtualInterfaceUnplugException(reason=he.args[0]) if vnet_w: slot_mgr.drop_vnet(vnet_w) def pre_live_migrate_at_destination(adapter, host_uuid, instance, vif, vea_vlan_mappings): """Performs the pre live migrate on the destination host. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface that will be migrated. This may be called network_info in other portions of the code. :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac address, value is the destination's target hypervisor VLAN. """ vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) vif_drv.pre_live_migrate_at_destination(vif, vea_vlan_mappings) def rollback_live_migration_at_destination(adapter, host_uuid, instance, vif, vea_vlan_mappings): """Performs the rollback of the live migrate on the destination host. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface that is being rolled back. This may be called network_info in other portions of the code. :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac address, value is the destination's target hypervisor VLAN. """ vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) vif_drv.rollback_live_migration_at_destination(vif, vea_vlan_mappings) def pre_live_migrate_at_source(adapter, host_uuid, instance, vif): """Performs the pre live migrate on the source host. This is executed directly before the migration is started on the source host. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface that will be migrated. This may be called network_info in other portions of the code. :return: The list of TrunkAdapter's on the source that are hosting the VM's vif. Should only return data if those trunks should be deleted after the migration. """ vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) return vif_drv.pre_live_migrate_at_source(vif) def post_live_migrate_at_source(adapter, host_uuid, instance, vif): """Performs the post live migrate on the source host. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface of the instance. This may be called network_info in other portions of the code. """ vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) return vif_drv.post_live_migrate_at_source(vif) def get_secure_rmc_vswitch(adapter, host_uuid): """Returns the vSwitch that is used for secure RMC. :param adapter: The pypowervm adapter API interface. :param host_uuid: The host system UUID. :return: The wrapper for the secure RMC vSwitch. If it does not exist on the system, None is returned. 
""" vswitches = pvm_net.VSwitch.search( adapter, parent_type=pvm_ms.System.schema_type, parent_uuid=host_uuid, name=SECURE_RMC_VSWITCH) if len(vswitches) == 1: return vswitches[0] return None def plug_secure_rmc_vif(adapter, instance, host_uuid, slot_mgr): """Creates the Secure RMC Network Adapter on the VM. :param adapter: The pypowervm adapter API interface. :param instance: The nova instance to create the VIF against. :param host_uuid: The host system UUID. :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client slots used when a VIF is attached to the VM :return: The created network adapter wrapper. """ # Gather the mac and slot number for the mgmt vif mac, slot_num = slot_mgr.build_map.get_mgmt_vea_slot() if not mac: # This is either a deploy case or rebuild case. For remote restart, # mac will not be none, as it will be available from slot data. # Deploy case - mac is None at both slot and instance_system_metadata # and crt_cna will auto-generate it. # Rebuild case - mac is none from slot data but is available # at instance system_metadata. mac = instance.system_metadata.get('mgmt_interface_mac') # Create the adapter. lpar_uuid = vm.get_pvm_uuid(instance) cna_w = pvm_cna.crt_cna(adapter, host_uuid, lpar_uuid, SECURE_RMC_VLAN, vswitch=SECURE_RMC_VSWITCH, crt_vswitch=True, slot_num=slot_num, mac_addr=mac) # Save the mgmt vif to the slot map. # For the rebuild case, mac will be present but not slot_num. # For deploy case, both will be none. We want to register cna in both cases if not slot_num: slot_mgr.register_cna(cna_w) if cna_w.mac != mac: # Update instance system metadata to store instance management # interface mac address. instance.system_metadata.update({'mgmt_interface_mac': cna_w.mac}) return cna_w def _get_trunk_dev_name(vif): """Returns the device name for the trunk adapter. A given VIF will have a trunk adapter and a client adapter. This will return the trunk adapter's name as it will appear on the management VM. :param vif: The nova network interface :return: The device name. """ if 'devname' in vif: return vif['devname'] return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN] @six.add_metaclass(abc.ABCMeta) class PvmVifDriver(object): """Represents an abstract class for a PowerVM Vif Driver. A VIF Driver understands a given virtual interface type (network). It understands how to plug and unplug a given VIF for a virtual machine. """ def __init__(self, adapter, host_uuid, instance): """Initializes a VIF Driver. :param adapter: The pypowervm adapter API interface. :param host_uuid: The host system UUID. :param instance: The nova instance that the vif action will be run against. """ self.adapter = adapter self.host_uuid = host_uuid self.instance = instance @abc.abstractmethod def plug(self, vif, slot_num, new_vif=True): """Plugs a virtual interface (network) into a VM. :param vif: The virtual interface to plug into the instance. :param slot_num: Which slot number to plug the VIF into. May be None. :param new_vif: (Optional, Default: True) If set, indicates that it is a brand new VIF. If False, it indicates that the VIF is already on the client but should be treated on the bridge. :return: The new vif that was created. Only returned if new_vif is set to True. Otherwise None is expected. """ pass def unplug(self, vif, cna_w_list=None): """Unplugs a virtual interface (network) from a VM. :param vif: The virtual interface to plug into the instance. :param cna_w_list: (Optional, Default: None) The list of Client Network Adapters from pypowervm. 
Providing this input allows for an improvement in operation speed. :return cna_w: The deleted Client Network Adapter. """ # This is a default implementation that most implementations will # require. # Need to find the adapters if they were not provided if not cna_w_list: cna_w_list = vm.get_cnas(self.adapter, self.instance) cna_w = self._find_cna_for_vif(cna_w_list, vif) if not cna_w: LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was ' 'not found on the instance.', {'mac': vif['address']}, instance=self.instance) return None LOG.info('Deleting VIF with mac %(mac)s.', {'mac': vif['address']}, instance=self.instance) try: cna_w.delete() except Exception as e: LOG.exception('Unable to unplug VIF with mac %(mac)s.', {'mac': vif['address']}, instance=self.instance) raise exception.VirtualInterfaceUnplugException( reason=six.text_type(e)) return cna_w @staticmethod def _find_cna_for_vif(cna_w_list, vif): """Finds the PowerVM CNA for a given Nova VIF. :param cna_w_list: The list of Client Network Adapter wrappers from pypowervm. :param vif: The Nova Virtual Interface (virtual network interface). :return: The CNA that corresponds to the VIF. None if one is not part of the cna_w_list. """ for cna_w in cna_w_list: # If the MAC address matched, attempt the delete. if vm.norm_mac(cna_w.mac) == vif['address']: return cna_w return None def pre_live_migrate_at_destination(self, vif, vea_vlan_mappings): """Performs the pre live migrate on the destination host. Pre live migrate at destination is invoked before pre_live_migrate_at_source. :param vif: The virtual interface that will be migrated. This may be called network_info in other portions of the code. :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac address, value is the destination's target hypervisor VLAN. """ pass def rollback_live_migration_at_destination(self, vif, vea_vlan_mappings): """Rolls back the pre live migrate on the destination host. :param vif: The virtual interface that was being migrated. This may be called network_info in other portions of the code. :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac address, value is the destination's target hypervisor VLAN. """ pass def pre_live_migrate_at_source(self, vif): """Performs the pre live migrate on the source host. This is executed directly before the migration is started on the source host. :param vif: The virtual interface that will be migrated. This may be called network_info in other portions of the code. :return: The list of TrunkAdapter's on the source that are hosting the VM's vif. Should only return data if those trunks should be deleted after the migration. """ return [] def post_live_migrate_at_source(self, vif): """Performs the post live migrate on the source host. :param vif: The virtual interface of an instance. This may be called network_info in other portions of the code. """ pass class PvmSeaVifDriver(PvmVifDriver): """The PowerVM Shared Ethernet Adapter VIF Driver.""" def plug(self, vif, slot_num, new_vif=True): """Plugs a virtual interface (network) into a VM. This method simply creates the client network adapter into the VM. :param vif: The virtual interface to plug into the instance. :param slot_num: Which slot number to plug the VIF into. May be None. :param new_vif: (Optional, Default: True) If set, indicates that it is a brand new VIF. If False, it indicates that the VIF is already on the client but should be treated on the bridge. :return: The new vif that was created. Only returned if new_vif is set to True. 
Otherwise None is expected. """ # Do nothing if not a new VIF if not new_vif: return None lpar_uuid = vm.get_pvm_uuid(self.instance) # CNA's require a VLAN. Nova network puts it in network-meta. # The networking-powervm neutron agent will also send it, if so via # the vif details. vlan = vif['network']['meta'].get('vlan', None) if not vlan: vlan = int(vif['details']['vlan']) LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan), instance=self.instance) cna_w = pvm_cna.crt_cna(self.adapter, self.host_uuid, lpar_uuid, vlan, mac_addr=vif['address'], slot_num=slot_num) return cna_w class PvmVnicSriovVifDriver(PvmVifDriver): """The SR-IOV VIF driver for PowerVM.""" def plug(self, vif, slot_num, new_vif=True): if not new_vif: return None physnet = vif.get_physical_network() if not physnet: # Get physnet from neutron network if not present in vif # TODO(svenkat): This section of code will be eliminated in # pike release. Design will be in place to fix any vif # that has physical_network missing. The fix will be in # compute startup code. net_id = vif['network']['id'] admin_context = ctx.get_admin_context() napi = net_api.API() network = napi.get(admin_context, net_id) physnet = network.physical_network LOG.debug("Plugging vNIC SR-IOV vif for physical network %(physnet)s.", {'physnet': physnet}, instance=self.instance) # Get the msys msys = pvm_ms.System.get(self.adapter)[0] # Physical ports for the given port label pports_w = sriovtask.find_pports_for_portlabel(physnet, self.adapter, msys) pports = [pport.loc_code for pport in pports_w] if not pports: raise exception.VirtualInterfacePlugException( _("Unable to find acceptable Ethernet ports on physical " "network '%(physnet)s' for instance %(inst)s for SRIOV " "based VIF with MAC address %(vif_mac)s.") % {'physnet': physnet, 'inst': self.instance.name, 'vif_mac': vif['address']}) # MAC mac_address = pvm_util.sanitize_mac_for_api(vif['address']) # vlan id vlan_id = int(vif['details']['vlan']) # Redundancy: plugin sets from binding:profile, then conf, then default redundancy = int(vif['details']['redundancy']) # Capacity: plugin sets from binding:profile, then conf, then default capacity = vif['details']['capacity'] maxcapacity = vif['details'].get('maxcapacity') vnic = pvm_card.VNIC.bld( self.adapter, vlan_id, slot_num=slot_num, mac_addr=mac_address, allowed_vlans=pvm_util.VLANList.NONE, allowed_macs=pvm_util.MACList.NONE) try: sriovtask.set_vnic_back_devs(vnic, pports, sys_w=msys, redundancy=redundancy, capacity=capacity, max_capacity=maxcapacity, check_port_status=True) except ValueError as ve: LOG.exception("Failed to set vNIC backing devices") msg = '' if ve.args: msg = ve.args[0] raise exception.VirtualInterfacePlugException(message=msg) return vnic.create(parent_type=pvm_lpar.LPAR, parent_uuid=vm.get_pvm_uuid(self.instance)) def unplug(self, vif, cna_w_list=None): mac = pvm_util.sanitize_mac_for_api(vif['address']) vnic = vm.get_vnics( self.adapter, self.instance, mac=mac, one_result=True) if not vnic: LOG.warning('Unable to unplug VIF with mac %(mac)s. No matching ' 'vNIC was found on the instance. VIF: %(vif)s', {'mac': mac, 'vif': vif}, instance=self.instance) return None vnic.delete() return vnic class PvmMetaAttrs(list): """Represents meta attributes for a PowerVM Vif Driver. """ def __init__(self, vif, instance): """Initializes meta attributes. :param vif: The virtual interface for the instance :param instance: The nova instance that the vif action will be run against. 
""" self.append('iface-id=%s' % (vif.get('ovs_interfaceid') or vif['id'])) self.append('iface-status=active') self.append('attached-mac=%s' % vif['address']) self.append('vm-uuid=%s' % instance.uuid) def __str__(self): return ','.join(self) class PvmOvsVifDriver(PvmVifDriver): """The Open vSwitch VIF driver for PowerVM.""" def plug(self, vif, slot_num, new_vif=True): """Plugs a virtual interface (network) into a VM. Extends the Lio implementation. Will make sure that the trunk device has the appropriate metadata (ex. port id) set on it so that the Open vSwitch agent picks it up properly. :param vif: The virtual interface to plug into the instance. :param slot_num: Which slot number to plug the VIF into. May be None. :param new_vif: (Optional, Default: True) If set, indicates that it is a brand new VIF. If False, it indicates that the VIF is already on the client but should be treated on the bridge. :return: The new vif that was created. Only returned if new_vif is set to True. Otherwise None is expected. """ lpar_uuid = vm.get_pvm_uuid(self.instance) mgmt_uuid = pvm_par.get_mgmt_partition(self.adapter).uuid # There will only be one trunk wrap, as we have created with just # the mgmt lpar. Next step is to connect to the OVS. mtu = vif['network'].get_meta('mtu') dev_name = _get_trunk_dev_name(vif) meta_attrs = PvmMetaAttrs(vif, self.instance) if new_vif: # Create the trunk and client adapter. return pvm_cna.crt_p2p_cna( self.adapter, self.host_uuid, lpar_uuid, [mgmt_uuid], CONF.powervm.pvm_vswitch_for_novalink_io, crt_vswitch=True, mac_addr=vif['address'], dev_name=dev_name, slot_num=slot_num, ovs_bridge=vif['network']['bridge'], ovs_ext_ids=str(meta_attrs), configured_mtu=mtu)[0] else: # Bug : https://bugs.launchpad.net/nova-powervm/+bug/1731548 # When a host is rebooted, something is discarding tap devices for # VMs deployed with OVS vif. To prevent VMs losing network # connectivity, this is fixed by recreating the tap devices during # init of the nova compute service, which will call vif plug with # new_vif==False. # Find the CNA for this vif. # TODO(svenkat) improve performance by caching VIOS wrapper(s) and # CNA lists (in case >1 vif per VM). cna_w_list = vm.get_cnas(self.adapter, self.instance) cna_w = self._find_cna_for_vif(cna_w_list, vif) # Find the corresponding trunk adapter trunks = pvm_cna.find_trunks(self.adapter, cna_w) for trunk in trunks: # Set MTU, OVS external ids, and OVS bridge metadata # TODO(svenkat) set_parm_value calls should be replaced once # pypowervm supports setting these values directly. trunk.set_parm_value('ConfiguredMTU', mtu, attrib=pvm_c.ATTR_KSV160) trunk.set_parm_value('OvsPortExternalIds', meta_attrs, attrib=pvm_c.ATTR_KSV160) trunk.set_parm_value('OvsBridge', vif['network']['bridge'], attrib=pvm_c.ATTR_KSV160) # Updating the trunk adapter will cause NovaLink to reassociate # the tap device. trunk.update() @staticmethod def get_ovs_interfaceid(vif): """Returns the interface id to set for a given VIF. When a VIF is plugged for an Open vSwitch, it needs to have the interface ID set in the OVS metadata. This returns what the appropriate interface id is. :param vif: The Nova network interface. """ return vif.get('ovs_interfaceid') or vif['id'] def unplug(self, vif, cna_w_list=None): """Unplugs a virtual interface (network) from a VM. Extends the base implementation, but before calling it will remove the adapter from the Open vSwitch and delete the trunk. :param vif: The virtual interface to plug into the instance. 
:param cna_w_list: (Optional, Default: None) The list of Client Network Adapters from pypowervm. Providing this input allows for an improvement in operation speed. :return cna_w: The deleted Client Network Adapter. """ # Need to find the adapters if they were not provided if not cna_w_list: cna_w_list = vm.get_cnas(self.adapter, self.instance) # Find the CNA for this vif. cna_w = self._find_cna_for_vif(cna_w_list, vif) if not cna_w: LOG.warning('Unable to unplug VIF with mac %(mac)s for. The VIF ' 'was not found on the instance.', {'mac': vif['address']}, instance=self.instance) return None # Find and delete the trunk adapters trunks = pvm_cna.find_trunks(self.adapter, cna_w) for trunk in trunks: trunk.delete() # Now delete the client CNA return super(PvmOvsVifDriver, self).unplug(vif, cna_w_list=cna_w_list) def pre_live_migrate_at_destination(self, vif, vea_vlan_mappings): """Performs the pre live migrate on the destination host. This method will create the trunk adapter on the destination host, set its link state up, and attach it to the integration OVS switch. It also updates the vea_vlan_mappings to indicate which unique hypervisor VLAN should be used for this VIF for the migration operation to complete properly. :param vif: The virtual interface that will be migrated. This may be called network_info in other portions of the code. :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac address, value is the destination's target hypervisor VLAN. """ self._cleanup_orphan_adapters(vif, CONF.powervm.pvm_vswitch_for_novalink_io) mgmt_wrap = pvm_par.get_mgmt_partition(self.adapter) dev = _get_trunk_dev_name(vif) meta_attrs = PvmMetaAttrs(vif, self.instance) mtu = vif['network'].get_meta('mtu') # Find a specific free VLAN and create the Trunk in a single atomic # action. cna_w = pvm_cna.crt_trunk_with_free_vlan( self.adapter, self.host_uuid, [mgmt_wrap.uuid], CONF.powervm.pvm_vswitch_for_novalink_io, dev_name=dev, ovs_bridge=vif['network']['bridge'], ovs_ext_ids=str(meta_attrs), configured_mtu=mtu)[0] # Save this data for the migration command. vea_vlan_mappings[vif['address']] = cna_w.pvid LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with PVID " "%(pvid)s", {'mac': vif['address'], 'dev': dev, 'pvid': cna_w.pvid}, instance=self.instance) def rollback_live_migration_at_destination(self, vif, vea_vlan_mappings): """Rolls back the pre live migrate on the destination host. Will delete the TrunkAdapter that pre_live_migrate_at_destination created with its unique hypervisor VLAN. This uses the vea_vlan_mappings to provide the information as to what TrunkAdapter it should remove. :param vif: The virtual interface that was being migrated. This may be called network_info in other portions of the code. :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac address, value is the destination's target hypervisor VLAN. """ LOG.warning("Rolling back the live migrate of VIF with mac %(mac)s.", {'mac': vif['address']}, instance=self.instance) # We know that we just attached the VIF to the NovaLink VM. Search # for a trunk adapter with the PVID and vSwitch that we specified # above. This is guaranteed to be unique. 
vlan = int(vea_vlan_mappings[vif['address']]) vswitch_id = pvm_net.VSwitch.search( self.adapter, parent_type=pvm_ms.System, one_result=True, name=CONF.powervm.pvm_vswitch_for_novalink_io).switch_id # Find the trunk mgmt_wrap = pvm_par.get_mgmt_partition(self.adapter) child_adpts = pvm_net.CNA.get(self.adapter, parent=mgmt_wrap) trunk = None for adpt in child_adpts: # We need a trunk adapter (so check trunk_pri). Then the trunk # is unique by PVID and PowerVM vSwitch ID. if (adpt.pvid == vlan and adpt.vswitch_id == vswitch_id): if adpt.trunk_pri: trunk = adpt break if trunk: # Delete the peer'd trunk adapter. LOG.warning("Deleting target side trunk adapter %(dev)s for " "rollback operation", {'dev': trunk.dev_name}, instance=self.instance) trunk.delete() def pre_live_migrate_at_source(self, vif): """Performs the pre live migrate on the source host. This is executed directly before the migration is started on the source host. :param vif: The virtual interface that will be migrated. This may be called network_info in other portions of the code. :return: The list of TrunkAdapter's on the source that are hosting the VM's vif. Should only return data if those trunks should be deleted after the migration. """ # Right before the migration, we need to find the trunk on the source # host. mac = pvm_util.sanitize_mac_for_api(vif['address']) cna_w = pvm_net.CNA.search( self.adapter, parent_type=pvm_lpar.LPAR.schema_type, parent_uuid=vm.get_pvm_uuid(self.instance), one_result=True, mac=mac) return pvm_cna.find_trunks(self.adapter, cna_w) def post_live_migrate_at_source(self, vif): """Performs the post live migrate on the source host. :param vif: The virtual interface of an instance. This may be called network_info in other portions of the code. """ self._cleanup_orphan_adapters(vif, CONF.powervm.pvm_vswitch_for_novalink_io) def _cleanup_orphan_adapters(self, vif, vswitch_name): """Finds and removes trunk VEAs that have no corresponding CNA.""" # Find and delete orphan adapters with macs matching our vif orphans = pvm_cna.find_orphaned_trunks(self.adapter, vswitch_name) for orphan in orphans: if vm.norm_mac(orphan.mac) == vif['address']: orphan.delete()
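# A minimal calling sketch for the module-level helpers defined above, assuming
# "adapter", "host_uuid", "instance", "vif" and "slot_mgr" are supplied by the
# enclosing PowerVM driver (placeholder names, not defined in this module):
#
#     vnet_w = plug(adapter, host_uuid, instance, vif, slot_mgr, new_vif=True)
#     ...
#     unplug(adapter, host_uuid, instance, vif, slot_mgr, cna_w_list=None)
#
# Both helpers push a custom neutron event via _push_vif_event() when a wrapper
# was actually created or removed.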
apache-2.0
jakirkham/nanshe
tests/test_nanshe/test_io/test_xtiff.py
3
6036
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>" __date__ = "$Aug 04, 2014 14:48:56 EDT$" import collections import json import os import os.path import shutil import tempfile import numpy import h5py import vigra import vigra.impex import nanshe.util.iters import nanshe.util.xnumpy import nanshe.io.xtiff import nanshe.converter from past.builtins import unicode class TestXTiff(object): def setup(self): self.temp_dir = "" self.filedata = collections.OrderedDict() self.offsets = None self.data = None self.pages_to_channel = 2 self.data = numpy.random.random_integers(0, 255, (500, 1, 102, 101, 2)).astype(numpy.uint8) self.offsets = list(nanshe.util.iters.irange( 0, self.data.shape[0] + 100 - 1, 100 )) self.temp_dir = tempfile.mkdtemp() for i, i_str, (a_b, a_e) in nanshe.util.iters.filled_stringify_enumerate( nanshe.util.iters.izip( *nanshe.util.iters.lagged_generators( self.offsets ) ) ): each_filename = os.path.join(self.temp_dir, "test_tiff_" + str(i) + ".tif") each_data = self.data[a_b:a_e] self.filedata[each_filename] = each_data each_data_shaped = nanshe.util.xnumpy.tagging_reorder_array(each_data, to_axis_order="zyxtc") each_data_shaped = each_data_shaped.reshape(each_data_shaped.shape[:-2] + (-1,)) vigra.impex.writeVolume(each_data_shaped[0], os.path.join(self.temp_dir, "test_tiff_" + str(i) + ".tif"), "") self.offsets = self.offsets[:-1] def test_get_multipage_tiff_shape_dtype(self): for each_filename, each_filedata in self.filedata.items(): each_shape_dtype = nanshe.io.xtiff.get_multipage_tiff_shape_dtype(each_filename) each_filedata = nanshe.util.xnumpy.tagging_reorder_array(each_filedata, to_axis_order="zyxtc")[0] print(each_shape_dtype["shape"]) print(each_filedata.shape) assert (each_shape_dtype["shape"][:-2] == each_filedata.shape[:-2]) assert ( each_shape_dtype["shape"][-2] == numpy.prod( each_filedata.shape[-2:] ) ) assert (each_shape_dtype["shape"][-1] == 1) assert (each_shape_dtype["dtype"] == each_filedata.dtype.type) def test_get_multipage_tiff_shape_dtype_transformed(self): for each_filename, each_filedata in self.filedata.items(): each_shape_dtype = nanshe.io.xtiff.get_multipage_tiff_shape_dtype_transformed( each_filename, axis_order="tzyxc", pages_to_channel=self.pages_to_channel ) assert (each_shape_dtype["shape"] == each_filedata.shape) assert (each_shape_dtype["dtype"] == each_filedata.dtype.type) def test_get_standard_tiff_array(self): for each_filename, each_filedata in self.filedata.items(): each_data = nanshe.io.xtiff.get_standard_tiff_array( each_filename, pages_to_channel=self.pages_to_channel ) assert (each_data.shape == each_filedata.shape) assert (each_data.dtype == each_filedata.dtype) assert (each_data == each_filedata).all() def test_get_standard_tiff_data(self): for each_filename, each_filedata in self.filedata.items(): each_data, each_metadata = nanshe.io.xtiff.get_standard_tiff_data( each_filename, pages_to_channel=self.pages_to_channel ) assert (each_data.shape == each_filedata.shape) assert (each_data.dtype == each_filedata.dtype) assert ( each_metadata.shape == ( each_filedata.shape[:1] + each_filedata.shape[-1:] ) ) assert (each_metadata.dtype.type == numpy.dtype(unicode).type) assert (each_data == each_filedata).all() def test_convert_tiffs(self): hdf5_filename = os.path.join(self.temp_dir, "test.h5") hdf5_filepath = hdf5_filename + "/data" nanshe.io.xtiff.convert_tiffs( list(self.filedata.keys()), hdf5_filepath, pages_to_channel=self.pages_to_channel ) assert os.path.exists(hdf5_filename) filenames = None offsets = None descriptions = 
None data = None with h5py.File(hdf5_filename, "r") as hdf5_handle: assert "filenames" in hdf5_handle["data"].attrs assert "offsets" in hdf5_handle["data"].attrs assert "descriptions" in hdf5_handle["data"].attrs filenames = hdf5_handle["data"].attrs["filenames"] offsets = hdf5_handle["data"].attrs["offsets"] descriptions = hdf5_handle["data"].attrs["descriptions"] descriptions = hdf5_handle[descriptions.split(".h5/")[-1]][...] data = hdf5_handle["data"].value self_data_h5 = nanshe.util.xnumpy.tagging_reorder_array( self.data, to_axis_order="cztyx" )[0, 0] self_filenames = numpy.array(list(self.filedata.keys())) assert len(filenames) == len(self_filenames) assert (filenames == self_filenames).all() assert len(offsets) == len(self.offsets) assert numpy.equal(offsets, self.offsets).all() assert len(descriptions) == len(self.data) assert all(_ == u"" for _ in descriptions) assert (data == self_data_h5).all() os.remove(hdf5_filename) def teardown(self): shutil.rmtree(self.temp_dir) self.temp_dir = "" self.filedata = collections.OrderedDict() self.data = None
bsd-3-clause
JakeLowey/HackRPI2
django/contrib/gis/db/backends/postgis/creation.py
99
2854
from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation


class PostGISCreation(DatabaseCreation):
    geom_index_type = 'GIST'
    geom_index_opts = 'GIST_GEOMETRY_OPS'

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography:
                # Geography columns are created normally.
                pass
            else:
                # Geometry columns are created by the `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns
                if f.geography:
                    index_opts = ''
                else:
                    index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) +
                              ' ( ' + style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
        return output

    def sql_table_creation_suffix(self):
        qn = self.connection.ops.quote_name
        return ' TEMPLATE %s' % qn(getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis'))
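# A rough sketch of the SQL that sql_indexes_for_field() assembles for a
# non-geography geometry column; "places" and "geom" are hypothetical
# table/column names, and the srid/type/dim values come from the field
# definition (4326/'POINT'/2 are illustrative assumptions):
#
#     SELECT AddGeometryColumn('places', 'geom', 4326, 'POINT', 2);
#     ALTER TABLE "places" ALTER "geom" SET NOT NULL;
#     CREATE INDEX "places_geom_id" ON "places" USING GIST ( "geom" GIST_GEOMETRY_OPS );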
mit
mancoast/CPythonPyc_test
fail/310_test_time.py
2
10056
from test import support import time import unittest import locale class TimeTestCase(unittest.TestCase): def setUp(self): self.t = time.time() def test_data_attributes(self): time.altzone time.daylight time.timezone time.tzname def test_clock(self): time.clock() def test_conversions(self): self.assert_(time.ctime(self.t) == time.asctime(time.localtime(self.t))) self.assert_(int(time.mktime(time.localtime(self.t))) == int(self.t)) def test_sleep(self): time.sleep(1.2) def test_strftime(self): tt = time.gmtime(self.t) for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I', 'j', 'm', 'M', 'p', 'S', 'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'): format = ' %' + directive try: time.strftime(format, tt) except ValueError: self.fail('conversion specifier: %r failed.' % format) def test_strftime_bounds_checking(self): # Make sure that strftime() checks the bounds of the various parts #of the time tuple (0 is valid for *all* values). # Check year [1900, max(int)] self.assertRaises(ValueError, time.strftime, '', (1899, 1, 1, 0, 0, 0, 0, 1, -1)) if time.accept2dyear: self.assertRaises(ValueError, time.strftime, '', (-1, 1, 1, 0, 0, 0, 0, 1, -1)) self.assertRaises(ValueError, time.strftime, '', (100, 1, 1, 0, 0, 0, 0, 1, -1)) # Check month [1, 12] + zero support self.assertRaises(ValueError, time.strftime, '', (1900, -1, 1, 0, 0, 0, 0, 1, -1)) self.assertRaises(ValueError, time.strftime, '', (1900, 13, 1, 0, 0, 0, 0, 1, -1)) # Check day of month [1, 31] + zero support self.assertRaises(ValueError, time.strftime, '', (1900, 1, -1, 0, 0, 0, 0, 1, -1)) self.assertRaises(ValueError, time.strftime, '', (1900, 1, 32, 0, 0, 0, 0, 1, -1)) # Check hour [0, 23] self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, -1, 0, 0, 0, 1, -1)) self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 24, 0, 0, 0, 1, -1)) # Check minute [0, 59] self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, -1, 0, 0, 1, -1)) self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 60, 0, 0, 1, -1)) # Check second [0, 61] self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, -1, 0, 1, -1)) # C99 only requires allowing for one leap second, but Python's docs say # allow two leap seconds (0..61) self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, 62, 0, 1, -1)) # No check for upper-bound day of week; # value forced into range by a ``% 7`` calculation. # Start check at -2 since gettmarg() increments value before taking # modulo. self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, 0, -2, 1, -1)) # Check day of the year [1, 366] + zero support self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, 0, 0, -1, -1)) self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, 0, 0, 367, -1)) # Check daylight savings flag [-1, 1] self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, 0, 0, 1, -2)) self.assertRaises(ValueError, time.strftime, '', (1900, 1, 1, 0, 0, 0, 0, 1, 2)) def test_default_values_for_zero(self): # Make sure that using all zeros uses the proper default values. # No test for daylight savings since strftime() does not change output # based on its value. expected = "2000 01 01 00 00 00 1 001" result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) self.assertEquals(expected, result) def test_strptime(self): # Should be able to go round-trip from strftime to strptime without # throwing an exception. 
tt = time.gmtime(self.t) for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I', 'j', 'm', 'M', 'p', 'S', 'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'): format = '%' + directive strf_output = time.strftime(format, tt) try: time.strptime(strf_output, format) except ValueError: self.fail("conversion specifier %r failed with '%s' input." % (format, strf_output)) def test_strptime_bytes(self): # Make sure only strings are accepted as arguments to strptime. self.assertRaises(TypeError, time.strptime, b'2009', "%Y") self.assertRaises(TypeError, time.strptime, '2009', b'%Y') def test_asctime(self): time.asctime(time.gmtime(self.t)) self.assertRaises(TypeError, time.asctime, 0) def test_tzset(self): if not hasattr(time, "tzset"): return # Can't test this; don't want the test suite to fail from os import environ # Epoch time of midnight Dec 25th 2002. Never DST in northern # hemisphere. xmas2002 = 1040774400.0 # These formats are correct for 2002, and possibly future years # This format is the 'standard' as documented at: # http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html # They are also documented in the tzset(3) man page on most Unix # systems. eastern = 'EST+05EDT,M4.1.0,M10.5.0' victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0' utc='UTC+0' org_TZ = environ.get('TZ',None) try: # Make sure we can switch to UTC time and results are correct # Note that unknown timezones default to UTC. # Note that altzone is undefined in UTC, as there is no DST environ['TZ'] = eastern time.tzset() environ['TZ'] = utc time.tzset() self.failUnlessEqual( time.gmtime(xmas2002), time.localtime(xmas2002) ) self.failUnlessEqual(time.daylight, 0) self.failUnlessEqual(time.timezone, 0) self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 0) # Make sure we can switch to US/Eastern environ['TZ'] = eastern time.tzset() self.failIfEqual(time.gmtime(xmas2002), time.localtime(xmas2002)) self.failUnlessEqual(time.tzname, ('EST', 'EDT')) self.failUnlessEqual(len(time.tzname), 2) self.failUnlessEqual(time.daylight, 1) self.failUnlessEqual(time.timezone, 18000) self.failUnlessEqual(time.altzone, 14400) self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 0) self.failUnlessEqual(len(time.tzname), 2) # Now go to the southern hemisphere. environ['TZ'] = victoria time.tzset() self.failIfEqual(time.gmtime(xmas2002), time.localtime(xmas2002)) self.failUnless(time.tzname[0] == 'AEST', str(time.tzname[0])) self.failUnless(time.tzname[1] == 'AEDT', str(time.tzname[1])) self.failUnlessEqual(len(time.tzname), 2) self.failUnlessEqual(time.daylight, 1) self.failUnlessEqual(time.timezone, -36000) self.failUnlessEqual(time.altzone, -39600) self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 1) finally: # Repair TZ environment variable in case any other tests # rely on it. if org_TZ is not None: environ['TZ'] = org_TZ elif 'TZ' in environ: del environ['TZ'] time.tzset() def test_insane_timestamps(self): # It's possible that some platform maps time_t to double, # and that this test will fail there. This test should # exempt such platforms (provided they return reasonable # results!). for func in time.ctime, time.gmtime, time.localtime: for unreasonable in -1e200, 1e200: self.assertRaises(ValueError, func, unreasonable) def test_ctime_without_arg(self): # Not sure how to check the values, since the clock could tick # at any time. Make sure these are at least accepted and # don't raise errors. 
time.ctime() time.ctime(None) def test_gmtime_without_arg(self): gt0 = time.gmtime() gt1 = time.gmtime(None) t0 = time.mktime(gt0) t1 = time.mktime(gt1) self.assert_(0 <= (t1-t0) < 0.2) def test_localtime_without_arg(self): lt0 = time.localtime() lt1 = time.localtime(None) t0 = time.mktime(lt0) t1 = time.mktime(lt1) self.assert_(0 <= (t1-t0) < 0.2) class TestLocale(unittest.TestCase): def setUp(self): self.oldloc = locale.setlocale(locale.LC_ALL) def tearDown(self): locale.setlocale(locale.LC_ALL, self.oldloc) def test_bug_3061(self): try: tmp = locale.setlocale(locale.LC_ALL, "fr_FR") except locale.Error: # skip this test return # This should not cause an exception time.strftime("%B", (2009,2,1,0,0,0,0,0,0)) def test_main(): support.run_unittest(TimeTestCase, TestLocale) if __name__ == "__main__": test_main()
gpl-3.0
dafrito/trac-mirror
doc/utils/runepydoc.py
3
1791
# Simple wrapper script needed to run epydoc

import sys

try:
    from epydoc.cli import cli
except ImportError:
    print>>sys.stderr, "No epydoc installed (see http://epydoc.sourceforge.net)"
    sys.exit(2)

# Epydoc 3.0.1 has some trouble running with recent Docutils (>= 0.6),
# so we work around this bug, following the lines of the fix in
# https://bugs.gentoo.org/attachment.cgi?id=210118
# (see http://bugs.gentoo.org/287546)
try:
    from docutils.nodes import Text
    if not hasattr(Text, 'data'):
        setattr(Text, 'data', property(lambda self: self.astext()))
except ImportError:
    print>>sys.stderr, "docutils is needed for running epydoc " \
                       "(see http://docutils.sourceforge.net)"
    sys.exit(2)

# Epydoc doesn't allow much control over the generated graphs. This is
# bad especially for the class graph for Component which has a lot of
# subclasses, so we need to force Left-to-Right mode.
# from epydoc.docwriter.html import HTMLWriter
# HTMLWriter_render_graph = HTMLWriter.render_graph
# def render_graph_LR(self, graph):
#     if graph:
#         graph.body += 'rankdir=LR\n'
#     return HTMLWriter_render_graph(self, graph)
# HTMLWriter.render_graph = render_graph_LR
#
# Well, LR mode doesn't really look better...
# the ASCII-art version seems better in most cases.

# Workaround "visiting unknown node type" error due to `.. note ::`
# This was due to the lack of Admonitions transforms. Add it.
from epydoc.markup.restructuredtext import _DocumentPseudoWriter
from docutils.transforms import writer_aux

orig_get_transforms = _DocumentPseudoWriter.get_transforms

def pseudo_get_transforms(self):
    return orig_get_transforms(self) + [writer_aux.Admonitions]

_DocumentPseudoWriter.get_transforms = pseudo_get_transforms

# Run epydoc
cli()
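# Usage sketch: this wrapper is driven exactly like epydoc's own command line,
# since cli() parses sys.argv; for example (the output directory and package
# name below are placeholders, not mandated by this script):
#
#     python doc/utils/runepydoc.py --html -o build/doc-api trac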
bsd-3-clause
benoitsteiner/tensorflow-opencl
tensorflow/contrib/learn/python/learn/estimators/svm.py
42
9030
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Support Vector Machine (SVM) Estimator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib import layers from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import linear from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.contrib.linear_optimizer.python import sdca_optimizer def _as_iterable(preds, output): for pred in preds: yield pred[output] class SVM(estimator.Estimator): """Support Vector Machine (SVM) model for binary classification. Currently, only linear SVMs are supported. For the underlying optimization problem, the `SDCAOptimizer` is used. For performance and convergence tuning, the num_loss_partitions parameter passed to `SDCAOptimizer` (see `__init__()` method), should be set to (#concurrent train ops per worker) x (#workers). If num_loss_partitions is larger or equal to this value, convergence is guaranteed but becomes slower as num_loss_partitions increases. If it is set to a smaller value, the optimizer is more aggressive in reducing the global loss but convergence is not guaranteed. The recommended value in an `Estimator` (where there is one process per worker) is the number of workers running the train steps. It defaults to 1 (single machine). Example: ```python real_feature_column = real_valued_column(...) sparse_feature_column = sparse_column_with_hash_bucket(...) estimator = SVM( example_id_column='example_id', feature_columns=[real_feature_column, sparse_feature_column], l2_regularization=10.0) # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... estimator.fit(input_fn=input_fn_train) estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: a feature with `key=example_id_column` whose value is a `Tensor` of dtype string. if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, example_id_column, feature_columns, weight_column_name=None, model_dir=None, l1_regularization=0.0, l2_regularization=0.0, num_loss_partitions=1, kernels=None, config=None, feature_engineering_fn=None): """Constructs an `SVM` estimator object. 
Args: example_id_column: A string defining the feature column name representing example ids. Used to initialize the underlying optimizer. feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. l1_regularization: L1-regularization parameter. Refers to global L1 regularization (across all examples). l2_regularization: L2-regularization parameter. Refers to global L2 regularization (across all examples). num_loss_partitions: number of partitions of the (global) loss function optimized by the underlying optimizer (SDCAOptimizer). kernels: A list of kernels for the SVM. Currently, no kernels are supported. Reserved for future use for non-linear SVMs. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. Raises: ValueError: if kernels passed is not None. """ if kernels is not None: raise ValueError("Kernel SVMs are not currently supported.") optimizer = sdca_optimizer.SDCAOptimizer( example_id_column=example_id_column, num_loss_partitions=num_loss_partitions, symmetric_l1_regularization=l1_regularization, symmetric_l2_regularization=l2_regularization) self._feature_columns = feature_columns chief_hook = linear._SdcaUpdateWeightsHook() # pylint: disable=protected-access super(SVM, self).__init__( model_fn=linear.sdca_model_fn, model_dir=model_dir, config=config, params={ "head": head_lib.binary_svm_head( weight_column_name=weight_column_name, enable_centered_bias=False), "feature_columns": feature_columns, "optimizer": optimizer, "weight_column_name": weight_column_name, "update_weights_hook": chief_hook, }, feature_engineering_fn=feature_engineering_fn) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_classes(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Runs inference to determine the predicted class.""" key = prediction_key.PredictionKey.CLASSES preds = super(SVM, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Runs inference to determine the class probability predictions.""" key = prediction_key.PredictionKey.PROBABILITIES preds = super(SVM, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] # pylint: enable=protected-access @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.") def export(self, export_dir, signature_fn=None, input_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" return self.export_with_defaults( 
export_dir=export_dir, signature_fn=signature_fn, input_fn=input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.") def export_with_defaults( self, export_dir, signature_fn=None, input_fn=None, default_batch_size=1, exports_to_keep=None): """Same as BaseEstimator.export, but uses some defaults.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return super(SVM, self).export(export_dir=export_dir, signature_fn=signature_fn, input_fn=input_fn or default_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep)
apache-2.0
sgraham/nope
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
21
5592
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from webkitpy.layout_tests.port import mac from webkitpy.layout_tests.port import port_testcase from webkitpy.tool.mocktool import MockOptions class MacPortTest(port_testcase.PortTestCase): os_name = 'mac' os_version = 'snowleopard' port_name = 'mac' full_port_name = 'mac-snowleopard' port_maker = mac.MacPort def assert_name(self, port_name, os_version_string, expected): port = self.make_port(os_version=os_version_string, port_name=port_name) self.assertEqual(expected, port.name()) def test_versions(self): self.assertTrue(self.make_port().name() in ('mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac-mavericks')) self.assert_name(None, 'snowleopard', 'mac-snowleopard') self.assert_name('mac', 'snowleopard', 'mac-snowleopard') self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard') self.assert_name('mac-snowleopard', 'snowleopard', 'mac-snowleopard') self.assert_name(None, 'lion', 'mac-lion') self.assert_name(None, 'mountainlion', 'mac-mountainlion') self.assert_name(None, 'mavericks', 'mac-mavericks') self.assert_name(None, 'future', 'mac-mavericks') self.assert_name('mac', 'lion', 'mac-lion') self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter') def test_baseline_path(self): port = self.make_port(port_name='mac-snowleopard') self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-snowleopard')) port = self.make_port(port_name='mac-lion') self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-lion')) port = self.make_port(port_name='mac-mountainlion') self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-mountainlion')) port = self.make_port(port_name='mac-mavericks') self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac')) def test_operating_system(self): self.assertEqual('mac', self.make_port().operating_system()) def test_build_path(self): # Test that optional paths are used regardless of whether they exist. 
options = MockOptions(configuration='Release', build_directory='/foo') self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release') # Test that optional relative paths are returned unmodified. options = MockOptions(configuration='Release', build_directory='foo') self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release') # Test that we prefer the legacy dir over the new dir. options = MockOptions(configuration='Release', build_directory=None) self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release', '/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release') def test_build_path_timestamps(self): options = MockOptions(configuration='Release', build_directory=None) port = self.make_port(options=options) port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release') port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release') # Check with 'out' being newer. port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4 self.assertEqual(port._build_path(), '/mock-checkout/out/Release') # Check with 'xcodebuild' being newer. port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4 self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release') def test_driver_name_option(self): self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell')) self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver')) def test_path_to_image_diff(self): self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
bsd-3-clause
loafbaker/django_ecommerce1
django_ecommerce1/settings.py
1
3541
""" Django settings for django_ecommerce1 project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '$o7l)_gbjj7tz_vjnrjjh_4ultecb!@sl!84=v)+)!l_lg^seo' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] DEFAULT_FROM_EMAIL = 'Jianming Chen <loafbaker@hotmail.com>' # Email Utility EMAIL_HOST = 'smtp.live.com' # 'smtp.sendgrid.net' EMAIL_HOST_USER = 'loafbaker@hotmail.com' # change to your own email address EMAIL_HOST_PASSWORD = 'yourownpassword' # change to your own password EMAIL_POT = 25 # default: 587 EMAIL_USE_TLS = True # Site settings if DEBUG: SITE_URL = 'http://127.0.0.1:8000' else: SITE_URL = 'http://cfestore.com' # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'south', 'products', 'carts', 'orders', 'accounts', 'marketing', 'localflavor', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'marketing.middleware.DisplayMarketing', ) ROOT_URLCONF = 'django_ecommerce1.urls' WSGI_APPLICATION = 'django_ecommerce1.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'django_ecommerce1.sqlite'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Shanghai' USE_I18N = True USE_L10N = True USE_TZ = True MARKETING_HOURS_OFFSET = 3 MARKETING_MINUTES_OFFSET = 0 MARKETING_SECONDS_OFFSET = 0 DEFAULT_TAX_RATE = 0.08 TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.request", "django.core.context_processors.static", "django.core.context_processors.tz", "django.contrib.messages.context_processors.messages", ) # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media') STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root') STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static', 'static_files'), ) TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates'), ) STRIPE_SECRET_KEY = 'sk_test_xu0ffxH1nyLcIOfUhg3U2Kh9' STRIPE_PUBLISHABLE_KEY = 'pk_test_XgGj0KSd7oA9Nu3BVXDqZnqw'
mit
jzitelli/three.js
utils/converters/obj/split_obj.py
364
12687
"""Split single OBJ model into mutliple OBJ files by materials ------------------------------------- How to use ------------------------------------- python split_obj.py -i infile.obj -o outfile Will generate: outfile_000.obj outfile_001.obj ... outfile_XXX.obj ------------------------------------- Parser based on format description ------------------------------------- http://en.wikipedia.org/wiki/Obj ------ Author ------ AlteredQualia http://alteredqualia.com """ import fileinput import operator import random import os.path import getopt import sys import struct import math import glob # ##################################################### # Configuration # ##################################################### TRUNCATE = False SCALE = 1.0 # ##################################################### # Templates # ##################################################### TEMPLATE_OBJ = u"""\ ################################ # OBJ generated by split_obj.py ################################ # Faces: %(nfaces)d # Vertices: %(nvertices)d # Normals: %(nnormals)d # UVs: %(nuvs)d ################################ # vertices %(vertices)s # normals %(normals)s # uvs %(uvs)s # faces %(faces)s """ TEMPLATE_VERTEX = "v %f %f %f" TEMPLATE_VERTEX_TRUNCATE = "v %d %d %d" TEMPLATE_NORMAL = "vn %.5g %.5g %.5g" TEMPLATE_UV = "vt %.5g %.5g" TEMPLATE_FACE3_V = "f %d %d %d" TEMPLATE_FACE4_V = "f %d %d %d %d" TEMPLATE_FACE3_VT = "f %d/%d %d/%d %d/%d" TEMPLATE_FACE4_VT = "f %d/%d %d/%d %d/%d %d/%d" TEMPLATE_FACE3_VN = "f %d//%d %d//%d %d//%d" TEMPLATE_FACE4_VN = "f %d//%d %d//%d %d//%d %d//%d" TEMPLATE_FACE3_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d" TEMPLATE_FACE4_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d" # ##################################################### # Utils # ##################################################### def file_exists(filename): """Return true if file exists and is accessible for reading. Should be safer than just testing for existence due to links and permissions magic on Unix filesystems. @rtype: boolean """ try: f = open(filename, 'r') f.close() return True except IOError: return False # ##################################################### # OBJ parser # ##################################################### def parse_vertex(text): """Parse text chunk specifying single vertex. Possible formats: vertex index vertex index / texture index vertex index / texture index / normal index vertex index / / normal index """ v = 0 t = 0 n = 0 chunks = text.split("/") v = int(chunks[0]) if len(chunks) > 1: if chunks[1]: t = int(chunks[1]) if len(chunks) > 2: if chunks[2]: n = int(chunks[2]) return { 'v': v, 't': t, 'n': n } def parse_obj(fname): """Parse OBJ file. 
""" vertices = [] normals = [] uvs = [] faces = [] materials = {} mcounter = 0 mcurrent = 0 mtllib = "" # current face state group = 0 object = 0 smooth = 0 for line in fileinput.input(fname): chunks = line.split() if len(chunks) > 0: # Vertices as (x,y,z) coordinates # v 0.123 0.234 0.345 if chunks[0] == "v" and len(chunks) == 4: x = float(chunks[1]) y = float(chunks[2]) z = float(chunks[3]) vertices.append([x,y,z]) # Normals in (x,y,z) form; normals might not be unit # vn 0.707 0.000 0.707 if chunks[0] == "vn" and len(chunks) == 4: x = float(chunks[1]) y = float(chunks[2]) z = float(chunks[3]) normals.append([x,y,z]) # Texture coordinates in (u,v[,w]) coordinates, w is optional # vt 0.500 -1.352 [0.234] if chunks[0] == "vt" and len(chunks) >= 3: u = float(chunks[1]) v = float(chunks[2]) w = 0 if len(chunks)>3: w = float(chunks[3]) uvs.append([u,v,w]) # Face if chunks[0] == "f" and len(chunks) >= 4: vertex_index = [] uv_index = [] normal_index = [] for v in chunks[1:]: vertex = parse_vertex(v) if vertex['v']: vertex_index.append(vertex['v']) if vertex['t']: uv_index.append(vertex['t']) if vertex['n']: normal_index.append(vertex['n']) faces.append({ 'vertex':vertex_index, 'uv':uv_index, 'normal':normal_index, 'material':mcurrent, 'group':group, 'object':object, 'smooth':smooth, }) # Group if chunks[0] == "g" and len(chunks) == 2: group = chunks[1] # Object if chunks[0] == "o" and len(chunks) == 2: object = chunks[1] # Materials definition if chunks[0] == "mtllib" and len(chunks) == 2: mtllib = chunks[1] # Material if chunks[0] == "usemtl" and len(chunks) == 2: material = chunks[1] if not material in materials: mcurrent = mcounter materials[material] = mcounter mcounter += 1 else: mcurrent = materials[material] # Smooth shading if chunks[0] == "s" and len(chunks) == 2: smooth = chunks[1] return faces, vertices, uvs, normals, materials, mtllib # ############################################################################# # API - Breaker # ############################################################################# def break_obj(infile, outfile): """Break infile.obj to outfile.obj """ if not file_exists(infile): print "Couldn't find [%s]" % infile return faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile) # sort faces by materials chunks = {} for face in faces: material = face["material"] if not material in chunks: chunks[material] = {"faces": [], "vertices": set(), "normals": set(), "uvs": set()} chunks[material]["faces"].append(face) # extract unique vertex / normal / uv indices used per chunk for material in chunks: chunk = chunks[material] for face in chunk["faces"]: for i in face["vertex"]: chunk["vertices"].add(i) for i in face["normal"]: chunk["normals"].add(i) for i in face["uv"]: chunk["uvs"].add(i) # generate new OBJs for mi, material in enumerate(chunks): chunk = chunks[material] # generate separate vertex / normal / uv index lists for each chunk # (including mapping from original to new indices) # get well defined order new_vertices = list(chunk["vertices"]) new_normals = list(chunk["normals"]) new_uvs = list(chunk["uvs"]) # map original => new indices vmap = {} for i, v in enumerate(new_vertices): vmap[v] = i + 1 nmap = {} for i, n in enumerate(new_normals): nmap[n] = i + 1 tmap = {} for i, t in enumerate(new_uvs): tmap[t] = i + 1 # vertices pieces = [] for i in new_vertices: vertex = vertices[i-1] txt = TEMPLATE_VERTEX % (vertex[0], vertex[1], vertex[2]) pieces.append(txt) str_vertices = "\n".join(pieces) # normals pieces = [] for i in new_normals: normal = 
normals[i-1] txt = TEMPLATE_NORMAL % (normal[0], normal[1], normal[2]) pieces.append(txt) str_normals = "\n".join(pieces) # uvs pieces = [] for i in new_uvs: uv = uvs[i-1] txt = TEMPLATE_UV % (uv[0], uv[1]) pieces.append(txt) str_uvs = "\n".join(pieces) # faces pieces = [] for face in chunk["faces"]: txt = "" fv = face["vertex"] fn = face["normal"] ft = face["uv"] if len(fv) == 3: va = vmap[fv[0]] vb = vmap[fv[1]] vc = vmap[fv[2]] if len(fn) == 3 and len(ft) == 3: na = nmap[fn[0]] nb = nmap[fn[1]] nc = nmap[fn[2]] ta = tmap[ft[0]] tb = tmap[ft[1]] tc = tmap[ft[2]] txt = TEMPLATE_FACE3_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc) elif len(fn) == 3: na = nmap[fn[0]] nb = nmap[fn[1]] nc = nmap[fn[2]] txt = TEMPLATE_FACE3_VN % (va, na, vb, nb, vc, nc) elif len(ft) == 3: ta = tmap[ft[0]] tb = tmap[ft[1]] tc = tmap[ft[2]] txt = TEMPLATE_FACE3_VT % (va, ta, vb, tb, vc, tc) else: txt = TEMPLATE_FACE3_V % (va, vb, vc) elif len(fv) == 4: va = vmap[fv[0]] vb = vmap[fv[1]] vc = vmap[fv[2]] vd = vmap[fv[3]] if len(fn) == 4 and len(ft) == 4: na = nmap[fn[0]] nb = nmap[fn[1]] nc = nmap[fn[2]] nd = nmap[fn[3]] ta = tmap[ft[0]] tb = tmap[ft[1]] tc = tmap[ft[2]] td = tmap[ft[3]] txt = TEMPLATE_FACE4_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc, vd, td, nd) elif len(fn) == 4: na = nmap[fn[0]] nb = nmap[fn[1]] nc = nmap[fn[2]] nd = nmap[fn[3]] txt = TEMPLATE_FACE4_VN % (va, na, vb, nb, vc, nc, vd, nd) elif len(ft) == 4: ta = tmap[ft[0]] tb = tmap[ft[1]] tc = tmap[ft[2]] td = tmap[ft[3]] txt = TEMPLATE_FACE4_VT % (va, ta, vb, tb, vc, tc, vd, td) else: txt = TEMPLATE_FACE4_V % (va, vb, vc, vd) pieces.append(txt) str_faces = "\n".join(pieces) # generate OBJ string content = TEMPLATE_OBJ % { "nfaces" : len(chunk["faces"]), "nvertices" : len(new_vertices), "nnormals" : len(new_normals), "nuvs" : len(new_uvs), "vertices" : str_vertices, "normals" : str_normals, "uvs" : str_uvs, "faces" : str_faces } # write OBJ file outname = "%s_%03d.obj" % (outfile, mi) f = open(outname, "w") f.write(content) f.close() # ############################################################################# # Helpers # ############################################################################# def usage(): print "Usage: %s -i filename.obj -o prefix" % os.path.basename(sys.argv[0]) # ##################################################### # Main # ##################################################### if __name__ == "__main__": # get parameters from the command line try: opts, args = getopt.getopt(sys.argv[1:], "hi:o:x:", ["help", "input=", "output=", "truncatescale="]) except getopt.GetoptError: usage() sys.exit(2) infile = outfile = "" for o, a in opts: if o in ("-h", "--help"): usage() sys.exit() elif o in ("-i", "--input"): infile = a elif o in ("-o", "--output"): outfile = a elif o in ("-x", "--truncatescale"): TRUNCATE = True SCALE = float(a) if infile == "" or outfile == "": usage() sys.exit(2) print "Splitting [%s] into [%s_XXX.obj] ..." % (infile, outfile) break_obj(infile, outfile)
mit
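The splitter above hinges on one step: remapping the 1-based OBJ indices used by a material chunk onto a fresh, dense 1-based range before writing the chunk out. A minimal standalone sketch of that remapping, with made-up face data (the original keeps whatever order the set yields; sorting here only makes the output deterministic):

# Sketch of the per-chunk index remapping performed in break_obj(); data is illustrative.
vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
faces = [{'vertex': [1, 2, 3]}, {'vertex': [1, 3, 4]}]   # 1-based OBJ indices

# Collect the vertex indices this chunk actually uses.
used = set()
for face in faces:
    used.update(face['vertex'])

# Map original index -> new dense, 1-based index.
new_order = sorted(used)
vmap = {old: new + 1 for new, old in enumerate(new_order)}

# Emit only the used vertices, then the faces rewritten to the new indices.
for old in new_order:
    x, y, z = vertices[old - 1]
    print("v %f %f %f" % (x, y, z))
for face in faces:
    print("f " + " ".join(str(vmap[i]) for i in face['vertex']))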
ZLLab-Mooc/edx-platform
lms/djangoapps/course_api/blocks/tests/test_api.py
24
1106
""" Tests for Blocks api.py """ from django.test.client import RequestFactory from student.tests.factories import UserFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import SampleCourseFactory from ..api import get_blocks class TestGetBlocks(ModuleStoreTestCase): """ Tests for the get_blocks function """ def setUp(self): super(TestGetBlocks, self).setUp() self.course = SampleCourseFactory.create() self.user = UserFactory.create() self.request = RequestFactory().get("/dummy") self.request.user = self.user def test_basic(self): blocks = get_blocks(self.request, self.course.location, self.user) self.assertEquals(blocks['root'], unicode(self.course.location)) # add 1 for the orphaned course about block self.assertEquals(len(blocks['blocks']) + 1, len(self.store.get_items(self.course.id))) def test_no_user(self): with self.assertRaises(NotImplementedError): get_blocks(self.request, self.course.location)
agpl-3.0
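The test above follows a common unit-test pattern: build shared fixtures once in setUp, then assert on the shape of the returned block structure and on the error raised without a user. A framework-free sketch of that pattern using only unittest; the get_blocks below is a hypothetical stand-in, not the edX implementation, and the block id string is invented:

import unittest

def get_blocks(root, user=None):
    # Hypothetical stand-in used only to illustrate the data shape the test asserts on.
    if user is None:
        raise NotImplementedError("anonymous access is not supported")
    return {'root': root, 'blocks': {root: {'type': 'course'}}}

class TestGetBlocksSketch(unittest.TestCase):
    def setUp(self):
        # Shared fixtures, analogous to the course/user/request built in setUp above.
        self.root = 'block-v1:org+course+run+type@course+block@course'
        self.user = 'staff'

    def test_basic(self):
        blocks = get_blocks(self.root, self.user)
        self.assertEqual(blocks['root'], self.root)
        self.assertIn(self.root, blocks['blocks'])

    def test_no_user(self):
        with self.assertRaises(NotImplementedError):
            get_blocks(self.root)

if __name__ == '__main__':
    unittest.main()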
txomon/vdsm
tests/capsTests.py
1
8335
# # Copyright 2012 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # import os import platform from testlib import VdsmTestCase as TestCaseBase from monkeypatch import MonkeyPatch import caps from vdsm import utils def _getTestData(testFileName): testPath = os.path.realpath(__file__) dirName = os.path.dirname(testPath) path = os.path.join(dirName, testFileName) with open(path) as src: return src.read() def _getCapsNumaDistanceTestData(testFileName): return (0, _getTestData(testFileName).splitlines(False), []) class TestCaps(TestCaseBase): def tearDown(self): for name in dir(caps): obj = getattr(caps, name) if isinstance(obj, utils.memoized): obj.invalidate() def _readCaps(self, fileName): testPath = os.path.realpath(__file__) dirName = os.path.split(testPath)[0] path = os.path.join(dirName, fileName) with open(path) as f: return f.read() @MonkeyPatch(platform, 'machine', lambda: caps.Architecture.X86_64) def testCpuInfo(self): testPath = os.path.realpath(__file__) dirName = os.path.split(testPath)[0] path = os.path.join(dirName, "cpu_info.out") c = caps.CpuInfo(path) self.assertEqual(set(c.flags()), set("""fpu vme de pse tsc msr pae mce cx8 apic mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm dca sse4_1 sse4_2 popcnt aes lahf_lm arat epb dts tpr_shadow vnmi flexpriority ept vpid""".split())) self.assertEqual(c.mhz(), '2533.402') self.assertEqual(c.model(), 'Intel(R) Xeon(R) CPU E5649 @ 2.53GHz') @MonkeyPatch(platform, 'machine', lambda: caps.Architecture.PPC64) def testCpuTopologyPPC64(self): testPath = os.path.realpath(__file__) dirName = os.path.split(testPath)[0] # PPC64 4 sockets, 5 cores, 1 threads per core path = os.path.join(dirName, "caps_libvirt_ibm_S822L.out") t = caps.CpuTopology(open(path).read()) self.assertEqual(t.threads(), 20) self.assertEqual(t.cores(), 20) self.assertEqual(t.sockets(), 4) @MonkeyPatch(platform, 'machine', lambda: caps.Architecture.X86_64) def testCpuTopologyX86_64(self): testPath = os.path.realpath(__file__) dirName = os.path.split(testPath)[0] # 2 x Intel E5649 (with Hyperthreading) path = os.path.join(dirName, "caps_libvirt_intel_E5649.out") with open(path) as p: t = caps.CpuTopology(p.read()) self.assertEqual(t.threads(), 24) self.assertEqual(t.cores(), 12) self.assertEqual(t.sockets(), 2) # 2 x AMD 6272 (with Modules) path = os.path.join(dirName, "caps_libvirt_amd_6274.out") with open(path) as p: t = caps.CpuTopology(p.read()) self.assertEqual(t.threads(), 32) self.assertEqual(t.cores(), 16) self.assertEqual(t.sockets(), 2) # 1 x Intel E31220 (normal Multi-core) path = os.path.join(dirName, 
"caps_libvirt_intel_E31220.out") with open(path) as p: t = caps.CpuTopology(p.read()) self.assertEqual(t.threads(), 4) self.assertEqual(t.cores(), 4) self.assertEqual(t.sockets(), 1) def testEmulatedMachines(self): capsData = self._readCaps("caps_libvirt_amd_6274.out") machines = caps._getEmulatedMachines(caps.Architecture.X86_64, capsData) expectedMachines = ['pc-0.15', 'pc', 'pc-1.0', 'pc-0.14', 'pc-0.13', 'pc-0.12', 'pc-0.11', 'pc-0.10', 'isapc'] self.assertEqual(machines, expectedMachines) def test_parseKeyVal(self): lines = ["x=&2", "y& = 2", " z = 2 ", " s=3=&'5", " w=", "4&"] expectedRes = [{'x': '&2', 'y&': '2', 'z': '2', 's': "3=&'5", 'w': ''}, {'x=': '2', 'y': '= 2', 's=3=': "'5", '4': ''}] sign = ["=", "&"] for res, s in zip(expectedRes, sign): self.assertEqual(res, caps._parseKeyVal(lines, s)) @MonkeyPatch(caps, 'getMemoryStatsByNumaCell', lambda x: { 'total': '49141', 'free': '46783'}) @MonkeyPatch(caps, '_getCapsXMLStr', lambda: _getTestData( "caps_libvirt_amd_6274.out")) def testNumaTopology(self): # 2 x AMD 6272 (with Modules) t = caps.getNumaTopology() expectedNumaInfo = { '0': {'cpus': [0, 1, 2, 3, 4, 5, 6, 7], 'totalMemory': '49141'}, '1': {'cpus': [8, 9, 10, 11, 12, 13, 14, 15], 'totalMemory': '49141'}, '2': {'cpus': [16, 17, 18, 19, 20, 21, 22, 23], 'totalMemory': '49141'}, '3': {'cpus': [24, 25, 26, 27, 28, 29, 30, 31], 'totalMemory': '49141'}} self.assertEqual(t, expectedNumaInfo) @MonkeyPatch(utils, 'readMemInfo', lambda: { 'MemTotal': 50321208, 'MemFree': 47906488}) def testGetUMAMemStats(self): t = caps.getUMAHostMemoryStats() expectedInfo = {'total': '49141', 'free': '46783'} self.assertEqual(t, expectedInfo) @MonkeyPatch(utils, 'execCmd', lambda x: _getCapsNumaDistanceTestData( "caps_numactl_4_nodes.out")) def testNumaNodeDistance(self): t = caps.getNumaNodeDistance() expectedDistanceInfo = { '0': [10, 20, 20, 20], '1': [20, 10, 20, 20], '2': [20, 20, 10, 20], '3': [20, 20, 20, 10]} self.assertEqual(t, expectedDistanceInfo) @MonkeyPatch(utils, 'execCmd', lambda x: (0, ['0'], [])) def testAutoNumaBalancingInfo(self): t = caps.getAutoNumaBalancingInfo() self.assertEqual(t, 0) def testLiveSnapshotNoElementX86_64(self): '''old libvirt, backward compatibility''' capsData = self._readCaps("caps_libvirt_amd_6274.out") support = caps._getLiveSnapshotSupport(caps.Architecture.X86_64, capsData) self.assertTrue(support is None) def testLiveSnapshotX86_64(self): capsData = self._readCaps("caps_libvirt_intel_i73770.out") support = caps._getLiveSnapshotSupport(caps.Architecture.X86_64, capsData) self.assertEqual(support, True) def testLiveSnapshotDisabledX86_64(self): capsData = self._readCaps("caps_libvirt_intel_i73770_nosnap.out") support = caps._getLiveSnapshotSupport(caps.Architecture.X86_64, capsData) self.assertEqual(support, False)
gpl-2.0
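The tests above rely on a MonkeyPatch decorator to swap platform.machine and the libvirt capability fetchers for canned data so the parsing code can be exercised offline. The standard-library equivalent is unittest.mock.patch; a small runnable sketch of the same idea (the function being tested here is a toy, not vdsm's caps module):

import platform
import unittest
from unittest import mock

def describe_host():
    # Toy function standing in for code that branches on the architecture.
    return "running on %s" % platform.machine()

class TestDescribeHost(unittest.TestCase):
    @mock.patch.object(platform, 'machine', lambda: 'x86_64')
    def test_forced_architecture(self):
        # Within this test, platform.machine() returns the canned value,
        # just like @MonkeyPatch(platform, 'machine', ...) in the file above.
        self.assertEqual(describe_host(), "running on x86_64")

if __name__ == '__main__':
    unittest.main()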
InsightSoftwareConsortium/ITKExamples
src/Core/Common/CreateAIndex/Code.py
1
1043
#!/usr/bin/env python # ========================================================================== # # Copyright NumFOCUS # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ==========================================================================*/ import itk Dimension = 2 index = itk.Index[Dimension]() # Method 1 # set both index[0] and index[1] to the same value (in this case, 0). index.Fill(0) print(index) # Method 2 # set each component of the index individually. index[0] = 1 index[1] = 2 print(index)
apache-2.0
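An index is normally used to address a pixel once it has been set. A short follow-on sketch, assuming the itk Python package is installed and using the usual ITK Python wrapping (image size and pixel value are arbitrary):

import itk

Dimension = 2
index = itk.Index[Dimension]()
index[0] = 1
index[1] = 2

# Create a small 8x8 unsigned-char image and address one pixel via the index.
image = itk.Image[itk.UC, Dimension].New()
image.SetRegions([8, 8])
image.Allocate(True)          # allocate and zero-initialize the pixel buffer
image.SetPixel(index, 42)
print(image.GetPixel(index))  # -> 42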
Lh4cKg/sl4a
python/src/Lib/traceback.py
61
11047
"""Extract, format and print information about Python stack traces.""" import linecache import sys import types __all__ = ['extract_stack', 'extract_tb', 'format_exception', 'format_exception_only', 'format_list', 'format_stack', 'format_tb', 'print_exc', 'format_exc', 'print_exception', 'print_last', 'print_stack', 'print_tb', 'tb_lineno'] def _print(file, str='', terminator='\n'): file.write(str+terminator) def print_list(extracted_list, file=None): """Print the list of tuples as returned by extract_tb() or extract_stack() as a formatted stack trace to the given file.""" if file is None: file = sys.stderr for filename, lineno, name, line in extracted_list: _print(file, ' File "%s", line %d, in %s' % (filename,lineno,name)) if line: _print(file, ' %s' % line.strip()) def format_list(extracted_list): """Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. """ list = [] for filename, lineno, name, line in extracted_list: item = ' File "%s", line %d, in %s\n' % (filename,lineno,name) if line: item = item + ' %s\n' % line.strip() list.append(item) return list def print_tb(tb, limit=None, file=None): """Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method. """ if file is None: file = sys.stderr if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name _print(file, ' File "%s", line %d, in %s' % (filename,lineno,name)) linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: _print(file, ' ' + line.strip()) tb = tb.tb_next n = n+1 def format_tb(tb, limit = None): """A shorthand for 'format_list(extract_stack(f, limit)).""" return format_list(extract_tb(tb, limit)) def extract_tb(tb, limit = None): """Return list of up to limit pre-processed entries from traceback. This is useful for alternate formatting of stack traces. If 'limit' is omitted or None, all entries are extracted. A pre-processed stack trace entry is a quadruple (filename, line number, function name, text) representing the information that is usually printed for a stack trace. The text is a string with leading and trailing whitespace stripped; if the source is not available it is None. """ if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit list = [] n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None list.append((filename, lineno, name, line)) tb = tb.tb_next n = n+1 return list def print_exception(etype, value, tb, limit=None, file=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. 
This differs from print_tb() in the following ways: (1) if traceback is not None, it prints a header "Traceback (most recent call last):"; (2) it prints the exception type and value after the stack trace; (3) if type is SyntaxError and value has the appropriate format, it prints the line where the syntax error occurred with a caret on the next line indicating the approximate position of the error. """ if file is None: file = sys.stderr if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) lines = format_exception_only(etype, value) for line in lines[:-1]: _print(file, line, ' ') _print(file, lines[-1], '') def format_exception(etype, value, tb, limit = None): """Format a stack trace and the exception information. The arguments have the same meaning as the corresponding arguments to print_exception(). The return value is a list of strings, each ending in a newline and some containing internal newlines. When these lines are concatenated and printed, exactly the same text is printed as does print_exception(). """ if tb: list = ['Traceback (most recent call last):\n'] list = list + format_tb(tb, limit) else: list = [] list = list + format_exception_only(etype, value) return list def format_exception_only(etype, value): """Format the exception part of a traceback. The arguments are the exception type and value such as given by sys.last_type and sys.last_value. The return value is a list of strings, each ending in a newline. Normally, the list contains a single string; however, for SyntaxError exceptions, it contains several lines that (when printed) display detailed information about where the syntax error occurred. The message indicating which exception occurred is always the last string in the list. """ # An instance should not have a meaningful value parameter, but # sometimes does, particularly for string exceptions, such as # >>> raise string1, string2 # deprecated # # Clear these out first because issubtype(string1, SyntaxError) # would throw another exception and mask the original problem. if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): return [_format_final_exc_line(etype, value)] stype = etype.__name__ if not issubclass(etype, SyntaxError): return [_format_final_exc_line(stype, value)] # It was a syntax error; show exactly where the problem was found. lines = [] try: msg, (filename, lineno, offset, badline) = value.args except Exception: pass else: filename = filename or "<string>" lines.append(' File "%s", line %d\n' % (filename, lineno)) if badline is not None: lines.append(' %s\n' % badline.strip()) if offset is not None: caretspace = badline[:offset].lstrip() # non-space whitespace (likes tabs) must be kept for alignment caretspace = ((c.isspace() and c or ' ') for c in caretspace) # only three spaces to account for offset1 == pos 0 lines.append(' %s^\n' % ''.join(caretspace)) value = msg lines.append(_format_final_exc_line(stype, value)) return lines def _format_final_exc_line(etype, value): """Return a list of a single line -- normal case for format_exception_only""" valuestr = _some_str(value) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line def _some_str(value): try: return str(value) except: return '<unprintable %s object>' % type(value).__name__ def print_exc(limit=None, file=None): """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'. 
(In fact, it uses sys.exc_info() to retrieve the same information in a thread-safe way.)""" if file is None: file = sys.stderr try: etype, value, tb = sys.exc_info() print_exception(etype, value, tb, limit, file) finally: etype = value = tb = None def format_exc(limit=None): """Like print_exc() but return a string.""" try: etype, value, tb = sys.exc_info() return ''.join(format_exception(etype, value, tb, limit)) finally: etype = value = tb = None def print_last(limit=None, file=None): """This is a shorthand for 'print_exception(sys.last_type, sys.last_value, sys.last_traceback, limit, file)'.""" if file is None: file = sys.stderr print_exception(sys.last_type, sys.last_value, sys.last_traceback, limit, file) def print_stack(f=None, limit=None, file=None): """Print a stack trace from its invocation point. The optional 'f' argument can be used to specify an alternate stack frame at which to start. The optional 'limit' and 'file' arguments have the same meaning as for print_exception(). """ if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back print_list(extract_stack(f, limit), file) def format_stack(f=None, limit=None): """Shorthand for 'format_list(extract_stack(f, limit))'.""" if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back return format_list(extract_stack(f, limit)) def extract_stack(f=None, limit = None): """Extract the raw traceback from the current stack frame. The return value has the same format as for extract_tb(). The optional 'f' and 'limit' arguments have the same meaning as for print_stack(). Each item in the list is a quadruple (filename, line number, function name, text), and the entries are in order from oldest to newest stack frame. """ if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit list = [] n = 0 while f is not None and (limit is None or n < limit): lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None list.append((filename, lineno, name, line)) f = f.f_back n = n+1 list.reverse() return list def tb_lineno(tb): """Calculate correct line number of traceback given in tb. Obsolete in 2.3. """ return tb.tb_lineno
apache-2.0
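A short usage sketch of the module above: catch an exception, read the pre-processed (filename, line number, function name, text) entries from extract_tb, and print the final "ExceptionType: message" line produced by format_exception_only.

import sys
import traceback

def fail():
    return 1 / 0

try:
    fail()
except ZeroDivisionError:
    etype, value, tb = sys.exc_info()
    # Each entry unpacks as (filename, lineno, function name, source text).
    for filename, lineno, name, line in traceback.extract_tb(tb):
        print("%s:%d in %s: %s" % (filename, lineno, name, line))
    # The last string is always the "ExceptionType: message" line.
    print(traceback.format_exception_only(etype, value)[-1].strip())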
danbryce/dreal
benchmarks/network/airplane/airplane-single-i-p.py
2
5770
from gen import * ######## # Main # ######## flow_var[0] = """ (declare-fun tau () Real) (declare-fun r () Real) (declare-fun psi () Real) (declare-fun phi () Real) (declare-fun p () Real) (declare-fun gRDR () Real) (declare-fun gAIL () Real) (declare-fun beta () Real) """ flow_dec[0] = """ (define-ode flow_1 ((= d/dt[tau] 1) (= d/dt[r] (+ (+ (+ (+ (* 0.40742 beta) (* -0.056276 p)) (* -0.18801 r)) (* 0.005685 xAIL)) (* -0.106592 xRDR))) (= d/dt[psi] (* (/ 9.80555 92.8277) phi)) (= d/dt[phi] p) (= d/dt[p] (+ (+ (+ (+ (* -1.70098 beta) (* -1.18465 p)) (* 0.223908 r)) (* 0.531304 xAIL)) (* 0.049766 xRDR))) (= d/dt[gRDR] 0) (= d/dt[gAIL] 0) (= d/dt[beta] (+ (+ (- (* -0.099593 beta) r) (* (/ 9.80555 92.8277) phi)) (* 0.740361 xRDR))))) """ state_dec[0] = """ (declare-fun time_{0} () Real) (declare-fun tau_{0}_0 () Real) (declare-fun tau_{0}_t () Real) (declare-fun r_{0}_0 () Real) (declare-fun r_{0}_t () Real) (declare-fun psi_{0}_0 () Real) (declare-fun psi_{0}_t () Real) (declare-fun phi_{0}_0 () Real) (declare-fun phi_{0}_t () Real) (declare-fun p_{0}_0 () Real) (declare-fun p_{0}_t () Real) (declare-fun gRDR_{0}_0 () Real) (declare-fun gRDR_{0}_t () Real) (declare-fun gAIL_{0}_0 () Real) (declare-fun gAIL_{0}_t () Real) (declare-fun beta_{0}_0 () Real) (declare-fun beta_{0}_t () Real) """ state_val[0] = """ (assert (<= 0 time_{0})) (assert (<= time_{0} 1)) (assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 0.5)) (assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 0.5)) (assert (<= -3.14159 r_{0}_0)) (assert (<= r_{0}_0 3.14159)) (assert (<= -3.14159 r_{0}_t)) (assert (<= r_{0}_t 3.14159)) (assert (<= -3.14159 psi_{0}_0)) (assert (<= psi_{0}_0 3.14159)) (assert (<= -3.14159 psi_{0}_t)) (assert (<= psi_{0}_t 3.14159)) (assert (<= -3.14159 phi_{0}_0)) (assert (<= phi_{0}_0 3.14159)) (assert (<= -3.14159 phi_{0}_t)) (assert (<= phi_{0}_t 3.14159)) (assert (<= -3.14159 p_{0}_0)) (assert (<= p_{0}_0 3.14159)) (assert (<= -3.14159 p_{0}_t)) (assert (<= p_{0}_t 3.14159)) (assert (<= -3.14159 gRDR_{0}_0)) (assert (<= gRDR_{0}_0 3.14159)) (assert (<= -3.14159 gRDR_{0}_t)) (assert (<= gRDR_{0}_t 3.14159)) (assert (<= -3.14159 gAIL_{0}_0)) (assert (<= gAIL_{0}_0 3.14159)) (assert (<= -3.14159 gAIL_{0}_t)) (assert (<= gAIL_{0}_t 3.14159)) (assert (<= -3.14159 beta_{0}_0)) (assert (<= beta_{0}_0 3.14159)) (assert (<= -3.14159 beta_{0}_t)) (assert (<= beta_{0}_t 3.14159)) """ cont_cond[0] = [ """ (assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 0.5) (>= tau_{0}_t 0) (<= tau_{0}_t 0.5) )) (assert (and (= [xRDR_{0}_t xAIL_{0}_t tau_{0}_t r_{0}_t psi_{0}_t phi_{0}_t p_{0}_t gRDR_{0}_t gAIL_{0}_t beta_{0}_t] (pintegral 0. 
time_{0} [xRDR_{0}_0 xAIL_{0}_0 tau_{0}_0 r_{0}_0 psi_{0}_0 phi_{0}_0 p_{0}_0 gRDR_{0}_0 gAIL_{0}_0 beta_{0}_0] [holder_{1} holder_{2} holder_{3}])) (connect holder_{3} flow_1)))"""] jump_cond[0] = [ """ (assert (and (= tau_{0}_t 0.5) (= tau_{1}_0 0) (= gRDR_{1}_0 gRDR_{0}_t) (= gAIL_{1}_0 gAIL_{0}_t) (= psi_{1}_0 psi_{0}_t) (= phi_{1}_0 phi_{0}_t) (= r_{1}_0 r_{0}_t) (= p_{1}_0 p_{0}_t) (= beta_{1}_0 beta_{0}_t)))"""] ########### # Aileron # ########### flow_var[1] = """ (declare-fun xAIL () Real) """ flow_dec[1] = """ (define-ode flow_2 ((= d/dt[xAIL] 0.25))) (define-ode flow_3 ((= d/dt[xAIL] -0.25))) """ state_dec[1] = """ (declare-fun mode_1_{0} () Int) (declare-fun xAIL_{0}_0 () Real) (declare-fun xAIL_{0}_t () Real) """ state_val[1] = """ (assert (<= -3.14159 xAIL_{0}_0)) (assert (<= xAIL_{0}_0 3.14159)) (assert (<= -3.14159 xAIL_{0}_t)) (assert (<= xAIL_{0}_t 3.14159)) """ cont_cond[1] = [""" (assert (or (not (= mode_1_{0} 1)) (not (= mode_1_{0} 2)))) (assert (or (not (= mode_2_{0} 2)) (not (= mode_2_{0} 1)))) (assert (or (and (= mode_1_{0} 2) (connect holder_{1} flow_2)) (and (= mode_1_{0} 1) (connect holder_{1} flow_3)))) (assert (not (and (connect holder_{1} flow_2) (connect holder_{1} flow_3))))"""] jump_cond[1] = [""" (assert (and (= xAIL_{1}_0 xAIL_{0}_t))) (assert (or (and (>= gAIL_{0}_t xAIL_{0}_t) (= mode_1_{1} 2)) (and (< gAIL_{0}_t xAIL_{0}_t) (= mode_1_{1} 1))))"""] ########## # Rudder # ########## flow_var[2] = """ (declare-fun xRDR () Real) """ flow_dec[2] = """ (define-ode flow_4 ((= d/dt[xRDR] 0.5))) (define-ode flow_5 ((= d/dt[xRDR] -0.5))) """ state_dec[2] = """ (declare-fun mode_2_{0} () Int) (declare-fun xRDR_{0}_0 () Real) (declare-fun xRDR_{0}_t () Real) """ state_val[2] = """ (assert (<= -3.14159 xRDR_{0}_0)) (assert (<= xRDR_{0}_0 3.14159)) (assert (<= -3.14159 xRDR_{0}_t)) (assert (<= xRDR_{0}_t 3.14159)) """ cont_cond[2] = [""" (assert (or (and (= mode_2_{0} 2) (connect holder_{2} flow_4)) (and (= mode_2_{0} 1) (connect holder_{2} flow_5)))) (assert (not (and (connect holder_{2} flow_4) (connect holder_{2} flow_5))))"""] jump_cond[2] = [""" (assert (= xRDR_{1}_0 xRDR_{0}_t)) (assert (or (and (>= gRDR_{0}_t xRDR_{0}_t) (= mode_2_{1} 2)) (and (< gRDR_{0}_t xRDR_{0}_t) (= mode_2_{1} 1))))"""] ############# # Init/Goal # ############# init_cond = """ (assert (and (= tau_{0}_0 0) (= gRDR_{0}_0 0) (= gAIL_{0}_0 0) (= psi_{0}_0 0) (= phi_{0}_0 0) (= r_{0}_0 0) (= p_{0}_0 0) (= beta_{0}_0 0))) (assert (and (= xAIL_{0}_0 0) (= mode_1_{0} 2))) (assert (and (= xRDR_{0}_0 0) (= mode_2_{0} 2))) """ goal_cond = """ (assert (> (^ (^ beta_{0}_t 2) 0.5) 0.75)) """ import sys try: bound = int(sys.argv[1]) except: print("Usage:", sys.argv[0], "<Bound>") else: generate(bound, 1, [0,1,2], 3, init_cond, goal_cond)
gpl-2.0
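The benchmark above stores SMT-LIB fragments with positional placeholders ({0}, {1}, ...) that the generator fills in once per unrolling step. gen.py itself is not shown, so the snippet below is only a guess at the mechanics: instantiating one jump condition for the step 0 -> 1 transition with str.format.

# Illustrative only: mimics how the numbered placeholders in the templates
# above are presumably substituted per step by gen.generate().
jump_template = """
(assert (and (= tau_{0}_t 0.5) (= tau_{1}_0 0)
             (= beta_{1}_0 beta_{0}_t)))"""

step = 0
# Prints the condition with tau_0_t, tau_1_0, beta_1_0 and beta_0_t filled in.
print(jump_template.format(step, step + 1))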
lijiancheng0614/poem_generator
get_topic.py
1
3538
# -*- coding: utf-8 -*- import os import re import time import jieba import codecs import pickle import argparse from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn import decomposition TIME_FORMAT = '%Y-%m-%d %H:%M:%S' BASE_FOLDER = os.path.abspath(os.path.dirname(__file__)) DATA_FOLDER = os.path.join(BASE_FOLDER, 'data') DEFAULT_FIN = os.path.join(DATA_FOLDER, 'poem.txt') DEFAULT_FTOPICS = os.path.join(DATA_FOLDER, 'topics.txt') DEFAULT_FWORDS = os.path.join(DATA_FOLDER, 'words') DEFAULT_FTOPIC_WORDS = os.path.join(DATA_FOLDER, 'topic_words') DEFAULT_N_TOPIC = 10 DEFAULT_N_TOPIC_WORDS = 20 reg_sep = re.compile('([^\u4e00-\u9fa5]+)') n_topic = 10 n_topic_words = 20 count_vect = CountVectorizer() def read_data(fin): poem_words = list() title_flag = False title = '' fd = codecs.open(fin, 'r', 'utf-8') for line in fd: line = line.strip() line = reg_sep.sub(' ', line) title_flag = not title_flag if title_flag: title = line else: words = ' '.join(jieba.cut(title + line)) poem_words.append(words) fd.close() print('Read data done.') return poem_words def write_topics(ftopics, fwords, ftopics_words, poem_words, n_topic, n_topic_words): count_matrix = count_vect.fit_transform(poem_words) tfidf = TfidfTransformer().fit_transform(count_matrix) nmf = decomposition.NMF(n_components=n_topic).fit(tfidf) feature_names = count_vect.get_feature_names() fw = codecs.open(ftopics, 'w', 'utf-8') for topic in nmf.components_: fw.write(' '.join([feature_names[i] for i in topic.argsort()[:-n_topic_words - 1:-1]]) + '\n') fw.close() print('Write topics done.') fw = codecs.open(fwords, 'wb') pickle.dump(feature_names, fw) fw.close() print('Write words done.') fw = codecs.open(ftopics_words, 'wb') pickle.dump(nmf.components_, fw) fw.close() print('Write topic_words done.') def set_arguments(): parser = argparse.ArgumentParser(description='Get topics') parser.add_argument('--fin', type=str, default=DEFAULT_FIN, help='Input file path, default is {}'.format(DEFAULT_FIN)) parser.add_argument('--ftopics', type=str, default=DEFAULT_FTOPICS, help='Output topics file path, default is {}'.format(DEFAULT_FTOPICS)) parser.add_argument('--ftopics_words', type=str, default=DEFAULT_FTOPIC_WORDS, help='Output topic_words file path, default is {}'.format(DEFAULT_FTOPIC_WORDS)) parser.add_argument('--fwords', type=str, default=DEFAULT_FWORDS, help='Output words file path, default is {}'.format(DEFAULT_FWORDS)) parser.add_argument('--n_topic', type=int, default=DEFAULT_N_TOPIC, help='Topics count, default is {}'.format(DEFAULT_N_TOPIC)) parser.add_argument('--n_topic_words', type=int, default=DEFAULT_N_TOPIC_WORDS, help='Topic words count, default is {}'.format(DEFAULT_N_TOPIC_WORDS)) return parser if __name__ == '__main__': parser = set_arguments() cmd_args = parser.parse_args() print('{} START'.format(time.strftime(TIME_FORMAT))) poem_words = read_data(cmd_args.fin) write_topics(cmd_args.ftopics, cmd_args.fwords, cmd_args.ftopics_words,\ poem_words, cmd_args.n_topic, cmd_args.n_topic_words) print('{} STOP'.format(time.strftime(TIME_FORMAT)))
apache-2.0
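The topic extraction above is the standard count -> tf-idf -> NMF pipeline. A compact sketch of the same steps on a tiny English corpus, assuming scikit-learn is installed; note that recent scikit-learn releases expose get_feature_names_out() where the script above calls get_feature_names().

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF

docs = [
    "rain falls on the quiet river at night",
    "the moon rises over the quiet river",
    "soldiers march far from home in autumn",
    "an old soldier dreams of home and autumn wind",
]

counts = CountVectorizer().fit(docs)
tfidf = TfidfTransformer().fit_transform(counts.transform(docs))
nmf = NMF(n_components=2, init="nndsvd").fit(tfidf)

words = counts.get_feature_names_out()
for k, topic in enumerate(nmf.components_):
    top = [words[i] for i in topic.argsort()[:-6:-1]]   # 5 strongest words per topic
    print("topic %d: %s" % (k, " ".join(top)))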
DNFcode/edx-platform
lms/startup.py
3
4760
""" Module for code that should run during LMS startup """ from django.conf import settings # Force settings to run so that the python path is modified settings.INSTALLED_APPS # pylint: disable=pointless-statement from django_startup import autostartup import edxmako import logging from monkey_patch import django_utils_translation import analytics log = logging.getLogger(__name__) def run(): """ Executed during django startup """ django_utils_translation.patch() autostartup() add_mimetypes() if settings.FEATURES.get('USE_CUSTOM_THEME', False): enable_theme() if settings.FEATURES.get('USE_MICROSITES', False): enable_microsites() if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False): enable_third_party_auth() # Initialize Segment.io analytics module. Flushes first time a message is received and # every 50 messages thereafter, or if 10 seconds have passed since last flush if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'): analytics.init(settings.SEGMENT_IO_LMS_KEY, flush_at=50) def add_mimetypes(): """ Add extra mimetypes. Used in xblock_resource. If you add a mimetype here, be sure to also add it in cms/startup.py. """ import mimetypes mimetypes.add_type('application/vnd.ms-fontobject', '.eot') mimetypes.add_type('application/x-font-opentype', '.otf') mimetypes.add_type('application/x-font-ttf', '.ttf') mimetypes.add_type('application/font-woff', '.woff') def enable_theme(): """ Enable the settings for a custom theme, whose files should be stored in ENV_ROOT/themes/THEME_NAME (e.g., edx_all/themes/stanford). """ # Workaround for setting THEME_NAME to an empty # string which is the default due to this ansible # bug: https://github.com/ansible/ansible/issues/4812 if settings.THEME_NAME == "": settings.THEME_NAME = None return assert settings.FEATURES['USE_CUSTOM_THEME'] settings.FAVICON_PATH = 'themes/{name}/images/favicon.ico'.format( name=settings.THEME_NAME ) # Calculate the location of the theme's files theme_root = settings.ENV_ROOT / "themes" / settings.THEME_NAME # Include the theme's templates in the template search paths settings.TEMPLATE_DIRS.insert(0, theme_root / 'templates') edxmako.paths.add_lookup('main', theme_root / 'templates', prepend=True) # Namespace the theme's static files to 'themes/<theme_name>' to # avoid collisions with default edX static files settings.STATICFILES_DIRS.append( (u'themes/{}'.format(settings.THEME_NAME), theme_root / 'static') ) # Include theme locale path for django translations lookup settings.LOCALE_PATHS = (theme_root / 'conf/locale',) + settings.LOCALE_PATHS def enable_microsites(): """ Enable the use of microsites, which are websites that allow for subdomains for the edX platform, e.g. foo.edx.org """ microsites_root = settings.MICROSITE_ROOT_DIR microsite_config_dict = settings.MICROSITE_CONFIGURATION for ms_name, ms_config in microsite_config_dict.items(): # Calculate the location of the microsite's files ms_root = microsites_root / ms_name ms_config = microsite_config_dict[ms_name] # pull in configuration information from each # microsite root if ms_root.isdir(): # store the path on disk for later use ms_config['microsite_root'] = ms_root template_dir = ms_root / 'templates' ms_config['template_dir'] = template_dir ms_config['microsite_name'] = ms_name log.info('Loading microsite {0}'.format(ms_root)) else: # not sure if we have application logging at this stage of # startup log.error('Error loading microsite {0}. 
Directory does not exist'.format(ms_root)) # remove from our configuration as it is not valid del microsite_config_dict[ms_name] # if we have any valid microsites defined, let's wire in the Mako and STATIC_FILES search paths if microsite_config_dict: settings.TEMPLATE_DIRS.append(microsites_root) edxmako.paths.add_lookup('main', microsites_root) settings.STATICFILES_DIRS.insert(0, microsites_root) def enable_third_party_auth(): """ Enable the use of third_party_auth, which allows users to sign in to edX using other identity providers. For configuration details, see common/djangoapps/third_party_auth/settings.py. """ from third_party_auth import settings as auth_settings auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)
agpl-3.0
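The add_mimetypes() helper above simply teaches Python's mimetypes registry about font extensions so static responses carry a sensible Content-Type. The effect is easy to check directly; note that on newer Pythons .woff may already be registered, in which case add_type just overrides the mapping.

import mimetypes

# Depending on the Python version, .woff may or may not be known already.
print(mimetypes.guess_type('fonts/glyphs.woff'))
mimetypes.add_type('application/font-woff', '.woff')
# After registration the guess is the type added above.
print(mimetypes.guess_type('fonts/glyphs.woff'))   # ('application/font-woff', None)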
ahoyosid/scikit-learn
examples/gaussian_process/plot_gp_regression.py
252
4054
#!/usr/bin/python # -*- coding: utf-8 -*- r""" ========================================================= Gaussian Processes regression: basic introductory example ========================================================= A simple one-dimensional regression exercise computed in two different ways: 1. A noise-free case with a cubic correlation model 2. A noisy case with a squared Euclidean correlation model In both cases, the model parameters are estimated using the maximum likelihood principle. The figures illustrate the interpolating property of the Gaussian Process model as well as its probabilistic nature in the form of a pointwise 95% confidence interval. Note that the parameter ``nugget`` is applied as a Tikhonov regularization of the assumed covariance between the training points. In the special case of the squared euclidean correlation model, nugget is mathematically equivalent to a normalized variance: That is .. math:: \mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2 """ print(__doc__) # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # Licence: BSD 3 clause import numpy as np from sklearn.gaussian_process import GaussianProcess from matplotlib import pyplot as pl np.random.seed(1) def f(x): """The function to predict.""" return x * np.sin(x) #---------------------------------------------------------------------- # First the noiseless case X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T # Observations y = f(X).ravel() # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=100) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, MSE = gp.predict(x, eval_MSE=True) sigma = np.sqrt(MSE) # Plot the function, the prediction and the 95% confidence interval based on # the MSE fig = pl.figure() pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') pl.plot(X, y, 'r.', markersize=10, label=u'Observations') pl.plot(x, y_pred, 'b-', label=u'Prediction') pl.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') pl.xlabel('$x$') pl.ylabel('$f(x)$') pl.ylim(-10, 20) pl.legend(loc='upper left') #---------------------------------------------------------------------- # now the noisy case X = np.linspace(0.1, 9.9, 20) X = np.atleast_2d(X).T # Observations and noise y = f(X).ravel() dy = 0.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noise # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model gp = GaussianProcess(corr='squared_exponential', theta0=1e-1, thetaL=1e-3, thetaU=1, nugget=(dy / y) ** 2, random_start=100) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, MSE = gp.predict(x, eval_MSE=True) sigma = np.sqrt(MSE) # Plot the function, the prediction and the 95% confidence interval based on # the MSE fig = pl.figure() pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations') pl.plot(x, y_pred, 'b-', 
label=u'Prediction') pl.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') pl.xlabel('$x$') pl.ylabel('$f(x)$') pl.ylim(-10, 20) pl.legend(loc='upper left') pl.show()
bsd-3-clause
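The GaussianProcess estimator used above was later removed from scikit-learn; its replacement is GaussianProcessRegressor, where the per-point nugget becomes the alpha parameter (variance added on the kernel diagonal). A hedged sketch of the noisy case with the newer API; the kernel choice and bounds are illustrative rather than a faithful port of the hyperparameters above.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

def f(x):
    return x * np.sin(x)

rng = np.random.RandomState(1)
X = np.atleast_2d(np.linspace(0.1, 9.9, 20)).T
y = f(X).ravel()
dy = 0.5 + 1.0 * rng.random_sample(y.shape)        # per-point noise level
y += rng.normal(0, dy)

kernel = C(1.0, (1e-3, 1e3)) * RBF(10.0, (1e-2, 1e2))
# alpha plays the role of the nugget: noise variance added per training point.
gp = GaussianProcessRegressor(kernel=kernel, alpha=dy ** 2,
                              n_restarts_optimizer=10, random_state=1)
gp.fit(X, y)

x = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred, sigma = gp.predict(x, return_std=True)     # mean and pointwise std for the 95% band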
gorcz/security_monkey
security_monkey/__init__.py
6
6344
# Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module: security_monkey :platform: Unix .. version:: $$VERSION$$ .. moduleauthor:: Patrick Kelley <patrick@netflix.com> """ ### FLASK ### from flask import Flask from flask import render_template from flask.ext.sqlalchemy import SQLAlchemy app = Flask(__name__) app.config.from_envvar("SECURITY_MONKEY_SETTINGS") db = SQLAlchemy(app) # For ELB and/or Eureka @app.route('/healthcheck') def healthcheck(): return 'ok' ### LOGGING ### import logging from logging import Formatter from logging.handlers import RotatingFileHandler from logging import StreamHandler handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100) handler.setFormatter( Formatter('%(asctime)s %(levelname)s: %(message)s ' '[in %(pathname)s:%(lineno)d]') ) handler.setLevel(app.config.get('LOG_LEVEL')) app.logger.setLevel(app.config.get('LOG_LEVEL')) app.logger.addHandler(handler) app.logger.addHandler(StreamHandler()) ### Flask-WTF CSRF Protection ### from flask_wtf.csrf import CsrfProtect csrf = CsrfProtect() csrf.init_app(app) @csrf.error_handler def csrf_error(reason): app.logger.debug("CSRF ERROR: {}".format(reason)) return render_template('csrf_error.json', reason=reason), 400 ### Flask-Login ### from flask.ext.login import LoginManager login_manager = LoginManager() login_manager.init_app(app) from security_monkey.datastore import User, Role @login_manager.user_loader def load_user(email): """ For Flask-Login, returns the user object given the userid. :return: security_monkey.datastore.User object """ app.logger.info("Inside load_user!") user = User.query.filter(User.email == email).first() if not user: user = User(email=email) db.session.add(user) db.session.commit() db.session.close() user = User.query.filter(User.email == email).first() return user ### Flask-Security ### from flask.ext.security import Security, SQLAlchemyUserDatastore user_datastore = SQLAlchemyUserDatastore(db, User, Role) security = Security(app, user_datastore) ### Flask Mail ### from flask_mail import Mail mail = Mail(app=app) from security_monkey.common.utils.utils import send_email as common_send_email @security.send_mail_task def send_email(msg): """ Overrides the Flask-Security/Flask-Mail integration to send emails out via boto and ses. 
""" common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html) ### FLASK API ### from flask.ext.restful import Api api = Api(app) from security_monkey.views.account import AccountGetPutDelete from security_monkey.views.account import AccountPostList api.add_resource(AccountGetPutDelete, '/api/1/accounts/<int:account_id>') api.add_resource(AccountPostList, '/api/1/accounts') from security_monkey.views.distinct import Distinct api.add_resource(Distinct, '/api/1/distinct/<string:key_id>') from security_monkey.views.ignore_list import IgnoreListGetPutDelete from security_monkey.views.ignore_list import IgnorelistListPost api.add_resource(IgnoreListGetPutDelete, '/api/1/ignorelistentries/<int:item_id>') api.add_resource(IgnorelistListPost, '/api/1/ignorelistentries') from security_monkey.views.item import ItemList from security_monkey.views.item import ItemGet api.add_resource(ItemList, '/api/1/items') api.add_resource(ItemGet, '/api/1/items/<int:item_id>') from security_monkey.views.item_comment import ItemCommentPost from security_monkey.views.item_comment import ItemCommentDelete from security_monkey.views.item_comment import ItemCommentGet api.add_resource(ItemCommentPost, '/api/1/items/<int:item_id>/comments') api.add_resource(ItemCommentDelete, '/api/1/items/<int:item_id>/comments/<int:comment_id>') api.add_resource(ItemCommentGet, '/api/1/items/<int:item_id>/comments/<int:comment_id>') from security_monkey.views.item_issue import ItemAuditGet from security_monkey.views.item_issue import ItemAuditList api.add_resource(ItemAuditList, '/api/1/issues') api.add_resource(ItemAuditGet, '/api/1/issues/<int:audit_id>') from security_monkey.views.item_issue_justification import JustifyPostDelete api.add_resource(JustifyPostDelete, '/api/1/issues/<int:audit_id>/justification') from security_monkey.views.logout import Logout api.add_resource(Logout, '/api/1/logout') from security_monkey.views.revision import RevisionList from security_monkey.views.revision import RevisionGet api.add_resource(RevisionList, '/api/1/revisions') api.add_resource(RevisionGet, '/api/1/revisions/<int:revision_id>') from security_monkey.views.revision_comment import RevisionCommentPost from security_monkey.views.revision_comment import RevisionCommentGet from security_monkey.views.revision_comment import RevisionCommentDelete api.add_resource(RevisionCommentPost, '/api/1/revisions/<int:revision_id>/comments') api.add_resource(RevisionCommentGet, '/api/1/revisions/<int:revision_id>/comments/<int:comment_id>') api.add_resource(RevisionCommentDelete, '/api/1/revisions/<int:revision_id>/comments/<int:comment_id>') from security_monkey.views.user_settings import UserSettings api.add_resource(UserSettings, '/api/1/settings') from security_monkey.views.whitelist import WhitelistGetPutDelete from security_monkey.views.whitelist import WhitelistListPost api.add_resource(WhitelistGetPutDelete, '/api/1/whitelistcidrs/<int:item_id>') api.add_resource(WhitelistListPost, '/api/1/whitelistcidrs') from security_monkey.views.auditor_settings import AuditorSettingsGet from security_monkey.views.auditor_settings import AuditorSettingsPut api.add_resource(AuditorSettingsGet, '/api/1/auditorsettings') api.add_resource(AuditorSettingsPut, '/api/1/auditorsettings/<int:as_id>')
apache-2.0
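The API wiring above repeats one pattern: a Resource class per endpoint, registered with a URL rule whose converters become handler arguments. A minimal self-contained sketch of that pattern, assuming Flask and Flask-RESTful are installed; note the flask.ext.restful import path used above is the old-style name, current releases import flask_restful directly. The item store and route below are invented.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

ITEMS = {1: {'name': 'example item'}}

class ItemGet(Resource):
    def get(self, item_id):
        # <int:item_id> in the rule arrives here already converted to int.
        if item_id not in ITEMS:
            return {'error': 'not found'}, 404
        return ITEMS[item_id]

api.add_resource(ItemGet, '/api/1/items/<int:item_id>')

if __name__ == '__main__':
    app.run(debug=True)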
phil-el/phetools
history_credit/credits.py
1
3599
# -*- coding: utf-8 -*- import sys import json import os from common import serialize import random from get_credit import get_credit class SerializerHtml(serialize.SerializerBase): def __init__(self, serializer_type): serialize.SerializerBase.__init__(self, serializer_type) def mime_type(self): return 'text/html' def serialize(self, result): html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html> <head></head> <body>""" for r in result: html += str(r) + ': ' + str(result[r]) + '<br />' return html + "</body></html>" def get_serializer(serializer_type): html_serializer = { 'html' : SerializerHtml } return serialize.get_serializer(serializer_type, html_serializer) def split_param(params): if params: return params.split('|') return [] def query_params(environ): import cgi field = cgi.FieldStorage(fp = environ['wsgi.input'], environ = environ) rdict = { 'format' : 'text', 'cmd' : 'history', 'book' : '', 'page' : '', 'image' : '', 'lang' : '' } for name in field: rdict[name] = field[name].value rdict['book'] = split_param(rdict['book']) rdict['page'] = split_param(rdict['page']) rdict['image'] = split_param(rdict['image']) if rdict['lang'] in [ 'www', '' ]: rdict['lang'] = 'old' return rdict def handle_query(params, start_response): # Avoid to flood log. if not random.randint(0, 100) % 100: print >> sys.stderr, params # FIXME: handle ill formed request (400) result = get_credit(domain = params['lang'], family = 'wikisource', books = params['book'], pages = params['page'], images = params['image']) serializer = get_serializer(params['format']) text = serializer.serialize(result) start_response('200 OK', [('Content-Type', serializer.content_type() + '; charset=UTF-8'), ('Content-Length', str(len(text))), ('Access-Control-Allow-Origin', '*')]) return [ text ] def handle_status(start_response): # pseudo ping, as we run on the web server, we always return 1 ms. text = json.dumps( { 'error' : 0, 'text' : 'pong', 'server' : 'history_credit', 'ping' : 0.001 } ) start_response('200 OK', [('Content-Type', 'text/plain; charset=UTF-8'), ('Content-Length', str(len(text))), ('Access-Control-Allow-Origin', '*')]) return [ text ] def myapp(environ, start_response): params = query_params(environ) # Note than &status or &status= doesn't works cgi.FieldStorage expect # &status=something to accept to store a parameter, so ?lang=fr&status= # will return 200 and an empty answer, counter-intuitive... if params['lang'] and params['cmd'] == 'history': return handle_query(params, start_response) else: return handle_status(start_response) if __name__ == "__main__": sys.stderr = open(os.path.expanduser('~/log/credits.err'), 'a') from flup.server.cgi import WSGIServer try: WSGIServer(myapp).run() except BaseException: import traceback traceback.print_exc()
gpl-3.0
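credits.py is a plain WSGI callable served through flup's CGI adapter. The skeleton is easy to reproduce with only the standard library; a sketch follows, using urllib.parse instead of cgi.FieldStorage for query parsing, with an invented JSON payload and port.

import json
from urllib.parse import parse_qs
from wsgiref.simple_server import make_server

def app(environ, start_response):
    # Parse the query string into a dict of lists, e.g. {'lang': ['fr']}.
    params = parse_qs(environ.get('QUERY_STRING', ''))
    lang = params.get('lang', ['old'])[0]
    body = json.dumps({'error': 0, 'lang': lang, 'text': 'pong'}).encode('utf-8')
    start_response('200 OK', [('Content-Type', 'application/json; charset=UTF-8'),
                              ('Content-Length', str(len(body))),
                              ('Access-Control-Allow-Origin', '*')])
    return [body]

if __name__ == '__main__':
    # Serve locally for testing; the real script runs under flup's WSGIServer instead.
    make_server('localhost', 8051, app).serve_forever()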
HybridF5/nova
nova/tests/unit/virt/test_virt_drivers.py
6
39624
# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import sys import traceback import fixtures import mock import netaddr from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils import six from nova.compute import manager from nova.console import type as ctype from nova import context from nova import exception from nova import objects from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import fake_block_device from nova.tests.unit.image import fake as fake_image from nova.tests.unit import utils as test_utils from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.virt import block_device as driver_block_device from nova.virt import event as virtevent from nova.virt import fake from nova.virt import hardware from nova.virt import libvirt from nova.virt.libvirt import imagebackend LOG = logging.getLogger(__name__) def catch_notimplementederror(f): """Decorator to simplify catching drivers raising NotImplementedError If a particular call makes a driver raise NotImplementedError, we log it so that we can extract this information afterwards as needed. """ def wrapped_func(self, *args, **kwargs): try: return f(self, *args, **kwargs) except NotImplementedError: frame = traceback.extract_tb(sys.exc_info()[2])[-1] LOG.error("%(driver)s does not implement %(method)s " "required for test %(test)s" % {'driver': type(self.connection), 'method': frame[2], 'test': f.__name__}) wrapped_func.__name__ = f.__name__ wrapped_func.__doc__ = f.__doc__ return wrapped_func class _FakeDriverBackendTestCase(object): def _setup_fakelibvirt(self): # So that the _supports_direct_io does the test based # on the current working directory, instead of the # default instances_path which doesn't exist self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) # Put fakelibvirt in place if 'libvirt' in sys.modules: self.saved_libvirt = sys.modules['libvirt'] else: self.saved_libvirt = None import nova.tests.unit.virt.libvirt.fake_imagebackend as \ fake_imagebackend import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \ fake_libvirt_utils import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt import nova.tests.unit.virt.libvirt.fake_os_brick_connector as \ fake_os_brick_connector sys.modules['libvirt'] = fakelibvirt import nova.virt.libvirt.driver import nova.virt.libvirt.firewall import nova.virt.libvirt.host self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.imagebackend', fake_imagebackend)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 
'nova.virt.libvirt.firewall.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.connector', fake_os_brick_connector)) fakelibvirt.disable_event_thread(self) self.flags(rescue_image_id="2", rescue_kernel_id="3", rescue_ramdisk_id=None, snapshots_directory='./', sysinfo_serial='none', group='libvirt') def fake_extend(image, size): pass def fake_migrateToURI(*a): pass def fake_make_drive(_self, _path): pass def fake_get_instance_disk_info(_self, instance, xml=None, block_device_info=None): return '[]' def fake_delete_instance_files(_self, _instance): pass def fake_wait(): pass def fake_detach_device_with_retry(_self, get_device_conf_func, device, persistent, live, max_retry_count=7, inc_sleep_time=2, max_sleep_time=30): # Still calling detach, but instead of returning function # that actually checks if device is gone from XML, just continue # because XML never gets updated in these tests _self.detach_device(get_device_conf_func(device), persistent=persistent, live=live) return fake_wait self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver, '_get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(nova.virt.libvirt.driver.disk, 'extend', fake_extend) self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver, 'delete_instance_files', fake_delete_instance_files) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'detach_device_with_retry', fake_detach_device_with_retry) # Like the existing fakelibvirt.migrateToURI, do nothing, # but don't fail for these tests. self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain, 'migrateToURI', fake_migrateToURI) # We can't actually make a config drive v2 because ensure_tree has # been faked out self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive', fake_make_drive) def _teardown_fakelibvirt(self): # Restore libvirt if self.saved_libvirt: sys.modules['libvirt'] = self.saved_libvirt def setUp(self): super(_FakeDriverBackendTestCase, self).setUp() # TODO(sdague): it would be nice to do this in a way that only # the relevant backends where replaced for tests, though this # should not harm anything by doing it for all backends fake_image.stub_out_image_service(self) self._setup_fakelibvirt() def tearDown(self): fake_image.FakeImageService_reset() self._teardown_fakelibvirt() super(_FakeDriverBackendTestCase, self).tearDown() class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase): """Test that ComputeManager can successfully load both old style and new style drivers and end up with the correct final class. 
""" # if your driver supports being tested in a fake way, it can go here # # both long form and short form drivers are supported new_drivers = { 'nova.virt.fake.FakeDriver': 'FakeDriver', 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver', 'fake.FakeDriver': 'FakeDriver', 'libvirt.LibvirtDriver': 'LibvirtDriver' } def test_load_new_drivers(self): for cls, driver in six.iteritems(self.new_drivers): self.flags(compute_driver=cls) # NOTE(sdague) the try block is to make it easier to debug a # failure by knowing which driver broke try: cm = manager.ComputeManager() except Exception as e: self.fail("Couldn't load driver %s - %s" % (cls, e)) self.assertEqual(cm.driver.__class__.__name__, driver, "Could't load driver %s" % cls) def test_fail_to_load_new_drivers(self): self.flags(compute_driver='nova.virt.amiga') def _fake_exit(error): raise test.TestingException() self.stubs.Set(sys, 'exit', _fake_exit) self.assertRaises(test.TestingException, manager.ComputeManager) class _VirtDriverTestCase(_FakeDriverBackendTestCase): def setUp(self): super(_VirtDriverTestCase, self).setUp() self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) self.connection = importutils.import_object(self.driver_module, fake.FakeVirtAPI()) self.ctxt = test_utils.get_test_admin_context() self.image_service = fake_image.FakeImageService() # NOTE(dripton): resolve_driver_format does some file reading and # writing and chowning that complicate testing too much by requiring # using real directories with proper permissions. Just stub it out # here; we test it in test_imagebackend.py self.stubs.Set(imagebackend.Image, 'resolve_driver_format', imagebackend.Image._get_driver_format) def _get_running_instance(self, obj=True): instance_ref = test_utils.get_test_instance(obj=obj) network_info = test_utils.get_test_network_info() network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \ '1.1.1.1' image_meta = test_utils.get_test_image_object(None, instance_ref) self.connection.spawn(self.ctxt, instance_ref, image_meta, [], 'herp', network_info=network_info) return instance_ref, network_info @catch_notimplementederror def test_init_host(self): self.connection.init_host('myhostname') @catch_notimplementederror def test_list_instances(self): self.connection.list_instances() @catch_notimplementederror def test_list_instance_uuids(self): self.connection.list_instance_uuids() @catch_notimplementederror def test_spawn(self): instance_ref, network_info = self._get_running_instance() domains = self.connection.list_instances() self.assertIn(instance_ref['name'], domains) num_instances = self.connection.get_num_instances() self.assertEqual(1, num_instances) @catch_notimplementederror def test_snapshot_not_running(self): instance_ref = test_utils.get_test_instance() img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) self.assertRaises(exception.InstanceNotRunning, self.connection.snapshot, self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) @catch_notimplementederror def test_snapshot_running(self): img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) instance_ref, network_info = self._get_running_instance() self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) @catch_notimplementederror def test_post_interrupted_snapshot_cleanup(self): instance_ref, network_info = self._get_running_instance() self.connection.post_interrupted_snapshot_cleanup(self.ctxt, instance_ref) @catch_notimplementederror def test_reboot(self): reboot_type = "SOFT" 
instance_ref, network_info = self._get_running_instance() self.connection.reboot(self.ctxt, instance_ref, network_info, reboot_type) @catch_notimplementederror def test_get_host_ip_addr(self): host_ip = self.connection.get_host_ip_addr() # Will raise an exception if it's not a valid IP at all ip = netaddr.IPAddress(host_ip) # For now, assume IPv4. self.assertEqual(ip.version, 4) @catch_notimplementederror def test_set_admin_password(self): instance, network_info = self._get_running_instance(obj=True) self.connection.set_admin_password(instance, 'p4ssw0rd') @catch_notimplementederror def test_inject_file(self): instance_ref, network_info = self._get_running_instance() self.connection.inject_file(instance_ref, base64.b64encode('/testfile'), base64.b64encode('testcontents')) @catch_notimplementederror def test_resume_state_on_host_boot(self): instance_ref, network_info = self._get_running_instance() self.connection.resume_state_on_host_boot(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_rescue(self): image_meta = objects.ImageMeta.from_dict({}) instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, image_meta, '') @catch_notimplementederror def test_unrescue_unrescued_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror def test_unrescue_rescued_instance(self): image_meta = objects.ImageMeta.from_dict({}) instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, image_meta, '') self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror def test_poll_rebooting_instances(self): instances = [self._get_running_instance()] self.connection.poll_rebooting_instances(10, instances) @catch_notimplementederror def test_migrate_disk_and_power_off(self): instance_ref, network_info = self._get_running_instance() flavor_ref = test_utils.get_test_flavor() self.connection.migrate_disk_and_power_off( self.ctxt, instance_ref, 'dest_host', flavor_ref, network_info) @catch_notimplementederror def test_power_off(self): instance_ref, network_info = self._get_running_instance() self.connection.power_off(instance_ref) @catch_notimplementederror def test_power_on_running(self): instance_ref, network_info = self._get_running_instance() self.connection.power_on(self.ctxt, instance_ref, network_info, None) @catch_notimplementederror def test_power_on_powered_off(self): instance_ref, network_info = self._get_running_instance() self.connection.power_off(instance_ref) self.connection.power_on(self.ctxt, instance_ref, network_info, None) @catch_notimplementederror def test_trigger_crash_dump(self): instance_ref, network_info = self._get_running_instance() self.connection.trigger_crash_dump(instance_ref) @catch_notimplementederror def test_soft_delete(self): instance_ref, network_info = self._get_running_instance(obj=True) self.connection.soft_delete(instance_ref) @catch_notimplementederror def test_restore_running(self): instance_ref, network_info = self._get_running_instance() self.connection.restore(instance_ref) @catch_notimplementederror def test_restore_soft_deleted(self): instance_ref, network_info = self._get_running_instance() self.connection.soft_delete(instance_ref) self.connection.restore(instance_ref) @catch_notimplementederror def test_pause(self): instance_ref, network_info = self._get_running_instance() self.connection.pause(instance_ref) 
@catch_notimplementederror def test_unpause_unpaused_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.unpause(instance_ref) @catch_notimplementederror def test_unpause_paused_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.pause(instance_ref) self.connection.unpause(instance_ref) @catch_notimplementederror def test_suspend(self): instance_ref, network_info = self._get_running_instance() self.connection.suspend(self.ctxt, instance_ref) @catch_notimplementederror def test_resume_unsuspended_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.resume(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_resume_suspended_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.suspend(self.ctxt, instance_ref) self.connection.resume(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_destroy_instance_nonexistent(self): fake_instance = test_utils.get_test_instance(obj=True) network_info = test_utils.get_test_network_info() self.connection.destroy(self.ctxt, fake_instance, network_info) @catch_notimplementederror def test_destroy_instance(self): instance_ref, network_info = self._get_running_instance() self.assertIn(instance_ref['name'], self.connection.list_instances()) self.connection.destroy(self.ctxt, instance_ref, network_info) self.assertNotIn(instance_ref['name'], self.connection.list_instances()) @catch_notimplementederror def test_get_volume_connector(self): result = self.connection.get_volume_connector({'id': 'fake'}) self.assertIn('ip', result) self.assertIn('initiator', result) self.assertIn('host', result) @catch_notimplementederror def test_get_volume_connector_storage_ip(self): ip = 'my_ip' storage_ip = 'storage_ip' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) result = self.connection.get_volume_connector({'id': 'fake'}) self.assertIn('ip', result) self.assertIn('initiator', result) self.assertIn('host', result) self.assertEqual(storage_ip, result['ip']) @catch_notimplementederror def test_attach_detach_volume(self): instance_ref, network_info = self._get_running_instance() connection_info = { "driver_volume_type": "fake", "serial": "fake_serial", "data": {} } self.assertIsNone( self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda')) self.assertIsNone( self.connection.detach_volume(connection_info, instance_ref, '/dev/sda')) @catch_notimplementederror def test_swap_volume(self): instance_ref, network_info = self._get_running_instance() self.assertIsNone( self.connection.attach_volume(None, {'driver_volume_type': 'fake', 'data': {}}, instance_ref, '/dev/sda')) self.assertIsNone( self.connection.swap_volume({'driver_volume_type': 'fake', 'data': {}}, {'driver_volume_type': 'fake', 'data': {}}, instance_ref, '/dev/sda', 2)) @catch_notimplementederror def test_attach_detach_different_power_states(self): instance_ref, network_info = self._get_running_instance() connection_info = { "driver_volume_type": "fake", "serial": "fake_serial", "data": {} } self.connection.power_off(instance_ref) self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda') bdm = { 'root_device_name': None, 'swap': None, 'ephemerals': [], 'block_device_mapping': driver_block_device.convert_volumes([ objects.BlockDeviceMapping( self.ctxt, **fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': instance_ref['uuid'], 'device_name': '/dev/sda', 'source_type': 
'volume', 'destination_type': 'volume', 'delete_on_termination': False, 'snapshot_id': None, 'volume_id': 'abcdedf', 'volume_size': None, 'no_device': None })), ]) } bdm['block_device_mapping'][0]['connection_info'] = ( {'driver_volume_type': 'fake', 'data': {}}) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save'): self.connection.power_on( self.ctxt, instance_ref, network_info, bdm) self.connection.detach_volume(connection_info, instance_ref, '/dev/sda') @catch_notimplementederror def test_get_info(self): instance_ref, network_info = self._get_running_instance() info = self.connection.get_info(instance_ref) self.assertIsInstance(info, hardware.InstanceInfo) @catch_notimplementederror def test_get_info_for_unknown_instance(self): fake_instance = test_utils.get_test_instance(obj=True) self.assertRaises(exception.NotFound, self.connection.get_info, fake_instance) @catch_notimplementederror def test_get_diagnostics(self): instance_ref, network_info = self._get_running_instance(obj=True) self.connection.get_diagnostics(instance_ref) @catch_notimplementederror def test_get_instance_diagnostics(self): instance_ref, network_info = self._get_running_instance(obj=True) instance_ref['launched_at'] = timeutils.utcnow() self.connection.get_instance_diagnostics(instance_ref) @catch_notimplementederror def test_block_stats(self): instance_ref, network_info = self._get_running_instance() stats = self.connection.block_stats(instance_ref, 'someid') self.assertEqual(len(stats), 5) @catch_notimplementederror def test_get_console_output(self): fake_libvirt_utils.files['dummy.log'] = '' instance_ref, network_info = self._get_running_instance() console_output = self.connection.get_console_output(self.ctxt, instance_ref) self.assertIsInstance(console_output, six.string_types) @catch_notimplementederror def test_get_vnc_console(self): instance, network_info = self._get_running_instance(obj=True) vnc_console = self.connection.get_vnc_console(self.ctxt, instance) self.assertIsInstance(vnc_console, ctype.ConsoleVNC) @catch_notimplementederror def test_get_spice_console(self): instance_ref, network_info = self._get_running_instance() spice_console = self.connection.get_spice_console(self.ctxt, instance_ref) self.assertIsInstance(spice_console, ctype.ConsoleSpice) @catch_notimplementederror def test_get_rdp_console(self): instance_ref, network_info = self._get_running_instance() rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref) self.assertIsInstance(rdp_console, ctype.ConsoleRDP) @catch_notimplementederror def test_get_serial_console(self): instance_ref, network_info = self._get_running_instance() serial_console = self.connection.get_serial_console(self.ctxt, instance_ref) self.assertIsInstance(serial_console, ctype.ConsoleSerial) @catch_notimplementederror def test_get_mks_console(self): instance_ref, network_info = self._get_running_instance() mks_console = self.connection.get_mks_console(self.ctxt, instance_ref) self.assertIsInstance(mks_console, ctype.ConsoleMKS) @catch_notimplementederror def test_get_console_pool_info(self): instance_ref, network_info = self._get_running_instance() console_pool = self.connection.get_console_pool_info(instance_ref) self.assertIn('address', console_pool) self.assertIn('username', console_pool) self.assertIn('password', console_pool) @catch_notimplementederror def test_refresh_security_group_rules(self): # FIXME: Create security group and add the instance to it instance_ref, network_info = self._get_running_instance() 
self.connection.refresh_security_group_rules(1) @catch_notimplementederror def test_refresh_instance_security_rules(self): # FIXME: Create security group and add the instance to it instance_ref, network_info = self._get_running_instance() self.connection.refresh_instance_security_rules(instance_ref) @catch_notimplementederror def test_ensure_filtering_for_instance(self): instance = test_utils.get_test_instance(obj=True) network_info = test_utils.get_test_network_info() self.connection.ensure_filtering_rules_for_instance(instance, network_info) @catch_notimplementederror def test_unfilter_instance(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() self.connection.unfilter_instance(instance_ref, network_info) def test_live_migration(self): instance_ref, network_info = self._get_running_instance() fake_context = context.RequestContext('fake', 'fake') migration = objects.Migration(context=fake_context, id=1) migrate_data = objects.LibvirtLiveMigrateData( migration=migration, bdms=[], block_migration=False) self.connection.live_migration(self.ctxt, instance_ref, 'otherhost', lambda *a: None, lambda *a: None, migrate_data=migrate_data) @catch_notimplementederror def test_live_migration_force_complete(self): instance_ref, network_info = self._get_running_instance() self.connection.live_migration_force_complete(instance_ref) @catch_notimplementederror def test_live_migration_abort(self): instance_ref, network_info = self._get_running_instance() self.connection.live_migration_abort(instance_ref) @catch_notimplementederror def _check_available_resource_fields(self, host_status): keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'hypervisor_type', 'hypervisor_version', 'hypervisor_hostname', 'cpu_info', 'disk_available_least', 'supported_instances'] for key in keys: self.assertIn(key, host_status) self.assertIsInstance(host_status['hypervisor_version'], int) @catch_notimplementederror def test_get_available_resource(self): available_resource = self.connection.get_available_resource( 'myhostname') self._check_available_resource_fields(available_resource) @catch_notimplementederror def test_get_available_nodes(self): self.connection.get_available_nodes(False) @catch_notimplementederror def _check_host_cpu_status_fields(self, host_cpu_status): self.assertIn('kernel', host_cpu_status) self.assertIn('idle', host_cpu_status) self.assertIn('user', host_cpu_status) self.assertIn('iowait', host_cpu_status) self.assertIn('frequency', host_cpu_status) @catch_notimplementederror def test_get_host_cpu_stats(self): host_cpu_status = self.connection.get_host_cpu_stats() self._check_host_cpu_status_fields(host_cpu_status) @catch_notimplementederror def test_set_host_enabled(self): self.connection.set_host_enabled(True) @catch_notimplementederror def test_get_host_uptime(self): self.connection.get_host_uptime() @catch_notimplementederror def test_host_power_action_reboot(self): self.connection.host_power_action('reboot') @catch_notimplementederror def test_host_power_action_shutdown(self): self.connection.host_power_action('shutdown') @catch_notimplementederror def test_host_power_action_startup(self): self.connection.host_power_action('startup') @catch_notimplementederror def test_add_to_aggregate(self): self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host') @catch_notimplementederror def test_remove_from_aggregate(self): self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host') def test_events(self): got_events = [] 
def handler(event): got_events.append(event) self.connection.register_event_listener(handler) event1 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) event2 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_PAUSED) self.connection.emit_event(event1) self.connection.emit_event(event2) want_events = [event1, event2] self.assertEqual(want_events, got_events) event3 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_RESUMED) event4 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STOPPED) self.connection.emit_event(event3) self.connection.emit_event(event4) want_events = [event1, event2, event3, event4] self.assertEqual(want_events, got_events) def test_event_bad_object(self): # Passing in something which does not inherit # from virtevent.Event def handler(event): pass self.connection.register_event_listener(handler) badevent = { "foo": "bar" } self.assertRaises(ValueError, self.connection.emit_event, badevent) def test_event_bad_callback(self): # Check that if a callback raises an exception, # it does not propagate back out of the # 'emit_event' call def handler(event): raise Exception("Hit Me!") self.connection.register_event_listener(handler) event1 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) self.connection.emit_event(event1) def test_set_bootable(self): self.assertRaises(NotImplementedError, self.connection.set_bootable, 'instance', True) @catch_notimplementederror def test_get_instance_disk_info(self): # This should be implemented by any driver that supports live migrate. instance_ref, network_info = self._get_running_instance() self.connection.get_instance_disk_info(instance_ref, block_device_info={}) @catch_notimplementederror def test_get_device_name_for_instance(self): instance, _ = self._get_running_instance() self.connection.get_device_name_for_instance( instance, [], mock.Mock(spec=objects.BlockDeviceMapping)) def test_network_binding_host_id(self): # NOTE(jroll) self._get_running_instance calls spawn(), so we can't # use it to test this method. Make a simple object instead; we just # need instance.host. 
instance = objects.Instance(self.ctxt, host='somehost') self.assertEqual(instance.host, self.connection.network_binding_host_id(self.ctxt, instance)) class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase): def setUp(self): self.driver_module = "nova.virt.driver.ComputeDriver" super(AbstractDriverTestCase, self).setUp() def test_live_migration(self): self.skipTest('Live migration is not implemented in the base ' 'virt driver.') class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase): def setUp(self): self.driver_module = 'nova.virt.fake.FakeDriver' fake.set_nodes(['myhostname']) super(FakeConnectionTestCase, self).setUp() def _check_available_resource_fields(self, host_status): super(FakeConnectionTestCase, self)._check_available_resource_fields( host_status) hypervisor_type = host_status['hypervisor_type'] supported_instances = host_status['supported_instances'] try: # supported_instances could be JSON wrapped supported_instances = jsonutils.loads(supported_instances) except TypeError: pass self.assertTrue(any(hypervisor_type in x for x in supported_instances)) class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase): REQUIRES_LOCKING = True def setUp(self): # Point _VirtDriverTestCase at the right module self.driver_module = 'nova.virt.libvirt.LibvirtDriver' super(LibvirtConnTestCase, self).setUp() self.stubs.Set(self.connection, '_set_host_enabled', mock.MagicMock()) self.useFixture(fixtures.MonkeyPatch( 'nova.context.get_admin_context', self._fake_admin_context)) # This is needed for the live migration tests which spawn off the # operation for monitoring. self.useFixture(nova_fixtures.SpawnIsSynchronousFixture()) def _fake_admin_context(self, *args, **kwargs): return self.ctxt def test_force_hard_reboot(self): self.flags(wait_soft_reboot_seconds=0, group='libvirt') self.test_reboot() def test_migrate_disk_and_power_off(self): # there is lack of fake stuff to execute this method. so pass. 
self.skipTest("Test nothing, but this method" " needed to override superclass.") def test_internal_set_host_enabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: False service_mock.configure_mock(disabled_reason='None', disabled=False) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!') def test_set_host_enabled_when_auto_disabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: True, 'AUTO: ERROR' service_mock.configure_mock(disabled_reason='AUTO: ERROR', disabled=True) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(True) self.assertFalse(service_mock.disabled) self.assertIsNone(service_mock.disabled_reason) def test_set_host_enabled_when_manually_disabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: True, 'Manually disabled' service_mock.configure_mock(disabled_reason='Manually disabled', disabled=True) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(True) self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'Manually disabled') def test_set_host_enabled_dont_override_manually_disabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: True, 'Manually disabled' service_mock.configure_mock(disabled_reason='Manually disabled', disabled=True) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'Manually disabled') @catch_notimplementederror @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs') def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock): instance_ref, network_info = self._get_running_instance() self.connection.cleanup(self.ctxt, instance_ref, network_info, destroy_vifs=False) self.assertEqual(unplug_vifs_mock.call_count, 0) @catch_notimplementederror @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs') def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock): instance_ref, network_info = self._get_running_instance() self.connection.cleanup(self.ctxt, instance_ref, network_info, destroy_vifs=True) self.assertEqual(unplug_vifs_mock.call_count, 1) unplug_vifs_mock.assert_called_once_with(instance_ref, network_info, True) def test_get_device_name_for_instance(self): self.skipTest("Tested by the nova.tests.unit.virt.libvirt suite") @catch_notimplementederror @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch("nova.virt.libvirt.host.Host.has_min_version") def test_set_admin_password(self, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} instance, network_info = self._get_running_instance(obj=True) self.connection.set_admin_password(instance, 'p4ssw0rd')
apache-2.0
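The driver tests above lean heavily on a catch_notimplementederror decorator so that back-ends which do not support an operation are not reported as failures. The sketch below shows one way such a decorator can be written with only the standard library; it illustrates the pattern, it is not the exact nova implementation, and the ExampleTestCase class and skip message are invented for the example.

import functools
import unittest


def catch_notimplementederror(f):
    """Turn NotImplementedError raised by a test method into a skipped
    test instead of a failure (illustrative sketch, not nova's version)."""
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except NotImplementedError:
            self.skipTest("%s does not implement this operation" %
                          type(self).__name__)
    return wrapper


class ExampleTestCase(unittest.TestCase):
    @catch_notimplementederror
    def test_optional_feature(self):
        # A driver that has not implemented the feature is skipped, not failed.
        raise NotImplementedError()


if __name__ == "__main__":
    unittest.main()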
toobaz/pandas
pandas/tests/reductions/test_stat_reductions.py
2
9374
""" Tests for statistical reductions of 2nd moment or higher: var, skew, kurt, ... """ import inspect import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, Series from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray import pandas.util.testing as tm class TestDatetimeLikeStatReductions: @pytest.mark.parametrize("box", [Series, pd.Index, DatetimeArray]) def test_dt64_mean(self, tz_naive_fixture, box): tz = tz_naive_fixture dti = pd.date_range("2001-01-01", periods=11, tz=tz) # shuffle so that we are not just working with monotone-increasing dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6]) dtarr = dti._data obj = box(dtarr) assert obj.mean() == pd.Timestamp("2001-01-06", tz=tz) assert obj.mean(skipna=False) == pd.Timestamp("2001-01-06", tz=tz) # dtarr[-2] will be the first date 2001-01-1 dtarr[-2] = pd.NaT obj = box(dtarr) assert obj.mean() == pd.Timestamp("2001-01-06 07:12:00", tz=tz) assert obj.mean(skipna=False) is pd.NaT @pytest.mark.parametrize("box", [Series, pd.Index, PeriodArray]) def test_period_mean(self, box): # GH#24757 dti = pd.date_range("2001-01-01", periods=11) # shuffle so that we are not just working with monotone-increasing dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6]) # use hourly frequency to avoid rounding errors in expected results # TODO: flesh this out with different frequencies parr = dti._data.to_period("H") obj = box(parr) with pytest.raises(TypeError, match="ambiguous"): obj.mean() with pytest.raises(TypeError, match="ambiguous"): obj.mean(skipna=True) # parr[-2] will be the first date 2001-01-1 parr[-2] = pd.NaT with pytest.raises(TypeError, match="ambiguous"): obj.mean() with pytest.raises(TypeError, match="ambiguous"): obj.mean(skipna=True) @pytest.mark.parametrize("box", [Series, pd.Index, TimedeltaArray]) def test_td64_mean(self, box): tdi = pd.TimedeltaIndex([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4], unit="D") tdarr = tdi._data obj = box(tdarr) result = obj.mean() expected = np.array(tdarr).mean() assert result == expected tdarr[0] = pd.NaT assert obj.mean(skipna=False) is pd.NaT result2 = obj.mean(skipna=True) assert result2 == tdi[1:].mean() # exact equality fails by 1 nanosecond assert result2.round("us") == (result * 11.0 / 10).round("us") class TestSeriesStatReductions: # Note: the name TestSeriesStatReductions indicates these tests # were moved from a series-specific test file, _not_ that these tests are # intended long-term to be series-specific def _check_stat_op( self, name, alternate, string_series_, check_objects=False, check_allna=False ): with pd.option_context("use_bottleneck", False): f = getattr(Series, name) # add some NaNs string_series_[5:15] = np.NaN # mean, idxmax, idxmin, min, and max are valid for dates if name not in ["max", "min", "mean"]: ds = Series(pd.date_range("1/1/2001", periods=10)) with pytest.raises(TypeError): f(ds) # skipna or no assert pd.notna(f(string_series_)) assert pd.isna(f(string_series_, skipna=False)) # check the result is correct nona = string_series_.dropna() tm.assert_almost_equal(f(nona), alternate(nona.values)) tm.assert_almost_equal(f(string_series_), alternate(nona.values)) allna = string_series_ * np.nan if check_allna: assert np.isnan(f(allna)) # dtype=object with None, it works! 
s = Series([1, 2, 3, None, 5]) f(s) # GH#2888 items = [0] items.extend(range(2 ** 40, 2 ** 40 + 1000)) s = Series(items, dtype="int64") tm.assert_almost_equal(float(f(s)), float(alternate(s.values))) # check date range if check_objects: s = Series(pd.bdate_range("1/1/2000", periods=10)) res = f(s) exp = alternate(s) assert res == exp # check on string data if name not in ["sum", "min", "max"]: with pytest.raises(TypeError): f(Series(list("abc"))) # Invalid axis. with pytest.raises(ValueError): f(string_series_, axis=1) # Unimplemented numeric_only parameter. if "numeric_only" in inspect.getfullargspec(f).args: with pytest.raises(NotImplementedError, match=name): f(string_series_, numeric_only=True) def test_sum(self): string_series = tm.makeStringSeries().rename("series") self._check_stat_op("sum", np.sum, string_series, check_allna=False) def test_mean(self): string_series = tm.makeStringSeries().rename("series") self._check_stat_op("mean", np.mean, string_series) def test_median(self): string_series = tm.makeStringSeries().rename("series") self._check_stat_op("median", np.median, string_series) # test with integers, test failure int_ts = Series(np.ones(10, dtype=int), index=range(10)) tm.assert_almost_equal(np.median(int_ts), int_ts.median()) def test_prod(self): string_series = tm.makeStringSeries().rename("series") self._check_stat_op("prod", np.prod, string_series) def test_min(self): string_series = tm.makeStringSeries().rename("series") self._check_stat_op("min", np.min, string_series, check_objects=True) def test_max(self): string_series = tm.makeStringSeries().rename("series") self._check_stat_op("max", np.max, string_series, check_objects=True) def test_var_std(self): string_series = tm.makeStringSeries().rename("series") datetime_series = tm.makeTimeSeries().rename("ts") alt = lambda x: np.std(x, ddof=1) self._check_stat_op("std", alt, string_series) alt = lambda x: np.var(x, ddof=1) self._check_stat_op("var", alt, string_series) result = datetime_series.std(ddof=4) expected = np.std(datetime_series.values, ddof=4) tm.assert_almost_equal(result, expected) result = datetime_series.var(ddof=4) expected = np.var(datetime_series.values, ddof=4) tm.assert_almost_equal(result, expected) # 1 - element series with ddof=1 s = datetime_series.iloc[[0]] result = s.var(ddof=1) assert pd.isna(result) result = s.std(ddof=1) assert pd.isna(result) def test_sem(self): string_series = tm.makeStringSeries().rename("series") datetime_series = tm.makeTimeSeries().rename("ts") alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) self._check_stat_op("sem", alt, string_series) result = datetime_series.sem(ddof=4) expected = np.std(datetime_series.values, ddof=4) / np.sqrt( len(datetime_series.values) ) tm.assert_almost_equal(result, expected) # 1 - element series with ddof=1 s = datetime_series.iloc[[0]] result = s.sem(ddof=1) assert pd.isna(result) @td.skip_if_no_scipy def test_skew(self): from scipy.stats import skew string_series = tm.makeStringSeries().rename("series") alt = lambda x: skew(x, bias=False) self._check_stat_op("skew", alt, string_series) # test corner cases, skew() returns NaN unless there's at least 3 # values min_N = 3 for i in range(1, min_N + 1): s = Series(np.ones(i)) df = DataFrame(np.ones((i, i))) if i < min_N: assert np.isnan(s.skew()) assert np.isnan(df.skew()).all() else: assert 0 == s.skew() assert (df.skew() == 0).all() @td.skip_if_no_scipy def test_kurt(self): from scipy.stats import kurtosis string_series = tm.makeStringSeries().rename("series") alt = lambda x: 
kurtosis(x, bias=False) self._check_stat_op("kurt", alt, string_series) index = pd.MultiIndex( levels=[["bar"], ["one", "two", "three"], [0, 1]], codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], ) s = Series(np.random.randn(6), index=index) tm.assert_almost_equal(s.kurt(), s.kurt(level=0)["bar"]) # test corner cases, kurt() returns NaN unless there's at least 4 # values min_N = 4 for i in range(1, min_N + 1): s = Series(np.ones(i)) df = DataFrame(np.ones((i, i))) if i < min_N: assert np.isnan(s.kurt()) assert np.isnan(df.kurt()).all() else: assert 0 == s.kurt() assert (df.kurt() == 0).all()
bsd-3-clause
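As a quick illustration of the corner cases exercised by test_skew and test_kurt above, the following standalone snippet (assuming only numpy and pandas are installed) shows the minimum sample sizes below which the reductions return NaN:

import numpy as np
import pandas as pd

# skew() needs at least 3 values and kurt() at least 4; below those
# thresholds pandas returns NaN rather than raising.
for n in range(1, 5):
    s = pd.Series(np.ones(n))
    print(n, s.skew(), s.kurt())

# With enough identical values both reductions are exactly zero,
# which is what the corner-case loops in the tests assert.
s = pd.Series(np.ones(10))
assert s.skew() == 0
assert s.kurt() == 0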
timhuanggithub/MyPOX
pox/lib/threadpool.py
44
2860
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Totally untested thread pool class.
Tries to not get more than "maximum" (but this is not a hard limit).
Kills off up to around half of its workers when more than half are idle.
"""

from __future__ import print_function
from __future__ import with_statement

from threading import Thread, RLock
from Queue import Queue

CYCLE_TIME = 3


class WorkerThread (Thread):
  def __init__ (self, pool):
    Thread.__init__(self)
    self._pool = pool
    self.daemon = True
    self.start()

  def run (self):
    with self._pool._lock:
      self._pool._total += 1
    while self._pool.running:
      with self._pool._lock:
        self._pool._available += 1
      try:
        func, args, kw = self._pool._tasks.get(True, CYCLE_TIME)
        if func is None:
          # Poison pill: leave the loop (rather than return) so the
          # worker count below stays accurate.
          break
      except:
        continue
      finally:
        with self._pool._lock:
          self._pool._available -= 1
          assert self._pool._available >= 0
      try:
        func(*args, **kw)
      except Exception as e:
        print("Worker thread exception", e)
      self._pool._tasks.task_done()
    with self._pool._lock:
      self._pool._total -= 1
      assert self._pool._total >= 0


class ThreadPool (object):
  #NOTE: Assumes only one thread manipulates the pool
  #      (Add some locks to fix)

  def __init__ (self, initial = 0, maximum = None):
    self._available = 0
    self._total = 0
    self._tasks = Queue()
    self.maximum = maximum
    self._lock = RLock()
    self.running = True  # workers exit their loop when this becomes False
    for i in xrange(initial):
      self._new_worker()

  def _new_worker (self):
    with self._lock:
      if self.maximum is not None:
        if self._total >= self.maximum:
          # Too many!
          return False
      WorkerThread(self)
      return True

  def add (_self, _func, *_args, **_kwargs):
    _self.add_task(_func, args=_args, kwargs=_kwargs)

  def add_task (self, func, args=(), kwargs={}):
    while True:
      self._lock.acquire()
      if self._available == 0:
        self._lock.release()
        self._new_worker()
      else:
        break
    self._tasks.put((func, args, kwargs))
    if self._available > self._total / 2 and self._total > 8:
      for i in xrange(self._total / 2 - 1):
        self._tasks.put((None, None, None))
    self._lock.release()

  def join (self):
    self._tasks.join()
apache-2.0
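For context, a minimal usage sketch of the pool defined in pox/lib/threadpool.py above; it assumes Python 2 (POX's target at the time) with a POX checkout on the path, and the work function and pool sizes are arbitrary examples:

from __future__ import print_function

from pox.lib.threadpool import ThreadPool


def work(n):
    # Placeholder task; replace with real work.
    print("processing", n)

pool = ThreadPool(initial=2, maximum=8)
for i in xrange(20):
    pool.add_task(work, args=(i,))
pool.join()  # block until every queued task has been processed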
keshavramaswamy/ThinkStats2
code/chap08soln.py
65
6172
"""This file contains code used in "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function import thinkstats2 import thinkplot import math import random import numpy as np from scipy import stats from estimation import RMSE, MeanError """This file contains a solution to exercises in Think Stats: Exercise 8.1 In this chapter we used $\xbar$ and median to estimate $\mu$, and found that $\xbar$ yields lower MSE. Also, we used $S^2$ and $S_{n-1}^2$ to estimate $\sigma$, and found that $S^2$ is biased and $S_{n-1}^2$ unbiased. Run similar experiments to see if $\xbar$ and median are biased estimates of $\mu$. Also check whether $S^2$ or $S_{n-1}^2$ yields a lower MSE. My conclusions: 1) xbar and median yield lower mean error as m increases, so neither one is obviously biased, as far as we can tell from the experiment. 2) The biased estimator of variance yields lower RMSE than the unbiased estimator, by about 10%. And the difference holds up as m increases. Exercise 8.2 Suppose you draw a sample with size $n=10$ from a population with an exponential disrtribution with $\lambda=2$. Simulate this experiment 1000 times and plot the sampling distribution of the estimate $\lamhat$. Compute the standard error of the estimate and the 90\% confidence interval. Repeat the experiment with a few different values of $n$ and make a plot of standard error versus $n$. 1) With sample size 10: standard error 0.896717911545 confidence interval (1.2901330772324622, 3.8692334892427911) 2) As sample size increases, standard error and the width of the CI decrease: 10 0.90 (1.3, 3.9) 100 0.21 (1.7, 2.4) 1000 0.06 (1.9, 2.1) All three confidence intervals contain the actual value, 2. Exercise 8.3 In games like hockey and soccer, the time between goals is roughly exponential. So you could estimate a team's goal-scoring rate by observing the number of goals they score in a game. This estimation process is a little different from sampling the time between goals, so let's see how it works. Write a function that takes a goal-scoring rate, {\tt lam}, in goals per game, and simulates a game by generating the time between goals until the total time exceeds 1 game, then returns the number of goals scored. Write another function that simulates many games, stores the estimates of {\tt lam}, then computes their mean error and RMSE. Is this way of making an estimate biased? Plot the sampling distribution of the estimates and the 90\% confidence interval. What is the standard error? What happens to sampling error for increasing values of {\tt lam}? My conclusions: 1) RMSE for this way of estimating lambda is 1.4 2) The mean error is small and decreases with m, so this estimator appears to be unbiased. One note: If the time between goals is exponential, the distribution of goals scored in a game is Poisson. See https://en.wikipedia.org/wiki/Poisson_distribution """ def Estimate1(n=7, m=100000): """Mean error for xbar and median as estimators of population mean. 
n: sample size m: number of iterations """ mu = 0 sigma = 1 means = [] medians = [] for _ in range(m): xs = [random.gauss(mu, sigma) for i in range(n)] xbar = np.mean(xs) median = np.median(xs) means.append(xbar) medians.append(median) print('Experiment 1') print('mean error xbar', MeanError(means, mu)) print('mean error median', MeanError(medians, mu)) def Estimate2(n=7, m=100000): """RMSE for biased and unbiased estimators of population variance. n: sample size m: number of iterations """ mu = 0 sigma = 1 estimates1 = [] estimates2 = [] for _ in range(m): xs = [random.gauss(mu, sigma) for i in range(n)] biased = np.var(xs) unbiased = np.var(xs, ddof=1) estimates1.append(biased) estimates2.append(unbiased) print('Experiment 2') print('RMSE biased', RMSE(estimates1, sigma**2)) print('RMSE unbiased', RMSE(estimates2, sigma**2)) def SimulateSample(lam=2, n=10, m=1000): """Sampling distribution of L as an estimator of exponential parameter. lam: parameter of an exponential distribution n: sample size m: number of iterations """ def VertLine(x, y=1): thinkplot.Plot([x, x], [0, y], color='0.8', linewidth=3) estimates = [] for j in range(m): xs = np.random.exponential(1.0/lam, n) lamhat = 1.0 / np.mean(xs) estimates.append(lamhat) stderr = RMSE(estimates, lam) print('standard error', stderr) cdf = thinkstats2.Cdf(estimates) ci = cdf.Percentile(5), cdf.Percentile(95) print('confidence interval', ci) VertLine(ci[0]) VertLine(ci[1]) # plot the CDF thinkplot.Cdf(cdf) thinkplot.Save(root='estimation2', xlabel='estimate', ylabel='CDF', title='Sampling distribution') return stderr def SimulateGame(lam): """Simulates a game and returns the estimated goal-scoring rate. lam: actual goal scoring rate in goals per game """ goals = 0 t = 0 while True: time_between_goals = random.expovariate(lam) t += time_between_goals if t > 1: break goals += 1 # estimated goal-scoring rate is the actual number of goals scored L = goals return L def Estimate4(lam=2, m=1000000): estimates = [] for i in range(m): L = SimulateGame(lam) estimates.append(L) print('Experiment 4') print('rmse L', RMSE(estimates, lam)) print('mean error L', MeanError(estimates, lam)) pmf = thinkstats2.Pmf(estimates) thinkplot.Hist(pmf) thinkplot.Show() def main(): thinkstats2.RandomSeed(17) Estimate1() Estimate2() print('Experiment 3') for n in [10, 100, 1000]: stderr = SimulateSample(n=n) print(n, stderr) Estimate4() if __name__ == '__main__': main()
gpl-3.0
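The experiment described in Exercise 8.2 above can also be reproduced without the thinkstats2 helpers. This numpy-only sketch mirrors SimulateSample; the seed and iteration count are arbitrary choices, so the printed numbers will only roughly match those quoted in the docstring.

import numpy as np


def estimate_lambda(lam=2.0, n=10, iters=1000, seed=17):
    """Sampling distribution of L = 1 / xbar for exponential data
    (standalone numpy version of the SimulateSample idea above)."""
    rng = np.random.RandomState(seed)
    estimates = np.array([1.0 / rng.exponential(1.0 / lam, n).mean()
                          for _ in range(iters)])
    stderr = np.sqrt(np.mean((estimates - lam) ** 2))  # RMSE around lam
    ci = np.percentile(estimates, [5, 95])             # 90% interval
    return stderr, ci


for n in (10, 100, 1000):
    stderr, ci = estimate_lambda(n=n)
    print(n, round(stderr, 2), np.round(ci, 1))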
HybridF5/nova
nova/tests/unit/virt/libvirt/storage/test_rbd.py
9
22834
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.compute import task_states from nova import exception from nova import objects from nova import test from nova import utils from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils CEPH_MON_DUMP = """dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ class RbdTestCase(test.NoDBTestCase): @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def setUp(self, mock_rados, mock_rbd): super(RbdTestCase, self).setUp() self.mock_rados = mock_rados self.mock_rados.Rados = mock.Mock self.mock_rados.Rados.ioctx = mock.Mock() self.mock_rados.Rados.connect = mock.Mock() self.mock_rados.Rados.shutdown = mock.Mock() self.mock_rados.Rados.open_ioctx = mock.Mock() self.mock_rados.Rados.open_ioctx.return_value = \ self.mock_rados.Rados.ioctx self.mock_rados.Error = Exception self.mock_rbd = mock_rbd self.mock_rbd.RBD = mock.Mock self.mock_rbd.Image = mock.Mock self.mock_rbd.Image.close = mock.Mock() self.mock_rbd.RBD.Error = Exception self.rbd_pool = 'rbd' self.driver = rbd_utils.RBDDriver(self.rbd_pool, None, None) self.volume_name = u'volume-00000001' self.snap_name = u'test-snap' def tearDown(self): super(RbdTestCase, self).tearDown() def test_good_locations(self): locations = ['rbd://fsid/pool/image/snap', 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver.parse_url, locations) def test_bad_locations(self): locations = ['rbd://image', 'http://path/to/somewhere/else', 'rbd://image/extra', 'rbd://image/', 'rbd://fsid/pool/image/', 'rbd://fsid/pool/image/snap/', 'rbd://///', ] image_meta = {'disk_format': 'raw'} for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver.parse_url, loc) self.assertFalse(self.driver.is_cloneable({'url': loc}, image_meta)) @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid') @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://abc/pool/image/snap'} image_meta = {'disk_format': 'raw'} self.assertTrue(self.driver.is_cloneable(location, image_meta)) self.assertTrue(mock_get_fsid.called) @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid') def test_uncloneable_different_fsid(self, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://def/pool/image/snap'} image_meta = {'disk_format': 'raw'} self.assertFalse( self.driver.is_cloneable(location, image_meta)) self.assertTrue(mock_get_fsid.called) @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid') 
@mock.patch.object(rbd_utils, 'RBDVolumeProxy') @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://abc/pool/image/snap'} mock_proxy.side_effect = mock_rbd.Error image_meta = {'disk_format': 'raw'} self.assertFalse( self.driver.is_cloneable(location, image_meta)) mock_proxy.assert_called_once_with(self.driver, 'image', pool='pool', snapshot='snap', read_only=True) self.assertTrue(mock_get_fsid.called) @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid') def test_uncloneable_bad_format(self, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://abc/pool/image/snap'} formats = ['qcow2', 'vmdk', 'vdi'] for f in formats: image_meta = {'disk_format': f} self.assertFalse( self.driver.is_cloneable(location, image_meta)) self.assertTrue(mock_get_fsid.called) @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid') def test_uncloneable_missing_format(self, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://abc/pool/image/snap'} image_meta = {} self.assertFalse( self.driver.is_cloneable(location, image_meta)) self.assertTrue(mock_get_fsid.called) @mock.patch.object(utils, 'execute') def test_get_mon_addrs(self, mock_execute): mock_execute.return_value = (CEPH_MON_DUMP, '') hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver.get_mon_addrs()) @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados') @mock.patch.object(rbd_utils, 'rbd') def test_rbd_conf_features(self, mock_rbd, mock_connect): mock_rbd.RBD_FEATURE_LAYERING = 1 mock_cluster = mock.Mock() mock_cluster.conf_get = mock.Mock() mock_cluster.conf_get.return_value = None mock_connect.return_value = (mock_cluster, None) client = rbd_utils.RADOSClient(self.driver) self.assertEqual(1, client.features) mock_cluster.conf_get.return_value = '2' self.assertEqual(2, client.features) @mock.patch.object(rbd_utils, 'RADOSClient') @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_clone(self, mock_rados, mock_rbd, mock_client): pool = u'images' image = u'image-name' snap = u'snapshot-name' location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)} client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) rbd = mock_rbd.RBD.return_value self.driver.clone(location, self.volume_name) args = [client_stack[0].ioctx, str(image), str(snap), client_stack[1].ioctx, str(self.volume_name)] kwargs = {'features': client.features} rbd.clone.assert_called_once_with(*args, **kwargs) self.assertEqual(2, client.__enter__.call_count) @mock.patch.object(rbd_utils, 'RADOSClient') @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_clone_eperm(self, mock_rados, mock_rbd, mock_client): pool = u'images' image = u'image-name' snap = u'snapshot-name' location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)} client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) setattr(mock_rbd, 'PermissionError', 
test.TestingException) rbd = mock_rbd.RBD.return_value rbd.clone.side_effect = test.TestingException self.assertRaises(exception.Forbidden, self.driver.clone, location, self.volume_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_resize(self, mock_proxy): size = 1024 proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.resize(self.volume_name, size) proxy.resize.assert_called_once_with(size) @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados') @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados') @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_rbd_volume_proxy_init(self, mock_rados, mock_rbd, mock_connect_from_rados, mock_disconnect_from_rados): mock_connect_from_rados.return_value = (None, None) mock_disconnect_from_rados.return_value = (None, None) with rbd_utils.RBDVolumeProxy(self.driver, self.volume_name): mock_connect_from_rados.assert_called_once_with(None) self.assertFalse(mock_disconnect_from_rados.called) mock_disconnect_from_rados.assert_called_once_with(None, None) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_connect_to_rados_default(self, mock_rados, mock_rbd): ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.connect.called) self.assertTrue(self.mock_rados.Rados.open_ioctx.called) self.assertIsInstance(ret[0], self.mock_rados.Rados) self.assertEqual(self.mock_rados.Rados.ioctx, ret[1]) self.mock_rados.Rados.open_ioctx.assert_called_with(self.rbd_pool) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd): ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.connect.called) self.assertTrue(self.mock_rados.Rados.open_ioctx.called) self.assertIsInstance(ret[0], self.mock_rados.Rados) self.assertEqual(self.mock_rados.Rados.ioctx, ret[1]) self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool') @mock.patch.object(rbd_utils, 'rados') def test_connect_to_rados_error(self, mock_rados): mock_rados.Rados.open_ioctx.side_effect = mock_rados.Error self.assertRaises(mock_rados.Error, self.driver._connect_to_rados) mock_rados.Rados.open_ioctx.assert_called_once_with(self.rbd_pool) mock_rados.Rados.shutdown.assert_called_once_with() def test_ceph_args_none(self): self.driver.rbd_user = None self.driver.ceph_conf = None self.assertEqual([], self.driver.ceph_args()) def test_ceph_args_rbd_user(self): self.driver.rbd_user = 'foo' self.driver.ceph_conf = None self.assertEqual(['--id', 'foo'], self.driver.ceph_args()) def test_ceph_args_ceph_conf(self): self.driver.rbd_user = None self.driver.ceph_conf = '/path/bar.conf' self.assertEqual(['--conf', '/path/bar.conf'], self.driver.ceph_args()) def test_ceph_args_rbd_user_and_ceph_conf(self): self.driver.rbd_user = 'foo' self.driver.ceph_conf = '/path/bar.conf' self.assertEqual(['--id', 'foo', '--conf', '/path/bar.conf'], self.driver.ceph_args()) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_exists(self, mock_proxy): snapshot = 'snap' proxy = mock_proxy.return_value self.assertTrue(self.driver.exists(self.volume_name, self.rbd_pool, snapshot)) proxy.__enter__.assert_called_once_with() proxy.__exit__.assert_called_once_with(None, None, None) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') @mock.patch.object(rbd_utils, 'RADOSClient') def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd): instance = 
objects.Instance(id=1, uuid='12345', task_state=None) rbd = mock_rbd.RBD.return_value rbd.list.return_value = ['12345_test', '111_test'] client = mock_client.return_value self.driver.cleanup_volumes(instance) rbd.remove.assert_called_once_with(client.ioctx, '12345_test') client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') @mock.patch.object(rbd_utils, 'RADOSClient') def _test_cleanup_exception(self, exception_name, mock_client, mock_rados, mock_rbd): instance = objects.Instance(id=1, uuid='12345', task_state=None) setattr(mock_rbd, exception_name, test.TestingException) rbd = mock_rbd.RBD.return_value rbd.remove.side_effect = test.TestingException rbd.list.return_value = ['12345_test', '111_test'] client = mock_client.return_value with mock.patch('eventlet.greenthread.sleep'): self.driver.cleanup_volumes(instance) rbd.remove.assert_any_call(client.ioctx, '12345_test') # NOTE(danms): 10 retries + 1 final attempt to propagate = 11 self.assertEqual(11, len(rbd.remove.call_args_list)) def test_cleanup_volumes_fail_not_found(self): self._test_cleanup_exception('ImageBusy') def test_cleanup_volumes_fail_snapshots(self): self._test_cleanup_exception('ImageHasSnapshots') def test_cleanup_volumes_fail_other(self): self.assertRaises(test.TestingException, self._test_cleanup_exception, 'DoesNotExist') @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') @mock.patch.object(rbd_utils, 'RADOSClient') @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_cleanup_volumes_pending_resize(self, mock_proxy, mock_client, mock_rados, mock_rbd): instance = objects.Instance(id=1, uuid='12345', task_state=None) setattr(mock_rbd, 'ImageHasSnapshots', test.TestingException) rbd = mock_rbd.RBD.return_value rbd.remove.side_effect = [test.TestingException, None] rbd.list.return_value = ['12345_test', '111_test'] proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.list_snaps.return_value = [ {'name': libvirt_utils.RESIZE_SNAPSHOT_NAME}] client = mock_client.return_value self.driver.cleanup_volumes(instance) remove_call = mock.call(client.ioctx, '12345_test') rbd.remove.assert_has_calls([remove_call, remove_call]) proxy.remove_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') @mock.patch.object(rbd_utils, 'RADOSClient') def test_cleanup_volumes_reverting_resize(self, mock_client, mock_rados, mock_rbd): instance = objects.Instance(id=1, uuid='12345', task_state=task_states.RESIZE_REVERTING) rbd = mock_rbd.RBD.return_value rbd.list.return_value = ['12345_test', '111_test', '12345_test_disk.local'] client = mock_client.return_value self.driver.cleanup_volumes(instance) rbd.remove.assert_called_once_with(client.ioctx, '12345_test_disk.local') client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') @mock.patch.object(rbd_utils, 'RADOSClient') def test_destroy_volume(self, mock_client, mock_rados, mock_rbd): rbd = mock_rbd.RBD.return_value vol = '12345_test' client = mock_client.return_value self.driver.destroy_volume(vol) rbd.remove.assert_called_once_with(client.ioctx, vol) client.__enter__.assert_called_once_with() 
client.__exit__.assert_called_once_with(None, None, None) @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'rados') @mock.patch.object(rbd_utils, 'RADOSClient') def test_remove_image(self, mock_client, mock_rados, mock_rbd): name = '12345_disk.config.rescue' rbd = mock_rbd.RBD.return_value client = mock_client.return_value self.driver.remove_image(name) rbd.remove.assert_called_once_with(client.ioctx, name) # Make sure that we entered and exited the RADOSClient client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_create_snap(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.create_snap(self.volume_name, self.snap_name) proxy.create_snap.assert_called_once_with(self.snap_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_create_protected_snap(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.is_protected_snap.return_value = False self.driver.create_snap(self.volume_name, self.snap_name, protect=True) proxy.create_snap.assert_called_once_with(self.snap_name) proxy.is_protected_snap.assert_called_once_with(self.snap_name) proxy.protect_snap.assert_called_once_with(self.snap_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_remove_snap(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.list_snaps.return_value = [{'name': self.snap_name}] proxy.is_protected_snap.return_value = False self.driver.remove_snap(self.volume_name, self.snap_name) proxy.remove_snap.assert_called_once_with(self.snap_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_remove_snap_force(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.is_protected_snap.return_value = True proxy.list_snaps.return_value = [{'name': self.snap_name}] self.driver.remove_snap(self.volume_name, self.snap_name, force=True) proxy.is_protected_snap.assert_called_once_with(self.snap_name) proxy.unprotect_snap.assert_called_once_with(self.snap_name) proxy.remove_snap.assert_called_once_with(self.snap_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_remove_snap_does_nothing_when_no_snapshot(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.list_snaps.return_value = [{'name': 'some-other-snaphot'}] self.driver.remove_snap(self.volume_name, self.snap_name) self.assertFalse(proxy.remove_snap.called) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_remove_snap_does_nothing_when_protected(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.is_protected_snap.return_value = True proxy.list_snaps.return_value = [{'name': self.snap_name}] self.driver.remove_snap(self.volume_name, self.snap_name) self.assertFalse(proxy.remove_snap.called) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_remove_snap_protected_ignore_errors(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.is_protected_snap.return_value = True proxy.list_snaps.return_value = [{'name': self.snap_name}] self.driver.remove_snap(self.volume_name, self.snap_name, ignore_errors=True) proxy.remove_snap.assert_called_once_with(self.snap_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_parent_info(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy 
self.driver.parent_info(self.volume_name) proxy.parent_info.assert_called_once_with() @mock.patch.object(rbd_utils, 'rbd') @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_parent_info_throws_exception_on_error(self, mock_proxy, mock_rbd): setattr(mock_rbd, 'ImageNotFound', test.TestingException) proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.parent_info.side_effect = test.TestingException self.assertRaises(exception.ImageUnacceptable, self.driver.parent_info, self.volume_name) @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_flatten(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.flatten(self.volume_name) proxy.flatten.assert_called_once_with() @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_rollback_to_snap(self, mock_proxy): proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy self.assertRaises(exception.SnapshotNotFound, self.driver.rollback_to_snap, self.volume_name, self.snap_name) proxy.list_snaps.return_value = [{'name': self.snap_name}, ] self.driver.rollback_to_snap(self.volume_name, self.snap_name) proxy.rollback_to_snap.assert_called_once_with(self.snap_name)
apache-2.0
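Many of the tests above configure a mock so it can be used as a context manager that yields itself (proxy.__enter__.return_value = proxy). The self-contained sketch below shows why that line is needed; the Volume class and the names used are invented for illustration only.

import mock  # standalone library used by these tests; unittest.mock also works


class Volume(object):
    """Stand-in for code that opens a proxy as a context manager."""
    def resize(self, proxy_cls, name, size):
        with proxy_cls(name) as proxy:
            proxy.resize(size)


proxy_cls = mock.MagicMock()
proxy = proxy_cls.return_value
# Without this, `with proxy_cls(...)` would yield a different child mock
# and the assertion below could not see the resize() call.
proxy.__enter__.return_value = proxy

Volume().resize(proxy_cls, 'volume-00000001', 1024)
proxy.resize.assert_called_once_with(1024)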
srvg/ansible
lib/ansible/module_utils/facts/network/generic_bsd.py
52
12273
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import socket import struct from ansible.module_utils.facts.network.base import Network class GenericBsdIfconfigNetwork(Network): """ This is a generic BSD subclass of Network using the ifconfig command. It defines - interfaces (a list of interface names) - interface_<name> dictionary of ipv4, ipv6, and mac address information. - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses. """ platform = 'Generic_BSD_Ifconfig' def populate(self, collected_facts=None): network_facts = {} ifconfig_path = self.module.get_bin_path('ifconfig') if ifconfig_path is None: return network_facts route_path = self.module.get_bin_path('route') if route_path is None: return network_facts default_ipv4, default_ipv6 = self.get_default_interfaces(route_path) interfaces, ips = self.get_interfaces_info(ifconfig_path) interfaces = self.detect_type_media(interfaces) self.merge_default_interface(default_ipv4, interfaces, 'ipv4') self.merge_default_interface(default_ipv6, interfaces, 'ipv6') network_facts['interfaces'] = sorted(list(interfaces.keys())) for iface in interfaces: network_facts[iface] = interfaces[iface] network_facts['default_ipv4'] = default_ipv4 network_facts['default_ipv6'] = default_ipv6 network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses'] network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses'] return network_facts def detect_type_media(self, interfaces): for iface in interfaces: if 'media' in interfaces[iface]: if 'ether' in interfaces[iface]['media'].lower(): interfaces[iface]['type'] = 'ether' return interfaces def get_default_interfaces(self, route_path): # Use the commands: # route -n get default # route -n get -inet6 default # to find out the default outgoing interface, address, and gateway command = dict(v4=[route_path, '-n', 'get', 'default'], v6=[route_path, '-n', 'get', '-inet6', 'default']) interface = dict(v4={}, v6={}) for v in 'v4', 'v6': if v == 'v6' and not socket.has_ipv6: continue rc, out, err = self.module.run_command(command[v]) if not out: # v6 routing may result in # RTNETLINK answers: Invalid argument continue for line in out.splitlines(): words = line.strip().split(': ') # Collect output from route command if len(words) > 1: if words[0] == 'interface': interface[v]['interface'] = words[1] if words[0] == 'gateway': interface[v]['gateway'] = words[1] # help pick the right interface address on OpenBSD if words[0] == 'if address': interface[v]['address'] = words[1] # help pick the right interface address on NetBSD if words[0] == 'local addr': interface[v]['address'] = words[1] return interface['v4'], interface['v6'] def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'): interfaces = {} current_if = {} ips = dict( all_ipv4_addresses=[], all_ipv6_addresses=[], ) # FreeBSD, DragonflyBSD, NetBSD, 
OpenBSD and macOS all implicitly add '-a' # when running the command 'ifconfig'. # Solaris must explicitly run the command 'ifconfig -a'. rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options]) for line in out.splitlines(): if line: words = line.split() if words[0] == 'pass': continue elif re.match(r'^\S', line) and len(words) > 3: current_if = self.parse_interface_line(words) interfaces[current_if['device']] = current_if elif words[0].startswith('options='): self.parse_options_line(words, current_if, ips) elif words[0] == 'nd6': self.parse_nd6_line(words, current_if, ips) elif words[0] == 'ether': self.parse_ether_line(words, current_if, ips) elif words[0] == 'media:': self.parse_media_line(words, current_if, ips) elif words[0] == 'status:': self.parse_status_line(words, current_if, ips) elif words[0] == 'lladdr': self.parse_lladdr_line(words, current_if, ips) elif words[0] == 'inet': self.parse_inet_line(words, current_if, ips) elif words[0] == 'inet6': self.parse_inet6_line(words, current_if, ips) elif words[0] == 'tunnel': self.parse_tunnel_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) return interfaces, ips def parse_interface_line(self, words): device = words[0][0:-1] current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} current_if['flags'] = self.get_options(words[1]) if 'LOOPBACK' in current_if['flags']: current_if['type'] = 'loopback' current_if['macaddress'] = 'unknown' # will be overwritten later if len(words) >= 5: # Newer FreeBSD versions current_if['metric'] = words[3] current_if['mtu'] = words[5] else: current_if['mtu'] = words[3] return current_if def parse_options_line(self, words, current_if, ips): # Mac has options like this... current_if['options'] = self.get_options(words[0]) def parse_nd6_line(self, words, current_if, ips): # FreeBSD has options like this... 
current_if['options'] = self.get_options(words[1]) def parse_ether_line(self, words, current_if, ips): current_if['macaddress'] = words[1] current_if['type'] = 'ether' def parse_media_line(self, words, current_if, ips): # not sure if this is useful - we also drop information current_if['media'] = words[1] if len(words) > 2: current_if['media_select'] = words[2] if len(words) > 3: current_if['media_type'] = words[3][1:] if len(words) > 4: current_if['media_options'] = self.get_options(words[4]) def parse_status_line(self, words, current_if, ips): current_if['status'] = words[1] def parse_lladdr_line(self, words, current_if, ips): current_if['lladdr'] = words[1] def parse_inet_line(self, words, current_if, ips): # netbsd show aliases like this # lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184 # inet 127.0.0.1 netmask 0xff000000 # inet alias 127.1.1.1 netmask 0xff000000 if words[1] == 'alias': del words[1] address = {'address': words[1]} # cidr style ip address (eg, 127.0.0.1/24) in inet line # used in netbsd ifconfig -e output after 7.1 if '/' in address['address']: ip_address, cidr_mask = address['address'].split('/') address['address'] = ip_address netmask_length = int(cidr_mask) netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length)) address['netmask'] = socket.inet_ntoa(struct.pack('!L', netmask_bin)) if len(words) > 5: address['broadcast'] = words[3] else: # deal with hex netmask if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8: words[3] = '0x' + words[3] if words[3].startswith('0x'): address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16))) else: # otherwise assume this is a dotted quad address['netmask'] = words[3] # calculate the network address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0] netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0] address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)) if 'broadcast' not in address: # broadcast may be given or we need to calculate if len(words) > 5: address['broadcast'] = words[5] else: address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff))) # add to our list of addresses if not words[1].startswith('127.'): ips['all_ipv4_addresses'].append(address['address']) current_if['ipv4'].append(address) def parse_inet6_line(self, words, current_if, ips): address = {'address': words[1]} # using cidr style addresses, ala NetBSD ifconfig post 7.1 if '/' in address['address']: ip_address, cidr_mask = address['address'].split('/') address['address'] = ip_address address['prefix'] = cidr_mask if len(words) > 5: address['scope'] = words[5] else: if (len(words) >= 4) and (words[2] == 'prefixlen'): address['prefix'] = words[3] if (len(words) >= 6) and (words[4] == 'scopeid'): address['scope'] = words[5] localhost6 = ['::1', '::1/128', 'fe80::1%lo0'] if address['address'] not in localhost6: ips['all_ipv6_addresses'].append(address['address']) current_if['ipv6'].append(address) def parse_tunnel_line(self, words, current_if, ips): current_if['type'] = 'tunnel' def parse_unknown_line(self, words, current_if, ips): # we are going to ignore unknown lines here - this may be # a bad idea - but you can override it in your subclass pass # TODO: these are module scope static function candidates # (most of the class is really...) 
def get_options(self, option_string): start = option_string.find('<') + 1 end = option_string.rfind('>') if (start > 0) and (end > 0) and (end > start + 1): option_csv = option_string[start:end] return option_csv.split(',') else: return [] def merge_default_interface(self, defaults, interfaces, ip_type): if 'interface' not in defaults: return if not defaults['interface'] in interfaces: return ifinfo = interfaces[defaults['interface']] # copy all the interface values across except addresses for item in ifinfo: if item != 'ipv4' and item != 'ipv6': defaults[item] = ifinfo[item] ipinfo = [] if 'address' in defaults: ipinfo = [x for x in ifinfo[ip_type] if x['address'] == defaults['address']] if len(ipinfo) == 0: ipinfo = ifinfo[ip_type] if len(ipinfo) > 0: for item in ipinfo[0]: defaults[item] = ipinfo[0][item]
gpl-3.0
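The inet parsing above derives netmask, network, and broadcast by packing the dotted-quad strings into 32-bit integers. A minimal standalone sketch of the same arithmetic, using only the standard library (the function name and sample address are illustrative, not part of the module):

import socket
import struct

def summarize_ipv4(address, prefixlen):
    # Same netmask construction as the fact module: (1 << 32) - (1 << 32 >> prefixlen)
    netmask_bin = (1 << 32) - (1 << 32 >> prefixlen)
    address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
    return {
        'address': address,
        'netmask': socket.inet_ntoa(struct.pack('!L', netmask_bin)),
        # network = address AND netmask; broadcast = address OR (NOT netmask)
        'network': socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)),
        'broadcast': socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff))),
    }

if __name__ == '__main__':
    # {'address': '192.0.2.10', 'netmask': '255.255.255.0',
    #  'network': '192.0.2.0', 'broadcast': '192.0.2.255'}
    print(summarize_ipv4('192.0.2.10', 24))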
SaturdayNeighborhoodHealthClinic/clintools
workup/test_views.py
2
11939
from __future__ import print_function from __future__ import unicode_literals from builtins import range from django.test import TestCase from django.utils.timezone import now from django.core.urlresolvers import reverse from pttrack.models import Patient, ProviderType from pttrack.test_views import build_provider, log_in_provider from . import models from .tests import wu_dict class ViewsExistTest(TestCase): ''' Verify that views involving the wokrup are functioning. ''' fixtures = ['workup', 'pttrack'] def setUp(self): models.ClinicDate.objects.create( clinic_type=models.ClinicType.objects.first(), clinic_date=now().date()) self.provider = build_provider() log_in_provider(self.client, self.provider) self.wu = models.Workup.objects.create( clinic_day=models.ClinicDate.objects.first(), chief_complaint="SOB", diagnosis="MI", HPI="A", PMH_PSH="B", meds="C", allergies="D", fam_hx="E", soc_hx="F", ros="", pe="", A_and_P="", author=models.Provider.objects.first(), author_type=ProviderType.objects.first(), patient=Patient.objects.first()) def test_clindate_create_redirect(self): '''Verify that if no clindate exists, we're properly redirected to a clindate create page.''' # First delete clindate that's created in setUp. models.ClinicDate.objects.all().delete() pt = Patient.objects.first() pt_url = 'new-workup' response = self.client.get(reverse(pt_url, args=(pt.id,))) self.assertEqual(response.status_code, 302) self.assertRedirects(response, reverse('new-clindate', args=(pt.id,))) def test_new_workup_view(self): pt = Patient.objects.first() response = self.client.get(reverse('new-workup', args=(pt.id,))) self.assertEqual(response.status_code, 200) def test_workup_urls(self): wu_urls = ['workup', 'workup-update'] # test the creation of many workups, just in case. for i in range(10): models.Workup.objects.bulk_create( [models.Workup(**wu_dict()) for i in range(77)]) wu = models.Workup.objects.last() wu.diagnosis_categories.add(models.DiagnosisType.objects.first()) for wu_url in wu_urls: response = self.client.get(reverse(wu_url, args=(wu.id,))) self.assertEqual(response.status_code, 200) def test_workup_initial(self): pt = Patient.objects.first() date_string = self.wu.written_datetime.strftime("%B %d, %Y") heading_text = "Migrated from previous workup on %s. Please delete this heading and modify the following:\n\n" % date_string # TODO test use of settings.OSLER_WORKUP_COPY_FORWARD_FIELDS response = self.client.get(reverse('new-workup', args=(pt.id,))) self.assertEqual(response.context['form'].initial['PMH_PSH'], heading_text + "B") self.assertEqual(response.context['form'].initial['meds'], heading_text + "C") self.assertEqual(response.context['form'].initial['allergies'], heading_text + "D") self.assertEqual(response.context['form'].initial['fam_hx'], heading_text + "E") self.assertEqual(response.context['form'].initial['soc_hx'], heading_text + "F") def test_workup_update(self): ''' Updating should be possible always for attendings, only without attestation for non-attendings. ''' # if the wu is unsigned, all can access update. 
for role in ["Preclinical", "Clinical", "Coordinator", "Attending"]: log_in_provider(self.client, build_provider([role])) response = self.client.get( reverse('workup-update', args=(self.wu.id,))) self.assertEqual(response.status_code, 200) self.wu.sign(build_provider(["Attending"]).associated_user) self.wu.save() # nonattesting cannot access for role in ["Preclinical", "Clinical", "Coordinator"]: log_in_provider(self.client, build_provider([role])) response = self.client.get( reverse('workup-update', args=(self.wu.id,))) self.assertRedirects(response, reverse('workup', args=(self.wu.id,))) # attesting can log_in_provider(self.client, build_provider(["Attending"])) response = self.client.get( reverse('workup-update', args=(self.wu.id,))) self.assertEqual(response.status_code, 200) def test_workup_signing(self): ''' Verify that singing is possible for attendings, and not for others. ''' wu_url = "workup-sign" self.wu.diagnosis_categories.add(models.DiagnosisType.objects.first()) self.wu.save() # Fresh workups should be unsigned self.assertFalse(self.wu.signed()) # Providers with can_attend == False should not be able to sign for nonattesting_role in ["Preclinical", "Clinical", "Coordinator"]: log_in_provider(self.client, build_provider([nonattesting_role])) response = self.client.get( reverse(wu_url, args=(self.wu.id,))) self.assertRedirects(response, reverse('workup', args=(self.wu.id,))) self.assertFalse(models.Workup.objects.get(pk=self.wu.id).signed()) # Providers able to attend should be able to sign. log_in_provider(self.client, build_provider(["Attending"])) response = self.client.get(reverse(wu_url, args=(self.wu.id,))) self.assertRedirects(response, reverse('workup', args=(self.wu.id,)),) # the self.wu has been updated, so we have to hit the db again. self.assertTrue(models.Workup.objects.get(pk=self.wu.id).signed()) def test_workup_pdf(self): ''' Verify that pdf download with the correct naming protocol is working ''' wu_url = "workup-pdf" self.wu.diagnosis_categories.add(models.DiagnosisType.objects.first()) self.wu.save() for nonstaff_role in ProviderType.objects.filter(staff_view=False): log_in_provider(self.client, build_provider([nonstaff_role])) response = self.client.get(reverse(wu_url, args=(self.wu.id,))) self.assertRedirects(response, reverse('workup', args=(self.wu.id,))) for staff_role in ProviderType.objects.filter(staff_view=True): log_in_provider(self.client, build_provider([staff_role.pk])) response = self.client.get(reverse(wu_url, args=(self.wu.id,))) self.assertEqual(response.status_code, 200) def test_workup_submit(self): """verify we can submit a valid workup as a signer and nonsigner""" for provider_type in ["Attending", "Clinical"]: provider = build_provider([provider_type]) log_in_provider(self.client, provider) pt_id = Patient.objects.first().pk wu_count = models.Workup.objects.all().count() wu_data = wu_dict(units=True) wu_data['diagnosis_categories'] = [ models.DiagnosisType.objects.first().pk] wu_data['clinic_day'] = wu_data['clinic_day'].pk r = self.client.post( reverse('new-workup', args=(pt_id,)), data=wu_data) self.assertRedirects(r, reverse("patient-detail", args=(pt_id,))) self.assertEqual(wu_count + 1, models.Workup.objects.all().count()) self.assertEqual( models.Workup.objects.last().signed(), provider.clinical_roles.first().signs_charts) def test_invalid_workup_submit_preserves_units(self): # first, craft a workup that has units, but fail to set the # diagnosis categories, so that it will fail to be accepted. 
wu_data = wu_dict(units=True) pt_id = Patient.objects.first().pk r = self.client.post( reverse('new-workup', args=(pt_id,)), data=wu_data) # verify we're bounced back to workup-create self.assertEqual(r.status_code, 200) self.assertTemplateUsed(r, 'workup/workup-create.html') self.assertFormError(r, 'form', 'diagnosis_categories', 'This field is required.') for unit in ['height_units', 'weight_units', 'temperature_units']: self.assertContains(r, '<input name="%s"' % (unit)) self.assertEqual( r.context['form'][unit].value(), wu_data[unit]) class TestProgressNoteViews(TestCase): ''' Verify that views involving the wokrup are functioning. ''' fixtures = ['workup', 'pttrack'] def setUp(self): self.formdata = { 'title': 'Depression', 'text': 'so sad does testing work???', 'patient': Patient.objects.first(), 'author': models.Provider.objects.first(), 'author_type': ProviderType.objects.first() } models.ClinicDate.objects.create( clinic_type=models.ClinicType.objects.first(), clinic_date=now().date()) provider = build_provider() log_in_provider(self.client, provider) def test_progressnote_urls(self): url = reverse('new-progress-note', args=(1,)) response = self.client.get(url) self.assertEqual(response.status_code, 200) response = self.client.post(url, self.formdata) self.assertRedirects(response, reverse('patient-detail', args=(1,))) response = self.client.get(reverse('progress-note-update', args=(1,))) self.assertEqual(response.status_code, 200) self.formdata['text'] = 'actually not so bad' response = self.client.post(url, self.formdata) self.assertRedirects( response, reverse('patient-detail', args=(1,))) def test_progressnote_signing(self): """Verify that singing is possible for attendings and not for others. """ sign_url = "progress-note-sign" pn = models.ProgressNote.objects.create( title='Depression', text='so sad does testing work???', patient=Patient.objects.first(), author=models.Provider.objects.first(), author_type=ProviderType.objects.first() ) # Fresh notes should be unsigned self.assertFalse(pn.signed()) # Providers with can_attend == False should not be able to sign for nonattesting_role in ["Preclinical", "Clinical", "Coordinator"]: log_in_provider(self.client, build_provider([nonattesting_role])) response = self.client.get( reverse(sign_url, args=(pn.id,))) self.assertRedirects(response, reverse('progress-note-detail', args=(pn.id,))) self.assertFalse(models.ProgressNote.objects .get(pk=pn.id) .signed()) # Providers able to attend should be able to sign. log_in_provider(self.client, build_provider(["Attending"])) response = self.client.get(reverse(sign_url, args=(pn.id,))) self.assertRedirects(response, reverse('progress-note-detail', args=(pn.id,)),) # the pn has been updated, so we have to hit the db again. self.assertTrue(models.ProgressNote.objects.get(pk=pn.id).signed())
mit
crafty78/ansible
lib/ansible/modules/network/eos/_eos_template.py
24
7407
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: eos_template version_added: "2.1" author: "Peter Sprygada (@privateip)" short_description: Manage Arista EOS device configurations description: - Manages network device configurations over SSH or eAPI. This module allows implementers to work with the device running-config. It provides a way to push a set of commands onto a network device by evaluating the current running-config and only pushing configuration commands that are not already configured. The config source can be a set of commands or a template. deprecated: Deprecated in 2.2. Use eos_config instead extends_documentation_fragment: eos options: src: description: - The path to the config source. The source can be either a file with config or a template that will be merged during runtime. By default the task will search for the source file in role or playbook root folder in templates directory. required: true force: description: - The force argument instructs the module to not consider the current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. required: false default: false choices: ['yes', 'no'] include_defaults: description: - By default when the M(eos_template) connects to the remote device to retrieve the configuration it will issue the C(show running-config) command. If this option is set to True then the issued command will be C(show running-config all). required: false default: false choices: ['yes', 'no'] backup: description: - When this argument is configured true, the module will backup the running-config from the node prior to making any changes. The backup file will be written to backup_{{ hostname }} in the root of the playbook directory. required: false default: false choices: ['yes', 'no'] replace: description: - This argument will cause the provided configuration to be replaced on the destination node. The use of the replace argument will always cause the task to set changed to true and will implies C(force=true). This argument is only valid with C(transport=eapi). required: false default: false choices: ['yes', 'no'] config: description: - The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the implementer to pass in the configuration to use as the base config for comparison. 
required: false default: null """ EXAMPLES = """ - name: Push a configuration onto the device eos_template: src: config.j2 - name: Forceable push a configuration onto the device eos_template: src: config.j2 force: yes - name: Provide the base configuration for comparison eos_template: src: candidate_config.txt config: current_config.txt """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['...', '...'] responses: description: The set of responses from issuing the commands on the device returned: when not check_mode type: list sample: ['...', '...'] """ import re import ansible.module_utils.eos from ansible.module_utils.network import NetworkModule from ansible.module_utils.netcfg import NetworkConfig, dumps def get_config(module): config = module.params.get('config') defaults = module.params['include_defaults'] if not config and not module.params['force']: config = module.config.get_config(include_defaults=defaults) return config def filter_exit(commands): # Filter out configuration mode commands followed immediately by an # exit command indented by one level only, e.g. # - route-map map01 permit 10 # - exit # # Build a temporary list as we filter, then copy the temp list # back onto the commands list. temp = [] ind_prev = 999 count = 0 for c in commands: ind_this = c.count(' ') if re.search(r"^\s*exit$", c) and ind_this == ind_prev + 1: temp.pop() count -= 1 if count != 0: ind_prev = temp[-1].count(' ') continue temp.append(c) ind_prev = ind_this count += 1 return temp def main(): """ main entry point for module execution """ argument_spec = dict( src=dict(required=True), force=dict(default=False, type='bool'), include_defaults=dict(default=False, type='bool'), backup=dict(default=False, type='bool'), replace=dict(default=False, type='bool'), config=dict() ) mutually_exclusive = [('config', 'backup'), ('config', 'force')] module = NetworkModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) replace = module.params['replace'] commands = list() running = None result = dict(changed=False) candidate = NetworkConfig(contents=module.params['src'], indent=3) if replace: if module.params['transport'] == 'cli': module.fail_json(msg='config replace is only supported over eapi') commands = str(candidate).split('\n') else: contents = get_config(module) if contents: running = NetworkConfig(contents=contents, indent=3) result['_backup'] = contents if not module.params['force']: commands = candidate.difference((running or list())) commands = dumps(commands, 'commands').split('\n') commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') commands = filter_exit(commands) if commands: if not module.check_mode: response = module.config.load_config(commands, replace=replace, commit=True) result['responses'] = response result['changed'] = True result['updates'] = commands module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
thiriel/maps
venv/lib/python2.7/site-packages/django/contrib/auth/tests/models.py
76
4304
from django.conf import settings from django.test import TestCase from django.test.utils import override_settings from django.contrib.auth.models import (Group, User, SiteProfileNotAvailable, UserManager) class ProfileTestCase(TestCase): fixtures = ['authtestdata.json'] def setUp(self): """Backs up the AUTH_PROFILE_MODULE""" self.old_AUTH_PROFILE_MODULE = getattr(settings, 'AUTH_PROFILE_MODULE', None) def tearDown(self): """Restores the AUTH_PROFILE_MODULE -- if it was not set it is deleted, otherwise the old value is restored""" if self.old_AUTH_PROFILE_MODULE is None and \ hasattr(settings, 'AUTH_PROFILE_MODULE'): del settings.AUTH_PROFILE_MODULE if self.old_AUTH_PROFILE_MODULE is not None: settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE def test_site_profile_not_available(self): # calling get_profile without AUTH_PROFILE_MODULE set if hasattr(settings, 'AUTH_PROFILE_MODULE'): del settings.AUTH_PROFILE_MODULE user = User.objects.get(username='testclient') self.assertRaises(SiteProfileNotAvailable, user.get_profile) # Bad syntax in AUTH_PROFILE_MODULE: settings.AUTH_PROFILE_MODULE = 'foobar' self.assertRaises(SiteProfileNotAvailable, user.get_profile) # module that doesn't exist settings.AUTH_PROFILE_MODULE = 'foo.bar' self.assertRaises(SiteProfileNotAvailable, user.get_profile) ProfileTestCase = override_settings(USE_TZ=False)(ProfileTestCase) class NaturalKeysTestCase(TestCase): fixtures = ['authtestdata.json'] def test_user_natural_key(self): staff_user = User.objects.get(username='staff') self.assertEquals(User.objects.get_by_natural_key('staff'), staff_user) self.assertEquals(staff_user.natural_key(), ('staff',)) def test_group_natural_key(self): users_group = Group.objects.create(name='users') self.assertEquals(Group.objects.get_by_natural_key('users'), users_group) NaturalKeysTestCase = override_settings(USE_TZ=False)(NaturalKeysTestCase) class LoadDataWithoutNaturalKeysTestCase(TestCase): fixtures = ['regular.json'] def test_user_is_created_and_added_to_group(self): user = User.objects.get(username='my_username') group = Group.objects.get(name='my_group') self.assertEquals(group, user.groups.get()) LoadDataWithoutNaturalKeysTestCase = override_settings(USE_TZ=False)(LoadDataWithoutNaturalKeysTestCase) class LoadDataWithNaturalKeysTestCase(TestCase): fixtures = ['natural.json'] def test_user_is_created_and_added_to_group(self): user = User.objects.get(username='my_username') group = Group.objects.get(name='my_group') self.assertEquals(group, user.groups.get()) LoadDataWithNaturalKeysTestCase = override_settings(USE_TZ=False)(LoadDataWithNaturalKeysTestCase) class UserManagerTestCase(TestCase): def test_create_user(self): email_lowercase = 'normal@normal.com' user = User.objects.create_user('user', email_lowercase) self.assertEquals(user.email, email_lowercase) self.assertEquals(user.username, 'user') self.assertEquals(user.password, '!') def test_create_user_email_domain_normalize_rfc3696(self): # According to http://tools.ietf.org/html/rfc3696#section-3 # the "@" symbol can be part of the local part of an email address returned = UserManager.normalize_email(r'Abc\@DEF@EXAMPLE.com') self.assertEquals(returned, r'Abc\@DEF@example.com') def test_create_user_email_domain_normalize(self): returned = UserManager.normalize_email('normal@DOMAIN.COM') self.assertEquals(returned, 'normal@domain.com') def test_create_user_email_domain_normalize_with_whitespace(self): returned = UserManager.normalize_email('email\ with_whitespace@D.COM') self.assertEquals(returned, 'email\ 
with_whitespace@d.com')

    def test_empty_username(self):
        self.assertRaisesMessage(ValueError,
                                 'The given username must be set',
                                 User.objects.create_user, username='')
bsd-3-clause
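The normalize_email tests above depend on only the domain part of the address being lowercased, since per RFC 3696 the local part may be case-sensitive and may contain an escaped "@". A standalone sketch of that behaviour (not the literal Django implementation):

def normalize_email(email):
    # Split on the last "@" so an escaped "@" in the local part survives,
    # then lowercase only the domain.
    email = email or ''
    try:
        local_part, domain_part = email.strip().rsplit('@', 1)
    except ValueError:
        return email
    return '@'.join([local_part, domain_part.lower()])

assert normalize_email(r'Abc\@DEF@EXAMPLE.com') == r'Abc\@DEF@example.com'
assert normalize_email('normal@DOMAIN.COM') == 'normal@domain.com'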
crafty78/ansible
lib/ansible/modules/database/misc/mongodb_parameter.py
16
7296
#!/usr/bin/python # -*- coding: utf-8 -*- """ (c) 2016, Loic Blot <loic.blot@unix-experience.fr> Sponsored by Infopro Digital. http://www.infopro-digital.com/ Sponsored by E.T.A.I. http://www.etai.fr/ This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: mongodb_parameter short_description: Change an administrative parameter on a MongoDB server. description: - Change an administrative parameter on a MongoDB server. version_added: "2.1" options: login_user: description: - The username used to authenticate with required: false default: null login_password: description: - The password used to authenticate with required: false default: null login_host: description: - The host running the database required: false default: localhost login_port: description: - The port to connect to required: false default: 27017 login_database: description: - The database where login credentials are stored required: false default: null replica_set: description: - Replica set to connect to (automatically connects to primary for writes) required: false default: null database: description: - The name of the database to add/remove the user from required: true ssl: description: - Whether to use an SSL connection when connecting to the database required: false default: false param: description: - MongoDB administrative parameter to modify required: true value: description: - MongoDB administrative parameter value to set required: true param_type: description: - Define the parameter value (str, int) required: false default: str notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] author: "Loic Blot (@nerzhul)" ''' EXAMPLES = ''' # Set MongoDB syncdelay to 60 (this is an int) - mongodb_parameter: param: syncdelay value: 60 param_type: int ''' RETURN = ''' before: description: value before modification returned: success type: string after: description: value after modification returned: success type: string ''' import ConfigParser try: from pymongo.errors import ConnectionFailure from pymongo.errors import OperationFailure from pymongo import version as PyMongoVersion from pymongo import MongoClient except ImportError: try: # for older PyMongo 2.2 from pymongo import Connection as MongoClient except ImportError: pymongo_found = False else: pymongo_found = True else: pymongo_found = True # ========================================= # MongoDB module specific support methods. 
# def load_mongocnf(): config = ConfigParser.RawConfigParser() mongocnf = os.path.expanduser('~/.mongodb.cnf') try: config.readfp(open(mongocnf)) creds = dict( user=config.get('client', 'user'), password=config.get('client', 'pass') ) except (ConfigParser.NoOptionError, IOError): return False return creds # ========================================= # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default=None), login_password=dict(default=None, no_log=True), login_host=dict(default='localhost'), login_port=dict(default=27017, type='int'), login_database=dict(default=None), replica_set=dict(default=None), param=dict(default=None, required=True), value=dict(default=None, required=True), param_type=dict(default="str", choices=['str', 'int']), ssl=dict(default=False, type='bool'), ) ) if not pymongo_found: module.fail_json(msg='the python pymongo module is required') login_user = module.params['login_user'] login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] login_database = module.params['login_database'] replica_set = module.params['replica_set'] ssl = module.params['ssl'] param = module.params['param'] param_type = module.params['param_type'] value = module.params['value'] # Verify parameter is coherent with specified type try: if param_type == 'int': value = int(value) except ValueError: e = get_exception() module.fail_json(msg="value '%s' is not %s" % (value, param_type)) try: if replica_set: client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) else: client = MongoClient(login_host, int(login_port), ssl=ssl) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password, source=login_database) except ConnectionFailure: e = get_exception() module.fail_json(msg='unable to connect to database: %s' % str(e)) db = client.admin try: after_value = db.command("setParameter", **{param: int(value)}) except OperationFailure: e = get_exception() module.fail_json(msg="unable to change parameter: %s" % str(e)) if "was" not in after_value: module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.") else: module.exit_json(changed=(value != after_value["was"]), before=after_value["was"], after=value) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.pycompat24 import get_exception if __name__ == '__main__': main()
gpl-3.0
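Stripped of the Ansible plumbing, the module issues a single setParameter admin command. A minimal pymongo sketch (assuming pymongo 2.x/3.x and a local mongod reachable without authentication):

from pymongo import MongoClient

client = MongoClient('localhost', 27017)
# Equivalent of the module's db.command("setParameter", **{param: value}):
result = client.admin.command('setParameter', syncdelay=60)
# MongoDB echoes the previous value under "was"; the module compares that
# to the requested value to decide whether to report "changed".
print(result.get('was'))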
mbrukman/flocker
flocker/route/_memory.py
15
1939
# Copyright Hybrid Logic Ltd. See LICENSE file for details. """ Objects related to an in-memory implementation of ``INetwork``. """ from zope.interface import implementer from eliot import Logger from ._interfaces import INetwork from ._model import Proxy, OpenPort @implementer(INetwork) class MemoryNetwork(object): """ An isolated, in-memory-only implementation of ``INetwork``. :ivar set _proxies: A ``set`` of ``Proxy`` instances representing all of the proxies supposedly configured on this network. """ logger = Logger() def __init__(self, used_ports): self._proxies = set() self._used_ports = used_ports self._open_ports = set() def create_proxy_to(self, ip, port): proxy = Proxy(ip=ip, port=port) self._proxies.add(proxy) return proxy def delete_proxy(self, proxy): self._proxies.remove(proxy) def open_port(self, port): open_port = OpenPort(port=port) self._open_ports.add(open_port) return open_port def delete_open_port(self, open_port): self._open_ports.remove(open_port) def enumerate_proxies(self): return list(self._proxies) def enumerate_open_ports(self): return list(self._open_ports) def enumerate_used_ports(self): proxy_ports = frozenset(proxy.port for proxy in self._proxies) open_ports = frozenset(open_port.port for open_port in self._open_ports) return proxy_ports | open_ports | self._used_ports def make_memory_network(used_ports=frozenset()): """ Create a new, isolated, in-memory-only provider of ``INetwork``. :param frozenset used_ports: Some port numbers which are to be considered already used and included in the result of ``enumerate_used_ports`` when called on the returned object. """ return MemoryNetwork(used_ports=used_ports)
apache-2.0
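A small usage sketch of the in-memory network above (assuming flocker.route exports make_memory_network; the address and ports are arbitrary). Used ports are the union of the pre-seeded set, proxy ports, and explicitly opened ports:

from flocker.route import make_memory_network

network = make_memory_network(used_ports=frozenset([8080]))
proxy = network.create_proxy_to(ip='10.0.0.1', port=4789)
network.open_port(port=4024)
assert network.enumerate_used_ports() == frozenset([8080, 4789, 4024])

# Deleting the proxy releases its port; the pre-seeded 8080 stays reserved.
network.delete_proxy(proxy)
assert network.enumerate_used_ports() == frozenset([8080, 4024])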
jmhsi/justin_tinker
data_science/courses/learning_dl_packages/models/research/object_detection/builders/input_reader_builder_test.py
21
3531
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for input_reader_builder.""" import os import numpy as np import tensorflow as tf from google.protobuf import text_format from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from object_detection.builders import input_reader_builder from object_detection.core import standard_fields as fields from object_detection.protos import input_reader_pb2 class InputReaderBuilderTest(tf.test.TestCase): def create_tf_record(self): path = os.path.join(self.get_temp_dir(), 'tfrecord') writer = tf.python_io.TFRecordWriter(path) image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) with self.test_session(): encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() example = example_pb2.Example(features=feature_pb2.Features(feature={ 'image/encoded': feature_pb2.Feature( bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])), 'image/format': feature_pb2.Feature( bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])), 'image/object/bbox/xmin': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[0.0])), 'image/object/bbox/xmax': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[1.0])), 'image/object/bbox/ymin': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[0.0])), 'image/object/bbox/ymax': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[1.0])), 'image/object/class/label': feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[2])), })) writer.write(example.SerializeToString()) writer.close() return path def test_build_tf_record_input_reader(self): tf_record_path = self.create_tf_record() input_reader_text_proto = """ shuffle: false num_readers: 1 tf_record_input_reader {{ input_path: '{0}' }} """.format(tf_record_path) input_reader_proto = input_reader_pb2.InputReader() text_format.Merge(input_reader_text_proto, input_reader_proto) tensor_dict = input_reader_builder.build(input_reader_proto) sv = tf.train.Supervisor(logdir=self.get_temp_dir()) with sv.prepare_or_wait_for_session() as sess: sv.start_queue_runners(sess) output_dict = sess.run(tensor_dict) self.assertEquals( (4, 5, 3), output_dict[fields.InputDataFields.image].shape) self.assertEquals( [2], output_dict[fields.InputDataFields.groundtruth_classes]) self.assertEquals( (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) self.assertAllEqual( [0.0, 0.0, 1.0, 1.0], output_dict[fields.InputDataFields.groundtruth_boxes][0]) if __name__ == '__main__': tf.test.main()
apache-2.0
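For quick inspection, the record written by create_tf_record() can be read back without building a graph. A sketch against the same TF 1.x API the test uses (the path is a placeholder for whatever create_tf_record() returned):

import tensorflow as tf
from tensorflow.core.example import example_pb2

path = '/tmp/tfrecord'  # placeholder; use the path returned by create_tf_record()
for serialized in tf.python_io.tf_record_iterator(path):
    example = example_pb2.Example.FromString(serialized)
    label = example.features.feature['image/object/class/label'].int64_list.value
    print(list(label))  # [2] for the record the test writes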
edylim/simpl-css
node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py
2720
1804
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import gyp.common import gyp.easy_xml as easy_xml class Writer(object): """Visual Studio XML tool file writer.""" def __init__(self, tool_file_path, name): """Initializes the tool file. Args: tool_file_path: Path to the tool file. name: Name of the tool file. """ self.tool_file_path = tool_file_path self.name = name self.rules_section = ['Rules'] def AddCustomBuildRule(self, name, cmd, description, additional_dependencies, outputs, extensions): """Adds a rule to the tool file. Args: name: Name of the rule. description: Description of the rule. cmd: Command line of the rule. additional_dependencies: other files which may trigger the rule. outputs: outputs of the rule. extensions: extensions handled by the rule. """ rule = ['CustomBuildRule', {'Name': name, 'ExecutionDescription': description, 'CommandLine': cmd, 'Outputs': ';'.join(outputs), 'FileExtensions': ';'.join(extensions), 'AdditionalDependencies': ';'.join(additional_dependencies) }] self.rules_section.append(rule) def WriteIfChanged(self): """Writes the tool file.""" content = ['VisualStudioToolFile', {'Version': '8.00', 'Name': self.name }, self.rules_section ] easy_xml.WriteXmlIfChanged(content, self.tool_file_path, encoding="Windows-1252")
mit
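A usage sketch for the Writer above (the rule name, command, and paths are invented for illustration, and gyp must be importable):

from gyp.MSVSToolFile import Writer

writer = Writer('build/my_rules.rules', name='my_rules')
writer.AddCustomBuildRule(
    name='compile_idl',
    cmd='midl.exe /out "$(OutDir)" "$(InputPath)"',
    description='Compiling IDL $(InputName)',
    additional_dependencies=['common.idl'],
    outputs=['$(OutDir)\\$(InputName).h'],
    extensions=['idl'])
writer.WriteIfChanged()  # writes the VisualStudioToolFile XML only if it changed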
Darkmoth/python-django-4
Thing/env/Lib/site-packages/django/utils/lru_cache.py
269
7647
try: from functools import lru_cache except ImportError: # backport of Python's 3.3 lru_cache, written by Raymond Hettinger and # licensed under MIT license, from: # <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/> # Should be removed when Django only supports Python 3.2 and above. from collections import namedtuple from functools import update_wrapper from threading import RLock _CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) class _HashedSeq(list): __slots__ = 'hashvalue' def __init__(self, tup, hash=hash): self[:] = tup self.hashvalue = hash(tup) def __hash__(self): return self.hashvalue def _make_key(args, kwds, typed, kwd_mark = (object(),), fasttypes = {int, str, frozenset, type(None)}, sorted=sorted, tuple=tuple, type=type, len=len): 'Make a cache key from optionally typed positional and keyword arguments' key = args if kwds: sorted_items = sorted(kwds.items()) key += kwd_mark for item in sorted_items: key += item if typed: key += tuple(type(v) for v in args) if kwds: key += tuple(type(v) for k, v in sorted_items) elif len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key) def lru_cache(maxsize=100, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). 
def decorating_function(user_function): cache = dict() stats = [0, 0] # make statistics updateable non-locally HITS, MISSES = 0, 1 # names for the stats fields make_key = _make_key cache_get = cache.get # bound method to lookup key or return None _len = len # localize the global len() function lock = RLock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list root[:] = [root, root, None, None] # initialize by pointing to self nonlocal_root = [root] # make updateable non-locally PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields if maxsize == 0: def wrapper(*args, **kwds): # no caching, just do a statistics update after a successful call result = user_function(*args, **kwds) stats[MISSES] += 1 return result elif maxsize is None: def wrapper(*args, **kwds): # simple caching without ordering or size limit key = make_key(args, kwds, typed) result = cache_get(key, root) # root used here as a unique not-found sentinel if result is not root: stats[HITS] += 1 return result result = user_function(*args, **kwds) cache[key] = result stats[MISSES] += 1 return result else: def wrapper(*args, **kwds): # size limited caching that tracks accesses by recency key = make_key(args, kwds, typed) if kwds or typed else args with lock: link = cache_get(key) if link is not None: # record recent use of the key by moving it to the front of the list root, = nonlocal_root link_prev, link_next, key, result = link link_prev[NEXT] = link_next link_next[PREV] = link_prev last = root[PREV] last[NEXT] = root[PREV] = link link[PREV] = last link[NEXT] = root stats[HITS] += 1 return result result = user_function(*args, **kwds) with lock: root, = nonlocal_root if key in cache: # getting here means that this same key was added to the # cache while the lock was released. since the link # update is already done, we need only return the # computed result and update the count of misses. pass elif _len(cache) >= maxsize: # use the old root to store the new key and result oldroot = root oldroot[KEY] = key oldroot[RESULT] = result # empty the oldest link and make it the new root root = nonlocal_root[0] = oldroot[NEXT] oldkey = root[KEY] oldvalue = root[RESULT] root[KEY] = root[RESULT] = None # now update the cache dictionary for the new links del cache[oldkey] cache[key] = oldroot else: # put result in a new link at the front of the list last = root[PREV] link = [last, root, key, result] last[NEXT] = root[PREV] = cache[key] = link stats[MISSES] += 1 return result def cache_info(): """Report cache statistics""" with lock: return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() root = nonlocal_root[0] root[:] = [root, root, None, None] stats[:] = [0, 0] wrapper.__wrapped__ = user_function wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return update_wrapper(wrapper, user_function) return decorating_function
gpl-2.0
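Usage is the same as functools.lru_cache on Python 3.3+, which the first branch simply re-exports. A short sketch:

from django.utils.lru_cache import lru_cache

@lru_cache(maxsize=2)
def square(x):
    return x * x

square(2)
square(2)  # served from the cache
square(3)
print(square.cache_info())  # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
square.cache_clear()
print(square.cache_info())  # CacheInfo(hits=0, misses=0, maxsize=2, currsize=0)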
simongoffin/my_odoo_tutorial
openerp/tools/cache.py
99
5907
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## # decorator makes wrappers that have the same API as their wrapped function; # this is important for the openerp.api.guess() that relies on signatures from decorator import decorator from inspect import getargspec import lru import logging logger = logging.getLogger(__name__) class ormcache(object): """ LRU cache decorator for orm methods. """ def __init__(self, skiparg=2, size=8192, multi=None, timeout=None): self.skiparg = skiparg self.size = size self.stat_miss = 0 self.stat_hit = 0 self.stat_err = 0 def __call__(self, method): self.method = method lookup = decorator(self.lookup, method) lookup.clear_cache = self.clear return lookup def stat(self): return "lookup-stats hit=%s miss=%s err=%s ratio=%.1f" % \ (self.stat_hit, self.stat_miss, self.stat_err, (100*float(self.stat_hit))/(self.stat_miss+self.stat_hit)) def lru(self, model): ormcache = model._ormcache try: d = ormcache[self.method] except KeyError: d = ormcache[self.method] = lru.LRU(self.size) return d def lookup(self, method, *args, **kwargs): d = self.lru(args[0]) key = args[self.skiparg:] try: r = d[key] self.stat_hit += 1 return r except KeyError: self.stat_miss += 1 value = d[key] = self.method(*args, **kwargs) return value except TypeError: self.stat_err += 1 return self.method(*args, **kwargs) def clear(self, model, *args): """ Remove *args entry from the cache or all keys if *args is undefined """ d = self.lru(model) if args: logger.warn("ormcache.clear arguments are deprecated and ignored " "(while clearing caches on (%s).%s)", model._name, self.method.__name__) d.clear() model.pool._any_cache_cleared = True class ormcache_context(ormcache): def __init__(self, skiparg=2, size=8192, accepted_keys=()): super(ormcache_context,self).__init__(skiparg,size) self.accepted_keys = accepted_keys def __call__(self, method): # remember which argument is context args = getargspec(method)[0] self.context_pos = args.index('context') return super(ormcache_context, self).__call__(method) def lookup(self, method, *args, **kwargs): d = self.lru(args[0]) # Note. The decorator() wrapper (used in __call__ above) will resolve # arguments, and pass them positionally to lookup(). This is why context # is not passed through kwargs! if self.context_pos < len(args): context = args[self.context_pos] else: context = kwargs.get('context') or {} ckey = [(k, context[k]) for k in self.accepted_keys if k in context] # Beware: do not take the context from args! 
key = args[self.skiparg:self.context_pos] + tuple(ckey) try: r = d[key] self.stat_hit += 1 return r except KeyError: self.stat_miss += 1 value = d[key] = self.method(*args, **kwargs) return value except TypeError: self.stat_err += 1 return self.method(*args, **kwargs) class ormcache_multi(ormcache): def __init__(self, skiparg=2, size=8192, multi=3): assert skiparg <= multi super(ormcache_multi, self).__init__(skiparg, size) self.multi = multi def lookup(self, method, *args, **kwargs): d = self.lru(args[0]) base_key = args[self.skiparg:self.multi] + args[self.multi+1:] ids = args[self.multi] result = {} missed = [] # first take what is available in the cache for i in ids: key = base_key + (i,) try: result[i] = d[key] self.stat_hit += 1 except Exception: self.stat_miss += 1 missed.append(i) if missed: # call the method for the ids that were not in the cache args = list(args) args[self.multi] = missed result.update(method(*args, **kwargs)) # store those new results back in the cache for i in missed: key = base_key + (i,) d[key] = result[i] return result class dummy_cache(object): """ Cache decorator replacement to actually do no caching. """ def __init__(self, *l, **kw): pass def __call__(self, fn): fn.clear_cache = self.clear return fn def clear(self, *l, **kw): pass # For backward compatibility cache = ormcache # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
n0m4dz/odoo
addons/auth_openid/__openerp__.py
307
1628
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'OpenID Authentification', 'version': '2.0', 'category': 'Tools', 'description': """ Allow users to login through OpenID. ==================================== """, 'author': 'OpenERP s.a.', 'maintainer': 'OpenERP s.a.', 'website': 'https://www.odoo.com', 'depends': ['base', 'web'], 'data': [ 'res_users.xml', 'views/auth_openid.xml', ], 'qweb': ['static/src/xml/auth_openid.xml'], 'external_dependencies': { 'python' : ['openid'], }, 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
philsch/ansible
lib/ansible/plugins/callback/osx_say.py
42
3032
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com> # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import subprocess import os from ansible.plugins.callback import CallbackBase FAILED_VOICE = "Zarvox" REGULAR_VOICE = "Trinoids" HAPPY_VOICE = "Cellos" LASER_VOICE = "Princess" SAY_CMD = "/usr/bin/say" class CallbackModule(CallbackBase): """ makes Ansible much more exciting on OS X. """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'osx_say' CALLBACK_NEEDS_WHITELIST = True def __init__(self): super(CallbackModule, self).__init__() # plugin disable itself if say is not present # ansible will not call any callback if disabled is set to True if not os.path.exists(SAY_CMD): self.disabled = True self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__))) def say(self, msg, voice): subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)]) def runner_on_failed(self, host, res, ignore_errors=False): self.say("Failure on host %s" % host, FAILED_VOICE) def runner_on_ok(self, host, res): self.say("pew", LASER_VOICE) def runner_on_skipped(self, host, item=None): self.say("pew", LASER_VOICE) def runner_on_unreachable(self, host, res): self.say("Failure on host %s" % host, FAILED_VOICE) def runner_on_async_ok(self, host, res, jid): self.say("pew", LASER_VOICE) def runner_on_async_failed(self, host, res, jid): self.say("Failure on host %s" % host, FAILED_VOICE) def playbook_on_start(self): self.say("Running Playbook", REGULAR_VOICE) def playbook_on_notify(self, host, handler): self.say("pew", LASER_VOICE) def playbook_on_task_start(self, name, is_conditional): if not is_conditional: self.say("Starting task: %s" % name, REGULAR_VOICE) else: self.say("Notifying task: %s" % name, REGULAR_VOICE) def playbook_on_setup(self): self.say("Gathering facts", REGULAR_VOICE) def playbook_on_play_start(self, name): self.say("Starting play: %s" % name, HAPPY_VOICE) def playbook_on_stats(self, stats): self.say("Play complete", HAPPY_VOICE)
gpl-3.0
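Because CALLBACK_NEEDS_WHITELIST is True, the plugin only runs when enabled in ansible.cfg (typically callback_whitelist = osx_say under [defaults] for Ansible of this vintage). The notification itself is a plain subprocess call; what self.say("Play complete", HAPPY_VOICE) ultimately executes is:

import subprocess

subprocess.call(["/usr/bin/say", "Play complete", "--voice=Cellos"])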
jag1g13/lammps
tools/moltemplate/src/extract_lammps_data.py
29
4357
#!/usr/bin/env python lammps_data_sections = set(['Atoms', 'Masses', 'Bonds', 'Bond Coeffs', 'Angles', 'Angle Coeffs', 'Dihedrals', 'Dihedral Coeffs', 'Impropers', 'Improper Coeffs', 'BondBond Coeffs', # class2 angles 'BondAngle Coeffs', # class2 angles 'MiddleBondTorsion Coeffs', # class2 dihedrals 'EndBondTorsion Coeffs', # class2 dihedrals 'AngleTorsion Coeffs', # class2 dihedrals 'AngleAngleTorsion Coeffs', # class2 dihedrals 'BondBond13 Coeffs', # class2 dihedrals 'AngleAngle Coeffs', # class2 impropers 'Angles By Type', # new. not standard LAMMPS 'Dihedrals By Type',# new. not standard LAMMPS 'Angles By Type']) # new. not standard LAMMPS def DeleteComments(string, escape='\\', comment_char='#'): escaped_state = False for i in range(0,len(string)): if string[i] in escape: if escaped_state: escaped_state = False else: escaped_state = True elif string[i] == comment_char: if not escaped_state: return string[0:i] return string def ExtractDataSection(f, section_name, comment_char = '#', include_section_name = False, return_line_nums = False): inside_section = False if section_name in ('header','Header'): #"Header" section includes beginning inside_section = True nonblank_encountered = False nonheader_encountered = False i = 0 for line_orig in f: return_this_line = False line = DeleteComments(line_orig).strip() if line in lammps_data_sections: nonheader_encountered = True if section_name in ('header', 'Header'): # The "header" section includes all lines at the beginning of the # before any other section is encountered. if nonheader_encountered: return_this_line = False else: return_this_line = True elif line == section_name: inside_section = True nonblank_encountered = False if include_section_name: return_this_line = True # A block of blank lines (which dont immediately follow # the section_name) signal the end of a section: elif len(line) == 0: if inside_section and include_section_name: return_this_line = True if nonblank_encountered: inside_section = False elif line[0] != comment_char: if inside_section: nonblank_encountered = True return_this_line = True if return_this_line: if return_line_nums: yield i else: yield line_orig i += 1 if __name__ == "__main__": import sys lines = sys.stdin.readlines() exclude_sections = False if sys.argv[1] == '-n': exclude_sections = True del sys.argv[1] if not exclude_sections: for section_name in sys.argv[1:]: for line in ExtractDataSection(lines, section_name): sys.stdout.write(line) else: line_nums_exclude = set([]) for section_name in sys.argv[1:]: for line_num in ExtractDataSection(lines, section_name, include_section_name=True, return_line_nums=True): line_nums_exclude.add(line_num) for i in range(0, len(lines)): if i not in line_nums_exclude: sys.stdout.write(lines[i])
gpl-2.0
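A usage sketch of ExtractDataSection() above on an in-memory LAMMPS data file (the contents are made up, and the function is assumed to be importable or pasted into the same file):

data = """LAMMPS data file  # comments after '#' are stripped

2 atoms

Masses

1 12.011

Atoms

1 1 1 0.0 0.0 0.0
2 1 1 0.0 0.0 1.5
""".splitlines(True)

print(list(ExtractDataSection(data, 'Atoms')))
# ['1 1 1 0.0 0.0 0.0\n', '2 1 1 0.0 0.0 1.5\n']
print(list(ExtractDataSection(data, 'header')))
# everything before the first recognized section name ('Masses' here)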
strk/QGIS
python/plugins/processing/script/ScriptEdit.py
32
2311
# -*- coding: utf-8 -*- """ *************************************************************************** ScriptEdit.py --------------------- Date : April 2013 Copyright : (C) 2013 by Alexander Bruy Email : alexander dot bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Alexander Bruy' __date__ = 'April 2013' __copyright__ = '(C) 2013, Alexander Bruy' from qgis.PyQt.QtCore import Qt from qgis.PyQt.QtGui import QKeySequence from qgis.PyQt.QtWidgets import QShortcut from qgis.gui import QgsCodeEditorPython from qgis.PyQt.Qsci import QsciScintilla class ScriptEdit(QgsCodeEditorPython): def __init__(self, parent=None): super().__init__(parent) self.initShortcuts() def initShortcuts(self): (ctrl, shift) = (self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16) # Disable some shortcuts self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl + shift) self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl) # self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Z") + ctrl) # self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Y") + ctrl) # Use Ctrl+Space for autocompletion self.shortcutAutocomplete = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_Space), self) self.shortcutAutocomplete.setContext(Qt.WidgetShortcut) self.shortcutAutocomplete.activated.connect(self.autoComplete)
gpl-2.0
philsch/ansible
lib/ansible/modules/database/misc/elasticsearch_plugin.py
69
6754
#!/usr/bin/python # -*- coding: utf-8 -*- # Ansible module to manage elasticsearch plugins # (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: elasticsearch_plugin short_description: Manage Elasticsearch plugins description: - Manages Elasticsearch plugins. version_added: "2.0" author: Mathew Davies (@ThePixelDeveloper) options: name: description: - Name of the plugin to install. In ES 2.x, the name can be an url or file location required: True state: description: - Desired state of a plugin. required: False choices: ["present", "absent"] default: present url: description: - Set exact URL to download the plugin from (Only works for ES 1.x) required: False default: None timeout: description: - "Timeout setting: 30s, 1m, 1h..." required: False default: 1m plugin_bin: description: - Location of the plugin binary required: False default: /usr/share/elasticsearch/bin/plugin plugin_dir: description: - Your configured plugin directory specified in Elasticsearch required: False default: /usr/share/elasticsearch/plugins/ proxy_host: description: - Proxy host to use during plugin installation required: False default: None version_added: "2.1" proxy_port: description: - Proxy port to use during plugin installation required: False default: None version_added: "2.1" version: description: - Version of the plugin to be installed. 
If plugin exists with previous version, it will NOT be updated required: False default: None ''' EXAMPLES = ''' # Install Elasticsearch head plugin - elasticsearch_plugin: state: present name: mobz/elasticsearch-head # Install specific version of a plugin - elasticsearch_plugin: state: present name: com.github.kzwang/elasticsearch-image version: '1.2.0' # Uninstall Elasticsearch head plugin - elasticsearch_plugin: state: absent name: mobz/elasticsearch-head ''' import os PACKAGE_STATE_MAP = dict( present="install", absent="remove" ) def parse_plugin_repo(string): elements = string.split("/") # We first consider the simplest form: pluginname repo = elements[0] # We consider the form: username/pluginname if len(elements) > 1: repo = elements[1] # remove elasticsearch- prefix # remove es- prefix for string in ("elasticsearch-", "es-"): if repo.startswith(string): return repo[len(string):] return repo def is_plugin_present(plugin_dir, working_dir): return os.path.isdir(os.path.join(working_dir, plugin_dir)) def parse_error(string): reason = "reason: " try: return string[string.index(reason) + len(reason):].strip() except ValueError: return string def install_plugin(module, plugin_bin, plugin_name, version, url, proxy_host, proxy_port, timeout): cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] if version: plugin_name = plugin_name + '/' + version if proxy_host and proxy_port: cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) if url: cmd_args.append("--url %s" % url) if timeout: cmd_args.append("--timeout %s" % timeout) cmd = " ".join(cmd_args) if module.check_mode: rc, out, err = 0, "check mode", "" else: rc, out, err = module.run_command(cmd) if rc != 0: reason = parse_error(out) module.fail_json(msg=reason) return True, cmd, out, err def remove_plugin(module, plugin_bin, plugin_name): cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] cmd = " ".join(cmd_args) if module.check_mode: rc, out, err = 0, "check mode", "" else: rc, out, err = module.run_command(cmd) if rc != 0: reason = parse_error(out) module.fail_json(msg=reason) return True, cmd, out, err def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), url=dict(default=None), timeout=dict(default="1m"), plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin", type="path"), plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), proxy_host=dict(default=None), proxy_port=dict(default=None), version=dict(default=None) ), supports_check_mode=True ) name = module.params["name"] state = module.params["state"] url = module.params["url"] timeout = module.params["timeout"] plugin_bin = module.params["plugin_bin"] plugin_dir = module.params["plugin_dir"] proxy_host = module.params["proxy_host"] proxy_port = module.params["proxy_port"] version = module.params["version"] present = is_plugin_present(parse_plugin_repo(name), plugin_dir) # skip if the state is correct if (present and state == "present") or (state == "absent" and not present): module.exit_json(changed=False, name=name, state=state) if state == "present": changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, url, proxy_host, proxy_port, timeout) elif state == "absent": changed, cmd, out, err = remove_plugin(module, plugin_bin, name) module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) from 
ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
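The plugin-name handling in the elasticsearch_plugin module above strips an optional "username/" component and the "elasticsearch-"/"es-" prefixes before removing a plugin. A small standalone sketch of that parsing step, with hypothetical inputs, makes the behaviour easy to verify outside of Ansible:

def parse_plugin_repo(string):
    # keep only the plugin part of "username/pluginname"
    elements = string.split("/")
    repo = elements[1] if len(elements) > 1 else elements[0]
    # drop the conventional plugin prefixes
    for prefix in ("elasticsearch-", "es-"):
        if repo.startswith(prefix):
            return repo[len(prefix):]
    return repo

assert parse_plugin_repo("mobz/elasticsearch-head") == "head"
assert parse_plugin_repo("es-head") == "head"
assert parse_plugin_repo("analysis-icu") == "analysis-icu"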
philsch/ansible
lib/ansible/modules/database/postgresql/postgresql_db.py
53
10855
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: postgresql_db short_description: Add or remove PostgreSQL databases from a remote host. description: - Add or remove PostgreSQL databases from a remote host. version_added: "0.6" options: name: description: - name of the database to add or remove required: true default: null owner: description: - Name of the role to set as owner of the database required: false default: null template: description: - Template used to create the database required: false default: null encoding: description: - Encoding of the database required: false default: null lc_collate: description: - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template. required: false default: null lc_ctype: description: - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0) is used as template. required: false default: null state: description: - The database state required: false default: present choices: [ "present", "absent" ] author: "Ansible Core Team" extends_documentation_fragment: - postgres ''' EXAMPLES = ''' # Create a new database with name "acme" - postgresql_db: name: acme # Create a new database with name "acme" and specific encoding and locale # settings. If a template different from "template0" is specified, encoding # and locale settings must match those of the template. - postgresql_db: name: acme encoding: UTF-8 lc_collate: de_DE.UTF-8 lc_ctype: de_DE.UTF-8 template: template0 ''' HAS_PSYCOPG2 = False try: import psycopg2 import psycopg2.extras except ImportError: pass else: HAS_PSYCOPG2 = True from ansible.module_utils.six import iteritems import traceback import ansible.module_utils.postgres as pgutils from ansible.module_utils.database import SQLParseError, pg_quote_identifier from ansible.module_utils.basic import get_exception, AnsibleModule class NotSupportedError(Exception): pass # =========================================== # PostgreSQL module specific support methods. 
# def set_owner(cursor, db, owner): query = "ALTER DATABASE %s OWNER TO %s" % ( pg_quote_identifier(db, 'database'), pg_quote_identifier(owner, 'role')) cursor.execute(query) return True def get_encoding_id(cursor, encoding): query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" cursor.execute(query, {'encoding': encoding}) return cursor.fetchone()['encoding_id'] def get_db_info(cursor, db): query = """ SELECT rolname AS owner, pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, datcollate AS lc_collate, datctype AS lc_ctype FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba WHERE datname = %(db)s """ cursor.execute(query, {'db': db}) return cursor.fetchone() def db_exists(cursor, db): query = "SELECT * FROM pg_database WHERE datname=%(db)s" cursor.execute(query, {'db': db}) return cursor.rowcount == 1 def db_delete(cursor, db): if db_exists(cursor, db): query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database') cursor.execute(query) return True else: return False def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype) if not db_exists(cursor, db): query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')] if owner: query_fragments.append('OWNER %s' % pg_quote_identifier(owner, 'role')) if template: query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database')) if encoding: query_fragments.append('ENCODING %(enc)s') if lc_collate: query_fragments.append('LC_COLLATE %(collate)s') if lc_ctype: query_fragments.append('LC_CTYPE %(ctype)s') query = ' '.join(query_fragments) cursor.execute(query, params) return True else: db_info = get_db_info(cursor, db) if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): raise NotSupportedError( 'Changing database encoding is not supported. ' 'Current encoding: %s' % db_info['encoding'] ) elif lc_collate and lc_collate != db_info['lc_collate']: raise NotSupportedError( 'Changing LC_COLLATE is not supported. ' 'Current LC_COLLATE: %s' % db_info['lc_collate'] ) elif lc_ctype and lc_ctype != db_info['lc_ctype']: raise NotSupportedError( 'Changing LC_CTYPE is not supported.' 'Current LC_CTYPE: %s' % db_info['lc_ctype'] ) elif owner and owner != db_info['owner']: return set_owner(cursor, db, owner) else: return False def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype): if not db_exists(cursor, db): return False else: db_info = get_db_info(cursor, db) if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): return False elif lc_collate and lc_collate != db_info['lc_collate']: return False elif lc_ctype and lc_ctype != db_info['lc_ctype']: return False elif owner and owner != db_info['owner']: return False else: return True # =========================================== # Module execution. 
# def main(): argument_spec = pgutils.postgres_common_argument_spec() argument_spec.update(dict( db=dict(required=True, aliases=['name']), owner=dict(default=""), template=dict(default=""), encoding=dict(default=""), lc_collate=dict(default=""), lc_ctype=dict(default=""), state=dict(default="present", choices=["absent", "present"]), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode = True ) if not HAS_PSYCOPG2: module.fail_json(msg="the python psycopg2 module is required") db = module.params["db"] port = module.params["port"] owner = module.params["owner"] template = module.params["template"] encoding = module.params["encoding"] lc_collate = module.params["lc_collate"] lc_ctype = module.params["lc_ctype"] state = module.params["state"] sslrootcert = module.params["ssl_rootcert"] changed = False # To use defaults values, keyword arguments must be absent, so # check which values are empty and don't include in the **kw # dictionary params_map = { "login_host":"host", "login_user":"user", "login_password":"password", "port":"port", "ssl_mode":"sslmode", "ssl_rootcert":"sslrootcert" } kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params) if k in params_map and v != '' and v is not None) # If a login_unix_socket is specified, incorporate it here. is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" if is_localhost and module.params["login_unix_socket"] != "": kw["host"] = module.params["login_unix_socket"] try: pgutils.ensure_libs(sslrootcert=module.params.get('ssl_rootcert')) db_connection = psycopg2.connect(database="postgres", **kw) # Enable autocommit so we can create databases if psycopg2.__version__ >= '2.4.2': db_connection.autocommit = True else: db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) except pgutils.LibraryError: e = get_exception() module.fail_json(msg="unable to connect to database: {0}".format(str(e)), exception=traceback.format_exc()) except TypeError: e = get_exception() if 'sslrootcert' in e.args[0]: module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(e), exception=traceback.format_exc()) module.fail_json(msg="unable to connect to database: %s" % e, exception=traceback.format_exc()) except Exception: e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e, exception=traceback.format_exc()) try: if module.check_mode: if state == "absent": changed = db_exists(cursor, db) elif state == "present": changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype) module.exit_json(changed=changed, db=db) if state == "absent": try: changed = db_delete(cursor, db) except SQLParseError: e = get_exception() module.fail_json(msg=str(e)) elif state == "present": try: changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype) except SQLParseError: e = get_exception() module.fail_json(msg=str(e)) except NotSupportedError: e = get_exception() module.fail_json(msg=str(e)) except SystemExit: # Avoid catching this on Python 2.4 raise except Exception: e = get_exception() module.fail_json(msg="Database query failed: %s" % e) module.exit_json(changed=changed, db=db) if __name__ == '__main__': main()
gpl-3.0
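db_create in the postgresql_db module above assembles a CREATE DATABASE statement from optional fragments and leaves encoding and locale values as psycopg2 placeholders bound at execution time. A minimal sketch of that fragment assembly, shown without a live connection and with simplified identifier quoting (the real module uses pg_quote_identifier), illustrates the kind of statement it produces:

def build_create_db(db, owner=None, template=None, encoding=None,
                    lc_collate=None, lc_ctype=None):
    # simplified stand-in for pg_quote_identifier
    quote = lambda name: '"%s"' % name.replace('"', '""')
    fragments = ['CREATE DATABASE %s' % quote(db)]
    if owner:
        fragments.append('OWNER %s' % quote(owner))
    if template:
        fragments.append('TEMPLATE %s' % quote(template))
    if encoding:
        fragments.append('ENCODING %(enc)s')
    if lc_collate:
        fragments.append('LC_COLLATE %(collate)s')
    if lc_ctype:
        fragments.append('LC_CTYPE %(ctype)s')
    return ' '.join(fragments)

# the %(enc)s / %(collate)s / %(ctype)s placeholders would be filled in by
# cursor.execute(query, params) in the real module
print(build_create_db('acme', template='template0', encoding='UTF-8',
                      lc_collate='de_DE.UTF-8', lc_ctype='de_DE.UTF-8'))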
quintusdias/python-xmp-toolkit
libxmp/__init__.py
2
2325
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009, European Space Agency & European Southern
# Observatory (ESA/ESO)
# Copyright (c) 2008-2009, CRS4 - Centre for Advanced Studies, Research and
# Development in Sardinia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
#     * Neither the name of the European Space Agency, European Southern
#       Observatory, CRS4 nor the names of its contributors may be used to
#       endorse or promote products derived from this software without specific
#       prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESA/ESO AND CRS4 ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESA/ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE

import ctypes
import ctypes.util
from ctypes.util import find_library
import os


class ExempiLoadError(Exception):
    """ Error signaling that the Exempi library cannot be loaded. """
    pass


class XMPError(Exception):
    """ General XMP Error. """
    pass


# Import classes into global namespace
from .core import XMPMeta, XMPIterator
from . import files, core, version
from .files import XMPFiles

__version__ = version.VERSION

__all__ = ['XMPMeta', 'XMPFiles', 'XMPError', 'ExempiLoadError', 'files', 'core']

from . import exempi
exempi.init()
bsd-3-clause
mmgen/mmgen
mmgen/seed.py
1
3277
#!/usr/bin/env python3 # # mmgen = Multi-Mode GENerator, command-line Bitcoin cold storage solution # Copyright (C)2013-2022 The MMGen Project <mmgen@tuta.io> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ seed: Seed-related classes and methods for the MMGen suite """ from .util import make_chksum_8,hexdigits_uc from .objmethods import Hilite,InitErrors,MMGenObject from .obj import ImmutableAttr,get_obj class SeedID(str,Hilite,InitErrors): color = 'blue' width = 8 trunc_ok = False def __new__(cls,seed=None,sid=None): if type(sid) == cls: return sid try: if seed: assert isinstance(seed,SeedBase),'not a subclass of SeedBase' return str.__new__(cls,make_chksum_8(seed.data)) elif sid: assert set(sid) <= set(hexdigits_uc), 'not uppercase hex digits' assert len(sid) == cls.width, f'not {cls.width} characters wide' return str.__new__(cls,sid) raise ValueError('no arguments provided') except Exception as e: return cls.init_fail(e,seed or sid) def is_seed_id(s): return get_obj( SeedID, sid=s, silent=True, return_bool=True ) class SeedBase(MMGenObject): lens = ( 128, 192, 256 ) dfl_len = 256 data = ImmutableAttr(bytes,typeconv=False) sid = ImmutableAttr(SeedID,typeconv=False) def __init__(self,seed_bin=None,nSubseeds=None): if not seed_bin: from .opts import opt from .crypto import get_random from hashlib import sha256 # Truncate random data for smaller seed lengths seed_bin = sha256(get_random(1033)).digest()[:(opt.seed_len or self.dfl_len)//8] elif len(seed_bin)*8 not in self.lens: die(3,f'{len(seed_bin)*8}: invalid seed bit length') self.data = seed_bin self.sid = SeedID(seed=self) self.nSubseeds = nSubseeds # will override opt.subseeds @property def bitlen(self): return len(self.data) * 8 @property def byte_len(self): return len(self.data) @property def hexdata(self): return self.data.hex() @property def fn_stem(self): return self.sid class Seed(SeedBase): @property def subseeds(self): if not hasattr(self,'_subseeds'): from .subseed import SubSeedList from .opts import opt self._subseeds = SubSeedList( self, length = self.nSubseeds or opt.subseeds ) return self._subseeds def subseed(self,*args,**kwargs): return self.subseeds.get_subseed_by_ss_idx(*args,**kwargs) def subseed_by_seed_id(self,*args,**kwargs): return self.subseeds.get_subseed_by_seed_id(*args,**kwargs) def split(self,*args,**kwargs): from .seedsplit import SeedShareList return SeedShareList(self,*args,**kwargs) @staticmethod def join_shares(*args,**kwargs): from .seedsplit import join_shares return join_shares(*args,**kwargs)
gpl-3.0
timlau/yumex
src/poc/test-liststore.py
1
2777
#!/usr/bin/python -tt import gtk import time import gobject import base64 class Dummy: def __init__(self): self.name = 'package' self.ver = '1.0' self.rel = '0.1.fc14' self.arch = 'x86_64' self.summary = "This is a packages" def list_store(model, num=10): for i in xrange(num): print model[i][1] def test_store1(): print "Unsorted ListStore" start = time.time() d = Dummy() store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long) #store = gtk.ListStore(gobject.TYPE_PYOBJECT, str) for i in xrange(20000): store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L]) #store.append([d, d.name]) end = time.time() print ("test_store1 time : %.2f " % (end - start)) list_store(store) def test_store2(): print "TreeModelSort (set_sort_column_id(1, gtk.SORT_ASCENDING) before population)" start = time.time() d = Dummy() store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long) sort_store = gtk.TreeModelSort(store) sort_store.set_sort_column_id(1, gtk.SORT_ASCENDING) for i in xrange(20000): store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L]) end = time.time() print ("test_store2 time : %.2f " % (end - start)) list_store(sort_store) def test_store3(): print "TreeModelSort (set_sort_column_id(1, gtk.SORT_ASCENDING) after population)" start = time.time() d = Dummy() store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long) sort_store = gtk.TreeModelSort(store) #sort_store.set_default_sort_func(lambda *args: -1) for i in xrange(20000): store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L]) sort_store.set_sort_column_id(1, gtk.SORT_ASCENDING) end = time.time() print ("test_store3 time : %.2f " % (end - start)) list_store(sort_store) def test_store4(): start = time.time() d = Dummy() store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long) sort_store = gtk.TreeModelSort(store) #sort_store.set_default_sort_func(lambda *args: -1) sort_store.set_sort_column_id(-1, gtk.SORT_ASCENDING) for i in xrange(20000): store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L]) sort_store.set_sort_column_id(1, gtk.SORT_ASCENDING) end = time.time() print ("test_store4 time : %.2f " % (end - start)) list_store(sort_store) if __name__ == "__main__": test_store1() test_store2() test_store3() test_store4()
gpl-2.0
XWARIOSWX/wnframework
webnotes/model/workflow.py
34
1073
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals
import webnotes

def get_workflow_name(doctype):
    if getattr(webnotes.local, "workflow_names", None) is None:
        webnotes.local.workflow_names = {}

    if doctype not in webnotes.local.workflow_names:
        workflow_name = webnotes.conn.get_value("Workflow",
            {"document_type": doctype, "is_active": "1"}, "name")

        # no active? get default workflow
        if not workflow_name:
            workflow_name = webnotes.conn.get_value("Workflow",
                {"document_type": doctype}, "name")

        webnotes.local.workflow_names[doctype] = workflow_name

    return webnotes.local.workflow_names[doctype]

def get_default_state(doctype):
    workflow_name = get_workflow_name(doctype)
    return webnotes.conn.get_value("Workflow Document State",
        {"parent": workflow_name, "idx": 1}, "state")

def get_state_fieldname(doctype):
    workflow_name = get_workflow_name(doctype)
    return webnotes.conn.get_value("Workflow", workflow_name, "workflow_state_field")
mit
massot/account-invoice-reporting
account_invoice_production_lot/__init__.py
4
1173
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Author: Lorenzo Battistini <lorenzo.battistini@agilebg.com>
#    Copyright (C) 2011 Domsense s.r.l. (<http://www.domsense.com>).
#    Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from . import invoice
from . import tests
agpl-3.0
sunqm/pyscf
examples/mcscf/00-simple_casci.py
2
3014
#!/usr/bin/env python # # Author: Qiming Sun <osirpt.sun@gmail.com> # ''' A simple example to run CASCI calculation. ''' import pyscf mol = pyscf.M( atom = 'O 0 0 0; O 0 0 1.2', basis = 'ccpvdz', spin = 2) myhf = mol.RHF().run() # 6 orbitals, 8 electrons mycas = myhf.CASCI(6, 8).run() # # Note this mycas object can also be created using the APIs of mcscf module: # # from pyscf import mcscf # mycas = mcscf.CASCI(myhf, 6, 8).run() # Natural occupancy in CAS space, Mulliken population etc. mycas.verbose = 4 mycas.analyze() # # By default, the output of analyze() method has 6 parts. # # First two parts are the natural orbital analysis of the active space. The # natural orbitals by default was expanded on the "meta-Lowdin" atomic orbitals. # Meta-lowdin AO is one type of orthogonal orbital, which largely keeps the # atomic nature of the core and valence space. The character of each orbitals # can be roughly read based on the square of the coefficients. # # Natural occ [1.98127707 1.95671369 1.95671369 1.04270854 1.04270854 0.01987847] # Natural orbital (expansion on meta-Lowdin AOs) in CAS space # #1 #2 #3 #4 #5 # 0 O 1s 0.00063 0.00000 0.00000 -0.00000 0.00000 # 0 O 2s 0.30447 0.00000 -0.00000 0.00000 -0.00000 # 0 O 3s 0.04894 -0.00000 -0.00000 -0.00000 0.00000 # 0 O 2px -0.00000 0.05038 0.70413 -0.70572 0.04213 # 0 O 2py -0.00000 0.70413 -0.05038 -0.04213 -0.70572 # 0 O 2pz -0.63298 -0.00000 -0.00000 0.00000 0.00000 # ... # # Next part prints the overlap between the canonical MCSCF orbitals and # HF orbitals of the initial guess. It can be used to measure how close the # initial guess and the MCSCF results are. # ... # <mo_coeff-mcscf|mo_coeff-hf> 12 12 0.60371478 # <mo_coeff-mcscf|mo_coeff-hf> 12 13 0.79720035 # <mo_coeff-mcscf|mo_coeff-hf> 13 12 0.79720035 # <mo_coeff-mcscf|mo_coeff-hf> 13 13 -0.60371478 # <mo_coeff-mcscf|mo_coeff-hf> 14 14 0.99998785 # <mo_coeff-mcscf|mo_coeff-hf> 15 15 -0.61646818 # ... # # Next session is the analysis for CI coefficients. This part is not available # for external FCI solver (such as DMRG, QMC). # # ** Largest CI components ** # [alpha occ-orbitals] [beta occ-orbitals] CI coefficient # [0 1 2 3 4] [0 1 2] 0.973574063441 # [0 1 2 3 4] [0 3 4] -0.187737433798 # # The last two parts of the output are the Mulliken population analysis. To # obtain better transferability, the electron population was computed based on # meta-Lowdin orthogonal orbitals (than the input raw basis which may not # possess AO character) # # ** Mulliken pop on meta-lowdin orthogonal AOs ** # ** Mulliken pop ** # pop of 0 O 1s 1.99999 # pop of 0 O 2s 1.78300 # pop of 0 O 3s 0.00789 # pop of 0 O 2px 1.49626 # pop of 0 O 2py 1.49626 # pop of 0 O 2pz 1.19312 # ...
apache-2.0
brodyberg/autorest
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/Lro/autorestlongrunningoperationtestservice/models/sub_product.py
4
1592
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .sub_resource import SubResource


class SubProduct(SubResource):
    """SubProduct

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Sub Resource Id
    :vartype id: str
    :param provisioning_state:
    :type provisioning_state: str
    :ivar provisioning_state_values: Possible values include: 'Succeeded',
     'Failed', 'canceled', 'Accepted', 'Creating', 'Created', 'Updating',
     'Updated', 'Deleting', 'Deleted', 'OK'
    :vartype provisioning_state_values: str
    """

    _validation = {
        'id': {'readonly': True},
        'provisioning_state_values': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'provisioning_state_values': {'key': 'properties.provisioningStateValues', 'type': 'str'},
    }

    def __init__(self, provisioning_state=None):
        super(SubProduct, self).__init__()
        self.provisioning_state = provisioning_state
        self.provisioning_state_values = None
mit
ianyh/heroku-buildpack-python-opencv
vendor/.heroku/lib/python2.7/email/encoders.py
261
2015
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""Encodings and related functions."""

__all__ = [
    'encode_7or8bit',
    'encode_base64',
    'encode_noop',
    'encode_quopri',
    ]

import base64

from quopri import encodestring as _encodestring


def _qencode(s):
    enc = _encodestring(s, quotetabs=True)
    # Must encode spaces, which quopri.encodestring() doesn't do
    return enc.replace(' ', '=20')


def _bencode(s):
    # We can't quite use base64.encodestring() since it tacks on a "courtesy
    # newline".  Blech!
    if not s:
        return s
    hasnewline = (s[-1] == '\n')
    value = base64.encodestring(s)
    if not hasnewline and value[-1] == '\n':
        return value[:-1]
    return value


def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    orig = msg.get_payload()
    encdata = _bencode(orig)
    msg.set_payload(encdata)
    msg['Content-Transfer-Encoding'] = 'base64'


def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    orig = msg.get_payload()
    encdata = _qencode(orig)
    msg.set_payload(encdata)
    msg['Content-Transfer-Encoding'] = 'quoted-printable'


def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    orig = msg.get_payload()
    if orig is None:
        # There's no payload.  For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # We play a trick to make this go fast.  If encoding to ASCII succeeds, we
    # know the data must be 7bit, otherwise treat it as 8bit.
    try:
        orig.encode('ascii')
    except UnicodeError:
        msg['Content-Transfer-Encoding'] = '8bit'
    else:
        msg['Content-Transfer-Encoding'] = '7bit'


def encode_noop(msg):
    """Do nothing."""
mit
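The encoders above only rewrite a message's payload and set its Content-Transfer-Encoding header; they are normally applied to a MIME part right after the raw payload has been attached. A minimal usage sketch with the standard-library email package:

from email.mime.base import MIMEBase
from email import encoders

part = MIMEBase('application', 'octet-stream')
part.set_payload(b'\x00\x01 some binary bytes')
encoders.encode_base64(part)
print(part['Content-Transfer-Encoding'])  # base64
print(part.get_payload())                 # base64 text, ready to be serialized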
kevhill/luigi
test/mock_test.py
62
2526
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from helpers import unittest from luigi.mock import MockTarget, MockFileSystem class MockFileTest(unittest.TestCase): def test_1(self): t = MockTarget('test') p = t.open('w') print('test', file=p) p.close() q = t.open('r') self.assertEqual(list(q), ['test\n']) q.close() def test_with(self): t = MockTarget("foo") with t.open('w') as b: b.write("bar") with t.open('r') as b: self.assertEqual(list(b), ['bar']) # That should work in python2 because of the autocast # That should work in python3 because the default format is Text def test_unicode(self): t = MockTarget("foo") with t.open('w') as b: b.write(u"bar") with t.open('r') as b: self.assertEqual(b.read(), u'bar') class MockFileSystemTest(unittest.TestCase): fs = MockFileSystem() def _touch(self, path): t = MockTarget(path) with t.open('w'): pass def setUp(self): self.fs.clear() self.path = "/tmp/foo" self.path2 = "/tmp/bar" self._touch(self.path) self._touch(self.path2) def test_exists(self): self.assertTrue(self.fs.exists(self.path)) def test_remove(self): self.fs.remove(self.path) self.assertFalse(self.fs.exists(self.path)) def test_remove_recursive(self): self.fs.remove("/tmp", recursive=True) self.assertFalse(self.fs.exists(self.path)) self.assertFalse(self.fs.exists(self.path2)) def test_listdir(self): self.assertEqual(sorted([self.path, self.path2]), sorted(self.fs.listdir("/tmp"))) class TestImportMockFile(unittest.TestCase): def test_mockfile(self): from luigi.mock import MockFile self.assertTrue(isinstance(MockFile('foo'), MockTarget))
apache-2.0
twobob/buildroot-kindle
output/build/host-python-2.7.2/Lib/email/test/test_email_codecs_renamed.py
294
2842
# Copyright (C) 2002-2006 Python Software Foundation # Contact: email-sig@python.org # email package unit tests for (optional) Asian codecs import unittest from test.test_support import run_unittest from email.test.test_email import TestEmailBase from email.charset import Charset from email.header import Header, decode_header from email.message import Message # We're compatible with Python 2.3, but it doesn't have the built-in Asian # codecs, so we have to skip all these tests. try: unicode('foo', 'euc-jp') except LookupError: raise unittest.SkipTest class TestEmailAsianCodecs(TestEmailBase): def test_japanese_codecs(self): eq = self.ndiffAssertEqual j = Charset("euc-jp") g = Charset("iso-8859-1") h = Header("Hello World!") jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa' ghello = 'Gr\xfc\xdf Gott!' h.append(jhello, j) h.append(ghello, g) # BAW: This used to -- and maybe should -- fold the two iso-8859-1 # chunks into a single encoded word. However it doesn't violate the # standard to have them as two encoded chunks and maybe it's # reasonable <wink> for each .append() call to result in a separate # encoded word. eq(h.encode(), """\ Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?= =?iso-8859-1?q?Gr=FC=DF?= =?iso-8859-1?q?_Gott!?=""") eq(decode_header(h.encode()), [('Hello World!', None), ('\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'), ('Gr\xfc\xdf Gott!', 'iso-8859-1')]) long = 'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9' h = Header(long, j, header_name="Subject") # test a very long header enc = h.encode() # TK: splitting point may differ by codec design and/or Header encoding eq(enc , """\ =?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?= =?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""") # TK: full decode comparison eq(h.__unicode__().encode('euc-jp'), long) def test_payload_encoding(self): jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa' jcode = 'euc-jp' msg = Message() msg.set_payload(jhello, jcode) ustr = unicode(msg.get_payload(), msg.get_content_charset()) self.assertEqual(jhello, ustr.encode(jcode)) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestEmailAsianCodecs)) return suite def test_main(): run_unittest(TestEmailAsianCodecs) if __name__ == '__main__': unittest.main(defaultTest='suite')
gpl-2.0
jmacmahon/invenio
modules/bibclassify/lib/bibclassify_text_extractor.py
3
6185
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2008, 2009, 2010, 2011 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ BibClassify text extractor. This module provides method to extract the fulltext from local or remote documents. Currently 2 formats of documents are supported: PDF and text documents. 2 methods provide the functionality of the module: text_lines_from_local_file and text_lines_from_url. This module also provides the utility 'is_pdf' that uses GNU file in order to determine if a local file is a PDF file. This module is STANDALONE safe """ import os import re import sys import tempfile import urllib2 from invenio import bibclassify_config as bconfig if bconfig.STANDALONE: from urllib2 import urlopen else: from invenio.urlutils import make_invenio_opener urlopen = make_invenio_opener('BibClassify').open log = bconfig.get_logger("bibclassify.text_extractor") _ONE_WORD = re.compile("[A-Za-z]{2,}") def text_lines_from_local_file(document, remote=False): """Returns the fulltext of the local file. @var document: fullpath to the file that should be read @var remote: boolean, if True does not count lines (gosh!) @return: list of lines if st was read or an empty list""" # FIXME - this does not care if we open anything, including binary files try: if is_pdf(document): if not executable_exists("pdftotext"): log.error("pdftotext is not available on the system.") cmd = "pdftotext -q -enc UTF-8 %s -" % re.escape(document) filestream = os.popen(cmd) else: filestream = open(document, "r") except IOError, ex1: log.error("Unable to read from file %s. (%s)" % (document, ex1.strerror)) return [] # FIXME - we assume it is utf-8 encoded / that is not good lines = [line.decode("utf-8", 'replace') for line in filestream] filestream.close() if not _is_english_text('\n'.join(lines)): log.warning("It seems the file '%s' is unvalid and doesn't " "contain text. Please communicate this file to the Invenio " "team." % document) line_nb = len(lines) word_nb = 0 for line in lines: word_nb += len(re.findall("\S+", line)) # Discard lines that do not contain at least one word. lines = [line for line in lines if _ONE_WORD.search(line) is not None] if not remote: log.info("Local file has %d lines and %d words." % (line_nb, word_nb)) return lines def _is_english_text(text): """ Checks if a text is correct english. Computes the number of words in the text and compares it to the expected number of words (based on an average size of words of 5.1 letters). @param text_lines: the text to analyze @type text_lines: string @return: True if the text is English, False otherwise @rtype: Boolean """ # Consider one word and one space. 
avg_word_length = 5.1 + 1 expected_word_number = float(len(text)) / avg_word_length words = [word for word in re.split('\W', text) if word.isalpha()] word_number = len(words) return word_number > .5 * expected_word_number def text_lines_from_url(url, user_agent=""): """Returns the fulltext of the file found at the URL.""" request = urllib2.Request(url) if user_agent: request.add_header("User-Agent", user_agent) try: distant_stream = urlopen(request) # Write the URL content to a temporary file. local_file = tempfile.mkstemp(prefix="bibclassify.")[1] local_stream = open(local_file, "w") local_stream.write(distant_stream.read()) local_stream.close() except: log.error("Unable to read from URL %s." % url) return None else: # Read lines from the temporary file. lines = text_lines_from_local_file(local_file, remote=True) os.remove(local_file) line_nb = len(lines) word_nb = 0 for line in lines: word_nb += len(re.findall("\S+", line)) log.info("Remote file has %d lines and %d words." % (line_nb, word_nb)) return lines def executable_exists(executable): """Tests if an executable is available on the system.""" for directory in os.getenv("PATH").split(":"): if os.path.exists(os.path.join(directory, executable)): return True return False def is_pdf(document): """Checks if a document is a PDF file. Returns True if is is.""" if not executable_exists('pdftotext'): log.warning("GNU file was not found on the system. " "Switching to a weak file extension test.") if document.lower().endswith(".pdf"): return True return False # Tested with file version >= 4.10. First test is secure and works # with file version 4.25. Second condition is tested for file # version 4.10. file_output = os.popen('file ' + re.escape(document)).read() try: filetype = file_output.split(":")[1] except IndexError: log.error("Your version of the 'file' utility seems to " "be unsupported. Please report this to cds.support@cern.ch.") raise Exception('Incompatible pdftotext') pdf = filetype.find("PDF") > -1 # This is how it should be done however this is incompatible with # file version 4.10. #os.popen('file -bi ' + document).read().find("application/pdf") return pdf
gpl-2.0
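The language check in the extractor above is a crude statistical heuristic: it counts alphabetic tokens and compares them with the number of words a genuinely English text of the same length would be expected to contain (average word length taken as 5.1 letters plus one space). A standalone copy of the heuristic with two made-up inputs illustrates the threshold:

import re

def is_english_text(text):
    avg_word_length = 5.1 + 1  # average English word plus one space
    expected_word_number = float(len(text)) / avg_word_length
    words = [w for w in re.split(r'\W', text) if w.isalpha()]
    return len(words) > .5 * expected_word_number

print(is_english_text("This is a short but perfectly normal English sentence."))  # True
print(is_english_text("0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15"))                   # False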
softlayer/softlayer-python
SoftLayer/CLI/securitygroup/rule.py
2
6250
"""Manage security group rules.""" # :license: MIT, see LICENSE for more details. import click import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import exceptions from SoftLayer.CLI import formatting COLUMNS = ['id', 'remoteIp', 'remoteGroupId', 'direction', 'ethertype', 'portRangeMin', 'portRangeMax', 'protocol', 'createDate', 'modifyDate'] REQUEST_BOOL_COLUMNS = ['requestId', 'response'] REQUEST_RULES_COLUMNS = ['requestId', 'rules'] @click.command(cls=SoftLayer.CLI.command.SLCommand, ) @click.argument('securitygroup_id') @click.option('--sortby', help='Column to sort by', type=click.Choice(COLUMNS)) @environment.pass_env def rule_list(env, securitygroup_id, sortby): """List security group rules.""" mgr = SoftLayer.NetworkManager(env.client) table = formatting.Table(COLUMNS) table.sortby = sortby rules = mgr.list_securitygroup_rules(securitygroup_id) for rule in rules: port_min = rule.get('portRangeMin') port_max = rule.get('portRangeMax') if port_min is None: port_min = formatting.blank() if port_max is None: port_max = formatting.blank() table.add_row([ rule['id'], rule.get('remoteIp') or formatting.blank(), rule.get('remoteGroupId') or formatting.blank(), rule['direction'], rule.get('ethertype') or formatting.blank(), port_min, port_max, rule.get('protocol') or formatting.blank(), rule.get('createDate') or formatting.blank(), rule.get('modifyDate') or formatting.blank() ]) env.fout(table) @click.command(cls=SoftLayer.CLI.command.SLCommand, ) @click.argument('securitygroup_id') @click.option('--remote-ip', '-r', help='The remote IP/CIDR to enforce') @click.option('--remote-group', '-s', type=click.INT, help='The ID of the remote security group to enforce') @click.option('--direction', '-d', help=('The direction of traffic to enforce ' '(ingress, egress)')) @click.option('--ethertype', '-e', help='The ethertype (IPv4 or IPv6) to enforce') @click.option('--port-max', '-M', type=click.INT, help=('The upper port bound to enforce. When the protocol is ICMP, ' 'this specifies the ICMP code to permit')) @click.option('--port-min', '-m', type=click.INT, help=('The lower port bound to enforce. When the protocol is ICMP, ' 'this specifies the ICMP type to permit')) @click.option('--protocol', '-p', help='The protocol (icmp, tcp, udp) to enforce') @environment.pass_env def add(env, securitygroup_id, remote_ip, remote_group, direction, ethertype, port_max, port_min, protocol): """Add a security group rule to a security group. 
\b Examples: # Add an SSH rule (TCP port 22) to a security group slcli sg rule-add 384727 \\ --direction ingress \\ --protocol tcp \\ --port-min 22 \\ --port-max 22 \b # Add a ping rule (ICMP type 8 code 0) to a security group slcli sg rule-add 384727 \\ --direction ingress \\ --protocol icmp \\ --port-min 8 \\ --port-max 0 """ mgr = SoftLayer.NetworkManager(env.client) ret = mgr.add_securitygroup_rule(securitygroup_id, remote_ip, remote_group, direction, ethertype, port_max, port_min, protocol) if not ret: raise exceptions.CLIAbort("Failed to add security group rule") table = formatting.Table(REQUEST_RULES_COLUMNS) table.add_row([ret['requestId'], str(ret['rules'])]) env.fout(table) @click.command(cls=SoftLayer.CLI.command.SLCommand, ) @click.argument('securitygroup_id') @click.argument('rule_id') @click.option('--remote-ip', '-r', help='The remote IP/CIDR to enforce') @click.option('--remote-group', '-s', help='The ID of the remote security group to enforce') @click.option('--direction', '-d', help='The direction of traffic to enforce') @click.option('--ethertype', '-e', help='The ethertype (IPv4 or IPv6) to enforce') @click.option('--port-max', '-M', help='The upper port bound to enforce') @click.option('--port-min', '-m', help='The lower port bound to enforce') @click.option('--protocol', '-p', help='The protocol (icmp, tcp, udp) to enforce') @environment.pass_env def edit(env, securitygroup_id, rule_id, remote_ip, remote_group, direction, ethertype, port_max, port_min, protocol): """Edit a security group rule in a security group.""" mgr = SoftLayer.NetworkManager(env.client) data = {} if remote_ip: data['remote_ip'] = remote_ip if remote_group: data['remote_group'] = remote_group if direction: data['direction'] = direction if ethertype: data['ethertype'] = ethertype if port_max is not None: data['port_max'] = port_max if port_min is not None: data['port_min'] = port_min if protocol: data['protocol'] = protocol ret = mgr.edit_securitygroup_rule(securitygroup_id, rule_id, **data) if not ret: raise exceptions.CLIAbort("Failed to edit security group rule") table = formatting.Table(REQUEST_BOOL_COLUMNS) table.add_row([ret['requestId']]) env.fout(table) @click.command(cls=SoftLayer.CLI.command.SLCommand, ) @click.argument('securitygroup_id') @click.argument('rule_id') @environment.pass_env def remove(env, securitygroup_id, rule_id): """Remove a rule from a security group.""" mgr = SoftLayer.NetworkManager(env.client) ret = mgr.remove_securitygroup_rule(securitygroup_id, rule_id) if not ret: raise exceptions.CLIAbort("Failed to remove security group rule") table = formatting.Table(REQUEST_BOOL_COLUMNS) table.add_row([ret['requestId']]) env.fout(table)
mit
Gabotero/GNURadioNext
gr-digital/python/utils/gray_code.py
73
2163
#!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

class GrayCodeGenerator(object):
    """
    Generates and caches gray codes.
    """

    def __init__(self):
        self.gcs = [0, 1]
        # The last power of two passed through.
        self.lp2 = 2
        # The next power of two that will be passed through.
        self.np2 = 4
        # Current index
        self.i = 2

    def get_gray_code(self, length):
        """
        Returns a list of gray code of given length.
        """
        if len(self.gcs) < length:
            self.generate_new_gray_code(length)
        return self.gcs[:length]

    def generate_new_gray_code(self, length):
        """
        Generates new gray code and places into cache.
        """
        while len(self.gcs) < length:
            if self.i == self.lp2:
                # if i is a power of two then gray number is of form 1100000...
                result = self.i + self.i/2
            else:
                # if not we take advantage of the symmetry of all but the last bit
                # around a power of two.
                result = self.gcs[2*self.lp2-1-self.i] + self.lp2
            self.gcs.append(result)
            self.i += 1
            if self.i == self.np2:
                self.lp2 = self.i
                self.np2 = self.i*2

_gray_code_generator = GrayCodeGenerator()

gray_code = _gray_code_generator.get_gray_code
gpl-3.0
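The generator above builds Gray codes incrementally and caches them, using the mirror symmetry of the sequence around each power of two. As an independent sanity check (not part of the module), the n-th Gray code can also be computed with the closed form n ^ (n >> 1), and consecutive codes must differ in exactly one bit:

codes = [n ^ (n >> 1) for n in range(16)]
print([format(c, '04b') for c in codes])
# ['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', ...]
assert all(bin(a ^ b).count('1') == 1 for a, b in zip(codes, codes[1:]))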
mweisman/QGIS
tests/src/python/test_qgsrelation.py
4
4125
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsRelation. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Matthias Kuhn' __date__ = '07/10/2013' __copyright__ = 'Copyright 2013, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os import qgis from PyQt4.QtCore import QVariant, QObject, SIGNAL from PyQt4.QtGui import QPainter from qgis.core import (QGis, QgsVectorLayer, QgsFeature, QgsRelation, QgsGeometry, QgsPoint, QgsMapLayerRegistry ) from utilities import (unitTestDataPath, getQgisTestApp, TestCase, unittest, #expectedFailure ) QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp() def createReferencingLayer(): layer = QgsVectorLayer("Point?field=fldtxt:string&field=foreignkey:integer", "referencinglayer", "memory") pr = layer.dataProvider() f1 = QgsFeature() f1.setFields( layer.pendingFields() ) f1.setAttributes(["test1", 123]) f1.setGeometry(QgsGeometry.fromPoint(QgsPoint(100,200))) f2 = QgsFeature() f2.setFields( layer.pendingFields() ) f2.setAttributes(["test2", 123]) f2.setGeometry(QgsGeometry.fromPoint(QgsPoint(101,201))) assert pr.addFeatures([f1,f2]) return layer def createReferencedLayer(): layer = QgsVectorLayer( "Point?field=x:string&field=y:integer&field=z:integer", "referencedlayer", "memory") pr = layer.dataProvider() f1 = QgsFeature() f1.setFields( layer.pendingFields() ) f1.setAttributes(["foo", 123, 321]) f1.setGeometry(QgsGeometry.fromPoint(QgsPoint(1,1))) f2 = QgsFeature() f2.setFields( layer.pendingFields() ) f2.setAttributes(["bar", 456, 654]) f2.setGeometry(QgsGeometry.fromPoint(QgsPoint(2,2))) f3 = QgsFeature() f3.setFields( layer.pendingFields() ) f3.setAttributes(["foobar", 789, 554]) f3.setGeometry(QgsGeometry.fromPoint(QgsPoint(2,3))) assert pr.addFeatures([f1, f2, f3]) return layer def formatAttributes(attrs): return repr([ unicode(a) for a in attrs ]) class TestQgsRelation( TestCase ): def test_isValid(self): referencedLayer = createReferencedLayer() referencingLayer = createReferencingLayer() QgsMapLayerRegistry.instance().addMapLayers([referencedLayer,referencingLayer]) rel = QgsRelation() assert not rel.isValid() rel.setRelationId( 'rel1' ) assert not rel.isValid() rel.setRelationName( 'Relation Number One' ) assert not rel.isValid() rel.setReferencingLayer( referencingLayer.id() ) assert not rel.isValid() rel.setReferencedLayer( referencedLayer.id() ) assert not rel.isValid() rel.addFieldPair( 'foreignkey', 'y' ) assert rel.isValid() QgsMapLayerRegistry.instance().removeAllMapLayers() def test_getRelatedFeatures(self): referencedLayer = createReferencedLayer() referencingLayer = createReferencingLayer() QgsMapLayerRegistry.instance().addMapLayers([referencedLayer,referencingLayer]) rel = QgsRelation() rel.setRelationId( 'rel1' ) rel.setRelationName( 'Relation Number One' ) rel.setReferencingLayer( referencingLayer.id() ) rel.setReferencedLayer( referencedLayer.id() ) rel.addFieldPair( 'foreignkey', 'y' ) feat = referencedLayer.getFeatures().next() it = rel.getRelatedFeatures( feat ) [ a.attributes() for a in it ] == [[u'test1', 123], [u'test2', 123]] QgsMapLayerRegistry.instance().removeAllMapLayers() if __name__ == '__main__': unittest.main()
gpl-2.0
tzq668766/python
burness/0002/save_to_mysql.py
40
1463
#-*- coding: utf-8-*-
import mysql.connector
from mysql.connector import errorcode

config = {
    'user': 'root',
    'password': 'root',
    'host': '127.0.0.1',
    'database': 'test',
    'raise_on_warnings': True,
}

class save_keys_to_mysql:
    def __init__(self, path):
        self.path = path
        print(self.path)

    def __conn(self, **conf):
        try:
            conn = mysql.connector.connect(**conf)
            print(conn)
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("something is wrong with your user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("database does not exist")
            else:
                print(err)
            raise
        return conn

    def save_to_mysql(self, **conf):
        conn = self.__conn(**conf)
        path = self.path
        cursor = conn.cursor()
        cursor.execute('drop table if exists act_keys')
        cursor.execute('create table act_keys (id int(8) primary key, act_keys varchar(50))')
        row = 0
        # use the path given to the constructor
        with open(path, 'r') as f:
            for line in f.readlines():
                #row_no='0000'+str(row)
                act_keys = line.rstrip()
                cursor.execute('insert into act_keys (id, act_keys) values (%s, %s)', [row, act_keys])
                row += 1
        conn.commit()
        cursor.close()
        conn.close()

    def see_all(self, **conf):
        conn = self.__conn(**conf)
        cursor = conn.cursor()
        cursor.execute('select * from act_keys')
        values = cursor.fetchall()
        print(values)
        cursor.close()
        conn.close()

test = save_keys_to_mysql('keys_text.txt')
test.save_to_mysql(**config)
test.see_all(**config)
mit
olofer/LightGBM
tests/python_package_test/test_sklearn.py
4
12325
# coding: utf-8 # pylint: skip-file import math import os import unittest import lightgbm as lgb import numpy as np from sklearn.base import clone from sklearn.datasets import (load_boston, load_breast_cancer, load_digits, load_iris, load_svmlight_file) from sklearn.externals import joblib from sklearn.metrics import log_loss, mean_squared_error from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.utils.estimator_checks import (_yield_all_checks, SkipTest, check_parameters_default_constructible) try: from sklearn.utils.estimator_checks import check_no_fit_attributes_set_in_init sklearn_at_least_019 = True except ImportError: sklearn_at_least_019 = False try: import pandas as pd IS_PANDAS_INSTALLED = True except ImportError: IS_PANDAS_INSTALLED = False def multi_error(y_true, y_pred): return np.mean(y_true != y_pred) def multi_logloss(y_true, y_pred): return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)]) class TestSklearn(unittest.TestCase): def test_binary(self): X, y = load_breast_cancer(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMClassifier(n_estimators=50, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = log_loss(y_test, gbm.predict_proba(X_test)) self.assertLess(ret, 0.15) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['binary_logloss'][gbm.best_iteration_ - 1], places=5) def test_regression(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=50, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = mean_squared_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 16) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5) def test_multiclass(self): X, y = load_digits(10, True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMClassifier(n_estimators=50, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = multi_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 0.2) ret = multi_logloss(y_test, gbm.predict_proba(X_test)) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['multi_logloss'][gbm.best_iteration_ - 1], places=5) def test_lambdarank(self): X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train')) X_test, y_test = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.test')) q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train.query')) q_test = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.test.query')) gbm = lgb.LGBMRanker() gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_test, y_test)], eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=5, verbose=False, callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)]) def test_regression_with_custom_objective(self): def objective_ls(y_true, y_pred): grad = (y_pred - y_true) hess = np.ones(len(y_true)) return grad, hess X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = 
lgb.LGBMRegressor(n_estimators=50, silent=True, objective=objective_ls) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = mean_squared_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 100) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5) def test_binary_classification_with_custom_objective(self): def logregobj(y_true, y_pred): y_pred = 1.0 / (1.0 + np.exp(-y_pred)) grad = y_pred - y_true hess = y_pred * (1.0 - y_pred) return grad, hess X, y = load_digits(2, True) def binary_error(y_test, y_pred): return np.mean([int(p > 0.5) != y for y, p in zip(y_test, y_pred)]) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMClassifier(n_estimators=50, silent=True, objective=logregobj) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = binary_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 0.1) def test_dart(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(boosting_type='dart') gbm.fit(X_train, y_train) self.assertLessEqual(gbm.score(X_train, y_train), 1.) def test_grid_search(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = {'boosting_type': ['dart', 'gbdt'], 'n_estimators': [5, 8], 'drop_rate': [0.05, 0.1]} gbm = GridSearchCV(lgb.LGBMRegressor(), params, cv=3) gbm.fit(X_train, y_train) self.assertIn(gbm.best_params_['n_estimators'], [5, 8]) def test_clone_and_property(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=100, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False) gbm_clone = clone(gbm) self.assertIsInstance(gbm.booster_, lgb.Booster) self.assertIsInstance(gbm.feature_importances_, np.ndarray) X, y = load_digits(2, True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) clf = lgb.LGBMClassifier() clf.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False) self.assertListEqual(sorted(clf.classes_), [0, 1]) self.assertEqual(clf.n_classes_, 2) self.assertIsInstance(clf.booster_, lgb.Booster) self.assertIsInstance(clf.feature_importances_, np.ndarray) def test_joblib(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=100, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False) joblib.dump(gbm, 'lgb.pkl') gbm_pickle = joblib.load('lgb.pkl') self.assertIsInstance(gbm_pickle.booster_, lgb.Booster) self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params()) self.assertListEqual(list(gbm.feature_importances_), list(gbm_pickle.feature_importances_)) X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False) gbm_pickle.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False) for key in gbm.evals_result_: for evals in zip(gbm.evals_result_[key], gbm_pickle.evals_result_[key]): self.assertAlmostEqual(*evals, places=5) pred_origin = gbm.predict(X_test) pred_pickle = 
gbm_pickle.predict(X_test) self.assertEqual(len(pred_origin), len(pred_pickle)) for preds in zip(pred_origin, pred_pickle): self.assertAlmostEqual(*preds, places=5) def test_feature_importances_single_leaf(self): clf = lgb.LGBMClassifier(n_estimators=100) data = load_iris() clf.fit(data.data, data.target) importances = clf.feature_importances_ self.assertEqual(len(importances), 4) def test_sklearn_backward_compatibility(self): iris = load_iris() X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42) # Tests that `seed` is the same as `random_state` clf_1 = lgb.sklearn.LGBMClassifier(seed=42, subsample=0.6, colsample_bytree=0.8) clf_2 = lgb.sklearn.LGBMClassifier(random_state=42, subsample=0.6, colsample_bytree=0.8) y_pred_1 = clf_1.fit(X_train, y_train).predict_proba(X_test) y_pred_2 = clf_2.fit(X_train, y_train).predict_proba(X_test) np.testing.assert_allclose(y_pred_1, y_pred_2) def test_sklearn_integration(self): # sklearn <0.19 cannot accept instance, but many tests could be passed only with min_data=1 and min_data_in_bin=1 if sklearn_at_least_019: # we cannot use `check_estimator` directly since there is no skip test mechanism for name, estimator in ((lgb.sklearn.LGBMClassifier.__name__, lgb.sklearn.LGBMClassifier), (lgb.sklearn.LGBMRegressor.__name__, lgb.sklearn.LGBMRegressor)): check_parameters_default_constructible(name, estimator) check_no_fit_attributes_set_in_init(name, estimator) # we cannot leave default params (see https://github.com/Microsoft/LightGBM/issues/833) estimator = estimator(min_child_samples=1, min_data_in_bin=1) for check in _yield_all_checks(name, estimator): if check.__name__ == 'check_estimators_nan_inf': continue # skip test because LightGBM deals with nan try: check(name, estimator) except SkipTest as message: warnings.warn(message, SkipTestWarning) @unittest.skipIf(not IS_PANDAS_INSTALLED, 'pandas not installed') def test_pandas_categorical(self): X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str "B": np.random.permutation([1, 2, 3] * 100), # int "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float "D": np.random.permutation([True, False] * 150)}) # bool y = np.random.permutation([0, 1] * 150) X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20), "B": np.random.permutation([1, 3] * 30), "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15), "D": np.random.permutation([True, False] * 30)}) for col in ["A", "B", "C", "D"]: X[col] = X[col].astype('category') X_test[col] = X_test[col].astype('category') gbm0 = lgb.sklearn.LGBMClassifier().fit(X, y) pred0 = list(gbm0.predict(X_test)) gbm1 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=[0]) pred1 = list(gbm1.predict(X_test)) gbm2 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=['A']) pred2 = list(gbm2.predict(X_test)) gbm3 = lgb.sklearn.LGBMClassifier().fit(X, y, categorical_feature=['A', 'B', 'C', 'D']) pred3 = list(gbm3.predict(X_test)) gbm3.booster_.save_model('categorical.model') gbm4 = lgb.Booster(model_file='categorical.model') pred4 = list(gbm4.predict(X_test)) pred_prob = list(gbm0.predict_proba(X_test)[:, 1]) np.testing.assert_almost_equal(pred0, pred1) np.testing.assert_almost_equal(pred0, pred2) np.testing.assert_almost_equal(pred0, pred3) np.testing.assert_almost_equal(pred_prob, pred4)
mit
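A hedged usage sketch (not part of the record above): the tests rely on scikit-learn-style attributes such as evals_result_ and best_iteration_; this shows how they are read after early stopping. Dataset and parameter values are illustrative, not taken from the test suite.

import lightgbm as lgb
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)

gbm = lgb.LGBMClassifier(n_estimators=50, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)],
        early_stopping_rounds=5, verbose=False)

# best_iteration_ is 1-based; evals_result_ holds the per-iteration metrics.
best = gbm.best_iteration_
print(best, gbm.evals_result_['valid_0']['binary_logloss'][best - 1])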
MER-GROUP/intellij-community
python/lib/Lib/site-packages/django/core/files/uploadhandler.py
236
7028
""" Base file upload handler classes, and the built-in concrete subclasses """ try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile from django.utils import importlib __all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler', 'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler', 'StopFutureHandlers'] class UploadFileException(Exception): """ Any error having to do with uploading files. """ pass class StopUpload(UploadFileException): """ This exception is raised when an upload must abort. """ def __init__(self, connection_reset=False): """ If ``connection_reset`` is ``True``, Django knows will halt the upload without consuming the rest of the upload. This will cause the browser to show a "connection reset" error. """ self.connection_reset = connection_reset def __unicode__(self): if self.connection_reset: return u'StopUpload: Halt current upload.' else: return u'StopUpload: Consume request data, then halt.' class SkipFile(UploadFileException): """ This exception is raised by an upload handler that wants to skip a given file. """ pass class StopFutureHandlers(UploadFileException): """ Upload handers that have handled a file and do not want future handlers to run should raise this exception instead of returning None. """ pass class FileUploadHandler(object): """ Base class for streaming upload handlers. """ chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB. def __init__(self, request=None): self.file_name = None self.content_type = None self.content_length = None self.charset = None self.request = request def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Handle the raw input from the client. Parameters: :input_data: An object that supports reading via .read(). :META: ``request.META``. :content_length: The (integer) value of the Content-Length header from the client. :boundary: The boundary from the Content-Type header. Be sure to prepend two '--'. """ pass def new_file(self, field_name, file_name, content_type, content_length, charset=None): """ Signal that a new file has been started. Warning: As with any data from the client, you should not trust content_length (and sometimes won't even get it). """ self.field_name = field_name self.file_name = file_name self.content_type = content_type self.content_length = content_length self.charset = charset def receive_data_chunk(self, raw_data, start): """ Receive data from the streamed upload parser. ``start`` is the position in the file of the chunk. """ raise NotImplementedError() def file_complete(self, file_size): """ Signal that a file has completed. File size corresponds to the actual size accumulated by all the chunks. Subclasses should return a valid ``UploadedFile`` object. """ raise NotImplementedError() def upload_complete(self): """ Signal that the upload is complete. Subclasses should perform cleanup that is necessary for this handler. """ pass class TemporaryFileUploadHandler(FileUploadHandler): """ Upload handler that streams data into a temporary file. """ def __init__(self, *args, **kwargs): super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs) def new_file(self, file_name, *args, **kwargs): """ Create the file object to append to as data is coming in. 
""" super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs) self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset) def receive_data_chunk(self, raw_data, start): self.file.write(raw_data) def file_complete(self, file_size): self.file.seek(0) self.file.size = file_size return self.file class MemoryFileUploadHandler(FileUploadHandler): """ File upload handler to stream uploads into memory (used for small files). """ def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Use the content_length to signal whether or not this handler should be in use. """ # Check the content-length header to see if we should # If the post is too large, we cannot use the Memory handler. if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE: self.activated = False else: self.activated = True def new_file(self, *args, **kwargs): super(MemoryFileUploadHandler, self).new_file(*args, **kwargs) if self.activated: self.file = StringIO() raise StopFutureHandlers() def receive_data_chunk(self, raw_data, start): """ Add the data to the StringIO file. """ if self.activated: self.file.write(raw_data) else: return raw_data def file_complete(self, file_size): """ Return a file object if we're activated. """ if not self.activated: return self.file.seek(0) return InMemoryUploadedFile( file = self.file, field_name = self.field_name, name = self.file_name, content_type = self.content_type, size = file_size, charset = self.charset ) def load_handler(path, *args, **kwargs): """ Given a path to a handler, return an instance of that handler. E.g.:: >>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request) <TemporaryFileUploadHandler object at 0x...> """ i = path.rfind('.') module, attr = path[:i], path[i+1:] try: mod = importlib.import_module(module) except ImportError, e: raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e)) except ValueError, e: raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?') try: cls = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr)) return cls(*args, **kwargs)
apache-2.0
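A minimal sketch (hypothetical, not part of the module above) of a custom handler built on the FileUploadHandler API that the record defines: it streams chunks into memory, counts the bytes seen, and could be listed in the FILE_UPLOAD_HANDLERS setting so that load_handler instantiates it.

from django.core.files.uploadhandler import FileUploadHandler
from django.core.files.uploadedfile import InMemoryUploadedFile
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO


class CountingUploadHandler(FileUploadHandler):
    def new_file(self, *args, **kwargs):
        super(CountingUploadHandler, self).new_file(*args, **kwargs)
        self.file = StringIO()
        self.bytes_seen = 0

    def receive_data_chunk(self, raw_data, start):
        # Returning None means later handlers will not see this chunk.
        self.bytes_seen += len(raw_data)
        self.file.write(raw_data)
        return None

    def file_complete(self, file_size):
        self.file.seek(0)
        return InMemoryUploadedFile(
            file=self.file, field_name=self.field_name, name=self.file_name,
            content_type=self.content_type, size=file_size, charset=self.charset)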
jesterchen/einmaleins
einmaleins.py
1
3478
import datetime import itertools from random import shuffle, randrange def generate_exercises(limit, num_exercises, type_exercises): exercise_list = [] for i, j in itertools.product(range(1, limit + 1), range(1, limit + 1)): exercise_list.append((i, j, (i) * (j), '*')) while len(exercise_list) < num_exercises: exercise_list += exercise_list shuffle(exercise_list) if type_exercises == 'd': exercise_list = [type_exercise_to_division(x) for x in exercise_list] if type_exercises == 'b': global percentage_division counter = 0 for i in range(len(exercise_list)): if randrange(100) <= percentage_division: counter+=1 exercise_list[i] = type_exercise_to_division(exercise_list[i]) print(str(counter)+" von "+str(num_exercises)+" Aufgaben sind Division.") return exercise_list[0:num_exercises] def type_exercise_to_division(exercise): if randrange(2) == 1: exercise = exercise[2], exercise[1], exercise[0], '/' else: exercise = exercise[2], exercise[0], exercise[1], '/' return exercise def get_user_input(question, default_value): choice = '' while choice == '': try: choice = input(question + " ") if choice == '': choice = default_value choice = int(choice) except: choice = '' return choice def get_user_value(question, allowed_values): choice = '' while choice not in allowed_values: choice = str(input(question + " ")).lower() return choice if __name__ == "__main__": gridsize = get_user_input("Bis zu welcher Zahl soll das 1*1 gehen? [10]", 10) print("OK, dann geht es bis %s*%s." % (gridsize, gridsize)) num_exercises = get_user_input("Wie viele Aufgaben sollen gestellt werden? [%s]" % (gridsize ** 2), gridsize ** 2) print("Es werden also %i Aufgaben gestellt." % (num_exercises)) type_exercises = get_user_value("Nur (M)ultiplikation, nur (D)ivision oder (b)eides?", ('m', 'd', 'b')) if type_exercises=='b': percentage_division = get_user_input("Wieviel Prozent sollen Division sein? [50]", 50) exercise_list = generate_exercises(gridsize, num_exercises, type_exercises) incorrect_answers = 0 started = datetime.datetime.now().replace(microsecond=0) while len(exercise_list) > 0: if len(exercise_list) % 10 == 0 and len(exercise_list) != num_exercises: print("Es sind noch %s Aufgaben uebrig." % (len(exercise_list))) exercise = exercise_list.pop(0) result = '' failed = False while True: result = input(str(exercise[0]) + ' ' + exercise[3] + ' ' + str(exercise[1]) + " = ") if result != str(exercise[2]): if not failed: print("Das Ergebnis ist leider nicht richtig. Versuche es noch einmal.") incorrect_answers += 1 failed = True else: print("Dieses Ergebnis ist leider auch falsch. Denk noch einmal in Ruhe nach!") else: break ended = datetime.datetime.now().replace(microsecond=0) print("Du hast von %s Aufgaben %s im ersten Anlauf richtig beantwortet." % ( num_exercises, num_exercises - incorrect_answers)) print("Insgesamt hast Du %s gebraucht." % (ended - started))
gpl-2.0
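An illustrative sketch (not part of the record) of what the exercise generator above returns: tuples of operand, operand, result, operator. The module name and call values are assumptions.

from einmaleins import generate_exercises

# Multiplication-only exercises up to 3*3, limited to five items.
for a, b, result, op in generate_exercises(3, 5, 'm'):
    print("%d %s %d = %d" % (a, op, b, result))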
omnirom/android_external_chromium-org
build/util/lastchange.py
28
8872
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ lastchange.py -- Chromium revision fetching utility. """ import re import optparse import os import subprocess import sys _GIT_SVN_ID_REGEX = re.compile(r'.*git-svn-id:\s*([^@]*)@([0-9]+)', re.DOTALL) class VersionInfo(object): def __init__(self, url, revision): self.url = url self.revision = revision def FetchSVNRevision(directory, svn_url_regex): """ Fetch the Subversion branch and revision for a given directory. Errors are swallowed. Returns: A VersionInfo object or None on error. """ try: proc = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=directory, shell=(sys.platform=='win32')) except OSError: # command is apparently either not installed or not executable. return None if not proc: return None attrs = {} for line in proc.stdout: line = line.strip() if not line: continue key, val = line.split(': ', 1) attrs[key] = val try: match = svn_url_regex.search(attrs['URL']) if match: url = match.group(2) else: url = '' revision = attrs['Revision'] except KeyError: return None return VersionInfo(url, revision) def RunGitCommand(directory, command): """ Launches git subcommand. Errors are swallowed. Returns: A process object or None. """ command = ['git'] + command # Force shell usage under cygwin. This is a workaround for # mysterious loss of cwd while invoking cygwin's git. # We can't just pass shell=True to Popen, as under win32 this will # cause CMD to be used, while we explicitly want a cygwin shell. if sys.platform == 'cygwin': command = ['sh', '-c', ' '.join(command)] try: proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=directory, shell=(sys.platform=='win32')) return proc except OSError: return None def FetchGitRevision(directory): """ Fetch the Git hash for a given directory. Errors are swallowed. Returns: A VersionInfo object or None on error. """ hsh = '' proc = RunGitCommand(directory, ['rev-parse', 'HEAD']) if proc: output = proc.communicate()[0].strip() if proc.returncode == 0 and output: hsh = output if not hsh: return None pos = '' proc = RunGitCommand(directory, ['show', '-s', '--format=%B', 'HEAD']) if proc: output = proc.communicate()[0] if proc.returncode == 0 and output: for line in reversed(output.splitlines()): if line.startswith('Cr-Commit-Position:'): pos = line.rsplit()[-1].strip() if not pos: return VersionInfo('git', hsh) return VersionInfo('git', '%s-%s' % (hsh, pos)) def FetchGitSVNURLAndRevision(directory, svn_url_regex): """ Fetch the Subversion URL and revision through Git. Errors are swallowed. Returns: A tuple containing the Subversion URL and revision. """ proc = RunGitCommand(directory, ['log', '-1', '--format=%b']) if proc: output = proc.communicate()[0].strip() if proc.returncode == 0 and output: # Extract the latest SVN revision and the SVN URL. # The target line is the last "git-svn-id: ..." line like this: # git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85528 0039d316.... match = _GIT_SVN_ID_REGEX.search(output) if match: revision = match.group(2) url_match = svn_url_regex.search(match.group(1)) if url_match: url = url_match.group(2) else: url = '' return url, revision return None, None def FetchGitSVNRevision(directory, svn_url_regex): """ Fetch the Git-SVN identifier for the local tree. Errors are swallowed. 
""" url, revision = FetchGitSVNURLAndRevision(directory, svn_url_regex) if url and revision: return VersionInfo(url, revision) return None def FetchVersionInfo(default_lastchange, directory=None, directory_regex_prior_to_src_url='chrome|blink|svn'): """ Returns the last change (in the form of a branch, revision tuple), from some appropriate revision control system. """ svn_url_regex = re.compile( r'.*/(' + directory_regex_prior_to_src_url + r')(/.*)') version_info = (FetchSVNRevision(directory, svn_url_regex) or FetchGitSVNRevision(directory, svn_url_regex) or FetchGitRevision(directory)) if not version_info: if default_lastchange and os.path.exists(default_lastchange): revision = open(default_lastchange, 'r').read().strip() version_info = VersionInfo(None, revision) else: version_info = VersionInfo(None, None) return version_info def GetHeaderGuard(path): """ Returns the header #define guard for the given file path. This treats everything after the last instance of "src/" as being a relevant part of the guard. If there is no "src/", then the entire path is used. """ src_index = path.rfind('src/') if src_index != -1: guard = path[src_index + 4:] else: guard = path guard = guard.upper() return guard.replace('/', '_').replace('.', '_').replace('\\', '_') + '_' def GetHeaderContents(path, define, version): """ Returns what the contents of the header file should be that indicate the given revision. Note that the #define is specified as a string, even though it's currently always a SVN revision number, in case we need to move to git hashes. """ header_guard = GetHeaderGuard(path) header_contents = """/* Generated by lastchange.py, do not edit.*/ #ifndef %(header_guard)s #define %(header_guard)s #define %(define)s "%(version)s" #endif // %(header_guard)s """ header_contents = header_contents % { 'header_guard': header_guard, 'define': define, 'version': version } return header_contents def WriteIfChanged(file_name, contents): """ Writes the specified contents to the specified file_name iff the contents are different than the current contents. """ try: old_contents = open(file_name, 'r').read() except EnvironmentError: pass else: if contents == old_contents: return os.unlink(file_name) open(file_name, 'w').write(contents) def main(argv=None): if argv is None: argv = sys.argv parser = optparse.OptionParser(usage="lastchange.py [options]") parser.add_option("-d", "--default-lastchange", metavar="FILE", help="Default last change input FILE.") parser.add_option("-m", "--version-macro", help="Name of C #define when using --header. Defaults to " + "LAST_CHANGE.", default="LAST_CHANGE") parser.add_option("-o", "--output", metavar="FILE", help="Write last change to FILE. " + "Can be combined with --header to write both files.") parser.add_option("", "--header", metavar="FILE", help="Write last change to FILE as a C/C++ header. " + "Can be combined with --output to write both files.") parser.add_option("--revision-only", action='store_true', help="Just print the SVN revision number. 
Overrides any " + "file-output-related options.") parser.add_option("-s", "--source-dir", metavar="DIR", help="Use repository in the given directory.") opts, args = parser.parse_args(argv[1:]) out_file = opts.output header = opts.header while len(args) and out_file is None: if out_file is None: out_file = args.pop(0) if args: sys.stderr.write('Unexpected arguments: %r\n\n' % args) parser.print_help() sys.exit(2) if opts.source_dir: src_dir = opts.source_dir else: src_dir = os.path.dirname(os.path.abspath(__file__)) version_info = FetchVersionInfo(opts.default_lastchange, src_dir) if version_info.revision == None: version_info.revision = '0' if opts.revision_only: print version_info.revision else: contents = "LASTCHANGE=%s\n" % version_info.revision if not out_file and not opts.header: sys.stdout.write(contents) else: if out_file: WriteIfChanged(out_file, contents) if header: WriteIfChanged(header, GetHeaderContents(header, opts.version_macro, version_info.revision)) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
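A short sketch of driving the helpers above programmatically instead of through the command line; the output filenames are placeholders, not taken from the script.

import os
import lastchange

src_dir = os.path.dirname(os.path.abspath(__file__))
info = lastchange.FetchVersionInfo(None, src_dir)
revision = info.revision or '0'

# Write the plain LASTCHANGE file and a C header, as main() would.
lastchange.WriteIfChanged('LASTCHANGE', 'LASTCHANGE=%s\n' % revision)
lastchange.WriteIfChanged(
    'last_change.h',
    lastchange.GetHeaderContents('last_change.h', 'LAST_CHANGE', revision))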
gdowding/pyvmomi
tests/test_container_view.py
12
1730
# VMware vSphere Python SDK # Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tests import vcr from pyVim import connect from pyVmomi import vim class ContainerViewTests(tests.VCRTestBase): @vcr.use_cassette('basic_container_view.yaml', cassette_library_dir=tests.fixtures_path, record_mode='once') def test_basic_container_view(self): # see: http://python3porting.com/noconv.html si = connect.SmartConnect(host='vcsa', user='my_user', pwd='my_password') content = si.RetrieveContent() datacenter_object_view = content.viewManager.CreateContainerView( content.rootFolder, [vim.Datacenter], True) for datacenter in datacenter_object_view.view: datastores = datacenter.datastore # NOTE (hartsocks): the object handle here is a managed object # reference, until we ask for more details, no other detail is # transmitted. Our sample fixture is quite small. self.assertEqual(1, len(datastores)) datacenter_object_view.Destroy()
apache-2.0
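A hypothetical usage sketch of the same container-view pattern the test exercises, listing virtual machines instead of datacenters; host and credentials are placeholders.

from pyVim import connect
from pyVmomi import vim

si = connect.SmartConnect(host='vcsa', user='my_user', pwd='my_password')
content = si.RetrieveContent()
view = content.viewManager.CreateContainerView(
    content.rootFolder, [vim.VirtualMachine], True)
for vm in view.view:
    print(vm.name)
view.Destroy()
connect.Disconnect(si)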
cosven/FeelUOwn
feeluown/gui/uimodels/collection.py
1
1722
""" 本地收藏管理 ~~~~~~~~~~~~~ """ import base64 from feeluown.utils.utils import elfhash from feeluown.gui.widgets.collections import CollectionsModel from feeluown.collection import CollectionType class CollectionUiManager: def __init__(self, app): self._app = app self.model = CollectionsModel(app) self._id_coll_mapping = {} def get(self, identifier): return self._id_coll_mapping.get(identifier, None) def get_coll_id(self, coll): # TODO: 目前还没想好 collection identifier 计算方法,故添加这个函数 # 现在把 fpath 当作 identifier 使用,但对外透明 return elfhash(base64.b64encode(bytes(coll.fpath, 'utf-8'))) def get_coll_library(self): for coll in self._id_coll_mapping.values(): if coll.type == CollectionType.sys_library: return coll raise Exception('collection library not found') def add(self, collection): coll_id = self.get_coll_id(collection) self._id_coll_mapping[coll_id] = collection self.model.add(collection) def clear(self): self._id_coll_mapping.clear() self.model.clear() def initialize(self): self._scan() def refresh(self): """重新加载本地收藏列表""" self.model.clear() self._scan() def _scan(self): colls = [] library_coll = None for coll in self._app.coll_mgr.scan(): if coll.type == CollectionType.sys_library: library_coll = coll continue colls.append(coll) colls.insert(0, library_coll) for coll in colls: self.add(coll)
gpl-3.0
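A small sketch (not from the record) of the identifier computation that get_coll_id performs above; the file path is made up for illustration.

import base64
from feeluown.utils.utils import elfhash

fpath = '/home/user/.FeelUOwn/collections/demo.fuo'
identifier = elfhash(base64.b64encode(bytes(fpath, 'utf-8')))
print(identifier)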
lukeiwanski/tensorflow-opencl
tensorflow/tools/dist_test/scripts/k8s_tensorflow.py
129
3846
#!/usr/bin/python # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generates YAML configuration files for distributed TensorFlow workers. The workers will be run in a Kubernetes (k8s) container cluster. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import k8s_tensorflow_lib # Note: It is intentional that we do not import tensorflow in this script. The # machine that launches a TensorFlow k8s cluster does not have to have the # Python package of TensorFlow installed on it. DEFAULT_DOCKER_IMAGE = 'tensorflow/tf_grpc_test_server' DEFAULT_PORT = 2222 def main(): """Do arg parsing.""" parser = argparse.ArgumentParser() parser.register( 'type', 'bool', lambda v: v.lower() in ('true', 't', 'y', 'yes')) parser.add_argument('--num_workers', type=int, default=2, help='How many worker pods to run') parser.add_argument('--num_parameter_servers', type=int, default=1, help='How many paramater server pods to run') parser.add_argument('--grpc_port', type=int, default=DEFAULT_PORT, help='GRPC server port (Default: %d)' % DEFAULT_PORT) parser.add_argument('--request_load_balancer', type='bool', default=False, help='To request worker0 to be exposed on a public IP ' 'address via an external load balancer, enabling you to ' 'run client processes from outside the cluster') parser.add_argument('--docker_image', type=str, default=DEFAULT_DOCKER_IMAGE, help='Override default docker image for the TensorFlow ' 'GRPC server') parser.add_argument('--name_prefix', type=str, default='tf', help='Prefix for job names. Jobs will be named as ' '<name_prefix>_worker|ps<task_id>') parser.add_argument('--use_shared_volume', type='bool', default=True, help='Whether to mount /shared directory from host to ' 'the pod') args = parser.parse_args() if args.num_workers <= 0: sys.stderr.write('--num_workers must be greater than 0; received %d\n' % args.num_workers) sys.exit(1) if args.num_parameter_servers <= 0: sys.stderr.write( '--num_parameter_servers must be greater than 0; received %d\n' % args.num_parameter_servers) sys.exit(1) # Generate contents of yaml config yaml_config = k8s_tensorflow_lib.GenerateConfig( args.num_workers, args.num_parameter_servers, args.grpc_port, args.request_load_balancer, args.docker_image, args.name_prefix, env_vars=None, use_shared_volume=args.use_shared_volume) print(yaml_config) # pylint: disable=superfluous-parens if __name__ == '__main__': main()
apache-2.0
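A sketch of invoking the library helper that the script above wraps; the argument values simply mirror the argparse defaults shown in the record.

import k8s_tensorflow_lib

yaml_config = k8s_tensorflow_lib.GenerateConfig(
    2,                                 # num_workers
    1,                                 # num_parameter_servers
    2222,                              # grpc_port
    False,                             # request_load_balancer
    'tensorflow/tf_grpc_test_server',  # docker_image
    'tf',                              # name_prefix
    env_vars=None,
    use_shared_volume=True)
print(yaml_config)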
EpicCM/SPH-D700-Kernel
external/webkit/WebKitTools/Scripts/webkitpy/style_references.py
3
2783
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """References to non-style modules used by the style package.""" # This module is a simple facade to the functionality used by the # style package that comes from WebKit modules outside the style # package. # # With this module, the only intra-package references (i.e. # references to webkitpy modules outside the style folder) that # the style package needs to make are relative references to # this module. For example-- # # > from .. style_references import parse_patch # # Similarly, people maintaining non-style code are not beholden # to the contents of the style package when refactoring or # otherwise changing non-style code. They only have to be aware # of this module. import os from diff_parser import DiffParser from scm import detect_scm_system def parse_patch(patch_string): """Parse a patch string and return the affected files.""" patch = DiffParser(patch_string.splitlines()) return patch.files class SimpleScm(object): """Simple facade to SCM for use by style package.""" def __init__(self): cwd = os.path.abspath('.') self._scm = detect_scm_system(cwd) def checkout_root(self): """Return the source control root as an absolute path.""" return self._scm.checkout_root def create_patch(self): return self._scm.create_patch() def create_patch_since_local_commit(self, commit): return self._scm.create_patch_since_local_commit(commit)
gpl-2.0
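A brief, hedged sketch of the facade above: build a patch from the working tree and look at the parsed files. It assumes the module is importable as webkitpy.style_references and that DiffParser.files maps file names to parsed diffs, as its docstring suggests.

from webkitpy.style_references import SimpleScm, parse_patch

scm = SimpleScm()
print(scm.checkout_root())

patch_files = parse_patch(scm.create_patch())
print(sorted(patch_files))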
hnu2013wwj/DH-AutoAC
Python/hnu.py
2
2158
import time

import requests

import answer

min_question_id = 10000
max_question_id = 13528
sleep_time = 30
submit_url = 'http://acm.hnu.cn/online/?action=problem&type=submit'
login_url = 'http://acm.hnu.cn/online/?action=user&type=login'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/47.0.2526.106 Safari/537.36"
}


class HNU:
    def __init__(self, username, password):
        self.session = requests.Session()
        self.session.headers = headers
        self.username = username
        self.password = password

    def login(self):
        data = {
            "userpass": self.password,
            "username": self.username,
            'login': 'Sign In'
        }
        res = self.session.post(login_url, data=data)
        if res.status_code == 200:
            return True
        return False

    def submit(self, problem, code):
        data = {
            "id": str(problem),
            "langid": '1',
            "sourcecode": code,
        }
        res = self.session.post(submit_url, data=data)
        # print(res.text.encode('UTF-8'))
        # TODO
        if res.status_code == 200:
            return True
        return False

    def get_session(self):
        return self.session

    def get_state(self, problem):
        # TODO
        return False


def solve(user, id):
    answers = answer.get_answer("%s%d" % ('hnu oj', id))
    if answers is None or answers == []:
        print(None)
        return None
    count = 1
    for ans in answers:
        # print(ans)
        user.submit(id, ans)
        print('submit', count)
        if count == 2:
            break
        count += 1
        time.sleep(sleep_time)
    return None


def hnu():
    print("HNU:")
    # name = input("Your username: ")
    # password = input("Your password: ")
    name = 'printhello'
    password = '123456'
    my = HNU(name, password)
    if my.login() is False:
        return
    for i in range(1000, max_question_id):
        print('id :', i)
        solve(my, i)
        time.sleep(sleep_time)
    return None


if __name__ == '__main__':
    hnu()
gpl-2.0
greggyNapalm/lunaport_server
lunaport_server/dao/test.py
1
13207
# -*- encoding: utf-8 -*- """ lunaport.dao.test ~~~~~~~~~~~~~~~~~ Storage interaction logic for test resource. """ import pprint pp = pprint.PrettyPrinter(indent=4).pprint import json import redis import dateutil.parser from sqlalchemy import text, exc from lunaport_worker.tasks.check import reduce_arts from ..wsgi import app, db from .. domain.test import TestBuilder from exceptions import StorageError class Filter(object): params_allowed = { 'case': ( 'AND c.name = :case', 'AND c.name = ANY (:case)'), 'owner': ( 'AND owner.login = :owner', 'AND owner.login = ANY (:owner)'), 'issue': ( 'AND issue.name = :issue', 'AND issue.name = ANY (:issue)'), 'load_src': ( 'AND load_src.fqdn = :load_src', 'AND load_src.fqdn = ANY (:load_src)'), 'load_dst': ( 'AND load_dst.fqdn = :load_dst', 'AND load_dst.fqdn = ANY (:load_dst)'), 'parent': ( 'AND t.parent_id = :parent', 'AND t.parent_id = ANY (:parent)'), 'ammo': ( 'AND t.ammo_id = :ammo', 'AND t.ammo_id = ANY (:ammo)'), 'status': ( 'AND s.name = :status', 'AND s.name = ANY (:status)'), } cast_to_int = ['parent'] def __init__(self, **kw): self.rule = [] self.q_params = {} for p, v in kw.iteritems(): if p not in self.params_allowed.keys(): continue if isinstance(v, (list, tuple)): if p in self.cast_to_int: # autocast doesn't work for ARRAY v = [int(el) for el in v] self.rule.append(self.params_allowed[p][1]) self.q_params.update({p: v}) elif isinstance(v, (unicode, basestring)): self.rule.append(self.params_allowed[p][0]) self.q_params.update({p: v}) else: raise StorageError('Wrong *{}* param type.'.format(p)) def cmpl_query(self): sql_text = '\n' + ' '.join(self.rule) return sql_text, self.q_params class Dao(object): """Interface for test storage""" @classmethod def insert(cls, test): raise NotImplemented() @classmethod def update_by_id(cls, test_id, test_diff): raise NotImplemented() @classmethod def get_by_id(cls, **kw): raise NotImplemented() @classmethod def get_many(cls, **kw): raise NotImplemented() class RDBMS(Dao): """PostgreSQL wrapper, implementing test.dao interface""" json_fileds = ['lll', 'files', 'generator_cfg'] dt_fileds = ['added_at', 'started_at', 'finished_at'] per_page_default = app.config.get('TEST_PER_PAGE_DEFAULT') or 10 per_page_max = app.config.get('TEST_PER_PAGE_MAX') or 100 select_join_part = ''' SELECT t.*, s.name AS status, c.name AS case, c.id AS case_id, eng.name AS engine, env.name AS env, inic.login AS initiator, issue.name AS issue, load_src.fqdn AS load_src, load_dst.fqdn AS load_dst FROM test t, t_status s, "case" c, engine eng, environment env, "user" inic, issue, server load_src, server load_dst WHERE t.case_id = c.id AND t.t_status_id = s.id AND t.engine_id = eng.id AND t.environment_id = env.id AND t.initiator_id = inic.id AND t.issue_id = issue.id AND t.load_src_id = load_src.id AND t.load_dst_id = load_dst.id''' @staticmethod def rdbms_call(q_test, q_params): return db.engine.connect().execute(text(q_test), **q_params) @classmethod def insert(cls, test): kw = test.as_dict(cuted=False) for filed in cls.json_fileds: kw.update({filed: json.dumps(kw[filed])}) def query(parent_test_id): parent_str = \ '(SELECT root_test_id FROM "case" WHERE name = :case),' if parent_test_id and isinstance(parent_test_id, int): parent_str = '(SELECT {}),'.format(parent_test_id) return cls.rdbms_call(''' INSERT INTO test (t_status_id, case_id, ammo_id, parent_id, engine_id, environment_id, lll_id, lll, initiator_id, "name", descr, issue_id, load_src_id, load_dst_id, started_at, finished_at, files, generator_cfg) VALUES ( (SELECT id 
FROM t_status WHERE name = :status), (SELECT id FROM "case" WHERE name = :case), (SELECT id FROM "ammo" WHERE path = :ammo_path), ''' + parent_str + ''' (SELECT id FROM engine WHERE name = :engine), (SELECT id FROM environment WHERE name = :env), :lll_id, :lll, (SELECT id FROM "user" WHERE login = :initiator), :name, :descr, (SELECT id FROM "issue" WHERE name = :issue), (SELECT id FROM server WHERE fqdn = :load_src), (SELECT id FROM server WHERE fqdn = :load_dst), :started_at, :finished_at, :files, :generator_cfg) returning id''', kw) try: pk_id = [r for r in query(kw.get('parent_id'))].pop()[0] except exc.IntegrityError as e: if 'null value in column "initiator_id"' in str(e): raise StorageError( 'unknown *initiator* value:{}'.format(kw.get('initiator')), missing_resource_type='user', missing_resource_value=kw.get('initiator'),) if 'null value in column "case_id"' in str(e): raise StorageError( 'unknown *case* value:{}'.format(kw.get('case')), missing_resource_type='case', missing_resource_value=kw.get('case'),) if 'null value in column "issue_id"' in str(e): raise StorageError( 'unknown *issue* value:{}'.format(kw.get('issue')), missing_resource_type='issue', missing_resource_value=kw.get('issue'),) if 'null value in column "load_src_id"' in str(e): raise StorageError( 'unknown *load_src* value:{}'.format(kw.get('load_src')), missing_resource_type='host', missing_resource_value=kw.get('load_src'),) if 'null value in column "load_dst_id"' in str(e): raise StorageError( 'unknown *load_dst* value:{}'.format(kw.get('load_dst')), missing_resource_type='host', missing_resource_value=kw.get('load_dst'),) raise StorageError('Some kind of IntegrityError') return pk_id @classmethod def update_by_id(cls, test_id, test_diff): def query(): q_params = test_diff q_params.update({ 'test_id': test_id, }) set_stmt = [] if q_params.get('status'): set_stmt.append( 't_status_id=(SELECT id FROM t_status WHERE name = :status)') if 'finished_at' in q_params: set_stmt.append('finished_at = :finished_at') q_params['finished_at'] = dateutil.parser.parse( q_params['finished_at']) if 'started_at' in q_params: set_stmt.append('started_at = :started_at') q_params['started_at'] = dateutil.parser.parse( q_params['started_at']) if 'resolution' in q_params: set_stmt.append('resolution = :resolution') if 'lll' in q_params: if q_params['lll'].get('n'): q_params['lll_id'] = q_params['lunapark']['n'] set_stmt.append('lll_id = :lunapark_id') q_params['lll'] = json.dumps(q_params['lunapark']) set_stmt.append('lll = :lunapark') q_stmt = ''' UPDATE "test" SET {} WHERE id = :test_id RETURNING id'''.format(',\n'.join(set_stmt)) if not set_stmt: # nothing to update raise AssertionError('Nothing to update') return cls.rdbms_call(q_stmt, q_params) try: #pk_id = [r for r in query()].pop()[0] query() except exc.IntegrityError as e: if 'null value in column "t_status_id"' in str(e): raise StorageError( "unknown *status* value:{}".format(test_diff['status'])) raise StorageError('Some kind of IntegrityError') except IndexError: raise StorageError( 'no such test with id:{}'.format(test_diff['test_id'])) return cls.get_by_id(test_id=test_id) @classmethod def get_by_id(cls, **kw): if kw.get('test_id'): query_params = { 'test_id': kw.get('test_id'), } filter_part = '\nAND t.id = :test_id' elif kw.get('lll_id'): query_params = { 'lll_id': kw.get('lunapark_id'), } filter_part = '\nAND t.lll_id = :lunapark_id' rv = cls.rdbms_call(''.join([cls.select_join_part, filter_part]), query_params) row = rv.first() if not row: return None t_kw = 
dict(zip(rv.keys(), row)) return TestBuilder.from_row(**t_kw) @classmethod def get_many(cls, **kw): """pagination""" pagination_part = '\nORDER BY id DESC\nLIMIT :limit OFFSET :offset' param_per_page = kw.get('per_page') if param_per_page and (param_per_page <= cls.per_page_max): per_page = param_per_page else: per_page = cls.per_page_default page_num = kw.get('page') # page number starts from 1, page 0 and 1 mean the same - # first slice from data set. if page_num and isinstance(page_num, int) and (page_num >= 2): offset = (page_num - 1) * per_page next_page = page_num + 1 prev_page = page_num - 1 else: offset = 0 next_page = 2 prev_page = None query_params = { 'limit': per_page, 'offset': offset, } """filtering""" f = Filter(**kw) filter_part, q_params_up = f.cmpl_query() query_params.update(q_params_up) try: rv = cls.rdbms_call( ''.join([cls.select_join_part, filter_part, pagination_part]), query_params) rows = rv.fetchall() except exc.IntegrityError: raise StorageError('Some kind of IntegrityError') except exc.DataError: raise StorageError('One of params malformed or has a wrong type') if len(rows) == 0: return None, None, None, None elif len(rows) < per_page: # last chunk of data next_page = None def create_test(row): t_kw = dict(zip(rv.keys(), row)) return TestBuilder.from_row(**t_kw) return map(create_test, rows), per_page, next_page, prev_page class SideEffect(Dao): """Side effect wrapper, implementing test.dao interface. """ def __init__(self, dao): self.dao = dao self.redis = redis.Redis(**app.config.get('REDIS_CLIENT')) self.rds_monitor_finish = 'lll_monitor_finish' self.rds_monitor_start = 'lll_monitor_start' def insert(self, test): test.id = self.dao.insert(test) if test.env in ['luna-tank-api', 'luna-tank-api-force'] and\ test.status != 'finished': self.schedule_to_monitor_finish(test) if test.env == 'luna-tank-api-force': self.schedule_to_monitor_start(test) elif test.env == 'yandex-tank': reduce_arts.apply_async(args=[test.id, test.files]) return test.id def update_by_id(self, test_id, test_diff): return self.dao.update_by_id(test_id, test_diff) def get_by_id(self, **kw): return self.dao.get_by_id(**kw) def get_many(self, **kw): return self.dao.get_many(**kw) def schedule_to_monitor(self, key, test): """ Put test data to Redis hash whith running lll tests. This hash periodically pulled by Celery workers which fires reduce jobs on just finished tests. """ self.redis.hset(key, test.id, test.to_monitor_dct()) def schedule_to_monitor_finish(self, test): self.schedule_to_monitor(self.rds_monitor_finish, test) def schedule_to_monitor_start(self, test): self.schedule_to_monitor(self.rds_monitor_start, test)
apache-2.0
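An illustration (Python 2, matching the module) of how the Filter helper defined above turns query keyword arguments into a SQL fragment plus bind parameters; the values are made up and the fragment ordering depends on dict iteration order.

from lunaport_server.dao.test import Filter

f = Filter(status='finished', case=['smoke', 'load'])
sql_fragment, params = f.cmpl_query()
# sql_fragment is something like:
#   "\nAND s.name = :status AND c.name = ANY (:case)"
# params == {'status': 'finished', 'case': ['smoke', 'load']}
print(sql_fragment, params)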
virgree/odoo
addons/l10n_bo/__openerp__.py
253
1698
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { "name": "Bolivia Localization Chart Account", "version": "1.0", "description": """ Bolivian accounting chart and tax localization. Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes """, "author": "Cubic ERP", "website": "http://cubicERP.com", "category": "Localization/Account Charts", "depends": [ "account_chart", ], "data":[ "account_tax_code.xml", "l10n_bo_chart.xml", "account_tax.xml", "l10n_bo_wizard.xml", ], "demo_xml": [ ], "data": [ ], "active": False, "installable": True, "certificate" : "", } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
code-sauce/tensorflow
tensorflow/python/kernel_tests/reshape_op_test.py
74
5438
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.reshape_op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.platform import test class ReshapeTest(test.TestCase): def _testReshape(self, x, y, use_gpu=False): with self.test_session(use_gpu=use_gpu): np_ans = x.reshape(y) tf_ans = array_ops.reshape(x, y) out = tf_ans.eval() self.assertEqual(tf_ans.get_shape(), out.shape) self.assertShapeEqual(np_ans, tf_ans) def _testBothReshape(self, x, y): self._testReshape(x, y, False) self._testReshape(x, y, True) def testFloatBasic(self): x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32) self._testBothReshape(x, [2, 3]) def testDoubleBasic(self): x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64) self._testBothReshape(x, [2, 3]) def testInt32Basic(self): x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32) self._testBothReshape(x, [2, 3]) def testComplex64Basic(self): x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64) self._testBothReshape(x, [2, 3]) def testComplex128Basic(self): x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex128) self._testBothReshape(x, [2, 3]) def testFloatReshapeThreeDimensions(self): x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32) self._testBothReshape(x, [3, 3, 3]) def testFloatUnspecifiedDimOnly(self): x = np.arange(1., 7.).reshape([6]).astype(np.float32) self._testBothReshape(x, [-1]) def testFloatUnspecifiedDimBegin(self): x = np.arange(1., 7.).reshape([6]).astype(np.float32) self._testBothReshape(x, [-1, 2]) def testFloatUnspecifiedDimEnd(self): x = np.arange(1., 7.).reshape([6]).astype(np.float32) self._testBothReshape(x, [3, -1]) # TODO(vrv): Add tests for failure conditions once python test_util # reports errors. 
  def testFloatReshapeGradThreeDimensions(self):
    x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
    s = list(np.shape(x))
    with self.test_session():
      input_tensor = constant_op.constant(x)
      reshape_out = array_ops.reshape(input_tensor, [1, 8, 3])
      err = gradient_checker.compute_gradient_error(
          input_tensor, s, reshape_out, s, x_init_value=x)
      print("Reshape gradient error = %g" % err)
      self.assertLess(err, 1e-3)

  def testFloatEmpty(self):
    x = np.empty((0, 0, 0, 0), dtype=np.float32)
    self._testBothReshape(x, [1, 2, 3, 0])
    self._testBothReshape(x, [1, 0, 0, 4])
    self._testBothReshape(x, [0, 0, 0, 0])
    self._testBothReshape(x, [1, 2, 0])
    self._testBothReshape(x, [0, 0, 0])
    self._testBothReshape(x, [1, -1, 5])

  def testErrors(self):
    y = constant_op.constant(0.0, shape=[23, 29, 31])
    with self.assertRaisesRegexp(ValueError, "must be evenly divisible by 17"):
      array_ops.reshape(y, [17, -1])

    z = constant_op.constant(0.0, shape=[32, 128])
    with self.assertRaisesRegexp(ValueError,
                                 "Cannot reshape a tensor with 4096 elements"):
      array_ops.reshape(z, [4095])

  def testPartialShapes(self):
    x = array_ops.placeholder(dtypes.float32)

    # Unknown input shape, partial new shape.
    y = array_ops.reshape(x, [1, 1, -1, 1])
    self.assertEqual([1, 1, None, 1], y.get_shape().as_list())

    # Unknown input shape, unknown new shape.
    y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32))
    self.assertEqual(None, y.get_shape().ndims)

    # Unknown input shape, known rank for new shape.
    y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32, shape=(3,)))
    self.assertEqual([None, None, None], y.get_shape().as_list())

    # Unknown input shape, partial new shape using `tf.stack()`.
    y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
    self.assertEqual([None, 37], y.get_shape().as_list())

    # Unknown input shape, partial new shape using `tf.concat()`.
    y = array_ops.reshape(
        x,
        array_ops.concat(
            [array_ops.placeholder(
                dtypes.int32, shape=(2,)), [37, 42]], 0))
    self.assertEqual([None, None, 37, 42], y.get_shape().as_list())

    # Unknown input shape, partial new shape using `tf.shape()`.
    y = array_ops.reshape(
        x,
        array_ops.shape(
            array_ops.placeholder(
                dtypes.float32, shape=[None, 37, None])))
    self.assertEqual([None, 37, None], y.get_shape().as_list())


if __name__ == "__main__":
  test.main()
apache-2.0
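A quick numpy/TensorFlow parallel (independent of the test harness, TF 1.x style as in the record) for the "-1" wildcard dimension the tests above cover.

import numpy as np
import tensorflow as tf

x = np.arange(24.0, dtype=np.float32)
print(x.reshape([2, -1, 4]).shape)   # (2, 3, 4)

with tf.Session() as sess:
    y = tf.reshape(x, [2, -1, 4])
    print(sess.run(y).shape)         # (2, 3, 4)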
fatihzkaratana/cluster-organizer
tests/__init__.py
1
3511
# coding: utf-8 """ The MIT License (MIT) Copyright (c) 2013 Fatih Karatana Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. @package @date 19/06/14 @author fatih @version 1.0.0 """ __author__ = 'fatih' __date__ = '19/06/14' __version__ = '' import unittest import sys import os # Set parent directory to import required files, packages or objects PARENT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.insert(0, '%s' % PARENT_DIR) # import Statistics class to call its method and test them. from app import Statistics class TestStatistics(unittest.TestCase): """ Test statistics app tests """ statistics = Statistics() malformed_host_file = PARENT_DIR + "/statistics/tests/data/HostState.txt" malformed_instance_file = PARENT_DIR + "/statistics/tests/data/InstanceState.txt" def testHostFileFormer(self): """ Check file format is former or malformed @return void """ with open(self.statistics.host_file) as file_to_test: self.assertTrue(self.statistics.check_file(file_to_test.read())) def testInstanceFileFormer(self): """ Check file format is former or malformed @return void """ with open(self.statistics.instance_file) as file_to_test: self.assertTrue(self.statistics.check_file(file_to_test.read())) def testMalformedHostFile(self): """ Check file format is former or malformed @return void """ with open(self.malformed_host_file) as file_to_test: self.assertFalse(self.statistics.check_file(file_to_test.read())) def testMalformedInstanceFile(self): """ Check file format is former or malformed @return void """ with open(self.malformed_instance_file) as file_to_test: self.assertFalse(self.statistics.check_file(file_to_test.read())) def testWriteTarget(self): """ Check if targeted file is written @return void """ dummy_content = ["HostClustering: 8, 0.75","DatacentreClustering: 8, 0.36","AvailableHosts: 3,2,5,10,6"] self.assertTrue(self.statistics.write_target(dummy_content)) def run(): suite = unittest.TestLoader().loadTestsFromTestCase(TestStatistics) unittest.TextTestRunner(verbosity=2).run(suite) if __name__ == "__main__": suite = unittest.TestLoader().loadTestsFromTestCase(TestStatistics) unittest.TextTestRunner(verbosity=2).run(suite)
mit
theo-l/django
django/db/models/sql/datastructures.py
19
6592
""" Useful auxiliary data structures for query construction. Not useful outside the SQL domain. """ from django.db.models.sql.constants import INNER, LOUTER class MultiJoin(Exception): """ Used by join construction code to indicate the point at which a multi-valued join was attempted (if the caller wants to treat that exceptionally). """ def __init__(self, names_pos, path_with_names): self.level = names_pos # The path travelled, this includes the path to the multijoin. self.names_with_path = path_with_names class Empty: pass class Join: """ Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the FROM entry. For example, the SQL generated could be LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id") This class is primarily used in Query.alias_map. All entries in alias_map must be Join compatible by providing the following attributes and methods: - table_name (string) - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - parent_alias (which table is this join's parent, can be None similarly to join_type) - as_sql() - relabeled_clone() """ def __init__(self, table_name, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation=None): # Join table self.table_name = table_name self.parent_alias = parent_alias # Note: table_alias is not necessarily known at instantiation time. self.table_alias = table_alias # LOUTER or INNER self.join_type = join_type # A list of 2-tuples to use in the ON clause of the JOIN. # Each 2-tuple will create one join condition in the ON clause. self.join_cols = join_field.get_joining_columns() # Along which field (or ForeignObjectRel in the reverse join case) self.join_field = join_field # Is this join nullabled? self.nullable = nullable self.filtered_relation = filtered_relation def as_sql(self, compiler, connection): """ Generate the full LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params clause for this join. """ join_conditions = [] params = [] qn = compiler.quote_name_unless_alias qn2 = connection.ops.quote_name # Add a join condition for each pair of joining columns. for lhs_col, rhs_col in self.join_cols: join_conditions.append('%s.%s = %s.%s' % ( qn(self.parent_alias), qn2(lhs_col), qn(self.table_alias), qn2(rhs_col), )) # Add a single condition inside parentheses for whatever # get_extra_restriction() returns. extra_cond = self.join_field.get_extra_restriction( compiler.query.where_class, self.table_alias, self.parent_alias) if extra_cond: extra_sql, extra_params = compiler.compile(extra_cond) join_conditions.append('(%s)' % extra_sql) params.extend(extra_params) if self.filtered_relation: extra_sql, extra_params = compiler.compile(self.filtered_relation) if extra_sql: join_conditions.append('(%s)' % extra_sql) params.extend(extra_params) if not join_conditions: # This might be a rel on the other end of an actual declared field. declared_field = getattr(self.join_field, 'field', self.join_field) raise ValueError( "Join generated an empty ON clause. %s did not yield either " "joining columns or extra restrictions." 
% declared_field.__class__ ) on_clause_sql = ' AND '.join(join_conditions) alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias) sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql) return sql, params def relabeled_clone(self, change_map): new_parent_alias = change_map.get(self.parent_alias, self.parent_alias) new_table_alias = change_map.get(self.table_alias, self.table_alias) if self.filtered_relation is not None: filtered_relation = self.filtered_relation.clone() filtered_relation.path = [change_map.get(p, p) for p in self.filtered_relation.path] else: filtered_relation = None return self.__class__( self.table_name, new_parent_alias, new_table_alias, self.join_type, self.join_field, self.nullable, filtered_relation=filtered_relation, ) def equals(self, other, with_filtered_relation): return ( isinstance(other, self.__class__) and self.table_name == other.table_name and self.parent_alias == other.parent_alias and self.join_field == other.join_field and (not with_filtered_relation or self.filtered_relation == other.filtered_relation) ) def __eq__(self, other): return self.equals(other, with_filtered_relation=True) def demote(self): new = self.relabeled_clone({}) new.join_type = INNER return new def promote(self): new = self.relabeled_clone({}) new.join_type = LOUTER return new class BaseTable: """ The BaseTable class is used for base table references in FROM clause. For example, the SQL "foo" in SELECT * FROM "foo" WHERE somecond could be generated by this class. """ join_type = None parent_alias = None filtered_relation = None def __init__(self, table_name, alias): self.table_name = table_name self.table_alias = alias def as_sql(self, compiler, connection): alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias) base_sql = compiler.quote_name_unless_alias(self.table_name) return base_sql + alias_str, [] def relabeled_clone(self, change_map): return self.__class__(self.table_name, change_map.get(self.table_alias, self.table_alias)) def equals(self, other, with_filtered_relation): return ( isinstance(self, other.__class__) and self.table_name == other.table_name and self.table_alias == other.table_alias )
bsd-3-clause
xFleury/crawl-0.13.0-fairplay
source/webserver/terminal.py
1
5628
import pty import termios import os import fcntl import struct import resource import signal import sys import time BUFSIZ = 2048 class TerminalRecorder(object): def __init__(self, command, filename, id_header, logger, io_loop, termsize): self.io_loop = io_loop self.command = command if filename: self.ttyrec = open(filename, "w", 0) else: self.ttyrec = None self.id = id self.returncode = None self.output_buffer = "" self.termsize = termsize self.pid = None self.child_fd = None self.end_callback = None self.output_callback = None self.activity_callback = None self.errpipe_read = None self.error_buffer = "" self.logger = logger if id_header: self.write_ttyrec_chunk(id_header) self._spawn() def _spawn(self): self.errpipe_read, errpipe_write = os.pipe() self.pid, self.child_fd = pty.fork() if self.pid == 0: # We're the child def handle_signal(signal, f): sys.exit(0) signal.signal(1, handle_signal) # Set window size cols, lines = self.get_terminal_size() s = struct.pack("HHHH", lines, cols, 0, 0) fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, s) os.close(self.errpipe_read) os.dup2(errpipe_write, 2) # Make sure not to retain any files from the parent max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0] for i in range(3, max_fd): try: os.close(i) except OSError: pass # And exec env = dict(os.environ) env["COLUMNS"] = str(cols) env["LINES"] = str(lines) env["TERM"] = "linux" try: os.execvpe(self.command[0], self.command, env) except OSError: sys.exit(1) # We're the parent os.close(errpipe_write) self.io_loop.add_handler(self.child_fd, self._handle_read, self.io_loop.ERROR | self.io_loop.READ) self.io_loop.add_handler(self.errpipe_read, self._handle_err_read, self.io_loop.READ) def _handle_read(self, fd, events): if events & self.io_loop.READ: buf = os.read(fd, BUFSIZ) if len(buf) > 0: self.write_ttyrec_chunk(buf) if self.activity_callback: self.activity_callback() self.output_buffer += buf self._do_output_callback() self.poll() if events & self.io_loop.ERROR: self.poll() def _handle_err_read(self, fd, events): if events & self.io_loop.READ: buf = os.read(fd, BUFSIZ) if len(buf) > 0: self.error_buffer += buf self._log_error_output() self.poll() def write_ttyrec_header(self, sec, usec, l): if self.ttyrec is None: return s = struct.pack("<iii", sec, usec, l) self.ttyrec.write(s) def write_ttyrec_chunk(self, data): if self.ttyrec is None: return t = time.time() self.write_ttyrec_header(int(t), int((t % 1) * 1000000), len(data)) self.ttyrec.write(data) def _do_output_callback(self): pos = self.output_buffer.find("\n") while pos >= 0: line = self.output_buffer[:pos] self.output_buffer = self.output_buffer[pos + 1:] if len(line) > 0: if line[-1] == "\r": line = line[:-1] if self.output_callback: self.output_callback(line) pos = self.output_buffer.find("\n") def _log_error_output(self): pos = self.error_buffer.find("\n") while pos >= 0: line = self.error_buffer[:pos] self.error_buffer = self.error_buffer[pos + 1:] if len(line) > 0: if line[-1] == "\r": line = line[:-1] self.logger.info("ERR: %s", line) pos = self.error_buffer.find("\n") def send_signal(self, signal): os.kill(self.pid, signal) def poll(self): if self.returncode is None: pid, status = os.waitpid(self.pid, os.WNOHANG) if pid == self.pid: if os.WIFSIGNALED(status): self.returncode = -os.WTERMSIG(status) elif os.WIFEXITED(status): self.returncode = os.WEXITSTATUS(status) else: # Should never happen raise RuntimeError("Unknown child exit status!") if self.returncode is not None: self.io_loop.remove_handler(self.child_fd) 
self.io_loop.remove_handler(self.errpipe_read) os.close(self.child_fd) os.close(self.errpipe_read) if self.ttyrec: self.ttyrec.close() if self.end_callback: self.end_callback() return self.returncode def get_terminal_size(self): return self.termsize def write_input(self, data): if self.poll() is not None: return while len(data) > 0: written = os.write(self.child_fd, data) data = data[written:]
gpl-2.0
foreni-packages/RATDecoders
SpyGate.py
9
5759
#!/usr/bin/env python ''' CyberGate Config Decoder ''' __description__ = 'CyberGate Config Extractor' __author__ = 'Kevin Breen http://techanarchy.net' __version__ = '0.1' __date__ = '2014/03/15' import sys import string from optparse import OptionParser import pype32 def run(rawData): #try: rawconfig = rawData.split("abccba") if len(rawconfig) > 1: print "Running Abccba" dict = oldversions(rawconfig) else: print "Running pype32" pe = pype32.PE(data=rawData) rawConfig = getStream(pe) if rawConfig.startswith("bute"): # workaround for an error in pype32 will still work when fixed rawConfig = rawConfig[8:] dict = parseConfig(rawConfig) #except: #return None print dict # Confirm if there is Net MetaData in the File def getStream(pe): counter = 0 for dir in pe.ntHeaders.optionalHeader.dataDirectory: if dir.name.value == "NET_METADATA_DIRECTORY": rawConfig = findUSStream(pe, counter) else: counter += 1 return rawConfig # I only want to extract the User Strings Section def findUSStream(pe, dir): for i in range(0,4): name = pe.ntHeaders.optionalHeader.dataDirectory[dir].info.netMetaDataStreams[i].name.value if name.startswith("#US"): return pe.ntHeaders.optionalHeader.dataDirectory[dir].info.netMetaDataStreams[i].info #Walk the User Strings and create a list of individual strings def parseConfig(rawConfig): stringList = [] offset = 1 config = bytearray(rawConfig) while offset < len(config): length = int(config[offset]) that = config[offset+1:offset+int(length)] stringList.append(str(that.replace("\x00", ""))) offset += int(length+1) print stringList dict = {} for i in range(0,60): dict["Domain"] = stringList[37] dict["Port"] = stringList[39] dict["Campaign Name"] = stringList[38] dict["FolderName"] = stringList[41] dict["Exe Name"] = stringList[40] dict["Install Folder"] = stringList[44] return dict def oldversions(config): dict = {} if len(config) == 48: dict["Version"] = "V0.2.6" for i in range(1, len(config)): dict["Domain"] = config[1] # dict["Port"] = config[2] # dict["Campaign Name"] = config[3] # dict["Dan Option"] = config[5] # dict["Startup Name"] = config[7] # dict["Password"] = config[9] # dict["Anti Kill Server"] = config[10] # dict["USB Spread / lnk"] = config[11] dict["Anti Process Explorer"] = config[12] dict["Anti Process Hacker"] = config[13] dict["Anti ApateDNS"] = config[14] dict["Anti MalwareBytes"] = config[15] dict["Anti AntiLogger"] = config[16] dict["Block Virus Total"] = config[17] # dict["Mutex"] = config[18] # dict["Persistance"] = config[19] # dict["SpyGate Key"] = config[20] dict["Startup Folder"] = config[21] # dict["Anti Avira"] = config[23] dict["USB Spread / exe"] = config[24] # 25 if statement below dict["Install Folder1"] = config[26] # dict["StartUp Name"] = config[27] # dict["Melt After Run"] = config[28] # dict["Hide After Run"] = config[29] # #dict[""] = config[30] #dict[""] = config[31] #dict[""] = config[32] dict["Install Folder2"] = config[33] # # 34 and 35 in if statement below dict["Install Folder3"] = config[36] #dict[""] = config[37] dict["Anti SbieCtrl"] = config[38] dict["Anti SpyTheSpy"] = config[39] dict["Anti SpeedGear"] = config[40] dict["Anti Wireshark"] = config[41] dict["Anti IPBlocker"] = config[42] dict["Anti Cports"] = config[43] dict["Anti AVG"] = config[44] dict["Anti OllyDbg"] = config[45] dict["Anti X Netstat"] = config[46] #dict["Anti Keyscrambler"] = config[47] if config[25] == "True": dict["Application Data Folder"] = "True" else: dict["Application Data Folder"] = "False" if config[34] == "True": dict["Templates Folder"] = "True" 
else: dict["Templates Folder"] = "False" if config[35] == "True": dict["Programs Folder"] = "True" else: dict["Programs Folder"] = "False" elif len(config) == 18: dict["Version"] = "V2.0" for i in range(1, len(config)): print i, config[i] dict["Domain"] = config[1] # dict["Port"] = config[2] # dict["Campaign Name"] = config[3] # dict["Dan Option"] = config[5] # dict["Add To Startup"] = config[5] # dict["Startup Key"] = config[7] # dict["Password"] = config[9] # dict["Anti Kill Server"] = config[10] # dict["USB Spread"] = config[11] # dict["Kill Process Explorer"] = config[12] # dict["Anti Process Hacker"] = config[13] # dict["Anti ApateDNS"] = config[14] dict["Anti MalwareBytes"] = config[15] dict["Anti AntiLogger"] = config[16] dict["Block Virus Total"] = config[17] else: return None return dict if __name__ == "__main__": parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__) (options, args) = parser.parse_args() if len(args) > 0: pass else: parser.print_help() sys.exit() try: print "[+] Reading file" fileData = open(args[0], 'rb').read() except: print "[+] Couldn't Open File {0}".format(args[0]) print "[+] Searching for Config" config = run(fileData) if config == None: print "[+] Config not found" sys.exit() if len(args) == 2: print "[+] Writing Config to file {0}".format(args[1]) with open(args[1], 'a') as outFile: for key, value in sorted(config.iteritems()): clean_value = filter(lambda x: x in string.printable, value) outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value)) else: print "[+] Printing Config to screen" for key, value in sorted(config.iteritems()): clean_value = filter(lambda x: x in string.printable, value) print " [-] Key: {0}\t Value: {1}".format(key,clean_value) print "[+] End of Config"
gpl-3.0
iocast/poiservice
lib/js/OpenLayers-2.11/tools/toposort.py
304
1086
""" toposort.py Sorts dictionary keys based on lists of dependencies. """ class MissingDependency(Exception): """Exception raised when a listed dependency is not in the dictionary.""" class Sorter(object): def __init__(self, dependencies): self.dependencies = dependencies self.visited = set() self.sorted = () def sort(self): for key in self.dependencies: self._visit(key) return self.sorted def _visit(self, key): if key not in self.visited: self.visited.add(key) if not self.dependencies.has_key(key): raise MissingDependency(key) for depends in self.dependencies[key]: self._visit(depends) self.sorted += (key,) def toposort(dependencies): """Returns a tuple of the dependencies dictionary keys sorted by entries in the dependency lists. Given circular dependencies, sort will impose an order. Raises MissingDependency if a key is not found. """ s = Sorter(dependencies) return s.sort()
mit
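A minimal usage sketch for the toposort module above (the dependency dictionary and file names are hypothetical, not part of the dataset; the module targets Python 2, since it relies on dict.has_key):

    # hypothetical build-order example
    dependencies = {
        "app.js": ["util.js", "dom.js"],
        "dom.js": ["util.js"],
        "util.js": [],
    }
    print toposort(dependencies)  # ('util.js', 'dom.js', 'app.js')

Each key is emitted only after everything it depends on, so dependencies always precede dependents in the returned tuple.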
flwh/you-get
src/you_get/extractors/lizhi.py
18
1710
#!/usr/bin/env python

__all__ = ['lizhi_download']
import json
from ..common import *

def lizhi_download_playlist(url, output_dir = '.', merge = True, info_only = False):
    # like this http://www.lizhi.fm/#/31365/
    #api desc: s->start l->length band->some radio
    #http://www.lizhi.fm/api/radio_audios?s=0&l=100&band=31365
    band_id = match1(url,r'#/(\d+)')
    #try to get a considerable large l to reduce html parsing task.
    api_url = 'http://www.lizhi.fm/api/radio_audios?s=0&l=65535&band='+band_id
    content_json = json.loads(get_content(api_url))
    for sound in content_json:
        title = sound["name"]
        res_url = sound["url"]
        songtype, ext, size = url_info(res_url,faker=True)
        print_info(site_info, title, songtype, size)
        if not info_only:
            #no referer no speed!
            download_urls([res_url], title, ext, size, output_dir, merge=merge ,refer = 'http://www.lizhi.fm',faker=True)
    pass

def lizhi_download(url, output_dir = '.', merge = True, info_only = False):
    # url like http://www.lizhi.fm/#/549759/18864883431656710
    api_id = match1(url,r'#/(\d+/\d+)')
    api_url = 'http://www.lizhi.fm/api/audio/'+api_id
    content_json = json.loads(get_content(api_url))
    title = content_json["audio"]["name"]
    res_url = content_json["audio"]["url"]
    songtype, ext, size = url_info(res_url,faker=True)
    print_info(site_info, title, songtype, size)
    if not info_only:
        #no referer no speed!
        download_urls([res_url], title, ext, size, output_dir, merge=merge ,refer = 'http://www.lizhi.fm',faker=True)

site_info = "lizhi.fm"
download = lizhi_download
download_playlist = lizhi_download_playlist
mit
qizenguf/MLC-STT
src/arch/x86/isa/insts/general_purpose/data_conversion/bcd_adjust.py
91
2276
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

microcode = ""
#let {{
#    class DAA(Inst):
#        "GenFault ${new UnimpInstFault}"
#    class DAS(Inst):
#        "GenFault ${new UnimpInstFault}"
#}};
bsd-3-clause
wylliam-silva/leiteilustrador
node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
2720
6387
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ class Tool(object): """Visual Studio tool.""" def __init__(self, name, attrs=None): """Initializes the tool. Args: name: Tool name. attrs: Dict of tool attributes; may be None. """ self._attrs = attrs or {} self._attrs['Name'] = name def _GetSpecification(self): """Creates an element for the tool. Returns: A new xml.dom.Element for the tool. """ return ['Tool', self._attrs] class Filter(object): """Visual Studio filter - that is, a virtual folder.""" def __init__(self, name, contents=None): """Initializes the folder. Args: name: Filter (folder) name. contents: List of filenames and/or Filter objects contained. """ self.name = name self.contents = list(contents or []) #------------------------------------------------------------------------------ class Writer(object): """Visual Studio XML project writer.""" def __init__(self, project_path, version, name, guid=None, platforms=None): """Initializes the project. Args: project_path: Path to the project file. version: Format version to emit. name: Name of the project. guid: GUID to use for project, if not None. platforms: Array of string, the supported platforms. If null, ['Win32'] """ self.project_path = project_path self.version = version self.name = name self.guid = guid # Default to Win32 for platforms. if not platforms: platforms = ['Win32'] # Initialize the specifications of the various sections. self.platform_section = ['Platforms'] for platform in platforms: self.platform_section.append(['Platform', {'Name': platform}]) self.tool_files_section = ['ToolFiles'] self.configurations_section = ['Configurations'] self.files_section = ['Files'] # Keep a dict keyed on filename to speed up access. self.files_dict = dict() def AddToolFile(self, path): """Adds a tool file to the project. Args: path: Relative path from project to tool file. """ self.tool_files_section.append(['ToolFile', {'RelativePath': path}]) def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools): """Returns the specification for a configuration. Args: config_type: Type of configuration node. config_name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Returns: """ # Handle defaults if not attrs: attrs = {} if not tools: tools = [] # Add configuration node and its attributes node_attrs = attrs.copy() node_attrs['Name'] = config_name specification = [config_type, node_attrs] # Add tool nodes and their attributes if tools: for t in tools: if isinstance(t, Tool): specification.append(t._GetSpecification()) else: specification.append(Tool(t)._GetSpecification()) return specification def AddConfig(self, name, attrs=None, tools=None): """Adds a configuration to the project. Args: name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. """ spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools) self.configurations_section.append(spec) def _AddFilesToNode(self, parent, files): """Adds files and/or filters to the parent node. Args: parent: Destination node files: A list of Filter objects and/or relative paths to files. 
Will call itself recursively, if the files list contains Filter objects. """ for f in files: if isinstance(f, Filter): node = ['Filter', {'Name': f.name}] self._AddFilesToNode(node, f.contents) else: node = ['File', {'RelativePath': f}] self.files_dict[f] = node parent.append(node) def AddFiles(self, files): """Adds files to the project. Args: files: A list of Filter objects and/or relative paths to files. This makes a copy of the file/filter tree at the time of this call. If you later add files to a Filter object which was passed into a previous call to AddFiles(), it will not be reflected in this project. """ self._AddFilesToNode(self.files_section, files) # TODO(rspangler) This also doesn't handle adding files to an existing # filter. That is, it doesn't merge the trees. def AddFileConfig(self, path, config, attrs=None, tools=None): """Adds a configuration to a file. Args: path: Relative path to the file. config: Name of configuration to add. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Raises: ValueError: Relative path does not match any file added via AddFiles(). """ # Find the file node with the right relative path parent = self.files_dict.get(path) if not parent: raise ValueError('AddFileConfig: file "%s" not in project.' % path) # Add the config to the file node spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs, tools) parent.append(spec) def WriteIfChanged(self): """Writes the project file.""" # First create XML content definition content = [ 'VisualStudioProject', {'ProjectType': 'Visual C++', 'Version': self.version.ProjectVersion(), 'Name': self.name, 'ProjectGUID': self.guid, 'RootNamespace': self.name, 'Keyword': 'Win32Proj' }, self.platform_section, self.tool_files_section, self.configurations_section, ['References'], # empty section self.files_section, ['Globals'] # empty section ] easy_xml.WriteXmlIfChanged(content, self.project_path, encoding="Windows-1252")
mit
JetBrains/kotlin-web-site
src/processors/processors.py
2
3374
import re

replace_simple_code = False

languageMimeTypeMap = {
    "kotlin": "text/x-kotlin",
    "java": "text/x-java",
    "groovy": "text/x-groovy",
    "xml": "application/xml",
    "yaml": "text/x-yaml",
    "bash": "text/x-sh",
    "shell": "text/x-sh",
    "swift": "text/x-swift",
    "obj-c": "text/x-objectivec",
    "html": "application/xml",
    "javascript": "text/javascript",
    "json": "application/json",
    "js": "text/javascript",
    "c": "text/x-csrc",
    "text": "text/plain"
}

def set_replace_simple_code(v: bool):
    global replace_simple_code
    replace_simple_code = v

def find_closest_tag(element, tagname):
    current_element = element.parent
    while current_element is not None and current_element.name != tagname:
        current_element = current_element.parent
    return current_element

processors = {
    'h1': 'typo-header typo-h1',
    'h2': 'typo-header typo-h2',
    'h3': 'typo-header typo-h3',
    'h4': 'typo-header typo-h4',
    'ul': 'typo-list typo-list_type_simple',
    'ol': 'typo-list typo-list_type_ordered',
    'li': 'typo-list__item',
    'p': 'typo-para',
    'a': 'typo-link',
    'blockquote': 'typo-quote',
    'hr': 'typo-hr',
    'img': 'typo-image',
    'strong': 'typo-strong',
    'table': 'typo-table',
    'tr': 'typo-table__row',
    'td': 'typo-table__column',
}

def process_markdown_html(tree):
    tree = process_code_blocks(tree)
    for element in tree.select('*'):
        appendClass = processors.get(element.name)
        if appendClass is not None:
            if element.has_attr('class'):
                element['class'].append(processors.get(element.name))
            else:
                element['class'] = processors.get(element.name)
    return tree

def process_code_blocks(tree):
    if replace_simple_code:
        # some spellcheckers may not know what to do with <code> elements,
        # we replace in-line code blocks with span to improve spellcheckers
        # TODO: avoid global variable hack here and pass the parameter explicitly
        for element in tree.select("code"):
            if len(element.attrs) == 0:
                element.name = "span"
                element['style'] = "font-style: italic; text-decoration: underline;"

    for element in tree.select('pre > code'):
        class_names = element.get("class")
        lang = None
        if class_names is not None:
            for class_name in class_names:
                if class_name.startswith("language-"):
                    lang = class_name[len("language-"):]
        if lang is None:
            continue
        parent_div = find_closest_tag(element, 'div')
        # Skip executable samples
        if parent_div is not None and parent_div.has_attr('class') and "sample" in parent_div['class']:
            continue
        element['data-lang'] = languageMimeTypeMap[lang]
        element['class'] = "code _highlighted"
    return tree

def process_header_ids(tree):
    header_elements = tree.select('h1,h2,h3')
    for header in header_elements:
        if header.get("id") is not None:
            continue
        generated_id = re.sub(r'[^a-zA-Z0-9 \\-]', '', header.text)
        generated_id = generated_id.replace(' ', '-')
        generated_id = generated_id.lower()
        generated_id = generated_id.strip()
        header['id'] = generated_id
    return tree
apache-2.0
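A minimal usage sketch for processors.py above (the HTML snippet and the import path are assumptions for illustration; requires BeautifulSoup 4):

    from bs4 import BeautifulSoup
    from processors import process_markdown_html  # hypothetical import path

    html = ('<h2>Setup</h2><p>Run <code>gradle build</code></p>'
            '<pre><code class="language-kotlin">fun main() {}</code></pre>')
    tree = BeautifulSoup(html, "html.parser")
    tree = process_markdown_html(tree)
    # <h2> and <p> now carry the typo-* classes; the highlighted block is
    # tagged data-lang="text/x-kotlin" and class="code _highlighted"
    print(tree)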
jblackburne/scikit-learn
examples/tree/plot_tree_regression.py
93
1516
""" =================================================================== Decision Tree Regression =================================================================== A 1D regression with decision tree. The :ref:`decision trees <tree>` is used to fit a sine curve with addition noisy observation. As a result, it learns local linear regressions approximating the sine curve. We can see that if the maximum depth of the tree (controlled by the `max_depth` parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit. """ print(__doc__) # Import the necessary modules and libraries import numpy as np from sklearn.tree import DecisionTreeRegressor import matplotlib.pyplot as plt # Create a random dataset rng = np.random.RandomState(1) X = np.sort(5 * rng.rand(80, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(16)) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_1.fit(X, y) regr_2.fit(X, y) # Predict X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) # Plot the results plt.figure() plt.scatter(X, y, c="darkorange", label="data") plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2) plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
winndows/cinder
cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py
42
1203
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import String, Column, MetaData, Table


def upgrade(migrate_engine):
    """Add _name_id column to volumes."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    _name_id = Column('_name_id', String(36))
    volumes.create_column(_name_id)
    volumes.update().values(_name_id=None).execute()


def downgrade(migrate_engine):
    """Remove _name_id column from volumes."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    _name_id = volumes.columns._name_id
    volumes.drop_column(_name_id)
apache-2.0
scottpurdy/nupic
tests/integration/nupic/engine/network_twonode_test.py
15
6093
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ This test demonstrates building and running a two node network. Some features demonstrated include: - Can add regions to network and set dimensions - Linking induces dimensions correctly - Network computation happens in correct order - Direct (zero-copy) access to outputs - Linking correctly maps outputs to inputs """ import logging import unittest2 as unittest from nupic.engine import Network, Dimensions LOGGER = logging.getLogger(__name__) class NetworkTwoNodeTest(unittest.TestCase): def testTwoNode(self): # ===================================================== # Build and run the network # ===================================================== net = Network() level1 = net.addRegion("level1", "TestNode", "{int32Param: 15}") dims = Dimensions([6, 4]) level1.setDimensions(dims) level2 = net.addRegion("level2", "TestNode", "{real64Param: 128.23}") net.link("level1", "level2", "TestFanIn2", "") # Could call initialize here, but not necessary as net.run() # initializes implicitly. # net.initialize() net.run(1) LOGGER.info("Successfully created network and ran for one iteration") # ===================================================== # Check everything # ===================================================== dims = level1.getDimensions() self.assertEquals(len(dims), 2) self.assertEquals(dims[0], 6) self.assertEquals(dims[1], 4) dims = level2.getDimensions() self.assertEquals(len(dims), 2) self.assertEquals(dims[0], 3) self.assertEquals(dims[1], 2) # Check L1 output. "False" means don't copy, i.e. # get a pointer to the actual output # Actual output values are determined by the TestNode # compute() behavior. l1output = level1.getOutputData("bottomUpOut") self.assertEquals(len(l1output), 48) # 24 nodes; 2 values per node for i in xrange(24): self.assertEquals(l1output[2*i], 0) # size of input to each node is 0 self.assertEquals(l1output[2*i+1], i) # node number # check L2 output. 
l2output = level2.getOutputData("bottomUpOut", ) self.assertEquals(len(l2output), 12) # 6 nodes; 2 values per node # Output val = node number + sum(inputs) # Can compute from knowing L1 layout # # 00 01 | 02 03 | 04 05 # 06 07 | 08 09 | 10 11 # --------------------- # 12 13 | 14 15 | 16 17 # 18 19 | 20 21 | 22 23 outputVals = [] outputVals.append(0 + (0 + 1 + 6 + 7)) outputVals.append(1 + (2 + 3 + 8 + 9)) outputVals.append(2 + (4 + 5 + 10 + 11)) outputVals.append(3 + (12 + 13 + 18 + 19)) outputVals.append(4 + (14 + 15 + 20 + 21)) outputVals.append(5 + (16 + 17 + 22 + 23)) for i in xrange(6): self.assertEquals(l2output[2*i], 8) # size of input for each node is 8 self.assertEquals(l2output[2*i+1], outputVals[i]) # ===================================================== # Run for one more iteration # ===================================================== LOGGER.info("Running for a second iteration") net.run(1) # ===================================================== # Check everything again # ===================================================== # Outputs are all the same except that the first output is # incremented by the iteration number for i in xrange(24): self.assertEquals(l1output[2*i], 1) self.assertEquals(l1output[2*i+1], i) for i in xrange(6): self.assertEquals(l2output[2*i], 9) self.assertEquals(l2output[2*i+1], outputVals[i] + 4) def testLinkingDownwardDimensions(self): # # Linking can induce dimensions downward # net = Network() level1 = net.addRegion("level1", "TestNode", "") level2 = net.addRegion("level2", "TestNode", "") dims = Dimensions([3, 2]) level2.setDimensions(dims) net.link("level1", "level2", "TestFanIn2", "") net.initialize() # Level1 should now have dimensions [6, 4] self.assertEquals(level1.getDimensions()[0], 6) self.assertEquals(level1.getDimensions()[1], 4) # # We get nice error messages when network can't be initialized # LOGGER.info("=====") LOGGER.info("Creating a 3 level network in which levels 1 and 2 have") LOGGER.info("dimensions but network initialization will fail because") LOGGER.info("level3 does not have dimensions") LOGGER.info("Error message follows:") net = Network() level1 = net.addRegion("level1", "TestNode", "") level2 = net.addRegion("level2", "TestNode", "") _level3 = net.addRegion("level3", "TestNode", "") dims = Dimensions([6, 4]) level1.setDimensions(dims) net.link("level1", "level2", "TestFanIn2", "") self.assertRaises(RuntimeError, net.initialize) LOGGER.info("=====") LOGGER.info("======") LOGGER.info("Creating a link with incompatible dimensions. \ Error message follows") net.link("level2", "level3", "TestFanIn2", "") self.assertRaises(RuntimeError, net.initialize) if __name__ == "__main__": unittest.main()
agpl-3.0
snhack/LogoBot
ci/ci.py
1
9452
#!/usr/bin/env python # run the continuous integration process # includes watching the git repo for pull requests and commits import os import sys import requests import json from time import sleep, gmtime, strftime from subprocess import call, check_output, CalledProcessError repo_owner = 'snhack' repo_name = 'LogoBot' repos_rel_dir = '../../' primary_repo_dir = 'LogoBot' staging_repo_dir = primary_repo_dir + 'Staging' ci_log_name = 'ci.log' prhist = [] def dict_in_array(dict, key, value): res = False for item in dict: if item[key] == value: res = True exit return res def poll(un, pw, proxies): cilog = open(ci_log_name, 'a+') cilog.seek(0) # load cilog into prhist lines = cilog.readlines() for line in lines: line = line.split('_') num = int(line[0]) dt = line[1] if not dict_in_array(prhist, 'number', num): prhist.append({'number':num, 'updated_at':dt}) print("Polling for pull requests for commits...") print("") while True: print(strftime("%H:%M:%S", gmtime())) print("Getting list of pull requests...") r = requests.get('https://api.github.com/repos/'+repo_owner+'/'+repo_name+'/pulls', auth=(un, pw), proxies=proxies) jso = r.json() print(" Found: "+str(len(jso))+" pull request(s)") for p in jso: print("Checking: #"+str(p['number']) + " - "+ p['title'] + " by "+p['user']['login']) """ print(p['body']) print(p['state']) print(p['merged_at']) print(p['updated_at']) """ # check if the pull request is ready to be merged if p['state'] == 'open' and p['merged_at']== None: # check if we've done it before? if not dict_in_array(prhist, 'number', p['number']): try: errorlevel = 0 oplog = 'Build Log\n---\n' comments_url = p['_links']['comments']['href'] # comment payload = { 'body':'CI: Starting build process...' } r = requests.post(comments_url, auth=(un, pw), proxies=proxies, data=json.dumps(payload)) # Discard any local changes print(" Clean working tree") o = check_output(['git','reset']) o += check_output(['git','checkout','--','.']) o += check_output(['git','clean','-f']) print(o) oplog += "\nClean working tree\n" oplog += o print(" Remote update") o = check_output(['git','remote','update','-p']) print(o) oplog += "\nRemote Update\n" oplog += o # rebase to master print(" Merge fast-forward master") o = check_output(['git','merge','--ff-only','master']) print(o) oplog += "\nMerge fast-forward\n" oplog += o branch = p['head']['ref'] print(" Checkout master") o = check_output(['git','checkout','master']) print(o) oplog += "\nCheckout master\n" oplog += o print(" Merge branch: "+branch) o = '' try: o = check_output(['git','merge','--strategy-option','theirs','--no-commit','origin/'+branch]) print(o) except CalledProcessError as e: print(" Error: "+ str(e.returncode)) errorlevel = e.returncode oplog += "\nMerge branch: origin/"+branch+"\n" oplog += o if errorlevel == 0: # Now run the build process print(" Building") os.chdir('hardware/ci') o = '' try: o = check_output(['./build.py']) print(o) except CalledProcessError as e: print(" Error: "+ str(e.returncode)) errorlevel = 1 os.chdir('../../') oplog += "\n\nBuilding\n--------\n" oplog += o if errorlevel == 0: print(" Passed, auto-merging into master...") # comment payload = { 'body':'CI: Build process successful - auto-merging into master\n\n' + oplog } r = requests.post(comments_url, auth=(un, pw), proxies=proxies, data=json.dumps(payload)) # merge payload = { 'commit_message':p['title'] } r = requests.put(p['_links']['self']['href'] + '/merge', auth=(un, pw), proxies=proxies, data=json.dumps(payload)) print(r) else: print(" Errors, adding to pull 
request comments...") # log the error payload = { 'body':'CI: Unable to auto-merge, build process encountered errors\n\n' + oplog } r = requests.post(comments_url, auth=(un, pw), proxies=proxies, data=json.dumps(payload)) # Log this request so we don't process it again hist = {'number':p['number'], 'updated_at':p['updated_at']} prhist.append(hist) cilog.write(str(p['number']) + '_' + p['updated_at'] + '_' + str(errorlevel)+'\n') cilog.flush() print(" Done") except CalledProcessError as e: print("Error: "+ str(e.returncode)) else: print(" Skipping") else: print(" Error: Pull request not open or already merged") print("") sleep(60) call(['clear']) cilog.close() def ci(un, pw, http_proxy="", https_proxy=""): print("Continuous Integration") print("----------------------") proxies = { "http": http_proxy, "https": https_proxy, } print("") print("Checking connection to github...") r = requests.get('https://api.github.com/user', auth=(un, pw), proxies=proxies) if r.status_code == 200: print(" OK") print("Changing working directory...") os.chdir(repos_rel_dir) print(" Now in: "+os.getcwd()) print("Changing to staging dir: "+staging_repo_dir) if os.path.isdir(staging_repo_dir): os.chdir(staging_repo_dir) print(" OK") # Could check for empty dir here and if so, do a git clone? # git clone git@github.com:snhack/LogoBot . contents = os.listdir('.') if len(contents) == 0: print(" Staging empty - cloning repo") o = check_output(['git','clone','git@github.com:'+repo_owner+'/'+repo_name,'.']) poll(un, pw, proxies) else: print(" Error: Staging dir does not exist") else: print(" Error") print(" Status Code: "+r.status_code) print(" Response: "+r.text) # o = check_output(['git','branch']) if __name__ == '__main__': if len(sys.argv) == 3: ci(sys.argv[1], sys.argv[2], "", "") elif len(sys.argv) > 3: ci(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) else: print("Usage: ci <git-username> <git-password> <http_proxy> <https_proxy>")
mit
dmillington/ansible-modules-core
system/systemd.py
3
15510
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Brian Coca <bcoca@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' module: systemd author: - "Ansible Core Team" version_added: "2.2" short_description: Manage services. description: - Controls systemd services on remote hosts. options: name: required: true description: - Name of the service. aliases: ['unit', 'service'] state: required: false default: null choices: [ 'started', 'stopped', 'restarted', 'reloaded' ] description: - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the service. C(reloaded) will always reload. enabled: required: false choices: [ "yes", "no" ] default: null description: - Whether the service should start on boot. B(At least one of state and enabled are required.) masked: required: false choices: [ "yes", "no" ] default: null description: - Whether the unit should be masked or not, a masked unit is impossible to start. daemon_reload: required: false default: no choices: [ "yes", "no" ] description: - run daemon-reload before doing any other operations, to make sure systemd has read any changes. aliases: ['daemon-reload'] user: required: false default: no choices: [ "yes", "no" ] description: - run systemctl talking to the service manager of the calling user, rather than the service manager of the system. notes: - One option other than name is required. 
requirements: - A system managed by systemd ''' EXAMPLES = ''' # Example action to start service httpd, if not running - systemd: state=started name=httpd # Example action to stop service cron on debian, if running - systemd: name=cron state=stopped # Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes - systemd: state: restarted daemon_reload: yes name: crond # Example action to reload service httpd, in all cases - systemd: name: httpd state: reloaded # Example action to enable service httpd and ensure it is not masked - systemd: name: httpd enabled: yes masked: no # Example action to enable a timer for dnf-automatic - systemd: name: dnf-automatic.timer state: started enabled: True ''' RETURN = ''' status: description: A dictionary with the key=value pairs returned from `systemctl show` returned: success type: complex sample: { "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT", "ActiveEnterTimestampMonotonic": "8135942", "ActiveExitTimestampMonotonic": "0", "ActiveState": "active", "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice", "AllowIsolate": "no", "Before": "shutdown.target multi-user.target", "BlockIOAccounting": "no", "BlockIOWeight": "1000", "CPUAccounting": "no", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "1024", "CanIsolate": "no", "CanReload": "yes", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "18446744073709551615", "ConditionResult": "yes", "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT", "ConditionTimestampMonotonic": "7902742", "Conflicts": "shutdown.target", "ControlGroup": "/system.slice/crond.service", "ControlPID": "0", "DefaultDependencies": "yes", "Delegate": "no", "Description": "Command Scheduler", "DevicePolicy": "auto", "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "595", "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT", "ExecMainStartTimestampMonotonic": "8134990", "ExecMainStatus": "0", "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "FragmentPath": "/usr/lib/systemd/system/crond.service", "GuessMainPID": "yes", "IOScheduling": "0", "Id": "crond.service", "IgnoreOnIsolate": "no", "IgnoreOnSnapshot": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT", "InactiveExitTimestampMonotonic": "8135942", "JobTimeoutUSec": "0", "KillMode": "process", "KillSignal": "15", "LimitAS": "18446744073709551615", "LimitCORE": "18446744073709551615", "LimitCPU": "18446744073709551615", "LimitDATA": "18446744073709551615", "LimitFSIZE": "18446744073709551615", "LimitLOCKS": "18446744073709551615", "LimitMEMLOCK": "65536", "LimitMSGQUEUE": "819200", "LimitNICE": "0", "LimitNOFILE": "4096", "LimitNPROC": "3902", "LimitRSS": "18446744073709551615", "LimitRTPRIO": "0", "LimitRTTIME": "18446744073709551615", "LimitSIGPENDING": "3902", "LimitSTACK": "18446744073709551615", "LoadState": "loaded", "MainPID": "595", "MemoryAccounting": "no", "MemoryLimit": "18446744073709551615", "MountFlags": "0", "Names": "crond.service", 
"NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "none", "OOMScoreAdjust": "0", "OnFailureIsolate": "no", "PermissionsStartOnly": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "RemainAfterExit": "no", "Requires": "basic.target", "Restart": "no", "RestartUSec": "100ms", "Result": "success", "RootDirectoryStartOnly": "no", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "system.slice", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitInterval": "10000000", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "running", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TimeoutStartUSec": "1min 30s", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "simple", "UMask": "0022", "UnitFileState": "enabled", "WantedBy": "multi-user.target", "Wants": "system.slice", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "0", } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing from ansible.module_utils._text import to_native # =========================================== # Main control flow def main(): # initialize module = AnsibleModule( argument_spec = dict( name = dict(required=True, type='str', aliases=['unit', 'service']), state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'), enabled = dict(type='bool'), masked = dict(type='bool'), daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']), user= dict(type='bool', default=False), ), supports_check_mode=True, required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']], ) systemctl = module.get_bin_path('systemctl') if module.params['user']: systemctl = systemctl + " --user" unit = module.params['name'] rc = 0 out = err = '' result = { 'name': unit, 'changed': False, 'status': {}, 'warnings': [], } # Run daemon-reload first, if requested if module.params['daemon_reload']: (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl)) if rc != 0: module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err)) found = False is_initd = sysv_exists(unit) is_systemd = False # check service data, cannot error out on rc as it changes across versions, assume not found (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit)) if rc == 0: # load return of systemctl show into dictionary for easy access and return multival = [] if out: k = None for line in to_native(out).split('\n'): # systemd can have multiline values delimited with {} if line.strip(): if k is None: if '=' in line: k,v = line.split('=', 1) if v.lstrip().startswith('{'): if not v.rstrip().endswith('}'): multival.append(line) continue result['status'][k] = v.strip() k = None else: if line.rstrip().endswith('}'): result['status'][k] = '\n'.join(multival).strip() multival = [] k = None else: multival.append(line) is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found' # Check for loading error if is_systemd and 'LoadError' in result['status']: module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError'])) # Does service exist? 
found = is_systemd or is_initd if is_initd and not is_systemd: result['warnings'].append('The service (%s) is actually an init script but the system is managed by systemd' % unit) # mask/unmask the service, if requested, can operate on services before they are installed if module.params['masked'] is not None: # state is not masked unless systemd affirms otherwise masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked') if masked != module.params['masked']: result['changed'] = True if module.params['masked']: action = 'mask' else: action = 'unmask' if not module.check_mode: (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) if rc != 0: # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't fail_if_missing(module, found, unit, msg='host') # Enable/disable service startup at boot if requested if module.params['enabled'] is not None: if module.params['enabled']: action = 'enable' else: action = 'disable' fail_if_missing(module, found, unit, msg='host') # do we need to enable the service? enabled = False (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit)) # check systemctl result or if it is a init script if rc == 0: enabled = True elif rc == 1: # if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries if is_initd and (not out.startswith('disabled') or sysv_is_enabled(unit)): enabled = True # default to current state result['enabled'] = enabled # Change enable/disable if needed if enabled != module.params['enabled']: result['changed'] = True if not module.check_mode: (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) if rc != 0: module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err)) result['enabled'] = not enabled # set service state if requested if module.params['state'] is not None: fail_if_missing(module, found, unit, msg="host") # default to desired state result['state'] = module.params['state'] # What is current service state? if 'ActiveState' in result['status']: action = None if module.params['state'] == 'started': if result['status']['ActiveState'] != 'active': action = 'start' elif module.params['state'] == 'stopped': if result['status']['ActiveState'] == 'active': action = 'stop' else: action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded result['state'] = 'started' if action: result['changed'] = True if not module.check_mode: (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) if rc != 0: module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err)) else: # this should not happen? module.fail_json(msg="Service is in unknown state", status=result['status']) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
sethmachine/iaa
Corpora/corpus.py
1
14949
__author__ = "Zachary Yocum" __email__ = "zyocum@brandeis.edu" import os from warnings import warn from bs4 import BeautifulSoup as BS, CData from bs4.element import Tag dummy_tag = Tag(name='NULL') # Classes class Extent(object): """A class for loading tagged data from XML doc with surrounding token and tag data""" def __init__(self, sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, front, back, basename, document): self.token = [t for t, l in sent[front:back]] self.lex = [l for t, l in sent[front:back]] self.prev_tokens = sent[:front] self.next_tokens = sent [back:] self.tag = tag_dict.get(self.lex[0].begin, {}) self.prev_tags = [ tag_dict.get(l.begin, {}) for t, l in self.prev_tokens if l.begin in tag_dict.keys() ] self.next_tags = [ tag_dict.get(l.begin, {}) for t, l in self.next_tokens if l.begin in tag_dict.keys() ] self.basename = basename self.document = document class Document(BS): """A class for working with MAE annotation XMLs.""" def __init__(self, doc_file): super(Document, self).__init__(doc_file.read(), "xml") from tokenizer import Tokenizer self.root = self.children.next() self.task = self.root.name self.name = doc_file.name self.basename = os.path.basename(self.name) self.dirname = os.path.dirname(self.name) self.tokenizer = Tokenizer(self.text()) self.tokenizer.tokenize_text() def __repr__(self): return "Document:{d}".format(d=os.path.basename(self.name)) def text(self): return u''.join(map(lambda t : t.decode_contents(), self('TEXT'))) def tags(self, ttypes=None): """Return all annotation tags whose type is in ttypes (if ttypes is unspecified, all tags are returned).""" is_tag = lambda item : isinstance(item, Tag) if not self.find('TAGS'): tags = [] else: tags = filter(is_tag, self.find('TAGS').children) if ttypes: tags = filter(lambda tag : tag.name in ttypes, tags) return tags def query_extents(self, ttypes, start, end): """Return a list of extent tags whose types are in the list of ttypes and whose start and end attributes match the given start and end.""" matches = lambda t : \ map(int, (t['start'], t['end'])) == map(int, (start, end)) return filter(matches, self.tags(ttypes=ttypes)) def query_links(self, ttypes, trigger_id): """Return a list of link tags whose types are in the list of ttypes and whose trigger has the specified trigger id.""" matches = lambda t : unicode(t['trigger']) == unicode(trigger_id) return filter(matches, self.tags(ttypes=ttypes)) def query_links_by_attr(self, ttypes, attr_name, attr_value): """Return a list of link tags whose types are in the list of ttypes and whose attribute field has the specified value.""" matches = lambda t : unicode(t[attr_name]) == unicode(attr_value) return filter(matches, self.tags(ttypes=ttypes)) def query(self, tag_id): """Return the tag whose identifier matches the specified id.""" matches = lambda t : t.attrs.get('id', object()) == unicode(tag_id) results = filter(matches, self.tags()) if any(results): return results[0] else: return None def add_attribute(self, attribute, value=u'', ttypes=None): """Add an attribute to a tag (and possibly specify it's value).""" for tag in self.tags(ttypes): if not attribute in tag.attrs.keys(): tag[attribute] = value def rename_attribute(self, old_ttype, new_ttype, ttypes=None): """Change the name of attributes for all tags with the given ttypes.""" for tag in self.tags(ttypes): if tag.attrs.get(old_ttype): tag.attrs[new_ttype] = tag.attrs.pop(old_ttype) def rename_tag(self, old_ttype, new_ttype): """Rename a tag.""" for tag in self.tags([old_ttype]): tag.name 
= new_ttype def rename_task(self, new_task): """Rename the document task (the XML root tag type).""" self.task = new_task def consuming_tags(self): """Return extent annotation tags with non-negative starting offsets.""" is_extent_tag = lambda t : t.attrs.has_key('start') is_consuming = lambda t : int(t['start']) >= 0 return filter(is_consuming, filter(is_extent_tag, self.tags())) def sort_tags_by_begin_offset(self): """Make dictionary of tag objects keyed on their 'start' field. Used for matching tags to tokens using offsets""" tag_dict = {} movelink_tag_dict = {} olink_tag_dict = {} qslink_tag_dict = {} tags = self.tags() for t in tags: # load entity / event / signal tags if 'start' in t.attrs: tag_dict[int(t.attrs['start'])] = t.attrs # {start offset: xml tokens, offsets, spatial data} # load movelink tags if t.attrs.get('id', '').startswith('mvl'): movelink_tag_dict[t.attrs['trigger']] = t.attrs # load qslinks if t.attrs.get('id', '').startswith('qs'): if t.attrs['trigger']: qslink_tag_dict[t.attrs['trigger']] = t.attrs elif t.attrs['fromText']: qslink_tag_dict[t.attrs['fromID']] = t.attrs elif t.attrs['toText']: qslink_tag_dict[t.attrs['toID']] = t.attrs # load olinks if t.attrs.get('id', '').startswith('ol'): if t.attrs['trigger']: olink_tag_dict[t.attrs['trigger']] = t.attrs elif t.attrs['fromText']: olink_tag_dict[t.attrs['fromID']] = t.attrs elif t.attrs['toText']: olink_tag_dict[t.attrs['toID']] = t.attrs return tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict def extents(self, indices_function, extent_class=Extent): tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict = self.sort_tags_by_begin_offset() for s in self.tokenizer.tokenize_text().sentences: sent = s.as_pairs() # [ (token, lexeme obj), (token, lexeme obj), ...] offsets = indices_function(sent, tag_dict) for begin, end in offsets: extent = extent_class(sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, begin, end, self.basename, self) yield extent def qs_o_link_triples(self, indices_function, extent_class=Extent): tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict = self.sort_tags_by_begin_offset() for s in self.tokenizer.tokenize_text().sentences: sent = s.as_pairs() # [ (token, lexeme obj), (token, lexeme obj), ...] offsets = indices_function(sent, tag_dict) for begin, end in offsets: extent = extent_class(sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, begin, end, self.basename, self) trigger = extent.tag tags = extent.prev_tags + extent.next_tags for from_tag in tags: for to_tag in tags: if to_tag['id'] != from_tag['id']: alt_extent = extent_class(sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, begin, end, self.basename, self) alt_extent.token = (trigger, from_tag, to_tag) yield alt_extent def move_link_triples(self, indices_function, extent_class=Extent): tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict = self.sort_tags_by_begin_offset() for s in self.tokenizer.tokenize_text().sentences: sent = s.as_pairs() # [ (token, lexeme obj), (token, lexeme obj), ...] 
offsets = indices_function(sent, tag_dict) for begin, end in offsets: extent = extent_class(sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, begin, end, self.basename, self) tags = extent.prev_tags + extent.next_tags + [{'id': '', 'start': '-1', 'end': '-1'}] for to_tag in tags: alt_extent = extent_class(sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, begin, end, self.basename, self) alt_extent.token = (extent.tag, extent.tag, to_tag) yield alt_extent def validate(self): is_valid = True tag_count = len(self.tags()) if not (tag_count > 0): is_valid = False warning = '\n'.join([ 'No tag elements found', "\tFile : '{doc}'" ]).format(doc=self.name) warn(warning, RuntimeWarning) for tag in self.consuming_tags(): start, end = int(tag['start']), int(tag['end']) extent = slice(start, end) text_attribute = tag['text'].encode('utf-8') text_slice = self.text()[extent].encode('utf-8').replace('\n', ' ') if text_attribute != text_slice: is_valid = False warning = '\n'.join([ 'Misaligned extent tag', "\tFile : '{doc}'", '\tSpan : [{start}:{end}]', "\tTag : '{id}'", "\tText : '{text}'", "\tSlice : '{slice}'" ]).format( doc=self.name, start=start, end=end, id=tag['id'], text=text_attribute, slice=text_slice ) warn(warning, RuntimeWarning) return is_valid def get_xml(self): xml = u'<?xml version="1.0" encoding="UTF-8" ?>\n' root = Tag(name=self.task) text = Tag(name='TEXT') text.append(CData(self.text())) tags = self.TAGS tokens = (BS( self.tokenizer.get_tokenized_as_xml().encode('utf-8'), 'xml' )).TOKENS elements = [u'\n', text, u'\n', tags, u'\n', tokens, u'\n'] for element in elements: if element: # if missing tags, system will crash root.append(element) xml += unicode(root) return xml def save_xml(self, file): if isinstance(file, basestring): with open(file, 'wb') as file: file.write(self.get_xml().encode('utf-8')) else: file.write(self.get_xml().encode('utf-8')) class Corpus(object): """A class for working with collections of Documents.""" def __init__(self, directory, pattern='.*\.xml', recursive=True): super(Corpus, self).__init__() self.directory = directory self.pattern = pattern self.recursive = recursive self.validate() def documents(self): candidates = find_files(self.directory, self.pattern, self.recursive) for xml_path in filter(is_xml, candidates): with open(xml_path, 'rb') as file: yield Document(file) def extents(self, indices_function, extent_class=Extent): for doc in self.documents(): tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict = doc.sort_tags_by_begin_offset() for s in doc.tokenizer.tokenize_text().sentences: sent = s.as_pairs() # [ (token, lexeme obj), (token, lexeme obj), ...] 
offsets = indices_function(sent, tag_dict) for begin, end in offsets: extent = extent_class(sent, tag_dict, movelink_tag_dict, olink_tag_dict, qslink_tag_dict, begin, end, doc.basename, doc) yield extent def qs_o_link_triples(self, indices_function, extent_class=Extent): extents = [] for doc in self.documents(): doc_extents = doc.qs_o_link_triples(indices_function, extent_class) extents.extend(doc_extents) return extents def move_link_triples(self, indices_function, extent_class=Extent): extents = [] for doc in self.documents(): doc_extents = doc.move_link_triples(indices_function, extent_class) extents.extend(doc_extents) return extents def validate(self): map(Document.validate, self.documents()) class HypotheticalDocument(Document): """docstring for HypotheticalDocument""" def insert_tag(self, tag_dict): """docstring for insert_tag""" tag = Tag(name=tag_dict.pop('name')) tag.attrs = tag_dict if not self.findAll('TAGS'): self.root.append(Tag(name='TAGS')) self.TAGS.append(tag) self.TAGS.append('\n') class HypotheticalCorpus(Corpus): """docstring for HypotheticalCorpus""" def documents(self): candidates = find_files(self.directory, self.pattern, self.recursive) for xml_path in filter(is_xml, candidates): with open(xml_path, 'rb') as file: yield HypotheticalDocument(file) # General functions def validate_mime_type(file_path, valid_mime_types): from mimetypes import guess_type mime_type, encoding = guess_type(file_path) valid = mime_type in valid_mime_types if not valid: warning = '\n\t'.join([ 'Invalid MIME type', 'File : {c_path}', 'Type : {type}' ]) warn(warning.format(path=file_path, type=mime_type), RuntimeWarning) return valid def is_xml(file_path): return validate_mime_type(file_path, set(['application/xml', 'text/xml'])) def find_files(directory='.', pattern='.*', recursive=True): import re if recursive: return (os.path.join(directory, filename) for directory, subdirectories, filenames in os.walk(directory) for filename in filenames if re.match(pattern, filename)) else: return (os.path.join(directory, filename) for filename in os.listdir(directory) if re.match(pattern, filename))
mit
yasoob/PythonRSSReader
venv/lib/python2.7/dist-packages/twisted/web/_auth/basic.py
66
1635
# -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
HTTP BASIC authentication.

@see: U{http://tools.ietf.org/html/rfc1945}
@see: U{http://tools.ietf.org/html/rfc2616}
@see: U{http://tools.ietf.org/html/rfc2617}
"""

import binascii

from zope.interface import implements

from twisted.cred import credentials, error
from twisted.web.iweb import ICredentialFactory


class BasicCredentialFactory(object):
    """
    Credential Factory for HTTP Basic Authentication

    @type authenticationRealm: C{str}
    @ivar authenticationRealm: The HTTP authentication realm which will be
        issued in challenges.
    """
    implements(ICredentialFactory)

    scheme = 'basic'

    def __init__(self, authenticationRealm):
        self.authenticationRealm = authenticationRealm


    def getChallenge(self, request):
        """
        Return a challenge including the HTTP authentication realm with which
        this factory was created.
        """
        return {'realm': self.authenticationRealm}


    def decode(self, response, request):
        """
        Parse the base64-encoded, colon-separated username and password into a
        L{credentials.UsernamePassword} instance.
        """
        try:
            creds = binascii.a2b_base64(response + '===')
        except binascii.Error:
            raise error.LoginFailed('Invalid credentials')

        creds = creds.split(':', 1)
        if len(creds) == 2:
            return credentials.UsernamePassword(*creds)
        else:
            raise error.LoginFailed('Invalid credentials')
mit
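The decode step in the Twisted factory above recovers a base64-encoded "user:password" pair from the Authorization header. A standalone sketch of the same round trip, with made-up credentials:

import binascii

raw = b'alice:secret'
encoded = binascii.b2a_base64(raw).strip()        # what a client sends after "Basic "
decoded = binascii.a2b_base64(encoded + b'===')   # extra '===' guarantees enough padding
username, password = decoded.split(b':', 1)       # split once, so passwords may contain ':'
assert (username, password) == (b'alice', b'secret')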
npiganeau/odoo
addons/l10n_gr/__openerp__.py
168
1883
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2009 P. Christeas <p_christ@hol.gr>. All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Greece - Accounting', 'version': '0.2', 'author': 'P. Christeas, OpenERP SA.', 'website': 'http://openerp.hellug.gr/', 'category': 'Localization/Account Charts', 'description': """ This is the base module to manage the accounting chart for Greece. ================================================================== Greek accounting chart and localization. """, 'depends': ['base', 'account', 'base_iban', 'base_vat', 'account_chart'], 'demo': [], 'data': [ 'account_types.xml', 'account_chart.xml', 'account_full_chart.xml', 'account_tax.xml', 'account_tax_vat.xml', 'l10n_gr_wizard.xml' ], 'installable': True, 'images': ['images/config_chart_l10n_gr.jpeg','images/l10n_gr_chart.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
hilaskis/UAV_MissionPlanner
Lib/distutils/core.py
175
9093
"""distutils.core The only module that needs to be imported to use the Distutils; provides the 'setup' function (which is to be called from the setup script). Also indirectly provides the Distribution and Command classes, although they are really defined in distutils.dist and distutils.cmd. """ __revision__ = "$Id$" import sys import os from distutils.debug import DEBUG from distutils.errors import (DistutilsSetupError, DistutilsArgError, DistutilsError, CCompilerError) from distutils.util import grok_environment_error # Mainly import these so setup scripts can "from distutils.core import" them. from distutils.dist import Distribution from distutils.cmd import Command from distutils.config import PyPIRCCommand from distutils.extension import Extension # This is a barebones help message generated displayed when the user # runs the setup script with no arguments at all. More useful help # is generated with various --help options: global help, list commands, # and per-command help. USAGE = """\ usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] or: %(script)s --help [cmd1 cmd2 ...] or: %(script)s --help-commands or: %(script)s cmd --help """ def gen_usage(script_name): script = os.path.basename(script_name) return USAGE % {'script': script} # Some mild magic to control the behaviour of 'setup()' from 'run_setup()'. _setup_stop_after = None _setup_distribution = None # Legal keyword arguments for the setup() function setup_keywords = ('distclass', 'script_name', 'script_args', 'options', 'name', 'version', 'author', 'author_email', 'maintainer', 'maintainer_email', 'url', 'license', 'description', 'long_description', 'keywords', 'platforms', 'classifiers', 'download_url', 'requires', 'provides', 'obsoletes', ) # Legal keyword arguments for the Extension constructor extension_keywords = ('name', 'sources', 'include_dirs', 'define_macros', 'undef_macros', 'library_dirs', 'libraries', 'runtime_library_dirs', 'extra_objects', 'extra_compile_args', 'extra_link_args', 'swig_opts', 'export_symbols', 'depends', 'language') def setup(**attrs): """The gateway to the Distutils: do everything your setup script needs to do, in a highly flexible and user-driven way. Briefly: create a Distribution instance; find and parse config files; parse the command line; run each Distutils command found there, customized by the options supplied to 'setup()' (as keyword arguments), in config files, and on the command line. The Distribution instance might be an instance of a class supplied via the 'distclass' keyword argument to 'setup'; if no such class is supplied, then the Distribution class (in dist.py) is instantiated. All other arguments to 'setup' (except for 'cmdclass') are used to set attributes of the Distribution instance. The 'cmdclass' argument, if supplied, is a dictionary mapping command names to command classes. Each command encountered on the command line will be turned into a command class, which is in turn instantiated; any class found in 'cmdclass' is used in place of the default, which is (for command 'foo_bar') class 'foo_bar' in module 'distutils.command.foo_bar'. The command class must provide a 'user_options' attribute which is a list of option specifiers for 'distutils.fancy_getopt'. Any command-line options between the current and the next command are used to set attributes of the current command object. When the entire command-line has been successfully parsed, calls the 'run()' method on each command object in turn. 
This method will be driven entirely by the Distribution object (which each command object has a reference to, thanks to its constructor), and the command-specific options that became attributes of each command object. """ global _setup_stop_after, _setup_distribution # Determine the distribution class -- either caller-supplied or # our Distribution (see below). klass = attrs.get('distclass') if klass: del attrs['distclass'] else: klass = Distribution if 'script_name' not in attrs: attrs['script_name'] = os.path.basename(sys.argv[0]) if 'script_args' not in attrs: attrs['script_args'] = sys.argv[1:] # Create the Distribution instance, using the remaining arguments # (ie. everything except distclass) to initialize it try: _setup_distribution = dist = klass(attrs) except DistutilsSetupError, msg: if 'name' in attrs: raise SystemExit, "error in %s setup command: %s" % \ (attrs['name'], msg) else: raise SystemExit, "error in setup command: %s" % msg if _setup_stop_after == "init": return dist # Find and parse the config file(s): they will override options from # the setup script, but be overridden by the command line. dist.parse_config_files() if DEBUG: print "options (after parsing config files):" dist.dump_option_dicts() if _setup_stop_after == "config": return dist # Parse the command line and override config files; any # command-line errors are the end user's fault, so turn them into # SystemExit to suppress tracebacks. try: ok = dist.parse_command_line() except DistutilsArgError, msg: raise SystemExit, gen_usage(dist.script_name) + "\nerror: %s" % msg if DEBUG: print "options (after parsing command line):" dist.dump_option_dicts() if _setup_stop_after == "commandline": return dist # And finally, run all the commands found on the command line. if ok: try: dist.run_commands() except KeyboardInterrupt: raise SystemExit, "interrupted" except (IOError, os.error), exc: error = grok_environment_error(exc) if DEBUG: sys.stderr.write(error + "\n") raise else: raise SystemExit, error except (DistutilsError, CCompilerError), msg: if DEBUG: raise else: raise SystemExit, "error: " + str(msg) return dist def run_setup(script_name, script_args=None, stop_after="run"): """Run a setup script in a somewhat controlled environment, and return the Distribution instance that drives things. This is useful if you need to find out the distribution meta-data (passed as keyword args from 'script' to 'setup()', or the contents of the config files or command-line. 'script_name' is a file that will be run with 'execfile()'; 'sys.argv[0]' will be replaced with 'script' for the duration of the call. 'script_args' is a list of strings; if supplied, 'sys.argv[1:]' will be replaced by 'script_args' for the duration of the call. 'stop_after' tells 'setup()' when to stop processing; possible values: init stop after the Distribution instance has been created and populated with the keyword arguments to 'setup()' config stop after config files have been parsed (and their data stored in the Distribution instance) commandline stop after the command-line ('sys.argv[1:]' or 'script_args') have been parsed (and the data stored in the Distribution) run [default] stop after all commands have been run (the same as if 'setup()' had been called in the usual way Returns the Distribution instance, which provides all information used to drive the Distutils. 
""" if stop_after not in ('init', 'config', 'commandline', 'run'): raise ValueError, "invalid value for 'stop_after': %r" % (stop_after,) global _setup_stop_after, _setup_distribution _setup_stop_after = stop_after save_argv = sys.argv g = {'__file__': script_name} l = {} try: try: sys.argv[0] = script_name if script_args is not None: sys.argv[1:] = script_args f = open(script_name) try: exec f.read() in g, l finally: f.close() finally: sys.argv = save_argv _setup_stop_after = None except SystemExit: # Hmm, should we do something if exiting with a non-zero code # (ie. error)? pass except: raise if _setup_distribution is None: raise RuntimeError, \ ("'distutils.core.setup()' was never called -- " "perhaps '%s' is not a Distutils setup script?") % \ script_name # I wonder if the setup script's namespace -- g and l -- would be of # any interest to callers? return _setup_distribution
gpl-2.0
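The run_setup helper documented above is handy for reading a project's metadata without running any build commands. A rough sketch, assuming a conventional setup.py sits in the current directory:

from distutils.core import run_setup

# Stop after the Distribution object is configured; no commands are executed.
dist = run_setup('setup.py', stop_after='config')
print(dist.metadata.name, dist.metadata.version)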
variablehair/Eggplantato
discord/utils.py
1
9060
# -*- coding: utf-8 -*- """ The MIT License (MIT) Copyright (c) 2015-2017 Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from re import split as re_split from .errors import HTTPException, Forbidden, NotFound, InvalidArgument import datetime from base64 import b64encode import asyncio import json import warnings, functools DISCORD_EPOCH = 1420070400000 class cached_property: def __init__(self, function): self.function = function self.__doc__ = getattr(function, '__doc__') def __get__(self, instance, owner): if instance is None: return self value = self.function(instance) setattr(instance, self.function.__name__, value) return value class CachedSlotProperty: def __init__(self, name, function): self.name = name self.function = function self.__doc__ = getattr(function, '__doc__') def __get__(self, instance, owner): if instance is None: return self try: return getattr(instance, self.name) except AttributeError: value = self.function(instance) setattr(instance, self.name, value) return value def cached_slot_property(name): def decorator(func): return CachedSlotProperty(name, func) return decorator def parse_time(timestamp): if timestamp: return datetime.datetime(*map(int, re_split(r'[^\d]', timestamp.replace('+00:00', '')))) return None def deprecated(instead=None): def actual_decorator(func): @functools.wraps(func) def decorated(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) # turn off filter if instead: fmt = "{0.__name__} is deprecated, use {1} instead." else: fmt = '{0.__name__} is deprecated.' warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning) warnings.simplefilter('default', DeprecationWarning) # reset filter return func(*args, **kwargs) return decorated return actual_decorator def oauth_url(client_id, permissions=None, guild=None, redirect_uri=None): """A helper function that returns the OAuth2 URL for inviting the bot into guilds. Parameters ----------- client_id : str The client ID for your bot. permissions : :class:`Permissions` The permissions you're requesting. If not given then you won't be requesting any permissions. guild : :class:`Guild` The guild to pre-select in the authorization screen, if available. redirect_uri : str An optional valid redirect URI. 
""" url = 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id) if permissions is not None: url = url + '&permissions=' + str(permissions.value) if guild is not None: url = url + "&guild_id=" + guild.id if redirect_uri is not None: from urllib.parse import urlencode url = url + "&response_type=code&" + urlencode({'redirect_uri': redirect_uri}) return url def snowflake_time(id): """Returns the creation date in UTC of a discord id.""" return datetime.datetime.utcfromtimestamp(((id >> 22) + DISCORD_EPOCH) / 1000) def time_snowflake(datetime_obj, high=False): """Returns a numeric snowflake pretending to be created at the given date. When using as the lower end of a range, use time_snowflake(high=False) - 1 to be inclusive, high=True to be exclusive When using as the higher end of a range, use time_snowflake(high=True) + 1 to be inclusive, high=False to be exclusive Parameters ----------- datetime_obj A timezone-naive datetime object representing UTC time. high Whether or not to set the lower 22 bit to high or low. """ unix_seconds = (datetime_obj - type(datetime_obj)(1970, 1, 1)).total_seconds() discord_millis = int(unix_seconds * 1000 - DISCORD_EPOCH) return (discord_millis << 22) + (2**22-1 if high else 0) def find(predicate, seq): """A helper to return the first element found in the sequence that meets the predicate. For example: :: member = find(lambda m: m.name == 'Mighty', channel.guild.members) would find the first :class:`Member` whose name is 'Mighty' and return it. If an entry is not found, then ``None`` is returned. This is different from `filter`_ due to the fact it stops the moment it finds a valid entry. .. _filter: https://docs.python.org/3.6/library/functions.html#filter Parameters ----------- predicate A function that returns a boolean-like result. seq : iterable The iterable to search through. """ for element in seq: if predicate(element): return element return None def get(iterable, **attrs): """A helper that returns the first element in the iterable that meets all the traits passed in ``attrs``. This is an alternative for :func:`discord.utils.find`. When multiple attributes are specified, they are checked using logical AND, not logical OR. Meaning they have to meet every attribute passed in and not one of them. To have a nested attribute search (i.e. search by ``x.y``) then pass in ``x__y`` as the keyword argument. If nothing is found that matches the attributes passed, then ``None`` is returned. Examples --------- Basic usage: .. code-block:: python member = discord.utils.get(message.guild.members, name='Foo') Multiple attribute matching: .. code-block:: python channel = discord.utils.get(guild.channels, name='Foo', type=ChannelType.voice) Nested attribute matching: .. code-block:: python channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general') Parameters ----------- iterable An iterable to search through. \*\*attrs Keyword arguments that denote attributes to search with. 
""" def predicate(elem): for attr, val in attrs.items(): nested = attr.split('__') obj = elem for attribute in nested: obj = getattr(obj, attribute) if obj != val: return False return True return find(predicate, iterable) def _unique(iterable): seen = set() adder = seen.add return [x for x in iterable if not (x in seen or adder(x))] def _get_as_snowflake(data, key): try: value = data[key] except KeyError: return None else: if value is None: return value return int(value) def _get_mime_type_for_image(data): if data.startswith(b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'): return 'image/png' elif data.startswith(b'\xFF\xD8') and data.endswith(b'\xFF\xD9'): return 'image/jpeg' elif data.startswith(b'\x47\x49\x46\x38\x37\x61') or data.startswith(b'\x47\x49\x46\x38\x39\x61'): return 'image/gif' else: raise InvalidArgument('Unsupported image type given') def _bytes_to_base64_data(data): fmt = 'data:{mime};base64,{data}' mime = _get_mime_type_for_image(data) b64 = b64encode(data).decode('ascii') return fmt.format(mime=mime, data=b64) def to_json(obj): return json.dumps(obj, separators=(',', ':'), ensure_ascii=True) @asyncio.coroutine def maybe_coroutine(f, *args, **kwargs): if asyncio.iscoroutinefunction(f): return (yield from f(*args, **kwargs)) else: return f(*args, **kwargs) @asyncio.coroutine def async_all(gen): check = asyncio.iscoroutine for elem in gen: if check(elem): elem = yield from elem if not elem: return False return True @asyncio.coroutine def sane_wait_for(futures, *, timeout, loop): done, pending = yield from asyncio.wait(futures, timeout=timeout, loop=loop) if len(pending) != 0: raise asyncio.TimeoutError()
mit
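snowflake_time above recovers a creation date by shifting away the 22 low-order worker/sequence bits and re-adding the Discord epoch. A small worked example with an illustrative id:

import datetime

DISCORD_EPOCH = 1420070400000  # same constant as in the module above

snowflake = 175928847299117063           # illustrative id
millis = (snowflake >> 22) + DISCORD_EPOCH
print(datetime.datetime.utcfromtimestamp(millis / 1000))
# -> roughly 2016-04-30 11:18:25 UTC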
jmichalicek/bsblog
bsblog/tests.py
1
6520
from django.test import TestCase from django.utils import timezone from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.test.client import Client import markdown from models import * TEST_USER = 'test_user' TEST_PASSWORD = 'test_password' TEST_EMAIL = 'none@example.com' class CategoryTests(TestCase): """Test the Category model""" def setUp(self): self.category = Category.objects.create(name='Test Category') def tearDown(self): self.category.delete() class CategoryUnicodeTests(CategoryTests): """Test the __unicode__() method of the Category class""" def test_unicode(self): """Test the __unicode__() method ouptut""" self.assertEqual(self.category.name, self.category.__unicode__()) class PostTests(TestCase): """Test the Post model""" fixtures = ['test_postmodeltests'] def setUp(self): self.post = Post.objects.get(id=1) class PostUnicodeTests(PostTests): """Test the __unicode__() method of the Post class""" def test_unicode(self): self.assertEqual(self.post.title, self.post.__unicode__()) class PostGetAbsoluteUrlTests(PostTests): """Test the get_absolute_url() method of the Post class""" def test_get_absolute_url(self): year = self.post.created_date.strftime('%Y') month = self.post.created_date.strftime('%b').lower() day = self.post.created_date.strftime('%d') self.assertEqual(self.post.get_absolute_url(), reverse('bsblog_full_post_url', args=[year, month, day, self.post.slug])) class PostSaveTests(PostTests): """Test the save() method of the Post class""" def test_markdown(self): """Test the overridden save() method converts the text_markdown field and saves in text_html""" self.post.text_html = '' self.post.text_markdown = '''**test**''' self.post.save() self.assertEqual(self.post.text_html, markdown.markdown(self.post.text_markdown, safe_mode=False)) class UserProfileTests(TestCase): """Test the UserProfile model""" fixtures = ['test_userprofiletests'] def setUp(self): self.user_profile = UserProfile.objects.get(id=1) class UserProfileUnicodeTests(UserProfileTests): """Test the __unicode__() method of the UserProfile class""" def test_unicode(self): self.assertEqual(self.user_profile.user.username, self.user_profile.__unicode__()) class ArchiveViewTests(TestCase): """Test the archive view""" fixtures = ['test_archiveviewtests'] def setUp(self): self.client = Client() self.post_2012 = Post.objects.get(id=1) self.post_2011 = Post.objects.get(id=2) def test_get_response_code(self): """Make sure we get a 200 response code""" response = self.client.get(reverse('bsblog_archive')) self.assertEqual(response.status_code, 200) def test_template(self): response = self.client.get(reverse('bsblog_archive')) self.assertEqual(response.templates[0].name, 'bsblog/archive.html') def test_no_args(self): response = self.client.get(reverse('bsblog_archive')) posts = response.context['post_list'] self.assertEqual(2, len(posts)) self.assertTrue(self.post_2012 in posts) self.assertTrue(self.post_2011 in posts) class IndexViewTests(TestCase): """Test the index view""" fixtures = ['test_indexviewtests'] def setUp(self): self.published_post = Post.objects.get(id=1) self.unpublished_post = Post.objects.get(id=2) def test_get_response_code(self): """Make sure we get a 200 response code""" response = self.client.get(reverse('bsblog_main')) self.assertEqual(response.status_code, 200) def test_template(self): response = self.client.get(reverse('bsblog_main')) self.assertEqual(response.templates[0].name, 'bsblog/index.html') 
def test_empty_page(self): """Verify that a 404 is returned when a page with no results is requested""" response = self.client.get(reverse('bsblog_main', args=[2])) self.assertEqual(response.status_code, 404) def test_context_post_list(self): """Make sure our posts are in the post_list item in the context""" response = self.client.get(reverse('bsblog_main')) self.assertTrue('post_list' in response.context) self.assertTrue(self.published_post in response.context['post_list']) def test_unpublished_post(self): """Make sure unpublished posts are NOT in the post_list""" response = self.client.get(reverse('bsblog_main')) self.assertFalse(self.unpublished_post in response.context['post_list']) class ItemViewTests(TestCase): """Test the item view""" fixtures = ['test_itemviewtests'] def setUp(self): self.post = Post.objects.get(id=1) def test_get_response_code(self): """Make sure we get a 200 response code""" year = self.post.created_date.strftime('%Y') month = self.post.created_date.strftime('%b').lower() day = self.post.created_date.strftime('%d') response = self.client.get(reverse('bsblog_full_post_url', args=[year, month, day, self.post.slug])) self.assertEqual(response.status_code, 200) def test_template(self): year = self.post.created_date.strftime('%Y') month = self.post.created_date.strftime('%b').lower() day = self.post.created_date.strftime('%d') response = self.client.get(reverse('bsblog_full_post_url', args=[year, month, day, self.post.slug])) self.assertEqual(response.templates[0].name, 'bsblog/blog_post.html') def test_context_post(self): year = self.post.created_date.strftime('%Y') month = self.post.created_date.strftime('%b').lower() day = self.post.created_date.strftime('%d') response = self.client.get(reverse('bsblog_full_post_url', args=[year, month, day, self.post.slug])) self.assertTrue('post' in response.context) self.assertEqual(response.context['post'], self.post)
bsd-2-clause
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
PyFoam/ThirdParty/Gnuplot/gp_mac.py
3
3525
# $Id: gp_mac.py 292 2006-03-03 09:49:04Z mhagger $ # Copyright (C) 1999-2003 Michael Haggerty <mhagger@alum.mit.edu> # Thanks to Tony Ingraldi and Noboru Yamamoto for their contributions. # # This file is licensed under the GNU Lesser General Public License # (LGPL). See LICENSE.txt for details. """gp_mac -- an interface to gnuplot for the Macintosh. """ import os, string from . import Errors # ############ Configuration variables: ################################ class GnuplotOpts: """The configuration options for gnuplot on the Macintosh. See gp.py for details about the meaning of these options. Please let me know if you know better choices for these settings.""" # The '-persist' option is not supported on the Mac: recognizes_persist = 0 # Apparently the Mac can use binary data: recognizes_binary_splot = 1 # Apparently the Mac can not use inline data: prefer_inline_data = 0 # os.mkfifo is not supported on the Mac. support_fifo = 0 prefer_fifo_data = 0 # The default choice for the 'set term' command (to display on screen). # Terminal types are different in Gnuplot 3.7.1c. # For earlier versions, this was default_term = 'macintosh' default_term = 'pict' # I don't know how to print directly to a printer on the Mac: default_lpr = '| lpr' # Used the 'enhanced' option of postscript by default? Set to # None (*not* 0!) if your version of gnuplot doesn't support # enhanced postscript. prefer_enhanced_postscript = 1 # ############ End of configuration options ############################ # The Macintosh doesn't support pipes so communication is via # AppleEvents. from . import gnuplot_Suites import Required_Suite import aetools # Mac doesn't recognize persist. def test_persist(): return 0 class _GNUPLOT(aetools.TalkTo, Required_Suite.Required_Suite, gnuplot_Suites.gnuplot_Suite, gnuplot_Suites.odds_and_ends, gnuplot_Suites.Standard_Suite, gnuplot_Suites.Miscellaneous_Events): """Start a gnuplot program and emulate a pipe to it.""" def __init__(self): aetools.TalkTo.__init__(self, '{GP}', start=1) class GnuplotProcess: """Unsophisticated interface to a running gnuplot program. See gp_unix.GnuplotProcess for usage information. """ def __init__(self, persist=0): """Start a gnuplot process. Create a 'GnuplotProcess' object. This starts a gnuplot program and prepares to write commands to it. Keyword arguments: 'persist' -- the '-persist' option is not supported on the Macintosh so this argument must be zero. """ if persist: raise Errors.OptionError( '-persist is not supported on the Macintosh!') self.gnuplot = _GNUPLOT() def close(self): if self.gnuplot is not None: self.gnuplot.quit() self.gnuplot = None def __del__(self): self.close() def write(self, s): """Mac gnuplot apparently requires '\r' to end statements.""" self.gnuplot.gnuexec(string.replace(s, '\n', os.linesep)) def flush(self): pass def __call__(self, s): """Send a command string to gnuplot, for immediate execution.""" # Apple Script doesn't seem to need the trailing '\n'. self.write(s) self.flush() # Should work with Python3 and Python2
gpl-2.0
sdague/home-assistant
tests/helpers/test_config_entry_flow.py
5
11179
"""Tests for the Config Entry Flow helper.""" import pytest from homeassistant import config_entries, data_entry_flow, setup from homeassistant.config import async_process_ha_core_config from homeassistant.helpers import config_entry_flow from tests.async_mock import Mock, patch from tests.common import ( MockConfigEntry, MockModule, mock_entity_platform, mock_integration, ) @pytest.fixture def discovery_flow_conf(hass): """Register a handler.""" handler_conf = {"discovered": False} async def has_discovered_devices(hass): """Mock if we have discovered devices.""" return handler_conf["discovered"] with patch.dict(config_entries.HANDLERS): config_entry_flow.register_discovery_flow( "test", "Test", has_discovered_devices, config_entries.CONN_CLASS_LOCAL_POLL ) yield handler_conf @pytest.fixture def webhook_flow_conf(hass): """Register a handler.""" with patch.dict(config_entries.HANDLERS): config_entry_flow.register_webhook_flow("test_single", "Test Single", {}, False) config_entry_flow.register_webhook_flow( "test_multiple", "Test Multiple", {}, True ) yield {} async def test_single_entry_allowed(hass, discovery_flow_conf): """Test only a single entry is allowed.""" flow = config_entries.HANDLERS["test"]() flow.hass = hass flow.context = {} MockConfigEntry(domain="test").add_to_hass(hass) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "single_instance_allowed" async def test_user_no_devices_found(hass, discovery_flow_conf): """Test if no devices found.""" flow = config_entries.HANDLERS["test"]() flow.hass = hass flow.context = {"source": config_entries.SOURCE_USER} result = await flow.async_step_confirm(user_input={}) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "no_devices_found" async def test_user_has_confirmation(hass, discovery_flow_conf): """Test user requires confirmation to setup.""" discovery_flow_conf["discovered"] = True mock_entity_platform(hass, "config_flow.test", None) result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_USER}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "confirm" result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY @pytest.mark.parametrize("source", ["discovery", "mqtt", "ssdp", "zeroconf"]) async def test_discovery_single_instance(hass, discovery_flow_conf, source): """Test we not allow duplicates.""" flow = config_entries.HANDLERS["test"]() flow.hass = hass flow.context = {} MockConfigEntry(domain="test").add_to_hass(hass) result = await getattr(flow, f"async_step_{source}")({}) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "single_instance_allowed" @pytest.mark.parametrize("source", ["discovery", "mqtt", "ssdp", "zeroconf"]) async def test_discovery_confirmation(hass, discovery_flow_conf, source): """Test we ask for confirmation via discovery.""" flow = config_entries.HANDLERS["test"]() flow.hass = hass flow.context = {"source": source} result = await getattr(flow, f"async_step_{source}")({}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "confirm" result = await flow.async_step_confirm({}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY async def test_multiple_discoveries(hass, discovery_flow_conf): """Test we only create one instance for multiple 
discoveries.""" mock_entity_platform(hass, "config_flow.test", None) result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM # Second discovery result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT async def test_only_one_in_progress(hass, discovery_flow_conf): """Test a user initialized one will finish and cancel discovered one.""" mock_entity_platform(hass, "config_flow.test", None) # Discovery starts flow result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM # User starts flow result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_USER}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM # Discovery flow has not been aborted assert len(hass.config_entries.flow.async_progress()) == 2 # Discovery should be aborted once user confirms result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert len(hass.config_entries.flow.async_progress()) == 0 async def test_import_abort_discovery(hass, discovery_flow_conf): """Test import will finish and cancel discovered one.""" mock_entity_platform(hass, "config_flow.test", None) # Discovery starts flow result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM # Start import flow result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_IMPORT}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY # Discovery flow has been aborted assert len(hass.config_entries.flow.async_progress()) == 0 async def test_import_no_confirmation(hass, discovery_flow_conf): """Test import requires no confirmation to set up.""" flow = config_entries.HANDLERS["test"]() flow.hass = hass flow.context = {} discovery_flow_conf["discovered"] = True result = await flow.async_step_import(None) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY async def test_import_single_instance(hass, discovery_flow_conf): """Test import doesn't create second instance.""" flow = config_entries.HANDLERS["test"]() flow.hass = hass flow.context = {} discovery_flow_conf["discovered"] = True MockConfigEntry(domain="test").add_to_hass(hass) result = await flow.async_step_import(None) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT async def test_ignored_discoveries(hass, discovery_flow_conf): """Test we can ignore discovered entries.""" mock_entity_platform(hass, "config_flow.test", None) result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM flow = next( ( flw for flw in hass.config_entries.flow.async_progress() if flw["flow_id"] == result["flow_id"] ), None, ) # Ignore it. 
await hass.config_entries.flow.async_init( flow["handler"], context={"source": config_entries.SOURCE_IGNORE}, data={"unique_id": flow["context"]["unique_id"]}, ) # Second discovery should be aborted result = await hass.config_entries.flow.async_init( "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT async def test_webhook_single_entry_allowed(hass, webhook_flow_conf): """Test only a single entry is allowed.""" flow = config_entries.HANDLERS["test_single"]() flow.hass = hass MockConfigEntry(domain="test_single").add_to_hass(hass) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "single_instance_allowed" async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf): """Test multiple entries are allowed when specified.""" flow = config_entries.HANDLERS["test_multiple"]() flow.hass = hass MockConfigEntry(domain="test_multiple").add_to_hass(hass) hass.config.api = Mock(base_url="http://example.com") result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf): """Test setting up an entry creates a webhook.""" flow = config_entries.HANDLERS["test_single"]() flow.hass = hass await async_process_ha_core_config( hass, {"external_url": "https://example.com"}, ) result = await flow.async_step_user(user_input={}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["data"]["webhook_id"] is not None async def test_webhook_create_cloudhook(hass, webhook_flow_conf): """Test only a single entry is allowed.""" assert await setup.async_setup_component(hass, "cloud", {}) async_setup_entry = Mock(return_value=True) async_unload_entry = Mock(return_value=True) mock_integration( hass, MockModule( "test_single", async_setup_entry=async_setup_entry, async_unload_entry=async_unload_entry, async_remove_entry=config_entry_flow.webhook_async_remove_entry, ), ) mock_entity_platform(hass, "config_flow.test_single", None) result = await hass.config_entries.flow.async_init( "test_single", context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM with patch( "hass_nabucasa.cloudhooks.Cloudhooks.async_create", return_value={"cloudhook_url": "https://example.com"}, ) as mock_create, patch( "homeassistant.components.cloud.async_active_subscription", return_value=True ), patch( "homeassistant.components.cloud.async_is_logged_in", return_value=True ): result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["description_placeholders"]["webhook_url"] == "https://example.com" assert len(mock_create.mock_calls) == 1 assert len(async_setup_entry.mock_calls) == 1 with patch( "hass_nabucasa.cloudhooks.Cloudhooks.async_delete", return_value={"cloudhook_url": "https://example.com"}, ) as mock_delete: result = await hass.config_entries.async_remove(result["result"].entry_id) assert len(mock_delete.mock_calls) == 1 assert result["require_restart"] is False
apache-2.0
morenopc/edx-platform
lms/djangoapps/foldit/models.py
65
5129
import logging from django.contrib.auth.models import User from django.db import models log = logging.getLogger(__name__) class Score(models.Model): """ This model stores the scores of different users on FoldIt problems. """ user = models.ForeignKey(User, db_index=True, related_name='foldit_scores') # The XModule that wants to access this doesn't have access to the real # userid. Save the anonymized version so we can look up by that. unique_user_id = models.CharField(max_length=50, db_index=True) puzzle_id = models.IntegerField() best_score = models.FloatField(db_index=True) current_score = models.FloatField(db_index=True) score_version = models.IntegerField() created = models.DateTimeField(auto_now_add=True) @staticmethod def display_score(score, sum_of=1): """ Argument: score (float), as stored in the DB (i.e., "rosetta score") sum_of (int): if this score is the sum of scores of individual problems, how many elements are in that sum Returns: score (float), as displayed to the user in the game and in the leaderboard """ return (-score) * 10 + 8000 * sum_of @staticmethod def get_tops_n(n, puzzles=['994559'], course_list=None): """ Arguments: puzzles: a list of puzzle ids that we will use. If not specified, defaults to puzzle used in 7012x. n (int): number of top scores to return Returns: The top n sum of scores for puzzles in <puzzles>, filtered by course. If no courses is specified we default the pool of students to all courses. Output is a list of dictionaries, sorted by display_score: [ {username: 'a_user', score: 12000} ...] """ if not isinstance(puzzles, list): puzzles = [puzzles] if course_list is None: scores = Score.objects \ .filter(puzzle_id__in=puzzles) \ .annotate(total_score=models.Sum('best_score')) \ .order_by('total_score')[:n] else: scores = Score.objects \ .filter(puzzle_id__in=puzzles) \ .filter(user__courseenrollment__course_id__in=course_list) \ .annotate(total_score=models.Sum('best_score')) \ .order_by('total_score')[:n] num = len(puzzles) return [ {'username': score.user.username, 'score': Score.display_score(score.total_score, num)} for score in scores ] class PuzzleComplete(models.Model): """ This keeps track of the sets of puzzles completed by each user. e.g. PuzzleID 1234, set 1, subset 3. (Sets and subsets correspond to levels in the intro puzzles) """ class Meta: # there should only be one puzzle complete entry for any particular # puzzle for any user unique_together = ('user', 'puzzle_id', 'puzzle_set', 'puzzle_subset') ordering = ['puzzle_id'] user = models.ForeignKey(User, db_index=True, related_name='foldit_puzzles_complete') # The XModule that wants to access this doesn't have access to the real # userid. Save the anonymized version so we can look up by that. 
unique_user_id = models.CharField(max_length=50, db_index=True) puzzle_id = models.IntegerField() puzzle_set = models.IntegerField(db_index=True) puzzle_subset = models.IntegerField(db_index=True) created = models.DateTimeField(auto_now_add=True) def __unicode__(self): return "PuzzleComplete({0}, id={1}, set={2}, subset={3}, created={4})".format( self.user.username, self.puzzle_id, self.puzzle_set, self.puzzle_subset, self.created) @staticmethod def completed_puzzles(anonymous_user_id): """ Return a list of puzzles that this user has completed, as an array of dicts: [ {'set': int, 'subset': int, 'created': datetime} ] """ complete = PuzzleComplete.objects.filter(unique_user_id=anonymous_user_id) return [{'set': c.puzzle_set, 'subset': c.puzzle_subset, 'created': c.created} for c in complete] @staticmethod def is_level_complete(anonymous_user_id, level, sub_level, due=None): """ Return True if this user completed level--sub_level by due. Users see levels as e.g. 4-5. Args: level: int sub_level: int due (optional): If specified, a datetime. Ignored if None. """ complete = PuzzleComplete.objects.filter(unique_user_id=anonymous_user_id, puzzle_set=level, puzzle_subset=sub_level) if due is not None: complete = complete.filter(created__lte=due) return complete.exists()
agpl-3.0
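Score.display_score above flips the sign of the stored Rosetta score and rescales it for the leaderboard. Two worked examples with made-up scores:

def display_score(score, sum_of=1):
    # Same transformation as Score.display_score in the model above.
    return (-score) * 10 + 8000 * sum_of

print(display_score(-400.0))             # 12000.0: one puzzle with best_score -400
print(display_score(-900.0, sum_of=3))   # 33000.0: a sum over three puzzles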
laurentnoe/iedera
tam92.py
1
2349
#!/usr/bin/env python3 import math flatten = lambda l: [item for sublist in l for item in sublist] def tamura1992substitutionMatrix(gcPercent, kappa, pam): s = 0.005 * gcPercent # prob(c) = prob(g) w = 0.5 - s # prob(a) = prob(t) t = pam * 0.01 / (4 * w * s * kappa + 0.5) i = math.exp(-t) j = 2 * math.exp(-(kappa + 1) * t / 2) wwr = 1 + i + j*s/w ssr = 1 + i + j*w/s tir = 1 + i - j tvr = 1 - i wwp = w * w * wwr ssp = s * s * ssr percentIdentity = 200 * (wwp + ssp) #print("percent identity: " + str(percentIdentity)) # A C G T probFromRowToColumn = [[w * wwr, s * tvr, s * tir, w * tvr], # A [w * tvr, s * ssr, s * tvr, w * tir], # C [w * tir, s * tvr, s * ssr, w * tvr], # G [w * tvr, s * tir, s * tvr, w * wwr]] # T return probFromRowToColumn def acgt_cross_tamuratamura1992substitutionMatrix(gcPercent, kappa, pam): s = 0.005 * gcPercent # prob(c) = prob(g) w = 0.5 - s # prob(a) = prob(t) original = [w, s, s, w] mutated = tamura1992substitutionMatrix(gcPercent, kappa, pam) result = [] for i,o in enumerate(original): result.append([]) for m in mutated[i]: result[i].append(o * m) return result import argparse if __name__ == "__main__": usage = "%(prog)s [options] length" descr = "provides the TAM92 associated probabilities for the \"./iedera -iupac \'-f\'\" command-line" ap = argparse.ArgumentParser(usage=usage, description=descr, formatter_class= argparse.ArgumentDefaultsHelpFormatter) ap.add_argument("-p", "--pam", type=float, default=30.0, help="PAM distance between related sequences") ap.add_argument("-k", "--kappa", type=float, default=1.0, metavar="K", help="transition/transversion rate ratio") ap.add_argument("--gc", type=float, default=50.0, metavar="PERCENT", help="percent G+C") args = ap.parse_args() t = acgt_cross_tamuratamura1992substitutionMatrix(args.gc, args.kappa, args.pam) t = flatten(t) print(','.join('{}'.format(e) for e in t))
bsd-2-clause
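Beyond the command-line use shown in its __main__ block, the matrix builder above can be called directly; each row is a conditional substitution distribution, so the rows sum to 1. A sketch, assuming the script is importable as tam92:

from tam92 import tamura1992substitutionMatrix

# Defaults from the argument parser above: 50% G+C, kappa = 1, PAM distance 30.
matrix = tamura1992substitutionMatrix(gcPercent=50.0, kappa=1.0, pam=30.0)
for base, row in zip('ACGT', matrix):
    print(base, [round(p, 4) for p in row], 'sum =', round(sum(row), 4))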
grungi-ankhfire/ebenezer
ebenezer/ui/menu.py
1
1102
# Copyright (c) 2012 Bastien Gorissen
# Licensed under the MIT license
# See LICENSE file for licensing details

import os

class Menu:

    def __init__(self, app):
        self.app = app
        self.header = []
        self.contents = []
        self.footer = []
        self.prompt = ""
        self.answers = {}
        self.callback = None

    def update(self):
        pass

    def display(self):
        self.update()
        os.system("clear")

        for l in self.header:
            print l
        print ""

        for l in self.contents:
            print l
        print ""

        for l in self.footer:
            print l
        print ""

        return self.ask()

    def ask(self):
        ans = raw_input(self.prompt + " ")
        if self.answers:
            key = ans.lower()
            if key not in self.answers:
                return self.display()
            callback, argument = self.answers[key]
            return callback(argument)
        else:
            return self.callback()

    def change_menu(self, new_menu):
        self.app.current_menu = new_menu
mit
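The Menu class above is wired up by filling its fields and mapping each accepted answer to a (callback, argument) pair. A minimal sketch with a hypothetical app object:

menu = Menu(app)  # 'app' stands in for whatever application object owns the menus
menu.header = ['Ebenezer']
menu.contents = ['1. New entry', 'q. Quit']
menu.prompt = 'Choice?'
menu.answers = {'1': (app.new_entry, None),
                'q': (app.quit, None)}
menu.display()  # redraws until the answer matches a key, then fires the mapped callback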
jaredjennings/snowy
wsgi/snowy/snowy/core/management/commands/debugmail.py
8
2683
# # Copyright (c) 2010 Sander Dijkhuis <sander.dijkhuis@gmail.com> # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from django.core.management.base import NoArgsCommand, CommandError import smtpd class MailDebuggingServer(smtpd.DebuggingServer): skip_headers = ('Content-Type', 'MIME-Version', 'Content-Transfer-Encoding', 'Message-ID') def process_message(self, peer, mailfrom, rcpttos, data): print('\n----------- BEGIN MESSAGE -----------') (headers, body) = data.split('\n\n', 1) quoted_printable = False for header in headers.split('\n'): if header == 'Content-Transfer-Encoding: quoted-printable': quoted_printable = True if not header.split(':')[0] in self.skip_headers: print(header) print('') if quoted_printable: import quopri print(quopri.decodestring(body)) else: print(body) print('------------ END MESSAGE ------------') class Command(NoArgsCommand): help = "Run a mail debugging server, matching the project settings" def handle_noargs(self, **options): from django.conf import settings import asyncore import socket host_port = (settings.EMAIL_HOST, settings.EMAIL_PORT) try: server = MailDebuggingServer(host_port, None) except socket.error: raise CommandError('Could not set up the mail server at %s:%d.\n' % host_port + 'Make sure that you can actually host a server using the\n' + 'current values for settings.EMAIL_HOST and settings.EMAIL_PORT.') print('Mail debugging server is running at %s:%d' % host_port) print('Emails from this Django site will appear here instead of being sent.') print('Quit the server with CONTROL-C.') try: asyncore.loop() except KeyboardInterrupt: # Print a blank line after the ^C. This looks nicer. print('') pass
agpl-3.0