| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# Copyright 2017 Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper module for handling emulated test topology based on Mininet.
"""
from escape.infr import log, LAYER_NAME
from escape.nffg_lib.nffg import NFFG
from escape.nffg_lib.nffg_elements import NodeInfra
from escape.util.config import CONFIG
from escape.util.misc import quit_with_error, get_ifaces, remove_junks_at_boot
from mininet.link import TCLink, Intf
from mininet.net import VERSION as MNVERSION, Mininet, MininetWithControlNet
from mininet.node import RemoteController, RemoteSwitch
from mininet.term import makeTerms
from mininet.topo import Topo
class AbstractTopology(Topo):
"""
  Abstract class for representing an emulated topology.
  Provides the functions to build an ESCAPE-specific topology.
  Can be used to define a reusable topology similar to Mininet's high-level API.
  It is a reusable, convenient and pre-defined way to define a topology, but
  less flexible and powerful.
"""
# Default host options
default_host_opts = None
"""Default host options for Mininet"""
# Default switch options
default_switch_opts = None
"""Default switch options for Mininet"""
# Default link options
default_link_opts = None
"""Default link options for Mininet"""
# Default EE options
default_EE_opts = None
"""Default EE options for Mininet"""
# Type of the Topology class - NEED to be set
# The construction and build of the network is different for the STATIC and
# DYNAMIC way
TYPE = None
"""Type of the Topology class - NEED to be set"""
def __init__ (self, hopts=None, sopts=None, lopts=None, eopts=None):
"""
Init.
:param hopts: host options (optional)
:param sopts: switch options (optional)
:param lopts: link options (optional)
:param eopts: EE options (optional)
:return: None
"""
# Topo is Old-style class
Topo.__init__(self, hopts, sopts, lopts, eopts)
def construct (self, builder=None):
"""
    Base method for constructing the topology.
:param builder: optional builder object
"""
raise NotImplementedError
@staticmethod
def get_topo_desc ():
"""
    Return the NFFG object that represents the specific, constructed topology.
    :return: topology description
    :rtype: :any:`NFFG`
"""
raise NotImplementedError
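# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of how a new reusable topology could be derived from
# AbstractTopology, assuming the same addSwitch/addHost/addLink API used by
# FallbackStaticTopology below. The class name and layout are hypothetical and
# serve only as a usage illustration.
class ExampleSingleSwitchTopology(AbstractTopology):
  TYPE = "STATIC"

  def construct (self, builder=None):
    # One switch with one attached SAP
    sw = self.addSwitch('SW1')
    sap = self.addHost('SAP1')
    self.addLink(sap, sw)
    return self

  @staticmethod
  def get_topo_desc ():
    # Matching NFFG description of the emulated topology
    nffg = NFFG(id="EXAMPLE-TOPO", name="example-single-switch")
    sw = nffg.add_infra(id="sw1", name="SW1", domain="INTERNAL",
                        infra_type=NFFG.TYPE_INFRA_SDN_SW)
    sap = nffg.add_sap(id="sap1", name="SAP1")
    nffg.add_link(sw.add_port(1), sap.add_port(1), id="l1")
    return nffg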
class FallbackStaticTopology(AbstractTopology):
"""
  Topology class for testing purposes; it also serves as a fallback topology.
  Uses the static way of topology compilation.
.. raw:: ascii
+----------+ +----------+
| | | |
| SW1 | | SW2 |
| | | |
+----------+ +----------+
|1 |1
1| 1|
+----------+ +----------+
| |2 2| |
| SW3 +-----------+ SW4 |
| | | |
+----------+ +----------+
|3 |3
1| 1|
+----+ +----+
|SAP1| |SAP2|
+----+ +----+
"""
TYPE = "STATIC"
def construct (self, builder=None):
"""
Assemble the topology description statically.
:param builder: optional builder object
:return: self
:rtype: :any:`FallbackStaticTopology`
"""
# nc1 = self.addEE(name='NC1', {})
# nc2 = self.addEE(name='NC2', {})
log.info("Start static topology creation...")
log.debug("Create Switch with name: SW1")
sw1 = self.addSwitch('SW1')
log.debug("Create Switch with name: SW2")
sw2 = self.addSwitch('SW2')
log.debug("Create Switch with name: SW3")
sw3 = self.addSwitch('SW3')
log.debug("Create Switch with name: SW4")
sw4 = self.addSwitch('SW4')
log.debug("Create SAP with name: SAP1")
sap1 = self.addHost('SAP1')
log.debug("Create SAP with name: SAP2")
sap2 = self.addHost('SAP2')
log.debug("Create Link SW3 <--> SW1")
self.addLink(sw3, sw1)
log.debug("Create Link SW4 <--> SW2")
self.addLink(sw4, sw2)
log.debug("Create Link SW3 <--> SW4")
self.addLink(sw3, sw4)
log.debug("Create Link SAP1 <--> SW3")
self.addLink(sap1, sw3)
log.debug("Create Link SAP2 <--> SW4")
self.addLink(sap2, sw4)
log.info("Static topology creation has been finished!")
return self
@staticmethod
def get_topo_desc ():
"""
Return the topology description.
:return: topo description
:rtype: :class:`NFFG`
"""
# Create NFFG
nffg = NFFG(id="STATIC-FALLBACK-TOPO", name="fallback-static")
# Add switches
sw1 = nffg.add_infra(id="sw1", name="SW1", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw2 = nffg.add_infra(id="sw2", name="SW2", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw3 = nffg.add_infra(id="sw3", name="SW3", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw4 = nffg.add_infra(id="sw4", name="SW4", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
# Add SAPs
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
# Add links
nffg.add_link(sw1.add_port(1), sw3.add_port(1), id="l1")
nffg.add_link(sw2.add_port(1), sw4.add_port(1), id="l2")
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3")
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4")
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5")
# Duplicate one-way static links to become undirected in order to fit to
# the orchestration algorithm
# nffg.duplicate_static_links()
return nffg
class FallbackDynamicTopology(AbstractTopology):
"""
  Topology class for testing purposes; it also serves as a fallback topology.
  Uses the dynamic way of topology compilation.
.. raw:: ascii
+----------+ +----------+
| | | |
| EE1 | | EE2 |
| | | |
+----------+ +----------+
|1 |1
1| 1|
+----------+ +----------+
| |2 2| |
| S3 +-----------+ S4 |
| | | |
+----------+ +----------+
|3 |3
1| 1|
+----+ +----+
|SAP1| |SAP2|
+----+ +----+
"""
TYPE = "DYNAMIC"
def construct (self, builder=None):
"""
    Set up a topology with NETCONF capability, mostly for testing.
:param builder: builder object
:return: None
"""
log.info("Start dynamic topology creation...")
builder.create_Controller("ESCAPE")
agt1, nc_sw1 = builder.create_NETCONF_EE(name='NC1')
agt2, nc_sw2 = builder.create_NETCONF_EE(name='NC2')
sw3 = builder.create_Switch(name='SW3')
sw4 = builder.create_Switch(name='SW4')
sap1 = builder.create_SAP(name='SAP1')
sap2 = builder.create_SAP(name='SAP2')
builder.create_Link(sw3, nc_sw1)
builder.create_Link(sw4, nc_sw2)
builder.create_Link(sw3, sw4)
builder.create_Link(sap1, sw3)
builder.create_Link(sap2, sw4)
log.info("Dynamic topology creation has been finished!")
@staticmethod
def get_topo_desc ():
"""
Return the topology description.
:return: topo description
:rtype: :class:`NFFG`
"""
# Create NFFG
nffg = NFFG(id="DYNAMIC-FALLBACK-TOPO", name="fallback-dynamic")
# Add NETCONF capable containers a.k.a. Execution Environments
nc1 = nffg.add_infra(id="nc1", name="NC1", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc2 = nffg.add_infra(id="nc2", name="NC2", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc1.add_supported_type(['A', 'B'])
nc2.add_supported_type(['A', 'C'])
# Add inter-EE switches
sw3 = nffg.add_infra(id="sw3", name="SW3", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw4 = nffg.add_infra(id="sw4", name="SW4", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
# Add links
linkres = {'delay': 1.5, 'bandwidth': 2000}
nffg.add_link(nc1.add_port(1), sw3.add_port(1), id="l1", **linkres)
nffg.add_link(nc2.add_port(1), sw4.add_port(1), id="l2", **linkres)
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3", **linkres)
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4", **linkres)
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5", **linkres)
# Duplicate one-way static links to become undirected in order to fit to
# the orchestration algorithm
# No need for that, ESCAPENetworkBridge do this later
# nffg.duplicate_static_links()
return nffg
class InternalControllerProxy(RemoteController):
"""
Controller class for emulated Mininet network. Making connection with
internal controller initiated by InternalPOXAdapter.
"""
def __init__ (self, name="InternalPOXController", ip='127.0.0.1', port=6653,
**kwargs):
"""
Init.
:param name: name of the controller (default: InternalPOXController)
:type name: str
:param ip: IP address (default: 127.0.0.1)
:type ip: str
    :param port: port number (default: 6653)
:type port: int
:return: None
"""
# Using old-style class because of MN's RemoteController class
RemoteController.__init__(self, name, ip, port, **kwargs)
def checkListening (self):
"""
    Check whether the controller port is open.
"""
listening = self.cmd("echo A | telnet -e A %s %d" % (self.ip, self.port))
if 'Connected' not in listening:
log.debug(
"Unable to contact with internal controller at %s:%d. Waiting..." % (
self.ip, self.port))
class ESCAPENetworkBridge(object):
"""
Internal class for representing the emulated topology.
  Represents a container class for network elements such as switches, nodes,
  execution environments, links etc. Contains network management functions
  similar to Mininet's mid-level API, extended with ESCAPEv2-related
  capabilities.
  Separates the internally used interface from the original Mininet object to
  achieve loose coupling and to avoid breakage caused by Mininet API changes,
  e.g. 2.1.0 -> 2.2.0.
Follows Bridge design pattern.
"""
def __init__ (self, network=None, topo_desc=None):
"""
Initialize Mininet implementation with proper attributes.
    Use ``network`` as the hidden Mininet topology if it is given.
:param topo_desc: static topology description e.g. the related NFFG
:type topo_desc: :class:`NFFG`
:param network: use this specific Mininet object for init (default: None)
:type network: :class:`mininet.net.MininetWithControlNet`
:return: None
"""
log.debug("Init ESCAPENetworkBridge with topo description: %s" % topo_desc)
if network is not None:
self.__mininet = network
else:
log.warning(
"Network implementation object is missing! Use Builder class instead "
"of direct initialization. Creating bare Mininet object anyway...")
self.__mininet = MininetWithControlNet()
# Topology description which is emulated by the Mininet
self.topo_desc = topo_desc
    # Duplicate static links to ensure an undirected neighbour relationship
if self.topo_desc is not None:
back_links = [l.id for u, v, l in
self.topo_desc.network.edges_iter(data=True) if
l.backward is True]
if len(back_links) == 0:
log.debug("No backward link has been detected! Duplicate STATIC links "
"to ensure undirected relationship for mapping...")
self.topo_desc.duplicate_static_links()
# Need to clean after shutdown
self._need_clean = None
# There is no such flag in the Mininet class so using this
self.started = False
self.xterms = []
@property
def network (self):
"""
Return the internal network representation.
:return: network representation
:rtype: :class:`mininet.net.MininetWithControlNet`
"""
return self.__mininet
def runXTerms (self):
"""
    Start an xterm for every SAP if it is enabled in the global config. SAPs
    are stored as hosts in the Mininet class.
:return: None
"""
if CONFIG.get_SAP_xterms():
log.debug("Starting xterm on SAPS...")
terms = makeTerms(nodes=self.__mininet.hosts, title='SAP', term="xterm")
self.xterms.extend(terms)
else:
log.warning("Skip starting xterms on SAPS according to global config")
def start_network (self):
"""
Start network.
:return: None
"""
log.debug("Starting Mininet network...")
if self.__mininet is not None:
if not self.started:
try:
self.__mininet.start()
except SystemExit:
quit_with_error(msg="Mininet emulation requires root privileges!",
logger=LAYER_NAME)
except KeyboardInterrupt:
quit_with_error(
msg="Initiation of Mininet network was interrupted by user!",
logger=log)
self.started = True
log.debug("Mininet network has been started!")
self.runXTerms()
else:
log.warning(
"Mininet network has already started! Skipping start task...")
else:
log.error("Missing topology! Skipping emulation...")
def stop_network (self):
"""
Stop network.
:return: None
"""
log.debug("Shutting down Mininet network...")
if self.__mininet is not None:
if self.started:
self.__mininet.stop()
self.started = False
log.debug("Mininet network has been stopped!")
else:
log.warning("Mininet network is not started yet! Skipping stop task...")
if self._need_clean:
self.cleanup()
def cleanup (self):
"""
Clean up junk which might be left over from old runs.
    .. seealso::
:func:`mininet.clean.cleanup() <mininet.clean.cleanup>`
"""
if self.started:
log.warning(
"Mininet network is not stopped yet! Skipping cleanup task...")
else:
log.info("Schedule cleanup task after Mininet emulation...")
      # Kill remaining xterms
log.debug("Close SAP xterms...")
import os
import signal
for term in self.xterms:
os.killpg(term.pid, signal.SIGTERM)
# Schedule a cleanup as a coop task to avoid threading issues
from escape.util.misc import remove_junks_at_shutdown
# call_as_coop_task(remove_junks, log=log)
# threading.Thread(target=remove_junks, name="cleanup", args=(log,
# )).start()
# multiprocessing.Process(target=remove_junks, name="cleanup",
# args=(log,)).start()
remove_junks_at_shutdown(log=log)
def get_agent_to_switch (self, switch_name):
"""
    Return the agent to which the given switch is tied.
:param switch_name: name of the switch
:type switch_name: str
:return: the agent
:rtype: :class:`mininet.node.NetconfAgent`
"""
for switch in self.__mininet.switches:
if switch.name == switch_name:
return switch.agent
return None
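# --- Illustrative sketch (not part of the original module) -------------------
# Typical lifecycle of the bridge object, assuming it was obtained from
# ESCAPENetworkBuilder.get_network() (see the Builder class below). The helper
# is hypothetical and never called; it only demonstrates the intended
# start/stop order.
def _example_bridge_lifecycle (bridge):
  """
  Start the emulation and shut it down afterwards.

  :param bridge: network bridge created by the builder
  :type bridge: :any:`ESCAPENetworkBridge`
  :return: None
  """
  bridge.start_network()
  # ... the emulated network is running; interact with bridge.network here ...
  bridge.stop_network()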
class TopologyBuilderException(Exception):
"""
Exception class for topology errors.
"""
pass
class ESCAPENetworkBuilder(object):
"""
Builder class for topology.
  Updates the network object based on the given parameters or creates an empty
  instance.
  Always returns an :any:`ESCAPENetworkBridge` instance which offers a generic
  interface for the created :class:`mininet.net.Mininet` object and hides the
  implementation's nature.
Follows Builder design pattern.
"""
# Default initial options for Mininet
default_opts = {
"controller": InternalControllerProxy,
# Use own Controller
'build': False, # Not build during init
'inNamespace': False, # Not start element in namespace
'autoSetMacs': False, # Set simple MACs
'autoStaticArp': True, # Set static ARP entries
'listenPort': None, # Add listen port to OVS switches
'link': TCLink} # Add default link
"""Default initial options for Mininet"""
# Default internal storing format for NFFG parsing/reading from file
DEFAULT_NFFG_FORMAT = "NFFG"
"""Default internal storing format for NFFG parsing/reading from file"""
# Constants
TYPE_EE_LOCAL = "LOCAL"
TYPE_EE_REMOTE = "REMOTE"
# Constants for DPID generation
  dpidBase = 1  # initial value for DPID generation
dpidLen = 16 # digits in dpid passed to switch
def __init__ (self, net=None, opts=None, fallback=True, run_dry=True):
"""
Initialize NetworkBuilder.
If the topology definition is not found, an exception will be raised or
an empty :class:`mininet.net.Mininet` topology will be created if
``run_dry`` is set.
:param net: update given Mininet object instead of creating a new one
:type net: :class:`mininet.net.Mininet`
:param opts: update default options with the given opts
:type opts: dict
:param fallback: search for fallback topology (default: True)
:type fallback: bool
:param run_dry: do not raise an Exception and return with bare Mininet obj.
:type run_dry: bool
:return: None
"""
self.opts = dict(self.default_opts)
if opts is not None:
self.opts.update(opts)
self.fallback = fallback
self.run_dry = run_dry
if net is not None:
if isinstance(net, Mininet):
# Initial settings - Create new Mininet object if necessary
self.mn = net
else:
raise TopologyBuilderException(
"Network object's type must be a derived class of Mininet!")
else:
# self.mn = Mininet(**self.opts)
try:
self.mn = MininetWithControlNet(**self.opts)
except KeyboardInterrupt:
quit_with_error(
msg="Assembly of Mininet network was interrupted by user!",
logger=log)
# Basically a wrapper for mn to offer helping functions
self.mn_bridge = None
# Cache of the topology description as an NFFG which is parsed during
# initialization
self.topo_desc = None
self.__dpid_cntr = self.dpidBase
def __get_new_dpid (self):
"""
Generate a new DPID and return the valid format for Mininet/OVS.
:return: new DPID
:rtype: str
"""
dpid = hex(int(self.__dpid_cntr))[2:]
dpid = '0' * (self.dpidLen - len(dpid)) + dpid
self.__dpid_cntr += 1
return dpid
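  # Example (illustrative): with the defaults above, the first two generated
  # DPIDs are '0000000000000001' and '0000000000000002' (16 hex digits,
  # zero-padded).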
##############################################################################
# Topology initializer functions
##############################################################################
def __init_from_NFFG (self, nffg):
"""
Initialize topology from an :class:`NFFG` representation.
:param nffg: topology object structure
:type nffg: :class:`NFFG`
:return: None
"""
# pprint(nffg.network.__dict__)
log.info("Start topology creation from NFFG(name: %s)..." % nffg.name)
created_mn_nodes = {} # created nodes as 'NFFG-id': <node>
created_mn_links = {} # created links as 'NFFG-id': <link>
# If not set then cache the given NFFG as the topology description
self.topo_desc = nffg
# Create a Controller which will be the default internal POX controller
try:
self.create_Controller("ESCAPE")
except SystemExit:
raise TopologyBuilderException("Controller creations was unsuccessful!")
# Convert INFRAs
for infra in nffg.infras:
# Create EE
if infra.infra_type == NodeInfra.TYPE_EE:
if infra.domain == "INTERNAL":
ee_type = self.TYPE_EE_LOCAL
else:
log.warning(
"Detected domain of infra: %s is not INTERNAL! Remote EE creation "
"for domains other than INTERNAL is not supported yet!" % infra)
# ee_type = self.TYPE_EE_REMOTE
ee_type = self.TYPE_EE_LOCAL
# FIXME - set resource info in MN EE if can - cpu,mem,delay,bandwidth?
agt, sw = self.create_NETCONF_EE(name=infra.id, type=ee_type)
created_mn_nodes[infra.id] = sw
# Create Switch
elif infra.infra_type == NodeInfra.TYPE_SDN_SWITCH:
switch = self.create_Switch(name=infra.id)
created_mn_nodes[infra.id] = switch
elif infra.infra_type == NodeInfra.TYPE_STATIC_EE:
static_ee = self.create_static_EE(name=infra.id)
created_mn_nodes[infra.id] = static_ee
else:
quit_with_error(
msg="Type: %s in %s is not supported by the topology creation "
"process in %s!" % (
infra.infra_type, infra, self.__class__.__name__), logger=log)
# Create SAPs - skip the temporary, inter-domain SAPs
for sap in {s for s in nffg.saps if not s.binding}:
# Create SAP
sap_host = self.create_SAP(name=sap.id)
created_mn_nodes[sap.id] = sap_host
# Convert VNFs
# TODO - implement --> currently the default Mininet topology does not
# TODO contain NFs but it could be possible
# Convert connections - copy link ref in a list and iter over it
for edge in [l for l in nffg.links]:
# Skip initiation of links which connected to an inter-domain SAP
if (edge.src.node.type == NFFG.TYPE_SAP and
edge.src.node.binding is not None) or (
edge.dst.node.type == NFFG.TYPE_SAP and
edge.dst.node.binding is not None):
continue
# Create Links
mn_src_node = created_mn_nodes.get(edge.src.node.id)
mn_dst_node = created_mn_nodes.get(edge.dst.node.id)
if mn_src_node is None or mn_dst_node is None:
raise TopologyBuilderException(
"Created topology node is missing! Something really went wrong!")
src_port = int(edge.src.id) if int(edge.src.id) < 65535 else None
if src_port is None:
log.warning(
"Source port id of Link: %s is generated dynamically! Using "
"automatic port assignment based on internal Mininet "
"implementation!" % edge)
dst_port = int(edge.dst.id) if int(edge.dst.id) < 65535 else None
if dst_port is None:
log.warning(
"Destination port id of Link: %s is generated dynamically! Using "
"automatic port assignment based on internal Mininet "
"implementation!" % edge)
link = self.create_Link(src=mn_src_node, src_port=src_port,
dst=mn_dst_node, dst_port=dst_port,
bw=edge.bandwidth, delay=str(edge.delay) + 'ms')
created_mn_links[edge.id] = link
# Set port properties of SAP nodes.
# A possible excerpt from a escape-mn-topo.nffg file:
# "ports": [{ "id": 1,
# "property": ["ip:10.0.10.1/24"] }]
#
for n in {s for s in nffg.saps if not s.binding}:
mn_node = self.mn.getNodeByName(n.id)
for port in n.ports:
# ip should be something like '10.0.123.1/24'.
if len(port.l3):
if len(port.l3) == 1:
ip = port.l3.container[0].provided
else:
log.warning(
"Multiple L3 address is detected! Skip explicit IP address "
"definition...")
ip = None
else:
# or None
ip = port.get_property('ip')
if port.l2:
mac = port.l2
else:
mac = port.get_property('mac')
intf = mn_node.intfs.get(port.id)
if intf is None:
log.warn(("Port %s of node %s is not connected,"
"it will remain unconfigured!") % (port.id, n.name))
continue
if intf == mn_node.defaultIntf():
# Workaround a bug in Mininet
mn_node.params.update({'ip': ip})
mn_node.params.update({'mac': mac})
if ip is not None:
mn_node.setIP(ip, intf=intf)
log.debug("Use explicit IP: %s for node: %s" % (ip, n))
if mac is not None:
mn_node.setMAC(mac, intf=intf)
log.debug("Use explicit MAC: %s for node: %s" % (mac, n))
# For inter-domain SAPs no need to create host/xterm just add the SAP as
# a port to the border Node
# Iterate inter-domain SAPs
self.bind_inter_domain_SAPs(nffg=nffg)
log.info("Topology creation from NFFG has been finished!")
def __init_from_AbstractTopology (self, topo_class):
"""
Build topology from pre-defined Topology class.
:param topo_class: topology
:type topo_class: :any:`AbstractTopology`
:return: None
"""
log.info("Load topology from class: %s" % topo_class.__name__)
if topo_class.TYPE == "STATIC":
self.mn.topo = topo_class().construct()
self.mn.build()
elif topo_class.TYPE == "DYNAMIC":
# self.mn = topo_class().construct()
topo_class().construct(builder=self)
else:
raise TopologyBuilderException(
"TYPE field of the Topology class need to be set!")
self.topo_desc = topo_class.get_topo_desc()
def __init_from_CONFIG (self, format=DEFAULT_NFFG_FORMAT):
"""
Build a pre-defined topology from an NFFG stored in a file.
    The file path is searched in CONFIG with the name ``TOPO``.
:param format: NF-FG storing format (default: internal NFFG representation)
:type format: str
:return: None
"""
path = CONFIG.get_mininet_topology()
if path is None:
raise TopologyBuilderException("Missing Topology!")
self.__init_from_file(path=path, format=format)
def __init_from_file (self, path, format=DEFAULT_NFFG_FORMAT):
"""
Build a pre-defined topology from an NFFG stored in a file.
    The file path is searched in CONFIG with the name ``TOPO``.
:param path: file path
:type path: str
:param format: NF-FG storing format (default: internal NFFG representation)
:type format: str
:return: None
"""
if path is None:
log.error("Missing file path of Topology description")
return
try:
with open(path) as f:
log.info("Load topology from file: %s" % path)
if format == self.DEFAULT_NFFG_FORMAT:
log.debug("Using file format: %s" % format)
self.__init_from_NFFG(nffg=NFFG.parse(f.read()))
else:
raise TopologyBuilderException("Unsupported file format: %s!" %
format)
except IOError:
log.warning("Additional topology file not found: %s" % path)
raise TopologyBuilderException("Missing topology file!")
except ValueError as e:
log.error("An error occurred when load topology from file: %s" %
e.message)
raise TopologyBuilderException("File parsing error!")
# except SystemExit:
# raise TopologyBuilderException("Got exit exception from Mininet!")
def get_network (self):
"""
Return the bridge to the constructed network.
:return: object representing the emulated network
:rtype: :any:`ESCAPENetworkBridge`
"""
if self.mn_bridge is None:
# Create the Interface object and set the topology description as the
# original NFFG
self.mn_bridge = ESCAPENetworkBridge(network=self.mn,
topo_desc=self.topo_desc)
# Additional settings
self.mn_bridge._need_clean = CONFIG.get_clean_after_shutdown()
return self.mn_bridge
##############################################################################
# Builder functions
##############################################################################
def create_static_EE (self, name, cls=None, **params):
"""
Create and add a new EE to Mininet in the static way.
    This function exists only for backward compatibility.
.. warning::
Not tested yet!
:param name: name of the Execution Environment
:type name: str
:param cls: custom EE class/constructor (optional)
:type cls: :class:`mininet.node.EE`
:param cores: Specify (real) cores that our cgroup can run on (optional)
:type cores: list
:param frac: Set overall CPU fraction for this EE (optional)
:type frac: list
:param vlanif: set vlan interfaces (optional)
:type vlanif: list
:return: newly created EE object
:rtype: :class:`mininet.node.EE`
"""
# create static EE
cfg = CONFIG.get_EE_params()
cfg.update(params)
cfg['dpid'] = self.__get_new_dpid()
log.debug("Create static EE with name: %s" % name)
ee = self.mn.addEE(name=name, cls=cls, **cfg)
if 'cores' in cfg:
ee.setCPUs(**cfg['cores'])
if 'frac' in cfg:
ee.setCPUFrac(**cfg['frac'])
if 'vlanif' in cfg:
      for vif in cfg['vlanif']:
ee.cmdPrint('vconfig add ' + name + '-eth0 ' + vif[1])
ee.cmdPrint('ifconfig ' + name + '-eth0.' + vif[1] + ' ' + vif[0])
return ee
def create_NETCONF_EE (self, name, type=TYPE_EE_LOCAL, **params):
"""
Create and add a new EE to Mininet network.
The type of EE can be {local|remote} NETCONF-based.
    :param name: name of the EE (switch: ``<name>``, agent: ``agt_<name>``)
:type name: str
:param type: type of EE {local|remote}
:type type: str
:param opts: additional options for the switch in EE
:type opts: str
:param dpid: remote switch DPID (remote only)
:param username: NETCONF username (remote only)
:param passwd: NETCONF password (remote only)
:param ip: control Interface for the agent (optional)
:param agentPort: port to listen on for NETCONF connections, (else set \
automatically)
:param minPort: first VNF control port which can be used (else set \
automatically)
:param cPort: number of VNF control ports (and VNFs) which can be used ( \
default: 10)
:return: tuple of newly created :class:`mininet.node.Agent` and \
:class:`mininet.node.Switch` object
:rtype: tuple
"""
type = type.upper()
cfg = CONFIG.get_EE_params()
cfg.update(params)
cfg['dpid'] = self.__get_new_dpid()
if type == self.TYPE_EE_LOCAL:
# create local NETCONF-based
log.debug("Create local NETCONF EE with name: %s" % name)
sw = self.mn.addSwitch(name, **cfg)
elif type == self.TYPE_EE_REMOTE:
# create remote NETCONF-based
log.debug("Create remote NETCONF EE with name: %s" % name)
cfg["inNamespace"] = False
sw = self.mn.addRemoteSwitch(name, cls=None, **cfg)
else:
raise TopologyBuilderException(
"Unsupported NETCONF-based EE type: %s!" % type)
agt = self.mn.addAgent('agt_' + name, cls=None, **cfg)
agt.setSwitch(sw)
return agt, sw
def create_Switch (self, name, cls=None, **params):
"""
Create and add a new OF switch instance to Mininet network.
    Additional parameters are keyword arguments that depend on, and are
    forwarded to, the instantiated Switch class.
:param name: name of switch
:type name: str
:param cls: custom switch class/constructor (optional)
:type cls: :class:`mininet.node.Switch`
:param dpid: DPID for switch (default: derived from name)
:type dpid: str
:param opts: additional switch options
:type opts: str
:param listenPort: custom listening port (optional)
:type listenPort: int
:param inNamespace: override the switch spawn in namespace (optional)
:type inNamespace: bool
:param of_ver: override OpenFlow version (optional)
:type of_ver: int
:param ip: set IP address for the switch (optional)
:type ip:
:return: newly created Switch object
:rtype: :class:`mininet.node.Switch`
"""
log.debug("Create Switch with name: %s" % name)
cfg = CONFIG.get_Switch_params()
cfg.update(params)
cfg['dpid'] = self.__get_new_dpid()
sw = self.mn.addSwitch(name=name, cls=cls, **cfg)
if 'of_ver' in cfg:
sw.setOpenFlowVersion(cfg['of_ver'])
if 'ip' in cfg:
sw.setSwitchIP(cfg['ip'])
return sw
def create_Controller (self, name, controller=None, **params):
"""
Create and add a new OF controller to Mininet network.
    Additional parameters are keyword arguments that depend on, and are
    forwarded to, the instantiated Controller class.
.. warning::
      Do not call this function directly; use the default
      InternalControllerProxy instead!
:param name: name of controller
:type name: str
:param controller: custom controller class/constructor (optional)
:type controller: :class:`mininet.node.Controller`
:param inNamespace: override the controller spawn in namespace (optional)
:type inNamespace: bool
:return: newly created Controller object
:rtype: :class:`mininet.node.Controller`
"""
log.debug("Create Controller with name: %s" % name)
cfg = CONFIG.get_Controller_params()
cfg.update(params)
return self.mn.addController(name=name, controller=controller, **cfg)
def create_SAP (self, name, cls=None, **params):
"""
Create and add a new SAP to Mininet network.
    Additional parameters are keyword arguments that depend on, and are
    forwarded to, the instantiated Host class.
:param name: name of SAP
:type name: str
:param cls: custom hosts class/constructor (optional)
:type cls: :class:`mininet.node.Host`
:return: newly created Host object as the SAP
:rtype: :class:`mininet.node.Host`
"""
log.debug("Create SAP with name: %s" % name)
cfg = CONFIG.get_SAP_params()
cfg.update(params)
return self.mn.addHost(name=name, cls=cls, **cfg)
def bind_inter_domain_SAPs (self, nffg):
"""
    Search for inter-domain SAPs in the given :class:`NFFG`, create them as
    switch ports and bind them to the physical interface given in the
    ``sap.binding`` attribute.
:param nffg: topology description
:type nffg: :class:`NFFG`
:return: None
"""
log.debug("Search for inter-domain SAPs...")
# Create the inter-domain SAP ports
for sap in {s for s in nffg.saps if s.binding is not None}:
      # NFFG is the raw NFFG without link duplication --> iterating over every
      # edge (in or out), there should be only one link in this case
# e = (u, v, data)
sap_switch_links = [e for e in
nffg.network.edges_iter(data=True) if sap.id in e]
try:
if sap_switch_links[0][0] == sap.id:
border_node = sap_switch_links[0][1]
else:
border_node = sap_switch_links[0][0]
except IndexError:
log.error("Link for inter-domain SAP: %s is not found. "
"Skip SAP creation..." % sap)
continue
log.debug("Detected inter-domain SAP: %s connected to border Node: %s" %
(sap, border_node))
# if sap.delay or sap.bandwidth:
# log.debug("Detected resource values for inter-domain connection: "
# "delay: %s, bandwidth: %s" % (sap.delay, sap.bandwidth))
sw_name = nffg.network.node[border_node].id
for sw in self.mn.switches:
# print sw.name
if sw.name == sw_name:
if sap.binding not in get_ifaces():
log.warning(
"Physical interface: %s is not found! Skip binding..."
% sap.binding)
continue
log.debug("Add physical port as inter-domain SAP: %s -> %s" %
(sap.binding, sap.id))
# Add interface to border switch in Mininet
# os.system('ovs-vsctl add-port %s %s' % (sw_name, sap.domain))
sw.addIntf(intf=Intf(name=sap.binding, node=sw))
def create_Link (self, src, dst, src_port=None, dst_port=None, **params):
"""
Create an undirected connection between src and dst.
Source and destination ports can be given optionally:
:param src: source Node
:param dst: destination Node
:param src_port: source Port (optional)
:param dst_port: destination Port (optional)
:param params: additional link parameters
:return: None
"""
log.debug("Create Link %s%s <--> %s%s" % (
src, ":%s" % src_port if src_port is not None else "", dst,
":%s" % dst_port if dst_port is not None else ""))
remote = filter(lambda n: isinstance(n, RemoteSwitch), [src, dst])
local = filter(lambda n: not isinstance(n, RemoteSwitch), [src, dst])
cfg = CONFIG.get_Link_params()
cfg.update(params)
if not remote:
self.mn.addLink(src, dst, src_port, dst_port, **cfg)
else:
# sw = local[0] # one of the local Node
# r = remote[0] # other Node which is the remote
# intfName = r.params['local_intf_name']
# r_mac = None # unknown, r.params['remote_mac']
# r_port = r.params['remote_port']
# # self._debug('\tadd hw interface (%s) to node (%s)' % (intfName,
# # sw.name))
# # This hack avoids calling __init__ which always makeIntfPair()
# link = Link.__new__(Link)
# i1 = Intf(intfName, node=sw, link=link)
# i2 = Intf(intfName, node=r, mac=r_mac, port=r_port, link=link)
# i2.mac = r_mac # mn runs 'ifconfig', which resets mac to None
# link.intf1, link.intf2 = i1, i2
raise TopologyBuilderException(
"Remote Link creation is not supported yet!")
def build (self, topo=None):
"""
Initialize network.
    1. If the additional ``topo`` parameter is given then use that for the init.
    2. If ``topo`` is not given, search for the topology description in CONFIG
       with the name 'TOPO'.
    3. If 'TOPO' is not found or an Exception was raised, search for the
       fallback topo with the name ``FALLBACK-TOPO``.
    4. If ``FALLBACK-TOPO`` is not found, raise an exception, or run a bare
       Mininet object if the ``run_dry`` attribute is set.
:param topo: optional topology representation
:type topo: :class:`NFFG` or :any:`AbstractTopology` or ``None``
:return: object representing the emulated network
:rtype: :any:`ESCAPENetworkBridge`
"""
log.debug("Init emulated topology based on Mininet v%s" % MNVERSION)
remove_junks_at_boot(log=log)
# Load topology
try:
if topo is None:
log.debug("Get Topology description from CONFIG...")
self.__init_from_CONFIG()
elif isinstance(topo, NFFG):
log.debug("Get Topology description from given NFFG...")
self.__init_from_NFFG(nffg=topo)
elif isinstance(topo, basestring) and topo.startswith('/'):
log.debug("Get Topology description from given file...")
self.__init_from_file(path=topo)
elif isinstance(topo, AbstractTopology):
log.debug("Get Topology description based on Topology class...")
self.__init_from_AbstractTopology(topo_class=topo)
else:
raise TopologyBuilderException(
"Unsupported topology format: %s - %s" % (type(topo), topo))
return self.get_network()
except SystemExit as e:
quit_with_error(msg="Mininet exited unexpectedly!", logger=log,
exception=e)
except TopologyBuilderException:
try:
if self.fallback:
# Search for fallback topology
fallback = CONFIG.get_fallback_topology()
if fallback:
log.info("Load topo from fallback topology description...")
self.__init_from_AbstractTopology(fallback)
return self.get_network()
except SystemExit as e:
quit_with_error(msg="Mininet exited unexpectedly!", logger=log,
exception=e)
# fallback topo is not found or set
if self.run_dry:
# Return with the bare Mininet object
log.warning("Topology description is not found! Running dry...")
return self.get_network()
else:
# Re-raise the exception
raise
except KeyboardInterrupt:
quit_with_error(
msg="Assembly of Mininet network was interrupted by user!",
logger=log)
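# --- Illustrative sketch (not part of the original module) -------------------
# A minimal end-to-end usage of the Builder, assuming a valid absolute path of
# an NFFG topology file. The helper is hypothetical and not called anywhere; it
# only shows the intended build() -> start_network() flow.
def _example_build_and_start (topo_path):
  """
  Build the emulated network from a topology file and start it.

  :param topo_path: absolute path of an NFFG topology file
  :type topo_path: str
  :return: the started network bridge
  :rtype: :any:`ESCAPENetworkBridge`
  """
  builder = ESCAPENetworkBuilder(run_dry=True)
  bridge = builder.build(topo=topo_path)
  bridge.start_network()
  return bridge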
| hsnlab/escape | escape/escape/infr/topology.py | Python | apache-2.0 | 40,815 |
#!/usr/bin/env python
#
# test_write_pdb.py
#
# tests for ModernaStructure PDB write feature.
#
# http://iimcb.genesilico.pl/moderna/
#
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
from unittest import main, TestCase
from moderna.ModernaStructure import ModernaStructure
from moderna.modifications import exchange_base, add_modification, remove_modification
from Bio.PDB import PDBParser
from test_data import *
TEST_OUTPUT = 'test_data/test_output.ent'
class WritePDBTests(TestCase):
def get_resnames(self,fn,chain='A'):
"""Returns a list of residue names from a PDB file."""
result = []
struc=PDBParser().get_structure('test_struc',fn)
chain=struc[0][chain]
for resi in chain.child_list:
result.append(resi.resname)
return result
def setUp(self):
self.s = ModernaStructure('file',MINI_TEMPLATE)
def test_res_names(self):
"""Names of written residues should be standard."""
self.s.write_pdb_file(TEST_OUTPUT)
rn = self.get_resnames(TEST_OUTPUT)
self.assertEqual(rn, [' G', ' C', ' G', ' G', ' A', ' U', ' U', ' U', ' A', '2MG', ' C', ' U', ' C', ' A', ' G'])
def test_res_names_mod(self):
"""Names of modifications should be standard."""
        # I'll check the file with amber names manually.
        # There are some conflicts and inconsistencies.
pass
def test_res_names_exchanged(self):
"""Names should be consistent after base exchanges."""
exchange_base(self.s['5'], 'C')
self.s.write_pdb_file(TEST_OUTPUT)
rn = self.get_resnames(TEST_OUTPUT)
self.assertEqual(rn[4], ' C')
def test_res_names_add_modif(self):
"""Names should be consistent after adding modifications."""
add_modification(self.s['5'], 'm7G')
self.s.write_pdb_file(TEST_OUTPUT)
rn = self.get_resnames(TEST_OUTPUT)
self.assertEqual(rn[4], '7MG')
def test_res_names_remove_modif(self):
"""Names should be consistent after adding modifications."""
remove_modification(self.s['10'])
self.s.write_pdb_file(TEST_OUTPUT)
rn = self.get_resnames(TEST_OUTPUT)
self.assertEqual(rn[4], ' A')
def test_atom_number(self):
pass
if __name__ == '__main__':
main()
| lenarother/moderna | tests/test_write_pdb.py | Python | gpl-3.0 | 2,583 |
import logging, redis, time
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from monscale.rules import evaluate_traps
class Command(BaseCommand):
args = ''
    help = 'Retrieve queued traps and evaluate them.'
def handle(self, *args, **options):
# logging.basicConfig(level=logging.DEBUG)
while True:
logging.debug("[trap_worker] starting loop ...")
evaluate_traps()
logging.debug("[trap_worker] going to sleep for %ss" % settings.ACTION_WORKER_SLEEP_SECS)
time.sleep(settings.ACTION_WORKER_SLEEP_SECS)
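# Illustrative note (assumption): ACTION_WORKER_SLEEP_SECS is expected to be
# defined in the project's Django settings, e.g. ACTION_WORKER_SLEEP_SECS = 5
# to poll the trap queue every five seconds.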
| jpardobl/monscale | monscale/management/commands/trap_worker.py | Python | bsd-3-clause | 654 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Import modules for CGI handling
import cgi
import cgitb
import Cookie
import os
import time
# =====================***** C-o-n-f-i-g--m-o-d-e *****================ #
import ConfigParser
Config = ConfigParser.ConfigParser()
Config.read(r'setting/devices.ini')
settingSec = Config.sections()
# print settingSec
def ConfigSectionMap(device_No):
dict1 = {}
options = Config.options(device_No)
for option in options:
try:
dict1[option] = Config.get(device_No, option)
if dict1[option] == -1:
DebugPrint('skip: %s' % option)
except:
print 'exception on %s!' % option
dict1[option] = None
return dict1
# ==============Reading config of device=============
def getAmount():
try:
amountDevice = ConfigSectionMap('NumDevice')
return amountDevice['amount']
except:
print 'Data not found.'
return 0
def getName(device_No):
try:
Device = ConfigSectionMap('Device' + str(device_No))
return Device['name']
except:
return 'Data not found.'
def getAPI_key(device_No):
try:
Device = ConfigSectionMap('Device' + str(device_No))
return Device['api-key']
except:
return 'Data not found.'
def getMaxMin(device_No):
try:
Device = ConfigSectionMap('Device' + str(device_No))
return Device['alert_type']
except:
return 'Data not found.'
def getThreadHole(device_No):
try:
Device = ConfigSectionMap('Device' + str(device_No))
return Device['decision_point']
except:
return 'Data not found.'
# ============== End Reading config of device =============
# ============== Update config MODE =======================
def amount(value):
cfgfile = open(r'setting/devices.ini', 'wb')
Config.set('NumDevice', 'amount', value)
Config.write(cfgfile)
cfgfile.close()
def setName(device_No, value):
# Uncomment add if you create new section or uncomment read when you want to update
# config.add_section('Device1')
# config.read(r'setting/devices.ini')
# lets create that config file for next time...
cfgfile = open(r'setting/devices.ini', 'wb')
Config.set('Device' + str(device_No), 'name', value)
Config.write(cfgfile)
cfgfile.close()
#setName(1, 'Temp & Humid.') # test
def setAPI_key(device_No, value):
cfgfile = open(r'setting/devices.ini', 'wb')
Config.set('Device' + str(device_No), 'api-key', value)
Config.write(cfgfile)
cfgfile.close()
def setMaxMin(device_No, value):
cfgfile = open(r'setting/devices.ini', 'wb')
Config.set('Device' + str(device_No), 'alert_type', value)
Config.write(cfgfile)
cfgfile.close()
def setThreadHole(device_No, value):
cfgfile = open(r'setting/devices.ini', 'wb')
Config.set('Device' + str(device_No), 'decision_point', value)
Config.write(cfgfile)
cfgfile.close()
# ============== End Update config MODE =======================
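# ----- Illustrative sketch (not part of the original script) -----
# Example round trip over the helpers above, assuming 'setting/devices.ini'
# already contains a [Device1] section. The function name is hypothetical and
# it is never called by the CGI flow below.
def example_update_device():
    device_no = 1
    print 'Current name: %s' % getName(device_no)
    setName(device_no, 'Temp & Humid.')   # rename the device
    setThreadHole(device_no, '30.5')      # update the alert threshold
    setMaxMin(device_no, 'max')           # alert when the value exceeds it
    print 'New threshold: %s' % getThreadHole(device_no)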
# =====================***** E-n-d C-o-n-f-i-g--m-o-d-e *****================ #
# when a device is edited
form = cgi.FieldStorage()
DeviceName1 = form.getvalue('Device-name1')
DeviceName2 = form.getvalue('Device-name2')
DeviceName3 = form.getvalue('Device-name3')
DeviceName4 = form.getvalue('Device-name4')
AlertTh = form.getvalue('Alert-th')
maxMin = form.getvalue('max-min')
Device_No = 0
if DeviceName1 is not None:
Device_No = 1
setName(Device_No, DeviceName1)
setMaxMin(Device_No, maxMin)
setThreadHole(Device_No, AlertTh)
elif DeviceName2 is not None:
Device_No = 2
setName(Device_No, DeviceName2)
setMaxMin(Device_No, maxMin)
setThreadHole(Device_No, AlertTh)
elif DeviceName3 is not None:
Device_No = 3
setName(Device_No, DeviceName3)
setMaxMin(Device_No, maxMin)
setThreadHole(Device_No, AlertTh)
elif DeviceName4 is not None:
Device_No = 4
setName(Device_No, DeviceName4)
setMaxMin(Device_No, maxMin)
setThreadHole(Device_No, AlertTh)
else:
pass
# when a device is added
newDeviceName = form.getvalue('Device-name-add')
newapiKeyDevice = form.getvalue('api-key-device-add')
cookie = Cookie.SimpleCookie()
cookie_string = os.environ.get('HTTP_COOKIE')
def getCookies():
if not cookie_string:
return False
else:
# load() parses the cookie string
cookie.load(cookie_string)
# Use the value attribute of the cookie to get it
txt = str(cookie['login'].value)
if txt == 'success':
return True
else:
return False
if getCookies() == False:
print 'Content-Type: text/html\n'
print '<html><head>'
homeIP = 'siczones.coe.psu.ac.th'
print ('''<meta http-equiv="refresh" content="0.1;http://%s">'''%(homeIP))
print '</head></html>'
else:
print ("Content-type:text/html\r\n\r\n")
print ('''<!DOCTYPE html>
<html lang="en">
<head>
<title>Device</title>
<meta charset="utf-8">
<link href="../favicon.ico" rel="icon" type="image/x-icon"/>
<link href="../favicon.ico" rel="shortcut icon" type="image/x-icon"/>
<!-- This file has been downloaded from Bootsnipp.com. Enjoy! -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Kaushan+Script' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Droid+Serif:400,700,400italic,700italic' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Roboto+Slab:400,100,300,700' rel='stylesheet' type='text/css'>
<!-- Theme CSS -->
<link href="../css/agency.css" rel="stylesheet">
<link href="../css/siczones.css" rel="stylesheet">
<script src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script>
<script>
$(document).ready(function(){
$(window).scroll(function () {
if ($(this).scrollTop() > 50) {
$('#back-to-top').fadeIn();
} else {
$('#back-to-top').fadeOut();
}
});
// scroll body to 0px on click
$('#back-to-top').click(function () {
$('#back-to-top').tooltip('hide');
$('body,html').animate({
scrollTop: 0
}, 800);
return false;
});
$('#back-to-top').tooltip('show');
});
</script>
</head>''')
print ('''
<body>
<!-- ==================== Nav Tabs ======================= -->
<nav class="nav nav-tabs navbar-default navbar-fixed-top">
<div class = "container">
<ul class="nav nav-tabs">
<li role="presentation"><a href="index.py"><span class="glyphicon glyphicon-home"/> Home</a></li>
<li role="presentation"><a href="mode.py">Mode</a></li>
<li role="presentation" class="dropdown active">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button" aria-haspopup="true" aria-expanded="false">
Other<span class="caret"></span>
</a>
<ul class="dropdown-menu">
<li><a href="status.py">Status</a></li>
<li><a href="device.py">Device</a></li>
<li><a href="alert.py">Alert</a></li>
<li role="separator" class="divider"></li>
<li><a href="logout.py" onmouseover="style.color='red'" onmouseout="style.color='black'">Log out</a></li>
</ul>
</li>
</ul>
</div>
</nav>
<br/><br/><br>
<div class="container-fluid">
<div class="container">
<div class="row">
<div class="col-sm-3 col-md-3 col-xs-5">
<!-- <img src="/img/brand.png" width="50px" height="50px" alt="Brand" style="display: block; margin-left: auto; margin-right: auto;"> -->
<img src="/img/brand/Brand.png" style="max-height: 100px; display: block; margin-left: auto; margin-right: auto;" class="img-responsive" alt="Header">
<br>
</div>
<div class="col-sm-9 col-md-9 col-xxs-7">
<br>
<brand style="display: block; margin-left: auto; margin-right: auto;">
Safety in residential system
</brand>
<hr>
</div>
</div>
</div>
</div>
<!-- ========================== Nav Tabs ======================= -->
<div class = "container bg-all">
<div class="wrapper">
<center>
<h4 class="form-signin-heading">Device configurations</h4>
<hr class="colorgraph"><br>
<!-- ======================== Data ======================== -->
<div class="form-signin">
''')
print ('''<hr><label>Change of option: </label><br>
DeviceName1 = %s <br>
DeviceName2 = %s <br>
DeviceName3 = %s <br>
DeviceName4 = %s <br>
AlertTh = %s <br>
maxMin = %s <br>
<hr>'''%(DeviceName1,DeviceName2,DeviceName3,DeviceName4,AlertTh,maxMin))
num_device = int(float(getAmount()))
i = 0
while i < num_device:
i = i + 1
print ('''<!-- ======================== Device%s ======================== -->''') %(str(i))
print ('''
<div class="panel panel-info">
<button class="panel-heading btn btn-sm btn-block" data-toggle="collapse" data-target="#Device%s" aria-expanded="false" aria-controls="collapseFull-Active" type="button"">
<label class="panel-title">Device%s : %s</label>
</button>
<div class="collapse" id="Device%s">
<div class="panel-body">
<div class="row">
<div class="col-sm-6">
<button type="button" class="btn btn-block btn-sm btn-default" data-toggle="modal" data-target="#edit-device-%s">Edit</button>
</div>
<div class="col-sm-6">
<button type="button" class="disabled btn btn-block btn-sm btn-danger" data-toggle="modal" data-target="#remove-device">Remove</button>
</div>
</div>
</div>
</div>
</div>
<br>
''')%(str(i), str(i), getName(i), str(i), str(i))
print ('''<!-#============= Edit device %s =================# -->''')%(str(i))
print ('''
<div class="modal fade" id="edit-device-%s" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true" style="display: none;">
<form action="#" method="get" class="modal-dialog">
<div class="loginmodal-container">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="">Edit device setting</h4>
</div>
<div class="modal-body">
<div class="row">
<div class="col-lg-12">
<input type="text" class="form-control" name="Device-name%s" placeholder="%s" required="">
</div>
<div class="col-lg-12">
<div class="input-group">
<input type="number" step="0.01" class="form-control" name="Alert-th" placeholder="Thread-Hole (Current:%s)" required="" />
<span class="input-group-addon">
<input type="radio" name="max-min" value="max" aria-label="Max" checked>Max
</span>
<span class="input-group-addon">
<input type="radio" name="max-min" value="min" aria-label="Min">Min
</span>
</div>
<!-- /input-group -->
</div>
</div>
<!-- /.row -->
<div class="alert alert-warning alert-dismissable fade in" role="alert">
                                    <p>Max : Alert when the sensor value rises above the threshold.</p>
                                    <p>Min : Alert when the sensor value falls below the threshold.</p>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
<button type="Reset" class="btn btn-danger" value="Reset" onmouseover="style.color='red'" onmouseout="style.color='white'">Reset</button>
<button type="submit" class="btn btn-success" value="submit" onmouseover="style.color='yellow'" onmouseout="style.color='white'">Update</button>
</div>
</div>
</form>
</div>
''')%(str(i), str(i), getName(i), getThreadHole(i))
print('''
<!-#============= Add device =================# -->
<button data-toggle="modal" data-target="#add-device" class="btn btn-block btn-success btn-lg" onmouseover="style.color='yellow'" onmouseout="style.color='white'"><span class="glyphicon glyphicon-plus"> ADD</span></button>
<div class="modal fade" id="add-device" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true" style="display: none;">
<form action="#" method="get" class="modal-dialog">
<div class="loginmodal-container">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="exampleModalLabel">Add device</h4>
</div>
<div class="modal-body">
<div class="row">
<div class="col-lg-12">
<input type="text" class="form-control" name="Device-name-add" placeholder="Name" required="">
<input type="text" class="form-control" name="api-key-device-add" placeholder="API-Key" required="">
</div>
<div class="col-lg-12">
<div class="input-group">
<input type="number" step="0.01" class="form-control" name="Alert-th" placeholder="Thread-Hole" required="" />
<span class="input-group-addon">
<input type="radio" name="max-min" aria-label="Max" checked>Max
</span>
<span class="input-group-addon">
<input type="radio" name="max-min" aria-label="Min">Min
</span>
</div>
<!-- /input-group -->
</div>
</div>
<!-- /.row -->
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
<button type="Reset" class="btn btn-danger" value="Reset" onmouseover="style.color='red'" onmouseout="style.color='white'">Reset</button>
<button type="submit" class="btn btn-success" value="submit" onmouseover="style.color='yellow'" onmouseout="style.color='white'">OK</button>
</div>
</div>
</form>
</div>
<!-- ======================== End Data ======================== -->
</center>
</div>
<form action="index.py"><button class="btn btn-lg btn-primary btn-block" VALUE="Back">Back</button></form>
<br><br>
</div>
<!-- ============== Footer ============ -->
<br><br><div class="navbar navbar-default navbar-fixed-bottom">
<div class="container">
<p class="navbar-text pull-left">Copyright © 2016-2017 Siczones.</p>
<!-- a id="back-to-top" href="#" class="navbar-btn btn-danger btn pull-right" role="button" data-toggle="tooltip" data-placement="left"><span class="glyphicon glyphicon-chevron-up"></span></a -->
<!-- Split button -->
<div class="navbar-btn btn-group dropup pull-right">
<button id="back-to-top" href="#" type="button" class="btn btn-warning"><span class="glyphicon glyphicon-chevron-up"></span> Top</button>
</div>
</div>
</div>
<!-- ============== End Footer ============ -->
</body>''')
print ("</html>")
| 5610110083/Safety-in-residential-project | cgi-bin/device.py | Python | apache-2.0 | 15,469 |
import time
from pageobjects.base import PageObject
from pageobjects.base import Popup
from pageobjects.tabs import Tabs
class Actions(PageObject):
@property
def name(self):
return self.parent.\
find_element_by_css_selector('.environment-action-form input')
@property
def rename(self):
return self.parent.\
find_element_by_css_selector('button.rename-environment-btn')
@property
def delete(self):
return self.parent.\
find_element_by_css_selector('button.delete-environment-btn')
@property
def reset(self):
return self.parent.\
find_element_by_css_selector('button.reset-environment-btn')
@property
def reset_popup(self):
return self.parent.\
find_element_by_xpath("//div[@class='modal-footer']/button"
"[contains(@class,'reset-environment-btn')]")
@property
def stop(self):
return self.parent.\
find_element_by_css_selector('button.stop-deployment-btn')
@property
def progress(self):
return self.parent.find_element_by_css_selector('.progress')
@property
def pending_nodes(self):
return self.parent.\
find_element_by_xpath("//span[text()='Pending Addition']")
@property
def cancel_popup(self):
return self.parent.\
find_element_by_xpath("//button[text()='Cancel']")
@property
def verify_disabled_deploy(self):
return self.parent.find_element_by_xpath(
"//div[@class='deployment-control-item-box']/button[@disabled]")
@property
def stop_deploy(self):
return self.parent.find_element_by_css_selector('button.'
'stop-deployment-btn')
@property
def stop_deploy_popup(self):
return self.parent.\
find_element_by_xpath("//div[@class='modal-footer']/button"
"[contains(@class,'stop-deployment-btn')]")
@classmethod
def reset_env(cls):
PageObject.click_element(Actions(), 'reset')
PageObject.wait_element(Actions(), 'reset_popup')
time.sleep(2)
PageObject.click_element(Actions(), 'reset_popup')
PageObject.click_element(Tabs(), 'nodes')
PageObject.long_wait_element(Actions(), 'pending_nodes')
@classmethod
def cancel_reset(cls):
Actions().reset.click()
PageObject.click_element(Actions(), 'cancel_popup')
PageObject.click_element(Tabs(), 'nodes')
@classmethod
def stop_deploy_process(cls):
PageObject.click_element(Actions(), 'stop_deploy')
PageObject.wait_element(Actions, "stop_deploy_popup")
PageObject.click_element(Actions(), 'stop_deploy_popup')
PageObject.click_element(Tabs(), 'nodes')
PageObject.long_wait_element(Actions(), 'pending_nodes')
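# --- Illustrative sketch (not part of the original module) -------------------
# Example of how a test could drive the Actions page object, assuming the
# surrounding framework has already opened the browser session that PageObject
# relies on. The helper name is hypothetical and unused by the page objects.
def _example_reset_environment():
    # Trigger an environment reset, confirm the popup and wait until the nodes
    # reappear in the 'Pending Addition' state.
    Actions.reset_env()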
class DeleteEnvironmentPopup(Popup):
@property
def delete(self):
return self.parent.\
find_element_by_css_selector(
'button.remove-cluster-btn')
| ddepaoli3/fuel-main-dev | fuelweb_ui_test/pageobjects/actions.py | Python | apache-2.0 | 3,127 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Conv2d implementations for cortex-m7."""
from . import direct_simd
| tqchen/tvm | python/tvm/topi/arm_cpu/cortex_m7/conv2d/__init__.py | Python | apache-2.0 | 856 |
# -*- coding: utf-8 -*-
from mock import patch
from unittest import TestCase
from django.template import Template, Context
@patch('django_thumbor.templatetags.thumbor_tags.generate_url')
class TestThumborURLTTagMock(TestCase):
url = 'domain.com/path/image.jpg'
def test_should_assign_result_to_variable(self, generate_url):
dummy_url = "generated.url"
generate_url.return_value = dummy_url
source = '''
{% load thumbor_tags %}
{% assign_thumbor_url image_url=url as thumb_url %}
<<{{ thumb_url }}>>'''
template = Template(source)
context = dict({'url': self.url})
rendered = template.render(Context(context)).strip()
self.assertEqual(rendered, '<<{}>>'.format(dummy_url))
| ricobl/django-thumbor | testproject/tests/test_assign_thumbor_url_ttag.py | Python | mit | 777 |
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DXJob Handler
+++++++++++++
Jobs are DNAnexus entities that capture an instantiation of a running
app or applet. They can be created from either
:func:`dxpy.bindings.dxapplet.DXApplet.run` or
:func:`dxpy.bindings.dxapp.DXApp.run` if running an applet or app, or
via :func:`new_dxjob` or :func:`DXJob.new` in the case of an existing
job creating a subjob.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import os, time
import dxpy
from . import DXObject, DXDataObject, DXJobFailureError, verify_string_dxid
from ..exceptions import DXError
from ..system_requirements import SystemRequirementsDict
from ..utils.local_exec_utils import queue_entry_point
from ..compat import basestring
#########
# DXJob #
#########
def new_dxjob(fn_input, fn_name, name=None, tags=None, properties=None, details=None,
instance_type=None, depends_on=None,
**kwargs):
'''
:param fn_input: Function input
:type fn_input: dict
:param fn_name: Name of the function to be called
:type fn_name: string
:param name: Name for the new job (default is "<parent job name>:<fn_name>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
    :param depends_on: List of data objects or jobs to wait for; they need to enter the "closed" or "done" states, respectively, before the new job will be run. Each element in the list can be either a dxpy handler or a string ID
:type depends_on: list
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates and enqueues a new job that will execute a particular
function (from the same app or applet as the one the current job is
running). Returns the :class:`~dxpy.bindings.dxjob.DXJob` handle for
the job.
Note that this function is shorthand for::
dxjob = DXJob()
dxjob.new(fn_input, fn_name, **kwargs)
.. note:: This method is intended for calls made from within
already-executing jobs or apps. If it is called from outside of
an Execution Environment, an exception will be thrown. To create
new jobs from outside the Execution Environment, use
:func:`dxpy.bindings.dxapplet.DXApplet.run` or
:func:`dxpy.bindings.dxapp.DXApp.run`.
    .. note:: If the environment variable ``DX_JOB_ID`` is not set, this method assumes that it is running within the debug harness, executes the job in place, and provides a debug job handler object that does not have a corresponding remote API job object.
'''
dxjob = DXJob()
dxjob.new(fn_input, fn_name, name=name, tags=tags, properties=properties,
details=details, instance_type=instance_type, depends_on=depends_on, **kwargs)
return dxjob
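# A minimal usage sketch (hedged: the "chunk", "process_chunk" and "result" names
# below are hypothetical, and the call only works from inside a running Execution
# Environment, i.e. when DX_JOB_ID is set):
#
#     subjob = new_dxjob({"chunk": 1}, "process_chunk", name="scatter:chunk-1")
#     output = {"result": subjob.get_output_ref("result")}
#
# get_output_ref() (defined on DXJob below) yields a job-based object reference,
# so the parent job can return the subjob's output without waiting for it to finish.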
class DXJob(DXObject):
'''
Remote job object handler.
'''
_class = "job"
def __init__(self, dxid=None):
self._test_harness_result = None
DXObject.__init__(self, dxid=dxid)
self.set_id(dxid)
def new(self, fn_input, fn_name, name=None, tags=None, properties=None, details=None,
instance_type=None, depends_on=None,
**kwargs):
'''
:param fn_input: Function input
:type fn_input: dict
:param fn_name: Name of the function to be called
:type fn_name: string
:param name: Name for the new job (default is "<parent job name>:<fn_name>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
        :param depends_on: List of data objects or jobs to wait for; they need to enter the "closed" or "done" states, respectively, before the new job will be run. Each element in the list can be either a dxpy handler or a string ID
:type depends_on: list
Creates and enqueues a new job that will execute a particular
function (from the same app or applet as the one the current job
is running).
.. note:: This method is intended for calls made from within
already-executing jobs or apps. If it is called from outside
of an Execution Environment, an exception will be thrown. To
create new jobs from outside the Execution Environment, use
:func:`dxpy.bindings.dxapplet.DXApplet.run` or
:func:`dxpy.bindings.dxapp.DXApp.run`.
'''
final_depends_on = []
if depends_on is not None:
if isinstance(depends_on, list):
for item in depends_on:
if isinstance(item, DXJob) or isinstance(item, DXDataObject):
if item.get_id() is None:
raise DXError('A dxpy handler given in depends_on does not have an ID set')
final_depends_on.append(item.get_id())
elif isinstance(item, basestring):
final_depends_on.append(item)
else:
raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings')
else:
raise DXError('Expected depends_on field to be a list')
if 'DX_JOB_ID' in os.environ:
req_input = {}
req_input["input"] = fn_input
req_input["function"] = fn_name
if name is not None:
req_input["name"] = name
if tags is not None:
req_input["tags"] = tags
if properties is not None:
req_input["properties"] = properties
if instance_type is not None:
req_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type, fn_name).as_dict()
if depends_on is not None:
req_input["dependsOn"] = final_depends_on
if details is not None:
req_input["details"] = details
resp = dxpy.api.job_new(req_input, **kwargs)
self.set_id(resp["id"])
else:
self.set_id(queue_entry_point(function=fn_name, input_hash=fn_input,
depends_on=final_depends_on,
name=name))
def set_id(self, dxid):
'''
:param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs)
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
if not (isinstance(dxid, basestring) and dxid.startswith('localjob-')):
# localjob IDs (which do not follow the usual ID
# syntax) should be allowed; otherwise, follow the
# usual syntax checking
verify_string_dxid(dxid, self._class)
self._dxid = dxid
def describe(self, fields=None, io=None, **kwargs):
"""
:param fields: dict where the keys are field names that should
be returned, and values should be set to True (by default,
all fields are returned)
:type fields: dict
:param io: Include input and output fields in description;
cannot be provided with *fields*; default is True if
*fields* is not provided (deprecated)
:type io: bool
:returns: Description of the job
:rtype: dict
Returns a hash with key-value pairs containing information about
the job, including its state and (optionally) its inputs and
outputs, as described in the API documentation for the
`/job-xxxx/describe
<https://documentation.dnanexus.com/developer/api/running-analyses/applets-and-entry-points#api-method-job-xxxx-describe>`_
method.
"""
if fields is not None and io is not None:
raise DXError('DXJob.describe: cannot provide non-None values for both fields and io')
describe_input = {}
if fields is not None:
describe_input['fields'] = fields
if io is not None:
describe_input['io'] = io
self._desc = dxpy.api.job_describe(self._dxid, describe_input, **kwargs)
return self._desc
def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the job
:type tags: list of strings
Adds each of the specified tags to the job. Takes no
action for tags that are already listed for the job.
"""
dxpy.api.job_add_tags(self._dxid, {"tags": tags}, **kwargs)
def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
"""
dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs)
def update(self, allow_ssh, **kwargs):
"""
:param allow_ssh: Allowable IP ranges to set for SSH access to the job
:type allow_ssh: list of strings
Updates a job's allowSSH field, overwrites existing values
"""
dxpy.api.job_update(self._dxid, {"allowSSH": allow_ssh}, **kwargs)
def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the job for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
"""
dxpy.api.job_set_properties(self._dxid, {"properties": properties}, **kwargs)
def wait_on_done(self, interval=2, timeout=3600*24*7, **kwargs):
'''
:param interval: Number of seconds between queries to the job's state
:type interval: integer
:param timeout: Maximum amount of time to wait, in seconds, until the job is done running
:type timeout: integer
:raises: :exc:`~dxpy.exceptions.DXError` if the timeout is reached before the job has finished running, or :exc:`dxpy.exceptions.DXJobFailureError` if the job fails
Waits until the job has finished running.
'''
elapsed = 0
while True:
state = self._get_state(**kwargs)
if state == "done":
break
if state == "failed":
desc = self.describe(**kwargs)
err_msg = "Job has failed because of {failureReason}: {failureMessage}".format(**desc)
if desc.get("failureFrom") != None and desc["failureFrom"]["id"] != desc["id"]:
err_msg += " (failure from {id})".format(id=desc['failureFrom']['id'])
raise DXJobFailureError(err_msg)
if state == "terminated":
raise DXJobFailureError("Job was terminated.")
if elapsed >= timeout or elapsed < 0:
raise DXJobFailureError("Reached timeout while waiting for the job to finish")
time.sleep(interval)
elapsed += interval
def terminate(self, **kwargs):
'''
Terminates the associated job.
'''
dxpy.api.job_terminate(self._dxid, **kwargs)
def get_output_ref(self, field, index=None, metadata=None):
'''
:param field: Output field name of this job
:type field: string
:param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array
:type index: int
:param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome"
:type metadata: string
Returns a dict containing a valid job-based object reference
to refer to an output of this job. This can be used directly
in place of a DNAnexus link when used as a job output value.
For example, after creating a subjob, the following app
snippet uses a reference to the new job's output as part of
its own output::
mysubjob = dxpy.new_dxjob({}, "my_function")
return { "myfileoutput": mysubjob.get_output_ref("output_field_name"),
"myotherfileoutput": mysubjob.get_output_ref("output_array",
index=1),
"filename": mysubjob.get_output_ref("output_field_name",
metadata="name") }
'''
link = {"$dnanexus_link": {"job": self._dxid, "field": field}}
if index is not None:
link["$dnanexus_link"]["index"] = index
if metadata is not None:
link["$dnanexus_link"]["metadata"] = metadata
return link
def _get_state(self, **kwargs):
'''
:returns: State of the remote object
:rtype: string
Queries the API server for the job's state.
Note that this function is shorthand for:
dxjob.describe(io=False, **kwargs)["state"]
'''
return self.describe(fields=dict(state=True), **kwargs)["state"]
| dnanexus/dx-toolkit | src/python/dxpy/bindings/dxjob.py | Python | apache-2.0 | 14,894 |
import numpy as np
#===============================================
#===============================================
#===============================================
#Corrections from ML2/alpha = 0.8 to 3D from Tremblay et al. (2013)
def ml28_to_3d_teff(teff,logg):
c = np.zeros(8)
c[0] = 1.0947335E-03
c[1] = -1.8716231E-01
c[2] = 1.9350009E-02
c[3] = 6.4821613E-01
c[4] = -2.2863187E-01
c[5] = 5.8699232E-01
c[6] = -1.0729871E-01
c[7] = 1.1009070E-01
gx = logg - 8.0
tx = (teff - 10000.)/1000.
shift = c[0] + (c[1] + c[6]*tx + c[7]*gx) * np.exp(-(c[2]+c[4]*tx+c[5]*gx)**2. * (tx-c[3])**2.)
teff3d = teff + (shift*1000.)
return teff3d
#===============================================
#===============================================
#===============================================
def ml28_to_3d_logg(teff,logg):
d = np.zeros(12)
d[0] = 7.5209868E-04
d[1] = -9.2086619E-01
d[2] = 3.1253746E-01
d[3] = -1.0348176E+01
d[4] = 6.5854716E-01
d[5] = 4.2849862E-01
d[6] = -8.8982873E-02
d[7] = 1.0199718E+01
d[8] = 4.9277883E-02
d[9] = -8.6543477E-01
d[10] = 3.6232756E-03
d[11] = -5.8729354E-02
gx = logg - 8.0
tx = (teff - 10000.)/1000.
shift = (d[0] + d[4]*np.exp(-1.*d[5]*(tx-d[6])**2.)) + d[1]*np.exp(-1.*d[2]*(tx-(d[3]+d[7]*np.exp(-1.*(d[8]+d[10]*tx+d[11]*gx)**2.*(tx-d[9])**2.)))**2.)
logg3d = logg + shift
return logg3d
#===============================================
#===============================================
#===============================================
def find_solution(combined,logg,teff):
#======================================
# Fit a parabola in the logg direction, then take the minimum from those fits, and fit a parabola in the Teff direction
# Find minimum point, move + and - 10 and 10 K to fit 5 parabolas in logg, take those centers, fit parabola
combinedindex = np.unravel_index(combined.argmin(),combined.shape)
combinedlogg, combinedteff = logg[combinedindex[0]], teff[combinedindex[1]]
    rangeg = 3 #Number of grid points in logg space around lowest value to pick (integer, used for slicing)
    ranget = 3 #Number of grid points in Teff space around lowest value to pick (integer, used for slicing)
#pick out region of grid with spacing of rangeg and ranget around the minimum
if combinedindex[0]-rangeg < 0:
##loggsmall = logg[0:combinedindex[0]+rangeg+1]
loggsmall = logg[0:2*rangeg+1]
logglow = 0
##logghigh = combinedindex[0]+rangeg+1
logghigh = 2*rangeg+1
elif combinedindex[0]+rangeg >= len(logg):
##loggsmall = logg[combinedindex[0]-rangeg:-1]
loggsmall = logg[-2*rangeg-1:]
##logglow = combinedindex[0]-rangeg
logglow = -2*rangeg-1
logghigh = -1
else:
loggsmall = logg[combinedindex[0]-rangeg:combinedindex[0]+rangeg+1]
logglow = combinedindex[0]-rangeg
logghigh = combinedindex[0]+rangeg+1
if combinedindex[1]-ranget < 0:
##teffsmall = teff[0:combinedindex[1]+ranget+1]
teffsmall = teff[0:2*ranget+1]
tefflow = 0
##teffhigh = combinedindex[1]+ranget+1
teffhigh = 2*ranget+1
elif combinedindex[1]+ranget >= len(teff):
##teffsmall = teff[combinedindex[1]-ranget:-1]
teffsmall = teff[-2*ranget-1:]
##tefflow = combinedindex[1]-ranget
tefflow = -2*ranget-1
teffhigh = -1
else:
teffsmall = teff[combinedindex[1]-ranget:combinedindex[1]+ranget+1]
tefflow = combinedindex[1]-ranget
teffhigh = combinedindex[1]+ranget+1
#Get the low and high values for each
lowg, highg = loggsmall[0], loggsmall[-1]
lowt, hight = teffsmall[0], teffsmall[-1]
teffsmallgrid, loggsmallgrid = np.meshgrid(teffsmall,loggsmall)
if (logghigh == -1) and (teffhigh == -1):
combinedsmall = combined[logglow:,tefflow:]
elif logghigh == -1:
combinedsmall = combined[logglow:,tefflow:teffhigh]
elif teffhigh == -1:
combinedsmall = combined[logglow:logghigh,tefflow:]
else:
combinedsmall = combined[logglow:logghigh,tefflow:teffhigh]
#print combinedsmall.shape
#print lowt, hight
#print lowg, highg
#Create finer small grid with spacing of 1 K and 0.005 logg
    lenteffgrid = int(np.round(hight-lowt+1.)) #Round and cast to int to ensure we get the correct number of points; np.linspace needs an integer count.
teffsmallfine = np.linspace(lowt,hight,lenteffgrid,endpoint=True)
    lenlogggrid = int(np.round((highg-lowg)*1000.+1.))
loggsmallfine = np.linspace(lowg,highg,lenlogggrid,endpoint=True)
teffsmallfinegrid, loggsmallfinegrid = np.meshgrid(teffsmallfine,loggsmallfine)
#####################
#Fit a polynomial to different Teff values to find center of logg
loggval = np.zeros(len(combinedsmall[:,0]))
chival = np.zeros(len(combinedsmall[:,0]))
for x in np.arange(len(combinedsmall[:,0])):
pol = np.polyfit(loggsmall,combinedsmall[:,x],2)
pc = np.poly1d(pol)
if x == np.median(np.arange(len(combinedsmall[:,0]))):
pckeep = np.poly1d(pol)
loggval[x] = loggsmallfine[pc(loggsmallfine).argmin()]
chival[x] = pc(loggval[x])
#print teffsmall[x], loggval[x], chival[x]
#plt.clf()
#plt.plot(loggsmall,combinedsmall[:,x],'b^')
#plt.plot(loggsmallfine,pc(loggsmallfine))
#plt.show()
#Now take these values and fit a polynomial in the Teff direction
lowtanswer = lowt - 100.
hightanswer = hight + 100.
    lenteffanswer = int(np.round(hightanswer-lowtanswer+1.))
teffanswer = np.linspace(lowtanswer,hightanswer,lenteffanswer,endpoint=True)
tpol = np.polyfit(teffsmall,chival,2)
tpp = np.poly1d(tpol)
bestteff = teffanswer[tpp(teffanswer).argmin()]
#plt.clf()
#plt.plot(teffsmallfine,tpp(teffsmallfine),'k')
#plt.plot(teffanswer,tpp(teffanswer),'ro')
#plt.plot(teffsmall,chival,'g^')
#plt.plot(bestteff,tpp(bestteff),'ms')
#plt.show()
########################
#print 'Now logg'
#Fit a polynomial to different logg values to find center of Teff
teffval = np.zeros(len(combinedsmall[0,:]))
chivalteff = np.zeros(len(combinedsmall[0,:]))
for x in np.arange(len(combinedsmall[0,:])):
polteff = np.polyfit(teffsmall,combinedsmall[x,:],2)
pcteff = np.poly1d(polteff)
        teffval[x] = teffsmallfine[pcteff(teffsmallfine).argmin()]
chivalteff[x] = pcteff(teffval[x])
#print loggsmall[x], teffval[x], chivalteff[x]
#plt.clf()
#plt.plot(teffsmall,combinedsmall[x,:],'b^')
#plt.plot(teffsmallfine,pcteff(teffsmallfine))
#plt.show()
#Now take these values and fit a polynomial in the Teff direction
lowganswer = lowg - 0.25
highganswer = highg + 0.25
    lenlogganswer = int(np.round((highganswer-lowganswer)*1000.+1.))
logganswer = np.linspace(lowganswer,highganswer,lenlogganswer,endpoint=True)
gpol = np.polyfit(loggsmall,chivalteff,2)
gpp = np.poly1d(gpol)
bestlogg = logganswer[gpp(logganswer).argmin()]
#plt.clf()
#plt.plot(loggsmallfine,gpp(loggsmallfine),'k')
#plt.plot(logganswer,gpp(logganswer),'ro')
#plt.plot(loggsmall,chivalteff,'g^')
#plt.plot(bestlogg,gpp(bestlogg),'ms')
#plt.show()
########################
#Take these solutions and find the errors
deltateff = tpp(teffanswer)-tpp(bestteff)
lowterr = teffanswer[np.where(deltateff < 2.3)].min()-1.
highterr = teffanswer[np.where(deltateff < 2.3)].max()+1.
tefferr = ((bestteff-lowterr) + (highterr-bestteff)) / 2.
deltalogg = gpp(logganswer) - gpp(bestlogg)
lowloggerr = logganswer[np.where(deltalogg < 2.3)].min()-0.001
highloggerr = logganswer[np.where(deltalogg < 2.3)].max()+0.001
loggerr = ((bestlogg-lowloggerr) + (highloggerr-bestlogg)) / 2.
    print('Teff = {} +/- {}'.format(bestteff, tefferr))
    print('logg = {} +/- {}'.format(bestlogg, loggerr))
#plt.clf()
#plt.plot(loggsmallfine,deltalogg)
#plt.show()
return bestteff, tefferr, bestlogg, loggerr
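if __name__ == "__main__":
    # Quick sanity check of the ML2/alpha=0.8 -> 3D corrections above, using
    # made-up (hypothetical) 1D atmospheric parameters; values are illustrative only.
    teff_1d = 12000.
    logg_1d = 8.0
    print('3D Teff: {}'.format(ml28_to_3d_teff(teff_1d, logg_1d)))
    print('3D logg: {}'.format(ml28_to_3d_logg(teff_1d, logg_1d)))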
| joshfuchs/ZZCeti_analysis | analysis_tools.py | Python | mit | 8,141 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.models import User
from dilla import spam
import random
import logging
log = logging.getLogger('dilla')
@spam.strict_handler('dialer_cdr.VoIPCall.duration')
def get_duration(record, field):
return random.randint(1, 100)
@spam.strict_handler('dialer_cdr.VoIPCall.user')
def get_user(record, field):
return User.objects.get(pk=1)
| garyjs/Newfiesautodialer | newfies/dialer_cdr/dialer_cdr_custom_spamlib.py | Python | mpl-2.0 | 767 |
import os
import re
import pandas
from PyQt4.QtGui import QCheckBox
from ert.enkf import ErtPlugin, CancelPluginException
from ert.enkf.export import SummaryCollector, GenKwCollector, MisfitCollector, DesignMatrixReader, CustomKWCollector
from ert_gui.ertwidgets.customdialog import CustomDialog
from ert_gui.ertwidgets.listeditbox import ListEditBox
from ert_gui.ertwidgets.models.path_model import PathModel
from ert_gui.ertwidgets.pathchooser import PathChooser
class CSVExportJob(ErtPlugin):
"""
Export of summary, custom_kw, misfit, design matrix data and gen kw into a single CSV file.
The script expects a single argument:
output_file: this is the path to the file to output the CSV data to
Optional arguments:
case_list: a comma separated list of cases to export (no spaces allowed)
if no list is provided the current case is exported
a single * can be used to export all cases
design_matrix: a path to a file containing the design matrix
infer_iteration: If True the script will try to infer the iteration number by looking at the suffix of the case name
(i.e. default_2 = iteration 2)
If False the script will use the ordering of the case list: the first item will be iteration 0,
the second item will be iteration 1...
The script also looks for default values for output path and design matrix path to present in the GUI. These can
be specified with DATA_KW keyword in the config file:
DATA_KW CSV_OUTPUT_PATH <some path>
DATA_KW DESIGN_MATRIX_PATH <some path>
"""
INFER_HELP = ("<html>"
"If this is checked the iteration number will be inferred from the name i.e.:"
"<ul>"
"<li>case_name -> iteration: 0</li>"
"<li>case_name_0 -> iteration: 0</li>"
"<li>case_name_2 -> iteration: 2</li>"
"<li>case_0, case_2, case_5 -> iterations: 0, 2, 5</li>"
"</ul>"
"Leave this unchecked to set iteration number to the order of the listed cases:"
"<ul><li>case_0, case_2, case_5 -> iterations: 0, 1, 2</li></ul>"
"<br/>"
"</html>")
def getName(self):
return "CSV Export"
def getDescription(self):
return "Export GenKW, CustomKW, design matrix, misfit data and summary data into a single CSV file."
def inferIterationNumber(self, case_name):
pattern = re.compile("_([0-9]+$)")
match = pattern.search(case_name)
if match is not None:
return int(match.group(1))
return 0
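    # Illustration (hypothetical case names) of how inferIterationNumber() maps a
    # case-name suffix to an iteration number, matching the class docstring above:
    #   "default"    -> 0 (no _<number> suffix)
    #   "default_0"  -> 0
    #   "default_2"  -> 2
    #   "my_case_15" -> 15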
def run(self, output_file, case_list=None, design_matrix_path=None, infer_iteration=True):
cases = []
if case_list is not None:
if case_list.strip() == "*":
cases = self.getAllCaseList()
else:
cases = case_list.split(",")
if case_list is None or len(cases) == 0:
cases = [self.ert().getEnkfFsManager().getCurrentFileSystem().getCaseName()]
if design_matrix_path is not None:
if not os.path.exists(design_matrix_path):
raise UserWarning("The design matrix file does not exists!")
if not os.path.isfile(design_matrix_path):
raise UserWarning("The design matrix is not a file!")
data = pandas.DataFrame()
for index, case in enumerate(cases):
case = case.strip()
if not self.ert().getEnkfFsManager().caseExists(case):
raise UserWarning("The case '%s' does not exist!" % case)
if not self.ert().getEnkfFsManager().caseHasData(case):
raise UserWarning("The case '%s' does not have any data!" % case)
if infer_iteration:
iteration_number = self.inferIterationNumber(case)
else:
iteration_number = index
case_data = GenKwCollector.loadAllGenKwData(self.ert(), case)
custom_kw_data = CustomKWCollector.loadAllCustomKWData(self.ert(), case)
if not custom_kw_data.empty:
case_data = case_data.join(custom_kw_data, how='outer')
if design_matrix_path is not None:
design_matrix_data = DesignMatrixReader.loadDesignMatrix(design_matrix_path)
if not design_matrix_data.empty:
case_data = case_data.join(design_matrix_data, how='outer')
misfit_data = MisfitCollector.loadAllMisfitData(self.ert(), case)
if not misfit_data.empty:
case_data = case_data.join(misfit_data, how='outer')
summary_data = SummaryCollector.loadAllSummaryData(self.ert(), case)
if not summary_data.empty:
case_data = case_data.join(summary_data, how='outer')
else:
case_data["Date"] = None
case_data.set_index(["Date"], append=True, inplace=True)
case_data["Iteration"] = iteration_number
case_data["Case"] = case
case_data.set_index(["Case", "Iteration"], append=True, inplace=True)
data = pandas.concat([data, case_data])
data = data.reorder_levels(["Realization", "Iteration", "Date", "Case"])
data.to_csv(output_file)
export_info = "Exported %d rows and %d columns to %s." % (len(data.index), len(data.columns), output_file)
return export_info
def getArguments(self, parent=None):
description = "The CSV export requires some information before it starts:"
dialog = CustomDialog("CSV Export", description, parent)
default_csv_output_path = self.getDataKWValue("CSV_OUTPUT_PATH", default="output.csv")
output_path_model = PathModel(default_csv_output_path)
output_path_chooser = PathChooser(output_path_model)
design_matrix_default = self.getDataKWValue("DESIGN_MATRIX_PATH", default="")
design_matrix_path_model = PathModel(design_matrix_default, is_required=False, must_exist=True)
design_matrix_path_chooser = PathChooser(design_matrix_path_model)
list_edit = ListEditBox(self.getAllCaseList())
infer_iteration_check = QCheckBox()
infer_iteration_check.setChecked(True)
infer_iteration_check.setToolTip(CSVExportJob.INFER_HELP)
dialog.addLabeledOption("Output file path", output_path_chooser)
dialog.addLabeledOption("Design Matrix path", design_matrix_path_chooser)
dialog.addLabeledOption("List of cases to export", list_edit)
dialog.addLabeledOption("Infer iteration number", infer_iteration_check)
dialog.addButtons()
success = dialog.showAndTell()
if success:
design_matrix_path = design_matrix_path_model.getPath()
if design_matrix_path.strip() == "":
design_matrix_path = None
case_list = ",".join(list_edit.getItems())
return [output_path_model.getPath(), case_list, design_matrix_path, infer_iteration_check.isChecked()]
raise CancelPluginException("User cancelled!")
def getDataKWValue(self, name, default):
data_kw = self.ert().getDataKW()
if name in data_kw:
return data_kw[data_kw.indexForKey(name)][1]
return default
def getAllCaseList(self):
fs_manager = self.ert().getEnkfFsManager()
all_case_list = fs_manager.getCaseList()
all_case_list = [case for case in all_case_list if fs_manager.caseHasData(case)]
return all_case_list | arielalmendral/ert | share/workflows/jobs/internal-gui/scripts/csv_export.py | Python | gpl-3.0 | 7,663 |
from note.infrastructure.error import FileContentError
from note.module.element import QAState, QA, Command
from note.module.filehandler import FileContentHandler
from note.module.markdown.qa import MarkdownQA
from note.module.markdown.title_pat import *
from note.utils.os import fs
class MarkdownFileContentHandler(FileContentHandler):
def __init__(self, path, relpath):
self._path = path
self._relpath = relpath
with open(path, "r", encoding="utf-8") as fo:
try:
self._raw_file_content = fo.read()
except:
raise FileContentError.wrong_encoding(self._relpath)
self._toc = self._get_toc_part(self._raw_file_content)
@staticmethod
def _get_toc_part(file_content):
"""拿到第一个标题之前的所有内容
对于没有标题的文件,所有的内容均视为头
"""
# 匹配第一个标题之前的东西
pat = re.compile(r".*?^[ \t\r\f\v]*(?=#)", re.DOTALL | re.MULTILINE)
match_obj = pat.match(file_content)
return match_obj.group(0) if match_obj is not None else file_content
def get_qas(self):
"""拿到从self.path指向的文件解析出的QA"""
text_qas = self._get_text_qas()
return (self._create_qa(header, body) for header, body in text_qas)
def _get_text_qas(self):
"""将markdown文件以#为标志拆为head和body,以供进一步解析"""
content = self._escape()
items = self._split(content)
for item in items:
tmp = item.groupdict()
header = tmp["header"]
body = tmp["body"]
body = self._unescape(body)
yield header, body
def _escape(self):
"""
        QAs are identified by markdown headings, but while matching, a string that
        looks like a heading may also appear inside a code block, e.g.:
        ```python
        import this
        # this is not a heading, it is a comment
        ```
        To keep the # inside code blocks from being read as heading syntax, this
        method escapes every such #, so the block above becomes:
        ```python
        import this
        \# this is not a heading, it is a comment
        ```
        Returns:
            The escaped text
        """
        # match code blocks
pat = re.compile(
r"""
[ \t\r\f\v]*```
.*?
[ \t\r\f\v]*```
$
""",
re.VERBOSE | re.DOTALL | re.MULTILINE)
content = pat.sub(self._sub_inner__headed_sharp, self._raw_file_content)
return content
@staticmethod
def _sub_inner__headed_sharp(match_obj):
"""
        Turn the # of lines that start with # into \#. Because the text was captured
        inside ``` ```, the whole string always ends with \n plus non-\n whitespace*,
        and it usually starts with something like python\n (there is at least one \n).
"""
pat = re.compile(
r"""
(?<=\n)
([ \t\r\f\v]*)
(\#)(.*?\n)
""",
re.VERBOSE)
return pat.sub(r"\1\#\3", match_obj.group(0))
@staticmethod
def _split(content):
"""分割文本为QA片段"""
pat = re.compile(
r"""
^
            # whitespace other than \n; captured because it is needed later when
            # the title is restored
            # there must be a whitespace after the heading
            (?P<header>[ \t\r\f\v]*
            \#{1,6}  # the # of the heading has to be escaped here
            .*?)(?:\n|\Z)  # anything, but not \n or EOF; (.*?)$ would not work
            (?P<body>.*?)  # this is the body:
            # anything, but only up to the next heading; note it must not match \#
(?:(?=\n^[ \t\r\f\v]*\#{1,6})|\Z)
""",
re.VERBOSE | re.MULTILINE | re.DOTALL)
        # items contains everything in the text, grouped by heading
items = pat.finditer(content)
return items
@staticmethod
def _unescape(content: str) -> str:
pat = re.compile(r"""
^
([ \t\r\f\v]*)
(\\\#)
(.*?\n)
""", re.VERBOSE | re.MULTILINE)
return pat.sub(r"\1#\3", content)
def _create_qa(self, header, body):
qa = MarkdownQA()
self._set_attr_from_body(qa, body)
self._set_attr_from_header(qa, header)
return qa
def _set_attr_from_body(self, qa, body):
result = body.split('\n---\n', 1)
if len(result) == 1:
qa.answer = result[0]
qa.body = None
elif len(result) == 2:
qa.answer = result[0]
qa.body = result[1]
else:
raise RuntimeError
@staticmethod
def _get_answer(body):
return body.split('---', 1)[0].strip()
def _set_attr_from_header(self, qa, header):
"""设置question,state,id,command"""
pat__type = [
(NEW, QAState.NORMAL),
(OLD, QAState.OLD),
(NEED_REVIEWED, QAState.NEED_REVIEWED),
(PAUSED, QAState.PAUSED),
]
for pat, state in pat__type:
mo = pat.match(header)
if mo:
group_dict = mo.groupdict()
qa.question = group_dict['question']
qa.state = state
if pat in (OLD, NEED_REVIEWED, PAUSED):
qa.id = int(group_dict['id'])
if pat == NEW:
interval = group_dict['cmd_string']
                    # the command string may be empty, which means no initial interval
if interval == '':
qa.command = Command.ADD
return
try:
interval_ = int(interval)
                        # TODO: the maximum value should be set in the config file.
if interval_ > 15:
raise ValueError
except ValueError:
raise FileContentError.wrong_command(
location=self._relpath,
question=qa.question,
grade=interval)
qa.command = Command.ADD
qa.arg = interval_
if pat == NEED_REVIEWED:
grade = group_dict['cmd_string']
if grade:
try:
grade = self._map_review_command(grade)
except FileContentError as err:
err.question = qa.question
raise err
qa.command = grade
if pat == PAUSED:
cmd_string = group_dict['cmd_string']
                    # must check for an empty string first, because '' in '123' is True
if cmd_string:
if cmd_string in 'CcCc':
qa.command = Command.CONTINUE
else:
raise FileContentError.wrong_command(
location=self._relpath,
question=qa.question, grade=cmd_string)
return
else:
qa.question = header
qa.state = QAState.NORMAL
def _map_review_command(self, command):
if command in 'XxXx':
command = Command.FORGET
elif command in 'VvVv':
command = Command.REMEMBER
elif command in 'PpPp':
command = Command.PAUSE
else:
raise FileContentError.wrong_command(
location=self._relpath, grade=command)
return command
def save_qas(self, qas, path=None):
new_content = self._convert_to_text(qas)
if path is None:
with open(self._path, "w", encoding="utf-8") as fo:
fo.write(new_content)
else:
fs.make_dir_of_file(path)
with open(path, "w", encoding="utf-8") as fo:
fo.write(new_content)
def append_qas(self, qas, relpath):
new_content = self._convert_to_text(qas, level_reduce=1)
with open(self._path, "a", encoding="utf-8") as fo:
            fo.write('\n# /{}\n\n'.format(relpath[:-3] if relpath.endswith('.md') else relpath))
fo.write(new_content)
def _convert_to_text(self, qas, level_reduce=0):
if self._toc:
new_content = [self._toc.strip()]
else:
new_content = []
for qa in qas:
header, body = self._to_text(qa, level_reduce)
new_content.append(header)
new_content.append(body)
new_content = "\n".join(new_content)
return new_content
def _to_text(self, qa: QA, level_reduce):
assert qa.state is not None
if qa.body:
body = qa.answer + '\n---\n' + qa.body
else:
body = qa.answer
return self._get_q(qa, level_reduce), body
@staticmethod
def _get_q(qa: QA, level_reduce):
if level_reduce:
qa.question = '#' + qa.question.lstrip()
if qa.state == QAState.NORMAL:
if qa.command is None:
return qa.question
if qa.arg is None:
return NEW_TITLE(question=qa.question, cmd_string='')
return NEW_TITLE(question=qa.question, cmd_string=qa.arg)
if qa.state == QAState.OLD:
return OLD_TITLE(question=qa.question, id=qa.id, )
if qa.state == QAState.NEED_REVIEWED:
if qa.command:
command = {
Command.REMEMBER: 'V',
Command.FORGET: 'X',
Command.PAUSE: 'P',
}[qa.command]
return NEED_REVIEWED_TITLE(
question=qa.question, id=qa.id, cmd_string=command
)
return NEED_REVIEWED_TITLE(
question=qa.question, id=qa.id, cmd_string=''
)
if qa.state == QAState.PAUSED:
if qa.command:
return PAUSED_TITLE(
question=qa.question, id=qa.id, cmd_string='C'
)
return PAUSED_TITLE(
question=qa.question, id=qa.id, cmd_string=''
)
def __eq__(self, other):
return self.__dict__ == other.__dict__
| urnote/urnote | note/module/markdown/filehandler.py | Python | gpl-3.0 | 10,422 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"CampaignExperimentTrafficSplitTypeEnum",},
)
class CampaignExperimentTrafficSplitTypeEnum(proto.Message):
r"""Container for enum describing campaign experiment traffic
split type.
"""
class CampaignExperimentTrafficSplitType(proto.Enum):
r"""Enum of strategies for splitting traffic between base and
experiment campaigns in campaign experiment.
"""
UNSPECIFIED = 0
UNKNOWN = 1
RANDOM_QUERY = 2
COOKIE = 3
__all__ = tuple(sorted(__protobuf__.manifest))
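# A minimal sketch (an assumption based on the definitions above, not on the
# google-ads documentation) of how client code typically references this enum:
#   split_type = CampaignExperimentTrafficSplitTypeEnum.CampaignExperimentTrafficSplitType.COOKIE
#   int(split_type) == 3  # proto-plus enums behave like IntEnum values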
| googleads/google-ads-python | google/ads/googleads/v8/enums/types/campaign_experiment_traffic_split_type.py | Python | apache-2.0 | 1,292 |
# -*- coding: utf-8 -*-
# RelayBot - Simple VNC Relay Service, modules/minecraft/udpprotocol.py
#
# Copyright (C) 2021 Matthew Beeching
#
# This file is part of RelayBot.
#
# RelayBot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RelayBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RelayBot. If not, see <http://www.gnu.org/licenses/>.
import core.logging as _logging
import core.modules as _modules
import asyncio, re
log = _logging.log.getChild(__name__)
clients = {}
players = {}
class MCUDPProtocol(asyncio.Protocol):
def __init__(self, loop, config, module):
global clients, players
self.loop = loop
self.config = config
self.module = module
self.transport = None
self.log = log.getChildObj(self.config['name'])
players[self.config['name']] = {}
self.isshutdown = False
self.logre = re.compile('^\[(?P<time>[^\]]+)\] \[(?P<thread>[^\]]+?)(?: #[0-9]+)?/(?P<level>[A-Z]+)\]: (?P<message>[^\\r\\n]+)$')
self.msgcb = {
'PLAYER_IP': self.e_player_ip,
'PLAYER_CONNECT': self.e_player_connect,
'PLAYER_DISCONNECT': self.e_player_disconnect,
'PLAYER_UUID': self.e_player_uuid
}
self.msgre = {
'Server thread': {
'PLAYER_IP': [re.compile('^(?P<name>.+?)\\[/(?P<ip>.+?):(?P<port>[0-9]+?)\\] logged in with entity id.*?$')],
'PLAYER_CONNECT': [re.compile('^(?P<name>.+?) (?P<message>(?:\\(formerly known as .+?\\) )?joined the game)$')],
'PLAYER_DISCONNECT': [re.compile('^(?P<name>.+?) (?P<message>(?:\\(formerly known as .+?\\) )?left the game)$')],
'WHITELIST_FAIL': [re.compile('^com.mojang.authlib.GameProfile.+?id=(?P<uuid>[-a-f0-9]+),.*?name=(?P<name>.+?),.*? \\(/(?P<ip>.+?):(?P<port>[0-9]+?)\\) lost connection: You are not white-listed on this server!.*?$')],
'MESSAGE': [
re.compile('^(?P<raw><(?P<name>.+?)> (?P<message>.*?))$'),
re.compile('^(?P<raw>\\[(?P<name>[^ ]+?)\\] (?P<message>.*?))$')
],
'ACTION': [re.compile('^(?P<raw>\\* (?P<name>.+?) (?P<message>.*?))$')],
'ADVANCEMENT': [
re.compile('^(?P<name>.+?) (?P<message>has (?:lost|just earned) the achievement \\[(?P<advancement>.*?)\\])$'),
re.compile('^(?P<name>.+?) (?P<message>has made the advancement \\[(?P<advancement>.*?)\\])$'),
re.compile('^(?P<name>.+?) (?P<message>has completed the challenge \\[(?P<advancement>.*?)\\])$'),
re.compile('^(?P<name>.+?) (?P<message>has reached the goal \\[(?P<advancement>.*?)\\])$')
],
'DEATH': [
re.compile('^(?P<message>(?P<name>[^ ]*?) was slain by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by even more magic)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) tried to swim in lava)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell off some vines)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell too far and was finished by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was impaled by .*? with .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell off some twisting vines)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was squashed by a falling block whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was burnt to a crisp whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by .*? using magic)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) experienced kinetic energy)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed trying to hurt .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) blew up)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was struck by lightning)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was squashed by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was roasted in dragon breath by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was impaled on a stalagmite)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell while climbing)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) went off with a bang due to a firework fired from .*? by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was fireballed by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was squished too much)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was shot by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) went up in flames)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was skewered by a falling stalactite whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell off some weeping vines)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was blown up by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell too far and was finished by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) experienced kinetic energy whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was shot by a skull from .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) went off with a bang whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was squashed by a falling anvil)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was shot by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) drowned)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was doomed to fall by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) starved to death)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was impaled by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell from a high place)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was squashed by a falling anvil whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) withered away)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) tried to swim in lava to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) died because of .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) froze to death)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was roasted in dragon breath)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) drowned whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell off a ladder)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was pummeled by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was struck by lightning whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) hit the ground too hard)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by magic)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) died from dehydration)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was pummeled by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was impaled on a stalagmite whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell off scaffolding)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was skewered by a falling stalactite)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was stung to death)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) suffocated in a wall whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was slain by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was pricked to death)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) fell out of the world)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was frozen to death by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was squashed by a falling block)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) suffocated in a wall)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) walked into fire whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) burned to death)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) starved to death whilst fighting .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was poked to death by a sweet berry bush)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) walked into danger zone due to .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by .*? trying to hurt .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was stung to death by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) didn\'t want to live in the same world as .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was doomed to fall by .*? using .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) went off with a bang)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was poked to death by a sweet berry bush whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) discovered the floor was lava)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) died)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) died from dehydration whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) hit the ground too hard whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was doomed to fall)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was killed by magic whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was fireballed by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) was blown up by .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) walked into a cactus whilst trying to escape .*?)$'),
re.compile('^(?P<message>(?P<name>[^ ]*?) withered away whilst fighting .*?)$'),
]
},
'User Authenticator': {
'PLAYER_UUID': [re.compile('^UUID of player (?P<name>.+) is (?P<uuid>[-a-f0-9]+)$')]
}
}
clients[self.config['name']] = self
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
global clients
if not exc is None:
self.log.info('Lost UDP connection: ' + str(exc))
else:
self.log.info('Lost UDP connection')
if self.config['name'] in clients:
del clients[self.config['name']]
if self.isshutdown:
return
self.log.info('Retrying in 30 seconds')
self.loop.call_later(30, createclient, self.loop, self.config, self.module)
def error_received(self, ex):
self.log.debug('Error received: ' + str(ex))
def datagram_received(self, data, addr):
lines = data.decode('utf-8').replace('\r', '\n').split('\n')
for line in lines:
if len(line) <= 0:
continue
self.log.protocol('Received UDP message from ' + str(addr) + ': ' + line)
match = self.logre.match(line)
if match:
self.log.protocol('Parsed UDP message: ' + str(match.groupdict()))
self._handle_msg(match.groupdict())
else:
self.log.warning('Unable to parse UDP message')
def shutdown(self, loop):
self.isshutdown = True
		self.log.info('Shutting down UDP listener on [' + self.config['udp']['host'] + ']:' + self.config['udp']['port'])
self.transport.close()
def handle_event(self, loop, module, sender, protocol, event, data):
global players
if sender == self.config['name']:
if event == 'PLAYER_CONNECT':
if not data['uuid'] in players[self.config['name']]:
players[self.config['name']][data['uuid']] = {'name': data['name'], 'ip': data['ip'], 'port': data['port'], 'online': True}
elif not players[self.config['name']][data['uuid']]['online']:
players[self.config['name']][data['uuid']]['name'] = data['name']
players[self.config['name']][data['uuid']]['ip'] = data['ip']
players[self.config['name']][data['uuid']]['port'] = data['port']
players[self.config['name']][data['uuid']]['online'] = True
evt = {'name': players[self.config['name']][data['uuid']]['name'], 'uuid': data['uuid'], 'ip': players[self.config['name']][data['uuid']]['ip'], 'port': players[self.config['name']][data['uuid']]['port'], 'message': 'joined the game'}
self.e_player_connect(evt)
elif event == 'PLAYERS_OFFLINE':
for uuid in players[self.config['name']]:
if players[self.config['name']][uuid]['online']:
evt = {'name': players[self.config['name']][uuid]['name'], 'uuid': uuid, 'ip': players[self.config['name']][uuid]['ip'], 'port': players[self.config['name']][uuid]['port'], 'message': 'left the game'}
_modules.send_event(self.loop, self.module, self.config['name'], 'udp', 'PLAYER_DISCONNECT', evt)
self.e_player_disconnect(evt)
def e_player_ip(self, evt):
global players
uuid = playeruuidfromname(self.config['name'], evt['name'])
if uuid:
players[self.config['name']][uuid]['ip'] = evt['ip']
players[self.config['name']][uuid]['port'] = evt['port']
log.debug('Updated player "' + uuid + '": ' + str(players[self.config['name']][uuid]))
def e_player_uuid(self, evt):
global players
if not evt['uuid'] in players[self.config['name']]:
players[self.config['name']][evt['uuid']] = {'name': '', 'ip': '0.0.0.0', 'port': '', 'online': False}
players[self.config['name']][evt['uuid']]['name'] = evt['name']
log.debug('Cached player "' + evt['uuid'] + '": ' + str(players[self.config['name']][evt['uuid']]))
def e_player_connect(self, evt):
global players
uuid = playeruuidfromname(self.config['name'], evt['name'])
if uuid:
players[self.config['name']][uuid]['online'] = True
def e_player_disconnect(self, evt):
global players
uuid = playeruuidfromname(self.config['name'], evt['name'])
if uuid:
players[self.config['name']][uuid]['online'] = False
def _handle_msg(self, msg):
global players
for thread in self.msgre:
if msg['thread'] == thread:
for event in self.msgre[thread]:
for rec in self.msgre[thread][event]:
match = rec.match(msg['message'])
if match:
evt = match.groupdict()
if event == 'PLAYER_CONNECT' or event == 'PLAYER_DISCONNECT':
uuid = playeruuidfromname(self.config['name'], evt['name'])
if uuid:
evt['ip'] = players[self.config['name']][uuid]['ip']
evt['port'] = players[self.config['name']][uuid]['port']
evt['uuid'] = uuid
self.log.debug('Event "' + event + '": ' + str(evt))
if event in self.msgcb:
if self.msgcb[event]:
self.log.debug('Calling callback for event "' + event + '"')
self.msgcb[event](evt)
_modules.send_event(self.loop, self.module, self.config['name'], 'udp', event, evt)
break
else:
continue
return
async def connectclient(loop, conf, module):
try:
serv = '[' + conf['udp']['host'] + ']:' + conf['udp']['port']
log.info('Creating UDP listener ' + conf['name'] + ' listening on ' + serv)
await loop.create_datagram_endpoint(lambda: MCUDPProtocol(loop, conf, module), (conf['udp']['host'], conf['udp']['port']), reuse_address=True, reuse_port=True)
except Exception as e:
log.warning('Exception occurred attempting to create UDP listener ' + conf['name'] + ': ' + str(e))
log.info('Retrying in 30 seconds')
loop.call_later(30, createclient, loop, conf, module)
return
def createclient(loop, conf, module):
loop.create_task(connectclient(loop, conf, module))
def playeruuidfromname(conf, name):
global players
for uuid in players[conf]:
if players[conf][uuid]['name'] == name:
return uuid
return None
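# Example (hypothetical server log line) of the input datagram_received() expects and
# how self.logre splits it before the per-thread event regexes are applied:
#   "[12:34:56] [Server thread/INFO]: Notch joined the game"
#     -> time='12:34:56', thread='Server thread', level='INFO',
#        message='Notch joined the game'  (then matched by the PLAYER_CONNECT pattern)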
| jobe1986/relaybot | modules/minecraft/udpprotocol.py | Python | gpl-3.0 | 15,528 |
"""
This file contains a management command for exporting the modulestore to
neo4j, a graph database.
"""
import logging
from celery import task
from django.conf import settings
from django.utils import six, timezone
from edx_django_utils.cache import RequestCache
from opaque_keys.edx.keys import CourseKey
from py2neo import Graph, Node, Relationship, authenticate, NodeSelector
from py2neo.compat import integer, string
log = logging.getLogger(__name__)
celery_log = logging.getLogger('edx.celery.task')
# When testing locally, neo4j's bolt logger was noisy, so we'll only have it
# emit logs if there's an error.
bolt_log = logging.getLogger('neo4j.bolt') # pylint: disable=invalid-name
bolt_log.setLevel(logging.ERROR)
PRIMITIVE_NEO4J_TYPES = (integer, string, six.text_type, float, bool)
def serialize_item(item):
"""
Args:
item: an XBlock
Returns:
fields: a dictionary of an XBlock's field names and values
block_type: the name of the XBlock's type (i.e. 'course'
or 'problem')
"""
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
# convert all fields to a dict and filter out parent and children field
fields = dict(
(field, field_value.read_from(item))
for (field, field_value) in six.iteritems(item.fields)
if field not in ['parent', 'children']
)
course_key = item.scope_ids.usage_id.course_key
block_type = item.scope_ids.block_type
# set or reset some defaults
fields['edited_on'] = six.text_type(getattr(item, 'edited_on', ''))
fields['display_name'] = item.display_name_with_default
fields['org'] = course_key.org
fields['course'] = course_key.course
fields['run'] = course_key.run
fields['course_key'] = six.text_type(course_key)
fields['location'] = six.text_type(item.location)
fields['block_type'] = block_type
fields['detached'] = block_type in DETACHED_XBLOCK_TYPES
if block_type == 'course':
# prune the checklists field
if 'checklists' in fields:
del fields['checklists']
# record the time this command was run
fields['time_last_dumped_to_neo4j'] = six.text_type(timezone.now())
return fields, block_type
def coerce_types(value):
"""
Args:
value: the value of an xblock's field
Returns: either the value, a text version of the value, or, if the
value is a list, a list where each element is converted to text.
"""
coerced_value = value
if isinstance(value, list):
coerced_value = [six.text_type(element) for element in coerced_value]
# if it's not one of the types that neo4j accepts,
# just convert it to text
elif not isinstance(value, PRIMITIVE_NEO4J_TYPES):
coerced_value = six.text_type(value)
return coerced_value
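# A quick illustration (hypothetical values) of coerce_types() above:
#   coerce_types(5)             -> 5                   (primitive neo4j type, unchanged)
#   coerce_types([1, None])     -> [u'1', u'None']     (list elements converted to text)
#   coerce_types({'weight': 1}) -> u"{'weight': 1}"    (anything else converted to text)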
def add_to_transaction(neo4j_entities, transaction):
"""
Args:
neo4j_entities: a list of Nodes or Relationships
transaction: a neo4j transaction
"""
for entity in neo4j_entities:
transaction.create(entity)
def get_command_last_run(course_key, graph):
"""
This information is stored on the course node of a course in neo4j
Args:
course_key: a CourseKey
graph: a py2neo Graph
Returns: The datetime that the command was last run, converted into
text, or None, if there's no record of this command last being run.
"""
selector = NodeSelector(graph)
course_node = selector.select(
"course",
course_key=six.text_type(course_key)
).first()
last_this_command_was_run = None
if course_node:
last_this_command_was_run = course_node['time_last_dumped_to_neo4j']
return last_this_command_was_run
def get_course_last_published(course_key):
"""
We use the CourseStructure table to get when this course was last
published.
Args:
course_key: a CourseKey
Returns: The datetime the course was last published at, converted into
text, or None, if there's no record of the last time this course
was published.
"""
# Import is placed here to avoid model import at project startup.
from xmodule.modulestore.django import modulestore
from openedx.core.djangoapps.content.block_structure.models import BlockStructureModel
from openedx.core.djangoapps.content.block_structure.exceptions import BlockStructureNotFound
store = modulestore()
course_usage_key = store.make_course_usage_key(course_key)
try:
structure = BlockStructureModel.get(course_usage_key)
course_last_published_date = six.text_type(structure.modified)
except BlockStructureNotFound:
course_last_published_date = None
return course_last_published_date
def strip_branch_and_version(location):
"""
Removes the branch and version information from a location.
Args:
location: an xblock's location.
Returns: that xblock's location without branch and version information.
"""
return location.for_branch(None)
def serialize_course(course_id):
"""
Serializes a course into py2neo Nodes and Relationships
Args:
course_id: CourseKey of the course we want to serialize
Returns:
nodes: a list of py2neo Node objects
relationships: a list of py2neo Relationships objects
"""
# Import is placed here to avoid model import at project startup.
from xmodule.modulestore.django import modulestore
# create a location to node mapping we'll need later for
# writing relationships
location_to_node = {}
items = modulestore().get_items(course_id)
# create nodes
for item in items:
fields, block_type = serialize_item(item)
for field_name, value in six.iteritems(fields):
fields[field_name] = coerce_types(value)
node = Node(block_type, 'item', **fields)
location_to_node[strip_branch_and_version(item.location)] = node
# create relationships
relationships = []
for item in items:
previous_child_node = None
for index, child in enumerate(item.get_children()):
parent_node = location_to_node.get(strip_branch_and_version(item.location))
child_node = location_to_node.get(strip_branch_and_version(child.location))
if parent_node is not None and child_node is not None:
child_node["index"] = index
relationship = Relationship(parent_node, "PARENT_OF", child_node)
relationships.append(relationship)
if previous_child_node:
ordering_relationship = Relationship(
previous_child_node,
"PRECEDES",
child_node,
)
relationships.append(ordering_relationship)
previous_child_node = child_node
nodes = list(location_to_node.values())
return nodes, relationships
def should_dump_course(course_key, graph):
"""
Only dump the course if it's been changed since the last time it's been
dumped.
Args:
course_key: a CourseKey object.
graph: a py2neo Graph object.
Returns: bool of whether this course should be dumped to neo4j.
"""
last_this_command_was_run = get_command_last_run(course_key, graph)
course_last_published_date = get_course_last_published(course_key)
# if we don't have a record of the last time this command was run,
# we should serialize the course and dump it
if last_this_command_was_run is None:
return True
# if we've serialized the course recently and we have no published
# events, we will not dump it, and so we can skip serializing it
# again here
if last_this_command_was_run and course_last_published_date is None:
return False
# otherwise, serialize and dump the course if the command was run
# before the course's last published event
return last_this_command_was_run < course_last_published_date
@task(routing_key=settings.COURSEGRAPH_JOB_QUEUE)
def dump_course_to_neo4j(course_key_string, credentials):
"""
Serializes a course and writes it to neo4j.
Arguments:
course_key: course key for the course to be exported
credentials (dict): the necessary credentials to connect
            to neo4j and create a py2neo `Graph` object
"""
course_key = CourseKey.from_string(course_key_string)
nodes, relationships = serialize_course(course_key)
celery_log.info(
"Now dumping %s to neo4j: %d nodes and %d relationships",
course_key,
len(nodes),
len(relationships),
)
graph = authenticate_and_create_graph(credentials)
transaction = graph.begin()
course_string = six.text_type(course_key)
try:
# first, delete existing course
transaction.run(
"MATCH (n:item) WHERE n.course_key='{}' DETACH DELETE n".format(
course_string
)
)
# now, re-add it
add_to_transaction(nodes, transaction)
add_to_transaction(relationships, transaction)
transaction.commit()
celery_log.info("Completed dumping %s to neo4j", course_key)
except Exception: # pylint: disable=broad-except
celery_log.exception(
"Error trying to dump course %s to neo4j, rolling back",
course_string
)
transaction.rollback()
class ModuleStoreSerializer(object):
"""
Class with functionality to serialize a modulestore into subgraphs,
one graph per course.
"""
def __init__(self, course_keys):
self.course_keys = course_keys
@classmethod
def create(cls, courses=None, skip=None):
"""
Sets the object's course_keys attribute from the `courses` parameter.
If that parameter isn't furnished, loads all course_keys from the
modulestore.
Filters out course_keys in the `skip` parameter, if provided.
Args:
courses: A list of string serializations of course keys.
For example, ["course-v1:org+course+run"].
skip: Also a list of string serializations of course keys.
"""
# Import is placed here to avoid model import at project startup.
from xmodule.modulestore.django import modulestore
if courses:
course_keys = [CourseKey.from_string(course.strip()) for course in courses]
else:
course_keys = [
course.id for course in modulestore().get_course_summaries()
]
if skip is not None:
skip_keys = [CourseKey.from_string(course.strip()) for course in skip]
course_keys = [course_key for course_key in course_keys if course_key not in skip_keys]
return cls(course_keys)
def dump_courses_to_neo4j(self, credentials, override_cache=False):
"""
Method that iterates through a list of courses in a modulestore,
serializes them, then submits tasks to write them to neo4j.
Arguments:
credentials (dict): the necessary credentials to connect
to neo4j and create a py2neo `Graph` object
        override_cache: serialize the courses even if they've been recently
        serialized
        Returns: two lists--one of the courses that were submitted for export
        to neo4j and one of the courses that were skipped.
"""
total_number_of_courses = len(self.course_keys)
submitted_courses = []
skipped_courses = []
graph = authenticate_and_create_graph(credentials)
for index, course_key in enumerate(self.course_keys):
# first, clear the request cache to prevent memory leaks
RequestCache.clear_all_namespaces()
log.info(
"Now submitting %s for export to neo4j: course %d of %d total courses",
course_key,
index + 1,
total_number_of_courses,
)
if not (override_cache or should_dump_course(course_key, graph)):
log.info("skipping submitting %s, since it hasn't changed", course_key)
skipped_courses.append(six.text_type(course_key))
continue
dump_course_to_neo4j.apply_async(
args=[six.text_type(course_key), credentials],
)
submitted_courses.append(six.text_type(course_key))
return submitted_courses, skipped_courses
def authenticate_and_create_graph(credentials):
"""
This function authenticates with neo4j and creates a py2neo graph object
Arguments:
credentials (dict): a dictionary of credentials used to authenticate,
and then create, a py2neo graph object.
Returns: a py2neo `Graph` object.
"""
host = credentials['host']
https_port = credentials['https_port']
http_port = credentials['http_port']
secure = credentials['secure']
neo4j_user = credentials['user']
neo4j_password = credentials['password']
authenticate(
"{host}:{port}".format(
host=host, port=https_port if secure else http_port
),
neo4j_user,
neo4j_password,
)
graph = Graph(
bolt=True,
password=neo4j_password,
user=neo4j_user,
https_port=https_port,
http_port=http_port,
host=host,
secure=secure,
)
return graph
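# Rough end-to-end usage sketch (commented out so importing this module is
# unchanged). The credential values below are placeholders, not real
# settings, and a reachable neo4j instance is assumed:
#
#     credentials = {
#         'host': 'localhost', 'secure': False,
#         'http_port': 7474, 'https_port': 7473,
#         'user': 'neo4j', 'password': 'secret',
#     }
#     serializer = ModuleStoreSerializer.create(courses=["course-v1:org+course+run"])
#     submitted, skipped = serializer.dump_courses_to_neo4j(credentials)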
| edx-solutions/edx-platform | openedx/core/djangoapps/coursegraph/tasks.py | Python | agpl-3.0 | 13,566 |
#!/usr/bin/env python
from keras import backend as K
def mean_corner_error(y_true, y_pred):
y_true = K.reshape(y_true, (-1, 4, 2))
y_pred = K.reshape(y_pred, (-1, 4, 2))
return K.mean(K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1, keepdims=True)), axis=1)
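# Usage sketch (commented out; assumes a Keras model whose output layer has
# 8 units, i.e. the 4 corner offsets as (x, y) pairs, matching the reshape
# to (-1, 4, 2) above):
#
#     model.compile(optimizer='adam',
#                   loss='mean_squared_error',
#                   metrics=[mean_corner_error])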
| baudm/HomographyNet | homographynet/losses.py | Python | apache-2.0 | 273 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './QScintilla/SearchWidget.ui'
#
# Created: Tue Nov 18 17:53:58 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SearchWidget(object):
def setupUi(self, SearchWidget):
SearchWidget.setObjectName("SearchWidget")
SearchWidget.resize(973, 25)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SearchWidget.sizePolicy().hasHeightForWidth())
SearchWidget.setSizePolicy(sizePolicy)
self.horizontalLayout = QtWidgets.QHBoxLayout(SearchWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.closeButton = QtWidgets.QToolButton(SearchWidget)
self.closeButton.setText("")
self.closeButton.setObjectName("closeButton")
self.horizontalLayout.addWidget(self.closeButton)
self.label = QtWidgets.QLabel(SearchWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.findtextCombo = QtWidgets.QComboBox(SearchWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.findtextCombo.sizePolicy().hasHeightForWidth())
self.findtextCombo.setSizePolicy(sizePolicy)
self.findtextCombo.setMinimumSize(QtCore.QSize(300, 0))
self.findtextCombo.setEditable(True)
self.findtextCombo.setInsertPolicy(QtWidgets.QComboBox.InsertAtTop)
self.findtextCombo.setDuplicatesEnabled(False)
self.findtextCombo.setObjectName("findtextCombo")
self.horizontalLayout.addWidget(self.findtextCombo)
self.findPrevButton = QtWidgets.QToolButton(SearchWidget)
self.findPrevButton.setObjectName("findPrevButton")
self.horizontalLayout.addWidget(self.findPrevButton)
self.findNextButton = QtWidgets.QToolButton(SearchWidget)
self.findNextButton.setObjectName("findNextButton")
self.horizontalLayout.addWidget(self.findNextButton)
self.caseCheckBox = QtWidgets.QCheckBox(SearchWidget)
self.caseCheckBox.setObjectName("caseCheckBox")
self.horizontalLayout.addWidget(self.caseCheckBox)
self.wordCheckBox = QtWidgets.QCheckBox(SearchWidget)
self.wordCheckBox.setObjectName("wordCheckBox")
self.horizontalLayout.addWidget(self.wordCheckBox)
self.regexpCheckBox = QtWidgets.QCheckBox(SearchWidget)
self.regexpCheckBox.setObjectName("regexpCheckBox")
self.horizontalLayout.addWidget(self.regexpCheckBox)
self.wrapCheckBox = QtWidgets.QCheckBox(SearchWidget)
self.wrapCheckBox.setObjectName("wrapCheckBox")
self.horizontalLayout.addWidget(self.wrapCheckBox)
self.selectionCheckBox = QtWidgets.QCheckBox(SearchWidget)
self.selectionCheckBox.setObjectName("selectionCheckBox")
self.horizontalLayout.addWidget(self.selectionCheckBox)
self.retranslateUi(SearchWidget)
QtCore.QMetaObject.connectSlotsByName(SearchWidget)
SearchWidget.setTabOrder(self.findtextCombo, self.caseCheckBox)
SearchWidget.setTabOrder(self.caseCheckBox, self.wordCheckBox)
SearchWidget.setTabOrder(self.wordCheckBox, self.regexpCheckBox)
SearchWidget.setTabOrder(self.regexpCheckBox, self.wrapCheckBox)
SearchWidget.setTabOrder(self.wrapCheckBox, self.selectionCheckBox)
SearchWidget.setTabOrder(self.selectionCheckBox, self.findNextButton)
SearchWidget.setTabOrder(self.findNextButton, self.findPrevButton)
SearchWidget.setTabOrder(self.findPrevButton, self.closeButton)
def retranslateUi(self, SearchWidget):
_translate = QtCore.QCoreApplication.translate
SearchWidget.setWindowTitle(_translate("SearchWidget", "Find"))
self.closeButton.setToolTip(_translate("SearchWidget", "Press to close the window"))
self.label.setText(_translate("SearchWidget", "Find:"))
self.findPrevButton.setToolTip(_translate("SearchWidget", "Press to find the previous occurrence"))
self.findNextButton.setToolTip(_translate("SearchWidget", "Press to find the next occurrence"))
self.caseCheckBox.setText(_translate("SearchWidget", "Match case"))
self.wordCheckBox.setText(_translate("SearchWidget", "Whole word"))
self.regexpCheckBox.setText(_translate("SearchWidget", "Regexp"))
self.wrapCheckBox.setText(_translate("SearchWidget", "Wrap around"))
self.selectionCheckBox.setText(_translate("SearchWidget", "Selection only"))
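# Typical usage of this generated class (sketch, commented out; any QWidget
# instance can serve as the container handed to setupUi):
#
#     from PyQt5 import QtWidgets
#     app = QtWidgets.QApplication([])
#     container = QtWidgets.QWidget()
#     ui = Ui_SearchWidget()
#     ui.setupUi(container)
#     container.show()
#     app.exec_()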
| davy39/eric | QScintilla/Ui_SearchWidget.py | Python | gpl-3.0 | 4,989 |
#!/usr/bin/env python
"""
Unit tests for the EsuRestApi class
"""
import unittest, random, string, hashlib
from xml.etree.ElementTree import fromstring
from EsuRestApi import EsuRestApi, EsuException
class EsuRestApiTest(unittest.TestCase):
# Enter your own host in the form of sub.domain.com or 10.0.1.250
host = "lciga090.lss.emc.com"
# Enter the port where Atmos lives here
port = 80
# Enter your full UID in the form of something/something_else
uid = "0e2200283d4143d9b2895992a64cd319/test"
# Enter your secret here. (shhsh!)
secret = "lYp88RptTEnBOEh/DC0w5ys7olU="
def setUp(self):
self.esu = EsuRestApi(self.host, self.port, self.uid, self.secret)
self.oid_clean_up = []
self.path_clean_up = []
def tearDown(self):
if self.oid_clean_up:
for object in self.oid_clean_up:
self.esu.delete_object(object)
if self.path_clean_up:
dir = self.path_clean_up[0].split("/")
self.esu.delete_directory(dir[0])
def test_create_empty_object(self):
data = " "
oid = self.esu.create_object(data=data)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
self.oid_clean_up.append(oid)
def test_create_empty_object_on_path(self):
data = " "
path = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8)) + "/file.data"
oid = self.esu.create_object_on_path(data=data, path=path)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
self.oid_clean_up.append(oid)
self.path_clean_up.append(path)
def test_create_object_with_content(self):
data = "The quick brown fox jumps over the lazy dog"
oid = self.esu.create_object(data=data)
object = self.esu.read_object(oid)
self.assertEquals(data, object)
self.oid_clean_up.append(oid)
def test_create_object_with_content_and_checksum(self):
data = "The quick brown fox jumps over the lazy dog"
checksum = "SHA1/%d/%s" % (len(data), hashlib.sha1(data).hexdigest())
oid = self.esu.create_object(data=data, checksum=checksum)
self.oid_clean_up.append(oid)
object = self.esu.read_object(oid)
self.assertEquals(data, object)
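    # Worked example of the checksum string built in the test above: the
    # payload is 43 characters long and its SHA-1 digest is the well-known
    # test-vector value, so the header sent to Atmos reads
    # "SHA1/43/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12".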
def test_create_object_on_path_with_content(self):
data = "The quick brown fox jumps over the lazy dog"
path = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8)) + "/file.data"
oid = self.esu.create_object_on_path(data=data, path=path)
self.oid_clean_up.append(oid)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
def test_create_object_on_path_with_content_and_checksum(self):
data = "The quick brown fox jumps over the lazy dog"
path = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8)) + "/file.data"
checksum = "SHA1/%d/%s" % (len(data), hashlib.sha1(data).hexdigest())
oid = self.esu.create_object_on_path(data=data, path=path, checksum=checksum)
self.oid_clean_up.append(oid)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
def test_create_object_on_path_with_metadata(self):
data = "The quick brown fox jumps over the lazy dog"
listable_meta = {"key1" : "value1", "key2" : "value2", "key3" : "value3"}
path = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8)) + "/file.data"
oid = self.esu.create_object_on_path(data=data, path=path, listable_meta=listable_meta)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
# Retrieves existing metadata for an object and compares it to the known metadata dictionary that was stored
metadata = self.esu.get_user_metadata(oid)['listable_user_meta']
self.assertEqual(listable_meta, metadata, "metadata key/values are wrong")
self.oid_clean_up.append(oid)
self.path_clean_up.append(path)
def test_create_object_with_metadata(self):
data = "The quick brown fox jumps over the lazy dog"
listable_meta = {"key1" : "value1", "key2" : "value2", "key3" : "value3"}
oid = self.esu.create_object(data=data, listable_meta=listable_meta)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
# Retrieves existing metadata for an object and compares it to the known metadata dictionary that was stored
metadata = self.esu.get_user_metadata(oid)['listable_user_meta']
self.assertEqual(listable_meta, metadata, "metadata key/values are wrong")
self.oid_clean_up.append(oid)
def test_read_acl(self):
data = "The quick brown fox jumps over the lazy dog"
oid = self.esu.create_object(data=data)
uid = self.esu.uid.split("/")[0]
user_acl = "%s=FULL_CONTROL" % uid
resp = self.esu.set_acl(oid, user_acl)
acl = self.esu.get_acl(oid)['user_acl'][uid]
self.assertEqual(acl, "FULL_CONTROL", "acl does not match")
self.oid_clean_up.append(oid)
def test_delete_user_metadata(self):
data = "The quick brown fox jumps over the lazy dog"
listable_meta = {"key1" : "value1"}
oid = self.esu.create_object(data=data, listable_meta=listable_meta)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
# Retrieves existing metadata for an object and compares it to the known metadata dictionary that was stored
metadata = self.esu.get_user_metadata(oid)['listable_user_meta']
self.assertEqual(listable_meta, metadata, "metadata key/values are wrong")
self.esu.delete_user_metadata(object_id=oid, metadata_key="key1")
metadata = self.esu.get_user_metadata(oid)['listable_user_meta']
self.assertEqual(metadata, {})
self.oid_clean_up.append(oid)
def test_get_system_metadata(self):
data = "The quick brown fox jumps over the lazy dog"
oid = self.esu.create_object(data=data)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
system_meta = self.esu.get_system_metadata(oid)
self.assertTrue(system_meta['size'], "Size should be > 0" )
self.assertTrue(system_meta['ctime'], "the ctime was not set")
self.assertEqual(system_meta['objectid'], oid, "Object IDs do not match")
self.oid_clean_up.append(oid)
def test_list_objects(self):
data = "The quick brown fox jumps over the lazy dog"
key = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8))
listable_meta = {key : "value1"}
oid = self.esu.create_object(data=data, listable_meta=listable_meta)
self.assertTrue(oid, "null object ID returned")
object = self.esu.read_object(oid)
self.assertEqual(object, data, "wrong object content")
list = self.esu.list_objects(metadata_key=key)
self.assertEqual(oid, list[0][0], "wrong object ids")
self.oid_clean_up.append(oid)
def test_list_directory(self):
data = "The quick brown fox jumps over the lazy dog"
path = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8)) + "/file.data"
oid = self.esu.create_object_on_path(data=data, path=path)
dir = path.split("/")[0]
list = self.esu.list_directory(dir)
self.assertEqual(oid, list[0][0][0], "wrong object ids")
self.oid_clean_up.append(oid)
self.path_clean_up.append(path)
def test_delete_object(self):
data = "The quick brown fox jumps over the lazy dog"
oid = self.esu.create_object(data=data)
self.assertTrue(oid, "null object ID returned")
self.esu.delete_object(oid)
try:
object = self.esu.read_object(oid)
except EsuException, e:
self.assertEqual(e.atmos_error_code, "1003", "wrong error code")
if __name__ == "__main__":
test_classes = [ EsuRestApiTest ]
for test_class in test_classes:
temp = str(test_class)
name = temp.split('.')[-1][:-2]
print "Start of test for", name
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
unittest.TextTestRunner(verbosity=2).run(suite)
print "End of test for", name
| ab24v07/atmos-python | EsuRestApiTest.py | Python | bsd-3-clause | 9,555 |
import pytest
from opentrons.calibration_storage import file_operators
from opentrons import config
@pytest.fixture
def grab_id(set_up_index_file_temporary_directory):
labware_to_access = 'opentrons_96_tiprack_10ul'
uri_to_check = f'opentrons/{labware_to_access}/1'
offset_path =\
config.get_opentrons_path('labware_calibration_offsets_dir_v2')
index_path = offset_path / 'index.json'
index_file = file_operators.read_cal_file(str(index_path))
calibration_id = ''
for key, data in index_file['data'].items():
if data['uri'] == uri_to_check:
calibration_id = key
return calibration_id
def test_access_individual_labware(api_client, grab_id):
calibration_id = grab_id
expected = {
'calibrationData': {
'offset': {
'value': [0.0, 0.0, 0.0],
'lastModified': None},
'tipLength': {
'value': None,
'lastModified': None}},
'loadName': 'opentrons_96_tiprack_10ul',
'namespace': 'opentrons',
'version': 1,
'parent': '',
'definitionHash': calibration_id}
resp = api_client.get(f'/labware/calibrations/{calibration_id}')
assert resp.status_code == 200
body = resp.json()
data = body['data']
assert data['type'] == 'LabwareCalibration'
assert data['id'] == calibration_id
data['attributes']['calibrationData']['offset']['lastModified'] = None
data['attributes']['calibrationData']['tipLength']['lastModified'] = None
assert data['attributes'] == expected
resp = api_client.get('/labware/calibrations/funnyId')
assert resp.status_code == 404
body = resp.json()
assert body == {
'errors': [{
'status': '404',
'title': 'Resource Not Found',
'detail': "Resource type 'calibration' with id "
"'funnyId' was not found"
}]}
def test_delete_individual_labware(api_client, grab_id):
calibration_id = grab_id
resp = api_client.delete('/labware/calibrations/funnyId')
assert resp.status_code == 404
body = resp.json()
assert body == {
'errors': [{
'status': '404',
'title': 'Resource Not Found',
'detail': "Resource type 'calibration' with id "
"'funnyId' was not found"
}]}
resp = api_client.delete(f'/labware/calibrations/{calibration_id}')
assert resp.status_code == 200
| Opentrons/labware | robot-server/tests/service/labware/test_labware_calibration_access.py | Python | apache-2.0 | 2,492 |
import socketserver
import json
class dynDnsServerHandler(socketserver.BaseRequestHandler):
def handle(self):
try:
response = {}
updated = False
clientIp = self.client_address[0]
dataReceived = self.request.recv(10).decode('UTF-8')
self.server.logger.info('Request #' + dataReceived + "\n")
if (clientIp != self.server.currentIp):
self.server.currentIp = clientIp
self.server.api.updateRecord(self.server.api.getRecordName(), clientIp)
self.server.logger.info('New IP detected: ' + clientIp)
updated = True
self.request.sendall(bytes(json.dumps({'updated':updated}), 'UTF-8'))
except Exception as e:
self.server.logger.error("Exception while receiving message: ", e) | MilkyWeb/dyndns | server/dynDnsServerHandler.py | Python | mit | 759 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""QSRlib ROS server interface.
:Author: Yiannis Gatsoulis <y.gatsoulis@leeds.ac.uk>
:Organization: University of Leeds
:Date: 22 September 2014
:Version: 0.1
:Status: Development
:Copyright: STRANDS default
"""
from __future__ import print_function, division
import rospy
from qsrlib.qsrlib import QSRlib
from qsr_lib.srv import *
try:
import cPickle as pickle
except:
import pickle
class QSRlib_ROS_Server(object):
def __init__(self, node_name="qsr_lib", active_qsrs=None):
self.qsrlib = QSRlib(active_qsrs)
self.node_name = node_name
self.node = rospy.init_node(self.node_name)
self.service_topic_names = {"request": self.node_name+"/request"}
self.srv_qsrs_request = rospy.Service(self.service_topic_names["request"], RequestQSRs, self.handle_request_qsrs)
rospy.loginfo("QSRlib_ROS_Server up and running, listening to: %s" % self.service_topic_names["request"])
def handle_request_qsrs(self, req):
rospy.logdebug("Handling QSRs request made at %i.%i" % (req.header.stamp.secs, req.header.stamp.nsecs))
request_message = pickle.loads(req.data)
qsrs_response_message = self.qsrlib.request_qsrs(request_message=request_message)
res = RequestQSRsResponse()
res.header.stamp = rospy.get_rostime()
res.data = pickle.dumps(qsrs_response_message)
print()
return res
if __name__ == "__main__":
srv = QSRlib_ROS_Server()
rospy.spin()
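# Minimal client-side sketch (commented out; assumes the server above is
# running and that requests/responses are pickled QSRlib messages, matching
# handle_request_qsrs). How the request message itself is built depends on
# QSRlib and is not shown here:
#
#     import pickle, rospy
#     from qsr_lib.srv import RequestQSRs
#     rospy.wait_for_service("qsr_lib/request")
#     proxy = rospy.ServiceProxy("qsr_lib/request", RequestQSRs)
#     response = proxy(data=pickle.dumps(request_message))
#     qsrs = pickle.loads(response.data)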
| yianni/strands_qsr_lib_rviz | qsr_lib/scripts/qsrlib_ros_server.py | Python | mit | 1,518 |
# Copyright (c) 2016 Tigera, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import re
import shutil
import sys
import textwrap
import urllib2
from os import path
import subprocess
# The root directory
PATH_ROOT = path.dirname(path.dirname(path.realpath(__file__)))
sys.path.append(path.join(PATH_ROOT, "calicoctl"))
from calico_ctl import __version__
# Path names relative to the root of the project
PATH_CALICOCTL_NODE = path.join("calicoctl", "calico_ctl", "node.py")
PATH_CALICOCTL_INIT = path.join("calicoctl", "calico_ctl", "__init__.py")
PATH_CALICONODE_BUILD = path.join("calico_node", "build.sh")
PATH_MAKEFILE = "Makefile"
PATH_MAIN_README = "README.md"
PATH_DOCS = "docs"
PATH_RELEASE_DATA = ".releasedata"
PATH_BUILDING = path.join(PATH_DOCS, "Building.md")
# Regexes for calico-containers version format.
INIT_VERSION = re.compile(r'__version__\s*=\s*"(.*)"')
VERSION_RE = re.compile(r'^v(\d+)\.(\d+)\.(\d+)$')
VERSION_NAME_RE = re.compile(r'^v(\d+)\.(\d+)\.(\d+)[.-](\w+)$')
# Regex for MD file URL matching
MD_URL_RE = re.compile(r'\[([^\[\]]*)\]\(([^()]*)\)')
# Regex for matching the main README.
README_RE = re.compile(r'https://github\.com/projectcalico/calico\-containers/blob/.*/README\.md')
# Files to include in the list of files to automatically update. All file
# paths are relative to the project root.
UPDATE_FILES_STATIC = [PATH_MAIN_README,
PATH_CALICOCTL_NODE,
PATH_CALICONODE_BUILD,
PATH_CALICOCTL_INIT,
PATH_MAKEFILE]
UPDATE_FILES_DIRS = [PATH_DOCS]
UPDATE_FILES_EXCLUDE = [PATH_BUILDING]
UPDATE_FILES_RE = re.compile("(.*\.md)|(Vagrantfile)|(user\-data\-.*)|(.*\.yaml)")
# Indicators separating blocks of master only and release only text.
BLOCK_INDICATOR_MASTER_START = "<!--- master only -->"
BLOCK_INDICATOR_MASTER_ELSE = "<!--- else"
BLOCK_INDICATOR_MASTER_END = "<!--- end of master only -->"
arguments = {}
def run(command):
"""
Run or print a command
:param command: The command to run
:return: None
"""
if arguments['--dry-run']:
print command
else:
subprocess.call(command, shell=True)
def get_update_file_list():
"""
Return a set of files that need to be updated with new version strings.
:return: A set of files that need to be updated with release information.
"""
update_files_list = set(UPDATE_FILES_STATIC)
update_files_exclude = set(UPDATE_FILES_EXCLUDE)
for dirn in UPDATE_FILES_DIRS:
for root, dirs, files in os.walk(path.join(PATH_ROOT, dirn)):
for filen in files:
if UPDATE_FILES_RE.match(filen):
filep = path.join(root, filen)
update_files_list.add(path.relpath(filep, PATH_ROOT))
return update_files_list - update_files_exclude
def replace_file(filename, contents):
"""
Perform a safe update of the file, keeping a backup until the new file has
been written. File mode is transferred to the new file.
:param filename: The name of the file (relative to project root)
:param contents: The contents of the files as a list of lines, each line
should include the newline character.
"""
filename = path.join(PATH_ROOT, filename)
filename_bak = "%s.release.bak" % filename
os.rename(filename, filename_bak)
with open(filename, "w") as out_file:
out_file.write("".join(contents))
shutil.copymode(filename_bak, filename)
os.remove(filename_bak)
def load_file(filename):
"""
Load the contents of a file into a string.
:param filename: The name of the file (relative to the project root)
:return: The contents of the files as a list of lines. Each line includes
the newline character.
"""
with open(path.join(PATH_ROOT, filename), "r") as in_file:
return in_file.readlines()
def get_calicoctl_version():
"""
Determine the current version from the calicoctl __init__.py
:return: The current calicoctl version
"""
return "v" + __version__
def check_version_increment(old_version, new_version):
"""
Check that the new version is a valid increment from the old version.
:param old_version:
:param new_version:
:return: The increment type
"""
old_version_tuple = _get_version_tuple(old_version)
new_version_tuple = _get_version_tuple(new_version)
if new_version_tuple is None:
print_warning("The format of version '%s' is not valid. It should be"
" in the form vX.Y.Z or vX.Y.Z-ABCD" % new_version)
return None
old_major, old_minor, old_patch, old_name = old_version_tuple
new_major, new_minor, new_patch, new_name = new_version_tuple
if (new_major == old_major + 1 and
new_minor == 0 and
new_patch == 0):
return "Major version increment"
if (new_major == old_major and
new_minor == old_minor + 1 and
new_patch == 0):
return "Minor version increment"
if (new_major == old_major and
new_minor == old_minor and
new_patch == old_patch + 1):
return "Patch update"
if (new_major == old_major and
new_minor == old_minor and
new_patch == old_patch and
new_name != old_name):
return "Development update"
print_warning("The version increment is not valid. Expecting a single "
"increment of major, minor or patch.")
return None
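# For illustration, given the rules above:
#   check_version_increment("v1.2.3", "v2.0.0")     -> "Major version increment"
#   check_version_increment("v1.2.3", "v1.3.0")     -> "Minor version increment"
#   check_version_increment("v1.2.3", "v1.2.4")     -> "Patch update"
#   check_version_increment("v1.2.3", "v1.2.3-rc1") -> "Development update"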
def _get_version_tuple(version_string):
"""
Return the version tuple from the string.
:param version_string:
:return:
"""
match = VERSION_RE.match(version_string)
if match:
return (int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
None)
match = VERSION_NAME_RE.match(version_string)
if match:
return (int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
match.group(4))
return None
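# e.g. _get_version_tuple("v1.2.3")     -> (1, 2, 3, None)
#      _get_version_tuple("v1.2.3-rc1") -> (1, 2, 3, "rc1")
#      _get_version_tuple("1.2.3")      -> None (missing the leading "v")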
def update_files(regex_replace_list, values, is_release=True):
"""
Update files based on the supplied regex replace list and format values.
:param regex_replace_list: A list of tuples of (regex, replace format string)
:param values: The values to substitute in the replace format strings.
:param is_release: Whether this is a release branch. If so, remove the
master only text.
"""
# Copy the regex replace list, but update the replace strings to include
# the supplied values.
regex_replace_list = [(reg, repl.format(**values)) for (reg, repl) in regex_replace_list]
filens = get_update_file_list()
for filen in filens:
old_lines = load_file(filen)
new_lines = []
include = True
master_block = False
for line in old_lines:
if is_release:
if line.startswith(BLOCK_INDICATOR_MASTER_START):
assert not master_block, "<!--- start indicator with no end in file %s" % filen
master_block = True
include = False
continue
if line.startswith(BLOCK_INDICATOR_MASTER_ELSE):
assert master_block, "<!--- else indicator with no start in file %s" % filen
include = True
continue
if line.startswith(BLOCK_INDICATOR_MASTER_END):
assert master_block, "<!--- end indicator with no start in file %s" % filen
include = True
master_block = False
continue
if include:
for regex, replace in regex_replace_list:
line = regex.sub(replace, line)
new_lines.append(line)
assert not master_block, "<!--- start indicator with no end in file %s" % filen
replace_file(filen, new_lines)
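# Example shape of the inputs (hypothetical regex and values, for
# illustration only):
#
#     regex_replace_list = [
#         (re.compile(r'calico-containers/v\d+\.\d+\.\d+'),
#          "calico-containers/{version}"),
#     ]
#     update_files(regex_replace_list, {"version": "v1.2.3"}, is_release=True)
#
# Each replace string is first formatted with the supplied values, then the
# regex substitution is applied line by line to every file returned by
# get_update_file_list().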
def load_release_data():
"""
Load the release data. This always prints a warning if the release data
contains any release data.
:return:
"""
filen = path.join(PATH_ROOT, PATH_RELEASE_DATA)
try:
with open(filen, "r") as in_file:
data = pickle.load(in_file)
if data:
print_warning("You are continuing an existing release. If this "
"an error, delete the release data file and try "
"again. "
"Filename = see below")
print filen
return data
except:
return {}
def save_release_data(release_data):
"""
Save the release data.
:param release_data: The release data to pickle.
"""
assert isinstance(release_data, dict)
filen = path.join(PATH_ROOT, PATH_RELEASE_DATA)
filen_bak = "%s.bak" % filen
try:
if path.exists(filen_bak):
print_warning("Backup release data is found indicating an unclean "
"save. If this is expected, delete the file and "
"try again. "
"Filename=%s" % filen_bak)
sys.exit(1)
if path.exists(filen):
os.rename(filen, filen_bak)
with open(filen, "w") as out_file:
pickle.dump(release_data, out_file)
if path.exists(filen_bak):
os.remove(filen_bak)
except Exception, e:
print_warning("Unable to store release data: %s" % e)
sys.exit(1)
def get_github_library_version(name, current, url):
"""
Ask the user for the version of a GitHub library.
:param name: A friendly name.
:param current: The current version
:param url: The GitHub repo.
:return:
"""
while True:
# For the release, make sure the default versions do not include "-dev"
if current.endswith("-dev"):
current = current[:-4]
version = raw_input("Version of %s [currently %s]?: " % (name, current))
if not version:
# Default to current if user just presses enter
version = current
if not url_exists("%s/releases/tag/%s" % (url, version)):
print_warning("The version of %s is not valid. Please check the "
"GitHub releases for exact naming at "
"%s/releases" % (name, url))
continue
return version
def url_exists(url):
"""
Check that a URL exists.
:param url:
:return: True if it exists, False otherwise.
"""
# Check for URLs we can't validate
if url.startswith("https://kiwiirc.com"):
return True
if url.startswith("https://www.projectcalico.org"):
return True
try:
urllib2.urlopen(url)
return True
except urllib2.HTTPError, e:
print_bullet("Hit error reading %s: %s" % (url, e))
return False
except urllib2.URLError, e:
print_bullet("Hit error reading %s: %s" % (url, e))
return False
def print_paragraph(msg):
"""
Print a fixed width (80 chars) paragraph of text.
:param msg: The msg to print.
"""
print
print "\n".join(textwrap.wrap(msg, width=80))
def print_warning(msg):
"""
Print a warning message
:param msg: The msg to print
"""
print
print "*" * 80
print "\n".join(textwrap.wrap(msg, width=80))
print "*" * 80
def print_user_actions():
"""
Print a user actions heading.
"""
print
print "=" * 80
print " User Actions"
print "=" * 80
def print_bullet(msg, level=0):
"""
Print a bulleted message.
:param msg: The msg to print
:param level: The level of bullet
"""
margin = 1 + (3 * (level + 1))
lines = textwrap.wrap(msg, width=80 - margin)
print " " * (margin - 3) + "- " + lines[0]
for line in lines[1:]:
print " " * margin + line
def print_next(msg):
"""
Print the next step message.
:param msg: The message to display
"""
print
print "-" * 80
print "\n".join(textwrap.wrap(msg, width=80))
print "=" * 80
def check_or_exit(msg):
"""
Ask for a yes/no response and exit if the response is no.
:param msg:
:return:
"""
while True:
user_input = raw_input("%s (y/n): " % msg).lower()
if user_input in ['y', 'yes']:
print
return
if user_input in ['n', 'no']:
print
print_warning("Please complete the required steps and then "
"re-run the script.")
sys.exit(1)
def validate_markdown_uris():
"""
Validate that all of the URIs in the markdown files are accessible.
"""
print "Validating URIs in markdown files"
all_valid = True
all_md_files = [f for f in get_update_file_list() if f.endswith(".md")]
for filename in all_md_files:
lines = load_file(filename)
found_analytic_url = False
for line in lines:
for name, uri in MD_URL_RE.findall(line):
if name == "Analytics":
found_analytic_url = True
valid = validate_analytics_url(filename, uri)
else:
valid = validate_uri(filename, uri)
all_valid = all_valid and valid
if not found_analytic_url:
print_bullet("%s: No analytics URL in file" % filename)
if not all_valid:
print_warning("Errors detected in markdown file URIs. Please correct "
"the errors highlighted above and then re-run the"
"script.")
sys.exit(1)
print "Validation complete"
def validate_uri(filename, uri):
"""
Validate a URI exists, either by checking the file exists, or by checking
    the URL is accessible.
:param filename: The filename of the MD file containing the URI.
:param uri: The URI to validate (either a web URL, or a filename)
:return: True if URI is valid and accessible
"""
if uri.startswith("http"):
# Validating a URL. Don't validate the shield URLs.
if uri.startswith("https://img.shields.io"):
return True
if uri.startswith("https://badge.imagelayers.io"):
return True
# There should no calico-containers URL except for:
# - The README URLs which we skip since these are auto-generated
# - Issues (which we can validate)
# - Releases (which we can validate)
# Everything else should be specified with a relative path.
if (uri.startswith("https://github.com/projectcalico/calico-containers") or
uri.startswith("https://www.github.com/projectcalico/calico-containers")):
if README_RE.match(uri):
return True
# If an explicit version has been specified then keep it in, but warn the user.
if (uri.startswith("https://github.com/projectcalico/calico-containers/blob") or
uri.startswith("https://www.github.com/projectcalico/calico-containers/blob")):
print_bullet("%s: WARNING: Should this be a relative URL?: %s" % (filename, uri))
return True
if ((uri.find("/calico-containers/issues") < 0) and
(uri.find("/calico-containers/releases") < 0)):
print_bullet("%s: Do not specify calico-containers file using a URL, "
"specify using a relative path: %s" % (filename, uri))
return False
if not url_exists(uri):
print_bullet("%s: URL is not valid: %s" % (filename, uri))
return False
else:
return True
else:
# Validating a file.
uri_parts = uri.split("#")
relative_filename = uri_parts[0]
path = os.path.normpath(os.path.join(PATH_ROOT,
os.path.dirname(filename),
relative_filename))
if not os.path.exists(path):
print_bullet("%s: Referenced file does not exist: %s" % (filename, uri))
return False
else:
return True
def validate_analytics_url(filename, analytics_url):
"""
    Validate that the analytics URL is correct. The URL has a fixed format
    which includes the MD filename.
    :param filename: The filename of the MD file containing the URI.
    :param analytics_url: The analytics URL to validate.
:return: True if URL is valid and accessible
"""
expected_url = "https://calico-ga-beacon.appspot.com/UA-52125893-3/calico-containers/%s?pixel" % filename
if analytics_url != expected_url:
print_bullet("%s: Anayltics URL is incorrect, should be %s" % (filename, expected_url))
return False
else:
return True
| quater/calico-containers | release-scripts/utils.py | Python | apache-2.0 | 17,408 |
from multiprocessing.connection import Client
import sys
import time
from mergesort import mergesort as main
print("Ginnungagap client version 1.0.0 for UCA Cluster, based in Heimdall")
host = input("Type the master's ip: ")
toprint = not input("Should print? (y or n)> ") == "n"
porta = 666
ok = True # the ok flag is used to avoid printing the same error over and over
while True:
    time.sleep(0.1) # short pause so the client won't flood the connection
while True:
resultado = 0 #reset data
final = 0
resposta = 0
inputs = []
try:
mysock = Client((host,porta)) # connects to the host
ok = True
except:
if ok is True:
print ("Error in connecting with the master")
ok = False
break
try:
mysock.send(bytes("ready","utf-8")) # sends the ready status to the server
resposta = mysock.recv().decode("utf-8") #receives the task or the waiting command
mysock.close()
if resposta == "wait": #if it must wait, the slave will break the loop and get inside it again
break
if toprint:
print ("Got a task!") #if it received a task, it will print it
ok = True
except:
e = sys.exc_info()[0]
if ok is True:
print ("Error in communicating with master")
print ("Error %s" % e)
ok = False
break
try:
inputs = [int(i) for i in resposta.split(",")] #converts the data to input
            resultado = main(inputs) # feeds the data into the mergesort function (imported as main)
ok = True
except:
e = sys.exc_info()[0]
if ok is True:
print ("Error %s" % e)
print("Error in calculating result, maybe data could not been found")
ok = False
break
try:
final = "done|" + ",".join([str(i) for i in resultado]) #formats the resulting data as the protocol demands
mysock = Client((host,porta))
mysock.send(final.encode("utf-8"))
ok = True
except:
if ok is True:
print ("Error in answering the master")
ok = False
break
mysock.close() #closes the connections
    saida = input("Type enter to exit")
| victor-cortez/Heimdall | mergesort/Ginnungagap_slave.py | Python | mit | 2395 |
#####################################################################
# G85_map.py
#
# (c) Copyright 2015, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
import logging
from xml.etree import ElementTree
def int16(value):
return int(value, 16)
class G85Object(object):
def __repr__(self):
return self.__class__.__name__ + ": " + str(self.__dict__)
def propertyFromMap(self, map, propertyName, converter, mandatory=True, default=None):
if propertyName in map.attrib:
setattr(self, propertyName, converter(map.attrib[propertyName]))
else:
if mandatory:
logging.warning("{} is missing mandatory field {}".format(self.__class__.__name__, propertyName))
setattr(self, propertyName, default)
@staticmethod
def elementName(name):
return "{http://www.semi.org}" + name
class G85Row(G85Object):
def __repr__(self):
return self.__class__.__name__ + ": " + str(len(self.cols)) + "cols"
@classmethod
def fromXML(cls, row):
newRow = cls()
newRow.rowData = row.text
return newRow
def unpack(self, binType):
if binType == "ASCII":
self.cols = self.rowData
elif binType == "Decimal":
self.cols = [int(c) for c in self.rowData.split(" ")]
elif binType == "HexaDecimal":
self.cols = [int(c, 16) for c in [self.rowData[i:i+2] for i in range(0, len(self.rowData), 2)]]
elif binType == "Integer2":
self.cols = [int(c, 16) for c in [self.rowData[i:i+4] for i in range(0, len(self.rowData), 4)]]
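    # Worked examples of the four encodings handled above:
    #   ASCII:       "ABAB"     -> "ABAB" (kept as a string)
    #   Decimal:     "1 0 255"  -> [1, 0, 255]
    #   HexaDecimal: "0A0BFF"   -> [10, 11, 255]
    #   Integer2:    "000A00FF" -> [10, 255]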
class G85Data(G85Object):
@classmethod
def fromXML(cls, data):
newData = cls()
newData.propertyFromMap(data, "MapName", str, False, "")
newData.propertyFromMap(data, "MapVersion", str, False, "")
newData.rows = []
for row in data.findall(newData.elementName("Row")):
newData.rows.append(G85Row.fromXML(row))
return newData
class G85ReferenceDevice(G85Object):
@classmethod
def fromXML(cls, referenceDevice):
newReferenceDevice = cls()
newReferenceDevice.propertyFromMap(referenceDevice, "ReferenceDeviceX", int, True)
newReferenceDevice.propertyFromMap(referenceDevice, "ReferenceDeviceY", int, True)
newReferenceDevice.propertyFromMap(referenceDevice, "RefDevicePosX", float, False, 0.0)
newReferenceDevice.propertyFromMap(referenceDevice, "RefDevicePosY", float, False, 0.0)
return newReferenceDevice
class G85Bin(G85Object):
@classmethod
def fromXML(cls, bin):
newBin = cls()
newBin.propertyFromMap(bin, "BinCode", int16, False, 0)
newBin.propertyFromMap(bin, "BinCount", int, False, 0)
newBin.propertyFromMap(bin, "BinQuality", str, False, "")
newBin.propertyFromMap(bin, "BinDescription", str, False, "")
return newBin
class G85Device(G85Object):
@classmethod
def fromXML(cls, device):
newDevice = cls()
newDevice.data = G85Data.fromXML(device.find(newDevice.elementName("Data")))
newDevice.propertyFromMap(device, "BinType", str, True, "ASCII")
newDevice.propertyFromMap(device, "OriginLocation", int, True, 0)
for row in newDevice.data.rows:
row.unpack(newDevice.BinType)
newDevice.propertyFromMap(device, "ProductId", str, False, "")
newDevice.propertyFromMap(device, "LotId", str, False, "")
newDevice.propertyFromMap(device, "Orientation", int)
newDevice.propertyFromMap(device, "WaferSize", int, False, 0)
newDevice.propertyFromMap(device, "DeviceSizeX", float, False, 0.0)
newDevice.propertyFromMap(device, "DeviceSizeY", float, False, 0.0)
newDevice.propertyFromMap(device, "StepSizeX", float, False, 0.0)
newDevice.propertyFromMap(device, "StepSizeY", float, False, 0.0)
newDevice.propertyFromMap(device, "MagazineId", str, False, "")
newDevice.propertyFromMap(device, "Rows", int, False, len(newDevice.data.rows))
newDevice.propertyFromMap(device, "Columns", int, False, len(newDevice.data.rows[0].cols))
newDevice.propertyFromMap(device, "FrameId", str, False, "")
newDevice.propertyFromMap(device, "NullBin", int16)
newDevice.propertyFromMap(device, "SupplierName", str, False, "")
newDevice.propertyFromMap(device, "CreateData", str, False, "")
newDevice.propertyFromMap(device, "LastModified", str, False, "")
newDevice.propertyFromMap(device, "Status", str, False, "")
newDevice.propertyFromMap(device, "SlotNumber", int, False, 0)
newDevice.propertyFromMap(device, "SubstrateNumber", int, False, 0)
newDevice.propertyFromMap(device, "GoodDevices", int, False, 0)
newDevice.referenceDevices = []
for referenceDevice in device.findall(newDevice.elementName("ReferenceDevice")):
newDevice.referenceDevices.append(G85ReferenceDevice.fromXML(referenceDevice))
newDevice.bins = []
for bin in device.findall(newDevice.elementName("Bin")):
newDevice.bins.append(G85Bin.fromXML(bin))
return newDevice
class G85Map(G85Object):
@classmethod
def fromXML(cls, map):
newMap = cls()
newMap.device = G85Device.fromXML(map.find(newMap.elementName("Device")))
newMap.propertyFromMap(map, "SubstrateType", str)
newMap.propertyFromMap(map, "SubstrateId", str)
newMap.propertyFromMap(map, "FormatRevision", str, False, "G85-0703")
return newMap
@classmethod
def load(cls, filename, waferId):
document = ElementTree.parse(filename)
root = document.getroot()
if root.tag == G85Object.elementName("Map"):
if (root.attrib["SubstrateId"] == waferId):
return cls.fromXML(root)
elif root.tag == "Maps":
for map in root.findall('Map'):
if (map.attrib["SubstrateId"] == waferId):
return cls.fromXML(map)
logging.warning("Map {} not found in {}".format(waferId, filename))
return None
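# Rough usage sketch (commented out; "wafer_maps.xml" and "WAFER-01" are
# made-up placeholders):
#
#     wafer_map = G85Map.load("wafer_maps.xml", "WAFER-01")
#     if wafer_map is not None:
#         rows = wafer_map.device.data.rows  # list of G85Row objects
#         first_row_bins = rows[0].cols      # decoded bin codes for row 0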
| bparzella/gemma | app/models/G85_map.py | Python | lgpl-2.1 | 6,751 |
"""Provide an MCA simulator."""
import time
import numpy
import gevent
from .base import BaseMCA, PresetMode, TriggerMode, Stats
class SimulatedMCA(BaseMCA):
_init_time = 1.
_prepare_time = 0.1
_cleanup_time = 0.1
_gate_end = 0.5
_mapping_modulo = 2
# Initialization
def initialize_attributes(self):
self._running = False
self._block_size = None
self._spectrum_size = 1024
self._acquistion_number = 1
self._trigger_mode = TriggerMode.SOFTWARE
self._current_data = None
self._current_stats = None
def initialize_hardware(self):
gevent.sleep(self._init_time)
def finalize(self):
pass
# Information
@property
def detector_brand(self):
return "SIMULATION"
@property
def detector_type(self):
return "SIMULATION"
@property
def elements(self):
return (0, 1, 2, 3)
# Settings
@property
def spectrum_size(self):
return self._spectrum_size
def set_spectrum_size(self, size):
self._spectrum_size = size
@property
def supported_preset_modes(self):
return PresetMode.NONE,
def set_preset_mode(self, mode, value=None):
assert mode is PresetMode.REALTIME
self._realtime = value
@property
def supported_trigger_modes(self):
return TriggerMode.SOFTWARE, TriggerMode.GATE, TriggerMode.SYNC
def set_trigger_mode(self, mode):
if mode is None:
mode = TriggerMode.SOFTWARE
assert mode in self.supported_trigger_modes
self._trigger_mode = mode
@property
def hardware_points(self):
return self._hardware_points
def set_hardware_points(self, value):
self._hardware_points = value
@property
def block_size(self):
return self._block_size or 100
def set_block_size(self, value=None):
self._block_size = value
# Acquisition control
def start_acquisition(self):
if not self._running:
gevent.sleep(self._prepare_time)
self._t0 = time.time()
self._count = -1
self._data_buffer = {}
self._stats_buffer = {}
self._running = True
def stop_acquisition(self):
if self._running:
self._delta = time.time() - self._t0
gevent.sleep(self._cleanup_time)
self._running = False
pixel = self._generate_pixel(self.delta)
self._current_data, self._current_stats = pixel
def is_acquiring(self):
return self._running and self.delta < self._realtime
@property
def delta(self):
d = time.time() - self._t0 if self._running else self._delta
if self._trigger_mode == TriggerMode.GATE:
return min(d, self._gate_end)
if self._trigger_mode == TriggerMode.SOFTWARE:
return min(d, self._realtime)
return d
# Get data
def get_acquisition_data(self):
return self._current_data
def get_acquisition_statistics(self):
return self._current_stats
def poll_data(self):
# Update
self._count += 1
current = self._count // self._mapping_modulo
# Realtime
if self._trigger_mode == TriggerMode.SYNC:
delta = 0.2 * self._mapping_modulo
else:
delta = self._gate_end
# Flags
new_pixel = self._count % self._mapping_modulo != 0
full_buffer = current and current % self.block_size == 0
finished = current == self.hardware_points
# A new pixel has been generated
if current > 0 and new_pixel:
a, b = self._generate_pixel(delta)
self._data_buffer[current-1] = a
self._stats_buffer[current-1] = b
# Available data
if new_pixel and (full_buffer or finished):
a, b = self._data_buffer, self._stats_buffer
self._data_buffer = {}
self._stats_buffer = {}
return current, a, b
# Nothing to return yet
return current, {}, {}
# Data generation
def _generate_pixel(self, delta):
realtime = delta
livetime = realtime * numpy.random.normal(0.9, 0.01)
triggers = int(10000 * numpy.random.normal(livetime, livetime*0.2))
events = triggers // 2
icr = triggers / realtime if realtime else 0.
ocr = events / livetime if livetime else 0.
deadtime = 1 - ocr / icr if icr else 0.
st = Stats(realtime, livetime, triggers, events, icr, ocr, deadtime)
stats = dict((i, st) for i in self.elements)
size = self._spectrum_size
data = dict((i, numpy.zeros(size)) for i in self.elements)
for _ in range(events):
loc = numpy.random.normal(size//2, size//16)
for i in self.elements:
e = int(numpy.random.normal(loc, size//16))
data[i][e] += 1
return data, stats
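# Rough usage sketch (commented out). Construction is left out because the
# BaseMCA constructor lives in .base and is not shown here:
#
#     mca = ...  # a SimulatedMCA instance, however BaseMCA gets constructed
#     mca.set_preset_mode(PresetMode.REALTIME, 1.0)
#     mca.set_trigger_mode(TriggerMode.SOFTWARE)
#     mca.start_acquisition()
#     while mca.is_acquiring():
#         gevent.sleep(0.1)
#     mca.stop_acquisition()
#     spectra = mca.get_acquisition_data()      # {element: numpy array}
#     stats = mca.get_acquisition_statistics()  # {element: Stats}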
| tiagocoutinho/bliss | bliss/controllers/mca/simulation.py | Python | lgpl-3.0 | 4,984 |
# imports the thing that reads files and imports random to make random numbers
from sys import argv
import random
# assigns some stuff to argv
script, filename = argv
# opens the file that the user specifies and assigns it to txt
txt = open(filename,'r')
# makes a list of the all the lines in the file
verbList = txt.readlines()
# closes the file
txt.close()
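# Judging from the indexing below (fields 0-4 are matched against verbTypes
# and field 5 is used in the question), each line of the verb file should
# hold six '&'-separated fields, e.g. (made-up entry):
#   lopen&liep&liepen&gelopen&moet lopen& lopen (to walk)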
# actually tells the user what this is
print "Good day and welcome to the Dutch verb practice service."
print "This service will help you practice your Dutch verbs."
score = 0
for i in range(0, 10):
# picks a random number
y = random.randint(1,6)
# and uses that random number to find a random line of the file
z = verbList[y]
    # splits the verb line at the ampersands into its separate fields
a = z.split('&')
# makes the list of possible types of verb
verbTypes = ['infinitive', 'imperfectum_s', 'imperfectum_p', 'perfectum', 'mhww']
# pick one randomly
b = random.randint(0,4)
verbTypeChosen = verbTypes[b]
# use that to make a string to ask the user
s = 'What is the ' + verbTypeChosen + ' for' + a[5]
print s
next = raw_input("> ")
if next == a[b].strip():
print "correct \n"
print ""
score += 1
elif next != a[b].strip():
print "incorrect \n"
print ""
else:
print "error"
print "You scored %i out of 10."%score
| HeatherJW/Code | dutch_verbs_practice/dutch2.py | Python | unlicense | 1,313 |
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Modifying an existing shelf opened with write-back enabled.
"""
__module_id__ = "$Id$"
#end_pymotw_header
import shelve
s = shelve.open('test_shelf.db', writeback=True)
try:
print s['key1']
s['key1']['new_value'] = 'this was not here before'
print s['key1']
finally:
s.close()
s = shelve.open('test_shelf.db', writeback=True)
try:
print s['key1']
finally:
s.close()
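# Expected behaviour (assuming test_shelf.db already holds a dict under
# 'key1', e.g. one created by an earlier example in this series): the second
# open() still shows 'new_value', because writeback=True keeps modified
# entries in a cache and writes them back to the shelf on close().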
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/shelve/shelve_writeback.py | Python | gpl-3.0 | 1,446 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django import forms
from .models import Language
class LanguageSpecialCharsForm(forms.ModelForm):
class Meta(object):
model = Language
fields = ('specialchars',)
| Finntack/pootle | pootle/apps/pootle_language/forms.py | Python | gpl-3.0 | 467 |
"""
Answer to question 3
"""
import sys
ADENINE = "A"
CYTOSINE = "C"
GUANINE = "G"
THYMINE = "T"
COMPLEMENT_MAP = {ADENINE:THYMINE, THYMINE:ADENINE, CYTOSINE: GUANINE, GUANINE:CYTOSINE}
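# Worked example of the reverse complement computed below: for the input
# "ATGC" the reversed string is "CGTA", and mapping each base through
# COMPLEMENT_MAP gives "GCAT".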
input_file = sys.argv[1]
output_file = sys.argv[2]
input_string = ""
def read_input_data():
"""
Reads input data.
"""
global input_string
file_handle = open(input_file, 'r')
input_string = file_handle.read()
input_string = input_string.strip()
def process_data():
"""
Processes input data
"""
global input_string
input_string = input_string[::-1]
output_string = ""
for nucelotide in input_string:
output_string = output_string + COMPLEMENT_MAP[nucelotide]
with open(output_file, 'w') as outfile:
outfile.write(str(output_string) + "\n")
def main():
read_input_data()
process_data()
if __name__ == "__main__":
    main()
| thewickedaxe/600.647-Computational-Genomics | HW-1/answer3.py | Python | mit | 909 |
from django.middleware.cache import FetchFromCacheMiddleware
class CacheAdminMiddleware(FetchFromCacheMiddleware):
def process_request(self, request):
user = getattr(request, "user", None)
if user and user.is_superuser:
request._cache_update_cache = False
return None
return super(CacheAdminMiddleware, self).process_request(request)
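# Sketch of how this middleware would be enabled in settings.py (the dotted
# path below is assumed from the repository layout and may differ):
#
#     MIDDLEWARE = [
#         'django.middleware.cache.UpdateCacheMiddleware',
#         # ... the usual middleware ...
#         'djlotrek.middleware.cache_admin.CacheAdminMiddleware',
#     ]
#
# It stands in for FetchFromCacheMiddleware (normally last in the list) so
# that superusers always get a freshly rendered, uncached page.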
| lotrekagency/djlotrek | djlotrek/middleware/cache_admin.py | Python | mit | 388 |
# -*- coding: utf-8 -*-
import argparse
from twstock.codes import __update_codes
from twstock.cli import best_four_point
from twstock.cli import stock
from twstock.cli import realtime
def run():
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bfp', nargs='+')
parser.add_argument('-s', '--stock', nargs='+')
parser.add_argument('-r', '--realtime', nargs='+')
    parser.add_argument('-U', '--upgrade-codes', action='store_true', help='Update entities codes')
args = parser.parse_args()
if args.bfp:
best_four_point.run(args.bfp)
elif args.stock:
stock.run(args.stock)
elif args.realtime:
realtime.run(args.realtime)
elif args.upgrade_codes:
print('Start to update codes')
__update_codes()
print('Done!')
else:
parser.print_help()
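# Example invocations (sketch; assumes run() is wired up as a console-script
# entry point named "twstock", and the stock codes are just examples):
#
#     twstock -b 2330          # best-four-point analysis
#     twstock -s 2330 6223     # historical stock info
#     twstock -r 2330          # realtime quote
#     twstock -U               # refresh the bundled stock codes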
| mlouielu/twstock | twstock/cli/__init__.py | Python | mit | 845 |
# -*- coding: utf-8 -*-
#
# Flow Framework documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 08 11:09:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flow Framework'
copyright = u'2006 and onwards by the authors'
author = u'Neos Team and Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev-master'
# The full version, including alpha/beta/rc tags.
release = 'dev-master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FlowFrameworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FlowFramework.tex', u'Flow Framework Documentation',
u'The Neos Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flowframework', u'Flow Framework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FlowFramework', u'Flow Framework Documentation',
author, 'Flow Framework', 'A PHP Framework, built to power Neos CMS',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'neos': ('https://neos.readthedocs.io/en/stable', None),
}
# load PhpLexer
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# enable highlighting for PHP code not between <?php ... ?> by default
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
# Use PHP syntax highlighting in code examples by default
highlight_language = 'php'
| chewbakartik/flow-development-collection | TYPO3.Flow/Documentation/conf.py | Python | mit | 10,318 |
from __future__ import annotations
from datetime import date, datetime, time
from decimal import Decimal
from math import isinf, isnan
from struct import pack as struct_pack
from struct import unpack as struct_unpack
from struct import unpack_from as struct_unpack_from
from typing import TYPE_CHECKING, Any, Callable
if TYPE_CHECKING: # pragma: no cover
from typing import NoReturn
else:
NoReturn = None
DYN_COL_INT = 0
DYN_COL_UINT = 1
DYN_COL_DOUBLE = 2
DYN_COL_STRING = 3
DYN_COL_DECIMAL = 4
DYN_COL_DATETIME = 5
DYN_COL_DATE = 6
DYN_COL_TIME = 7
DYN_COL_DYNCOL = 8
MAX_TOTAL_NAME_LENGTH = 65535
MAX_NAME_LENGTH = MAX_TOTAL_NAME_LENGTH // 4
class DynColLimitError(Exception):
"""
Indicates that some limit has been reached
"""
class DynColTypeError(TypeError):
"""
Indicates that a type is wrong
"""
class DynColValueError(ValueError):
"""
Indicates that a value is wrong
"""
class DynColNotSupported(Exception):
"""
Indicates a limitation in this implementation
"""
def pack(dicty: dict[str, Any]) -> bytes:
"""
Convert a mapping into the MariaDB dynamic columns format
"""
column_count = 0
column_directory = []
directory_offset = 0
name_offset = 0
names = []
data_offset = 0
data = []
total_encname_length = 0
dicty_names_encoded = {key.encode("utf-8"): value for key, value in dicty.items()}
for encname in sorted(dicty_names_encoded.keys(), key=name_order):
value = dicty_names_encoded[encname]
if value is None:
continue
if len(encname) > MAX_NAME_LENGTH:
raise DynColLimitError("Key too long: " + encname.decode("utf-8"))
total_encname_length += len(encname)
if total_encname_length > MAX_TOTAL_NAME_LENGTH:
raise DynColLimitError("Total length of keys too long")
try:
encode_func = ENCODE_FUNCS[type(value)]
except KeyError:
raise DynColTypeError(f"Unencodable type {type(value)}")
dtype, encvalue = encode_func(value)
column_count += 1
column_directory.append(name_offset)
column_directory.append((data_offset << 4) + dtype)
names.append(encname)
name_offset += len(encname)
data.append(encvalue)
data_offset += len(encvalue)
directory_offset += 2
data_size_flag, coldir_size_code, odd_sized_datacode = data_size(data)
flags = 4 | data_size_flag # means this contains named dynamic columns
enc_names = b"".join(names)
buf = [struct_pack("<BHH", flags, column_count, len(enc_names))]
if not odd_sized_datacode:
buf.append(
struct_pack(
"<" + ("H" + coldir_size_code) * (len(column_directory) // 2),
*column_directory,
)
)
else:
for i, val in enumerate(column_directory):
if i % 2 == 0:
# name_offset
buf.append(struct_pack("<H", val))
else:
# data_offset + dtype, have to cut last byte
value = struct_pack("<" + coldir_size_code, val)
buf.append(value[:-1])
buf.append(enc_names)
buf.extend(data)
return b"".join(buf)
def name_order(name: bytes) -> tuple[int, bytes]:
# Keys are ordered by name length then name
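    # e.g. (illustrative): sorted([b"bb", b"a", b"ab"], key=name_order)
    # gives [b"a", b"ab", b"bb"] -- shorter names first, ties broken bytewise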
return len(name), name
def data_size(data: list[bytes]) -> tuple[int, str, bool]:
data_len = sum(len(d) for d in data)
if data_len < 0xFFF:
return 0, "H", False
elif data_len < 0xFFFFF:
return 1, "L", True
elif data_len < 0xFFFFFFF:
return 2, "L", False
else:
raise ValueError("Too much data")
def encode_int(value: int) -> tuple[int, bytes]:
if value < 0:
dtype = DYN_COL_INT
encvalue = -(value << 1) - 1
if value < -(2**32 - 1):
raise DynColValueError(f"int {value} out of range")
else:
if value <= (2**63 - 1):
dtype = DYN_COL_INT
encvalue = value << 1
elif value <= (2**64 - 1):
dtype = DYN_COL_UINT
encvalue = value
else:
raise DynColValueError(f"int {value} out of range")
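    # Illustrative encodings (for reference only, not from the original source):
    # 5 packs to (DYN_COL_INT, b"\x0a"), -5 to (DYN_COL_INT, b"\x09"), and 0 to
    # zero bytes, since the loop below emits nothing when encvalue is 0.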
to_enc = []
while encvalue:
to_enc.append(encvalue & 0xFF)
encvalue = encvalue >> 8
return dtype, struct_pack("B" * len(to_enc), *to_enc)
def encode_float(value: float) -> tuple[int, bytes]:
if isnan(value) or isinf(value):
raise DynColValueError(f"Float value not encodeable: {value}")
encvalue = struct_pack("d", value)
# -0.0 is not supported in SQL, change to 0.0
if encvalue == b"\x00\x00\x00\x00\x00\x00\x00\x80":
encvalue = b"\x00\x00\x00\x00\x00\x00\x00\x00"
return DYN_COL_DOUBLE, encvalue
def encode_string(value: str) -> tuple[int, bytes]:
return DYN_COL_STRING, b"\x2D" + value.encode("utf-8")
def encode_decimal(value: Decimal) -> NoReturn:
raise DynColNotSupported("Can't encode Decimal values currently")
# def encode_decimal(value):
# buf = bytearray()
# intg = int(value)
# intg_digits = 9
# buf.extend(struct_pack('>I', intg))
# frac = value - intg
# if frac:
# frac_digits = 1
# frac_piece = int(str(frac)[2:]) # ugh
# buf.extend(struct_pack('B', frac_piece))
# else:
# frac_digits = 0
# header = struct_pack('>BB', intg_digits, frac_digits)
# buf[0] |= 0x80 # Flip the top bit
# return DYN_COL_DECIMAL, header + bytes(buf)
def encode_datetime(value: datetime) -> tuple[int, bytes]:
_, enc_date = encode_date(value)
_, enc_time = encode_time(value)
return DYN_COL_DATETIME, enc_date + enc_time
def encode_date(value: date) -> tuple[int, bytes]:
# We don't need any validation since datetime.date is more limited than the
# MySQL format
val = value.day | value.month << 5 | value.year << 9
return DYN_COL_DATE, struct_pack("I", val)[:-1]
def encode_time(value: datetime | time) -> tuple[int, bytes]:
if value.microsecond > 0:
val = (
value.microsecond
| value.second << 20
| value.minute << 26
| value.hour << 32
)
return DYN_COL_TIME, struct_pack("Q", val)[:6]
else:
val = value.second | value.minute << 6 | value.hour << 12
return DYN_COL_TIME, struct_pack("I", val)[:3]
def encode_dict(value: dict[str, Any]) -> tuple[int, bytes]:
return DYN_COL_DYNCOL, pack(value)
ENCODE_FUNCS: dict[type[Any], Callable[[Any], tuple[int, bytes]]] = {
int: encode_int,
date: encode_date,
datetime: encode_datetime,
time: encode_time,
float: encode_float,
str: encode_string,
Decimal: encode_decimal,
dict: encode_dict,
}
def unpack(buf: bytes) -> dict[str, Any]:
"""
Convert MariaDB dynamic columns data in a byte string into a dict
"""
flags: int
column_count: int
len_names: int
flags, column_count, len_names = struct_unpack_from("<BHH", buf)
data_offset_code, coldata_size, data_offset_mask = decode_data_size(flags)
if (flags & 0xFC) != 4:
raise DynColValueError("Unknown dynamic columns format")
if column_count == 0:
return {}
column_directory_end = (1 + 2 + 2) + coldata_size * column_count
names_end = column_directory_end + len_names
column_directory = buf[1 + 2 + 2 : column_directory_end]
enc_names = buf[column_directory_end:names_end]
data = buf[names_end:]
names = {}
values = {}
last_name_offset: int | None = None
last_data_offset: int | None = None
last_dtype: int | None = None
name_offset: int
data_offset_dtype: int
for i in range(column_count):
if coldata_size % 2 == 0:
name_offset, data_offset_dtype = struct_unpack_from(
"<H" + data_offset_code, column_directory, offset=i * coldata_size
)
else:
(name_offset,) = struct_unpack_from(
"<H", column_directory, offset=i * coldata_size
)
# can't struct_unpack the 3 bytes so hack around
dodt_bytes = (
column_directory[i * coldata_size + 2 : (i * coldata_size + 5)]
+ b"\x00"
)
(data_offset_dtype,) = struct_unpack("<" + data_offset_code, dodt_bytes)
data_offset_dtype &= data_offset_mask
data_offset = data_offset_dtype >> 4
dtype = data_offset_dtype & 0xF
# Store *last* column's name
if last_name_offset is not None:
names[i - 1] = enc_names[last_name_offset:name_offset].decode("utf-8")
last_name_offset = name_offset
        # Store *last* column's value
if last_data_offset is not None:
assert last_dtype is not None
values[i - 1] = decode(last_dtype, data[last_data_offset:data_offset])
last_data_offset = data_offset
last_dtype = dtype
names[column_count - 1] = enc_names[last_name_offset:].decode("utf-8")
assert last_dtype is not None
values[column_count - 1] = decode(last_dtype, data[last_data_offset:])
# join data and names
return {names[i]: values[i] for i in range(column_count)}
def decode_data_size(flags: int) -> tuple[str, int, int]:
t = flags & 0x03
if t == 0:
return "H", 4, 0xFFFF
elif t == 1:
return "L", 5, 0xFFFFFF
elif t == 2:
return "L", 6, 0xFFFFFFFF
else:
raise ValueError("Unknown dynamic columns format")
def decode(dtype: int, encvalue: bytes) -> Any:
try:
decode_func = DECODE_FUNCS[dtype]
except KeyError:
raise ValueError()
return decode_func(encvalue)
def decode_int(encvalue: bytes) -> int:
value = 0
for i, b in enumerate(bytearray(encvalue)):
value += b << (8 * i)
if value & 1:
return -(value >> 1) - 1
else:
return value >> 1
def decode_uint(encvalue: bytes) -> int:
(value,) = struct_unpack("Q", encvalue)
return value
def decode_double(encvalue: bytes) -> float:
(value,) = struct_unpack("d", encvalue)
return value
def decode_string(encvalue: bytes) -> str:
if not encvalue.startswith((b"\x21", b"\x2D")):
raise DynColNotSupported(
"Can only decode strings with MySQL charsets utf8 or utf8mb4"
)
return encvalue[1:].decode("utf-8")
def decode_decimal(encvalue: bytes) -> NoReturn:
raise DynColNotSupported("Can't decode Decimal values currently")
# def decode_decimal(encvalue):
# num_intg, num_frac = struct_unpack('>BB', encvalue[:2])
# intg, = struct_unpack('>I', encvalue[2:6])
# intg ^= 0x80000000
# if num_frac == 0:
# frac = 0
# else:
# frac, = struct_unpack('>B', encvalue[6:])
# return Decimal(str(intg) + '.' + str(frac))
def decode_datetime(encvalue: bytes) -> datetime:
d = decode_date(encvalue[:3])
t = decode_time(encvalue[3:])
return datetime.combine(d, t)
def decode_date(encvalue: bytes) -> date:
(val,) = struct_unpack("I", encvalue + b"\x00")
return date(day=val & 0x1F, month=(val >> 5) & 0xF, year=(val >> 9))
def decode_time(encvalue: bytes) -> time:
if len(encvalue) == 6:
(val,) = struct_unpack("Q", encvalue + b"\x00\x00")
return time(
microsecond=val & 0xFFFFF,
second=(val >> 20) & 0x3F,
minute=(val >> 26) & 0x3F,
hour=(val >> 32),
)
else: # must be 3
(val,) = struct_unpack("I", encvalue + b"\x00")
return time(
microsecond=0,
second=(val) & 0x3F,
minute=(val >> 6) & 0x3F,
hour=(val >> 12),
)
DECODE_FUNCS: dict[int, Callable[[bytes], Any]] = {
DYN_COL_INT: decode_int,
DYN_COL_UINT: decode_uint,
DYN_COL_DOUBLE: decode_double,
DYN_COL_STRING: decode_string,
DYN_COL_DECIMAL: decode_decimal,
DYN_COL_DATETIME: decode_datetime,
DYN_COL_DATE: decode_date,
DYN_COL_TIME: decode_time,
DYN_COL_DYNCOL: unpack,
}
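# Illustrative round-trip sketch (not part of the original module): pack() and
# unpack() are intended to be inverses for the supported value types, so a
# mapping survives a trip through the MariaDB dynamic columns binary format.
# The sample values below are assumptions for demonstration only.
if __name__ == "__main__":  # pragma: no cover
    sample = {"a": 1, "b": -42, "name": "hello", "when": date(2020, 1, 2)}
    packed = pack(sample)
    assert unpack(packed) == sample
    print(f"round-tripped {len(packed)} bytes")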
| adamchainz/mariadb-dyncol | src/mariadb_dyncol/base.py | Python | mit | 12,070 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core.cache import cache
from ...core.tests import utils
from .models import Flag, CommentFlag
from .forms import FlagForm
class FlagViewTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category, user=self.user)
self.comment = utils.create_comment(user=self.user, topic=self.topic)
def test_flag_create(self):
"""
create flag
"""
utils.login(self)
form_data = {'reason': "0", }
response = self.client.post(reverse('spirit:comment:flag:create', kwargs={'comment_id': self.comment.pk, }),
form_data)
self.assertRedirects(response, self.comment.get_absolute_url(), status_code=302, target_status_code=302)
self.assertEqual(len(Flag.objects.all()), 1)
self.assertEqual(len(CommentFlag.objects.all()), 1)
class FlagFormTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category, user=self.user)
self.comment = utils.create_comment(user=self.user, topic=self.topic)
def test_flag_create(self):
"""
create flag
"""
form_data = {'reason': '0', 'body': 'spam comment foo'}
form = FlagForm(data=form_data)
form.comment = self.comment
form.user = self.user
self.assertEqual(form.is_valid(), True)
form.save()
self.assertEqual(len(CommentFlag.objects.all()), 1)
| dvreed/Spirit | spirit/comment/flag/tests.py | Python | mit | 1,829 |
from datetime import date, datetime, time, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
import pandas._testing as tm
from pandas.core.indexes.base import InvalidIndexError
from pandas.tseries.offsets import BDay, CDay
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = pd.date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem_slice_keeps_name(self):
# GH4226
st = pd.Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles")
et = pd.Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles")
dr = pd.date_range(st, et, freq="H", name="timebucket")
assert dr[1:].name == dr.name
def test_getitem(self):
idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx2 = pd.date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx[0:5]
expected = pd.date_range(
"2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range(
"2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range(
"2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="-1D",
tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_dti_business_getitem(self):
rng = pd.bdate_range(START, END)
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B")
tm.assert_index_equal(smaller, exp)
assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == BDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
def test_dti_business_getitem_matplotlib_hackaround(self):
rng = pd.bdate_range(START, END)
with tm.assert_produces_warning(DeprecationWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
expected = rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_dti_custom_getitem(self):
rng = pd.bdate_range(START, END, freq="C")
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C")
tm.assert_index_equal(smaller, exp)
assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == CDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
def test_dti_custom_getitem_matplotlib_hackaround(self):
rng = pd.bdate_range(START, END, freq="C")
with tm.assert_produces_warning(DeprecationWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
expected = rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_getitem_int_list(self):
dti = date_range(start="1/1/2005", end="12/1/2005", freq="M")
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
assert v1 == Timestamp("2/28/2005")
assert v2 == Timestamp("4/30/2005")
assert v3 == Timestamp("6/30/2005")
# getitem with non-slice drops freq
assert dti2.freq is None
class TestWhere:
def test_where_doesnt_retain_freq(self):
dti = date_range("20130101", periods=3, freq="D", name="idx")
cond = [True, True, False]
expected = DatetimeIndex([dti[0], dti[1], dti[0]], freq=None, name="idx")
result = dti.where(cond, dti[::-1])
tm.assert_index_equal(result, expected)
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range("20130101", periods=3, tz="US/Eastern")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2._values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
i2 = Index([pd.NaT, pd.NaT] + dti[2:].tolist())
with pytest.raises(TypeError, match="Where requires matching dtype"):
# passing tz-naive ndarray to tzaware DTI
dti.where(notna(i2), i2.values)
with pytest.raises(TypeError, match="Where requires matching dtype"):
# passing tz-aware DTI to tznaive DTI
dti.tz_localize(None).where(notna(i2), i2)
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.tz_localize(None).to_period("D"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
# non-matching scalar
dti.where(notna(i2), pd.Timedelta(days=4))
def test_where_mismatched_nat(self, tz_aware_fixture):
tz = tz_aware_fixture
dti = pd.date_range("2013-01-01", periods=3, tz=tz)
cond = np.array([True, False, True])
msg = "Where requires matching dtype"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
dti.where(cond, np.timedelta64("NaT", "ns"))
def test_where_tz(self):
i = pd.date_range("20130101", periods=3, tz="US/Eastern")
result = i.where(notna(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2))
expected = i2
tm.assert_index_equal(result, expected)
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx2 = pd.date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range(
"2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range(
"2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range(
"2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(
["2011-01-04", "2011-01-03", "2011-01-06"],
freq=None,
tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(
["2011-01-29", "2011-01-03", "2011-01-06"],
freq=None,
tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
# TODO: This method came from test_datetime; de-dup with version above
@pytest.mark.parametrize("tz", [None, "US/Eastern", "Asia/Tokyo"])
def test_take2(self, tz):
dates = [
datetime(2010, 1, 1, 14),
datetime(2010, 1, 1, 15),
datetime(2010, 1, 1, 17),
datetime(2010, 1, 1, 21),
]
idx = pd.date_range(
start="2010-01-01 09:00",
end="2010-02-01 09:00",
freq="H",
tz=tz,
name="idx",
)
expected = DatetimeIndex(dates, freq=None, name="idx", tz=tz)
taken1 = idx.take([5, 6, 8, 12])
taken2 = idx[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, DatetimeIndex)
assert taken.freq is None
assert taken.tz == expected.tz
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
def test_take_fill_value_with_timezone(self):
idx = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestGetLoc:
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc_method_exact_match(self, method):
idx = pd.date_range("2000-01-01", periods=3)
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1
def test_get_loc(self):
idx = pd.date_range("2000-01-01", periods=3)
assert idx.get_loc("2000-01-01", method="nearest") == 0
assert idx.get_loc("2000-01-01T12", method="nearest") == 1
assert idx.get_loc("2000-01-01T12", method="nearest", tolerance="1 day") == 1
assert (
idx.get_loc("2000-01-01T12", method="nearest", tolerance=pd.Timedelta("1D"))
== 1
)
assert (
idx.get_loc(
"2000-01-01T12", method="nearest", tolerance=np.timedelta64(1, "D")
)
== 1
)
assert (
idx.get_loc("2000-01-01T12", method="nearest", tolerance=timedelta(1)) == 1
)
with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
with pytest.raises(KeyError, match="'2000-01-01T03'"):
idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
with pytest.raises(
ValueError, match="tolerance size must match target index size"
):
idx.get_loc(
"2000-01-01",
method="nearest",
tolerance=[
pd.Timedelta("1day").to_timedelta64(),
pd.Timedelta("1day").to_timedelta64(),
],
)
assert idx.get_loc("2000", method="nearest") == slice(0, 3)
assert idx.get_loc("2000-01", method="nearest") == slice(0, 3)
assert idx.get_loc("1999", method="nearest") == 0
assert idx.get_loc("2001", method="nearest") == 2
with pytest.raises(KeyError, match="'1999'"):
idx.get_loc("1999", method="pad")
with pytest.raises(KeyError, match="'2001'"):
idx.get_loc("2001", method="backfill")
with pytest.raises(KeyError, match="'foobar'"):
idx.get_loc("foobar")
with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
idx.get_loc(slice(2))
idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
assert idx.get_loc("2000-01-02", method="nearest") == 0
assert idx.get_loc("2000-01-03", method="nearest") == 1
assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
# time indexing
idx = pd.date_range("2000-01-01", periods=24, freq="H")
tm.assert_numpy_array_equal(
idx.get_loc(time(12)), np.array([12]), check_dtype=False
)
tm.assert_numpy_array_equal(
idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
)
msg = "cannot yet lookup inexact labels when key is a time object"
with pytest.raises(NotImplementedError, match=msg):
idx.get_loc(time(12, 30), method="pad")
def test_get_loc_tz_aware(self):
# https://github.com/pandas-dev/pandas/issues/32140
dti = pd.date_range(
pd.Timestamp("2019-12-12 00:00:00", tz="US/Eastern"),
pd.Timestamp("2019-12-13 00:00:00", tz="US/Eastern"),
freq="5s",
)
key = pd.Timestamp("2019-12-12 10:19:25", tz="US/Eastern")
result = dti.get_loc(key, method="nearest")
assert result == 7433
def test_get_loc_nat(self):
# GH#20464
index = DatetimeIndex(["1/3/2000", "NaT"])
assert index.get_loc(pd.NaT) == 1
assert index.get_loc(None) == 1
assert index.get_loc(np.nan) == 1
assert index.get_loc(pd.NA) == 1
assert index.get_loc(np.datetime64("NaT")) == 1
with pytest.raises(KeyError, match="NaT"):
index.get_loc(np.timedelta64("NaT"))
@pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
def test_get_loc_timedelta_invalid_key(self, key):
# GH#20464
dti = pd.date_range("1970-01-01", periods=10)
msg = "Cannot index DatetimeIndex with [Tt]imedelta"
with pytest.raises(TypeError, match=msg):
dti.get_loc(key)
def test_get_loc_reasonable_key_error(self):
# GH#1062
index = DatetimeIndex(["1/3/2000"])
with pytest.raises(KeyError, match="2000"):
index.get_loc("1/1/2000")
class TestContains:
def test_dti_contains_with_duplicates(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
assert d in ix
@pytest.mark.parametrize(
"vals",
[
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
],
)
def test_contains_nonunique(self, vals):
# GH#9512
idx = DatetimeIndex(vals)
assert idx[0] in idx
class TestGetIndexer:
def test_get_indexer(self):
idx = pd.date_range("2000-01-01", periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 hour")),
np.array([0, -1, 1], dtype=np.intp),
)
tol_raw = [
pd.Timedelta("1 hour"),
pd.Timedelta("1 hour"),
pd.Timedelta("1 hour").to_timedelta64(),
]
tm.assert_numpy_array_equal(
idx.get_indexer(
target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw]
),
np.array([0, -1, 1], dtype=np.intp),
)
tol_bad = [
pd.Timedelta("2 hour").to_timedelta64(),
pd.Timedelta("1 hour").to_timedelta64(),
"foo",
]
with pytest.raises(ValueError, match="abbreviation w/o a number"):
idx.get_indexer(target, "nearest", tolerance=tol_bad)
with pytest.raises(ValueError, match="abbreviation w/o a number"):
idx.get_indexer(idx[[0]], method="nearest", tolerance="foo")
@pytest.mark.parametrize(
"target",
[
[date(2020, 1, 1), pd.Timestamp("2020-01-02")],
[pd.Timestamp("2020-01-01"), date(2020, 1, 2)],
],
)
def test_get_indexer_mixed_dtypes(self, target):
# https://github.com/pandas-dev/pandas/issues/33741
values = pd.DatetimeIndex(
[pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]
)
result = values.get_indexer(target)
expected = np.array([0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"target, positions",
[
([date(9999, 1, 1), pd.Timestamp("2020-01-01")], [-1, 0]),
([pd.Timestamp("2020-01-01"), date(9999, 1, 1)], [0, -1]),
([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]),
],
)
def test_get_indexer_out_of_bounds_date(self, target, positions):
values = pd.DatetimeIndex(
[pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]
)
result = values.get_indexer(target)
expected = np.array(positions, dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
class TestMaybeCastSliceBound:
def test_maybe_cast_slice_bounds_empty(self):
# GH#14354
empty_idx = date_range(freq="1H", periods=0, end="2015")
right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right", "loc")
exp = Timestamp("2015-01-02 23:59:59.999999999")
assert right == exp
left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left", "loc")
exp = Timestamp("2015-01-02 00:00:00")
assert left == exp
def test_maybe_cast_slice_duplicate_monotonic(self):
# https://github.com/pandas-dev/pandas/issues/16515
idx = DatetimeIndex(["2017", "2017"])
result = idx._maybe_cast_slice_bound("2017-01-01", "left", "loc")
expected = Timestamp("2017-01-01")
assert result == expected
class TestDatetimeIndex:
def test_get_value(self):
# specifically make sure we have test for np.datetime64 key
dti = pd.date_range("2016-01-01", periods=3)
arr = np.arange(6, 9)
ser = pd.Series(arr, index=dti)
key = dti[1]
with pytest.raises(AttributeError, match="has no attribute '_values'"):
with tm.assert_produces_warning(FutureWarning):
dti.get_value(arr, key)
with tm.assert_produces_warning(FutureWarning):
result = dti.get_value(ser, key)
assert result == 7
with tm.assert_produces_warning(FutureWarning):
result = dti.get_value(ser, key.to_pydatetime())
assert result == 7
with tm.assert_produces_warning(FutureWarning):
result = dti.get_value(ser, key.to_datetime64())
assert result == 7
| TomAugspurger/pandas | pandas/tests/indexes/datetimes/test_indexing.py | Python | bsd-3-clause | 23,710 |
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUITest
import Gaffer
import GafferImage
import GafferScene
import GafferSceneUI
class DocumentationTest( GafferUITest.TestCase ) :
def test( self ) :
self.maxDiff = None
self.assertNodesAreDocumented(
GafferScene,
additionalTerminalPlugTypes = ( GafferScene.ScenePlug, Gaffer.CompoundDataPlug.MemberPlug, GafferImage.ImagePlug )
)
if __name__ == "__main__":
unittest.main()
| hradec/gaffer | python/GafferSceneUITest/DocumentationTest.py | Python | bsd-3-clause | 2,209 |
#!/usr/bin/env python2
#
# This file is part of the coreboot project.
#
# Copyright (c) 2015 MediaTek Inc.
# Author: Tristan Shieh <tristan.shieh@mediatek.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import struct
import sys
import hashlib
def read(path):
with open(path, "rb") as f:
return f.read()
def write(path, data):
with open(path, "wb") as f:
f.write(data)
def padding(data, size, pattern = '\0'):
return data + pattern * (size - len(data))
def align(data, size, pattern = '\0'):
return padding(data, (len(data) + (size - 1)) & ~(size - 1), pattern)
gfh_infos = {
'mt8173': struct.pack("44I",
0x014d4d4d, 0x00000038, 0x454c4946, 0x464e495f,
0x0000004f, 0x00000001, 0x01050001, 0x000C0f50,
0xffffffff, 0x00020000, 0x000000a8, 0x00000020,
0x000000B0, 0x00000001, 0x014d4d4d, 0x0001000c,
0x00000001, 0x034d4d4d, 0x00070064, 0x00001182,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00006400, 0x00001388,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
)}
def gen_emmc_header(data):
header = (padding(struct.pack("<12sII", "EMMC_BOOT", 1, 512), 512, '\xff') +
padding(struct.pack("<8sIIIIIIII", "BRLYT", 1, 2048, 2048 + len(data),
0x42424242, 0x00010005, 2048, 2048 + len(data), 1) + '\0' * 140, 512, '\xff') +
'\0' * 1024)
return header
def gen_sf_header(data):
header = (padding(struct.pack("<12sII", "SF_BOOT", 1, 512), 512, '\xff') +
padding(struct.pack("<8sIIIIIIII", "BRLYT", 1, 2048, 2048 + len(data),
0x42424242, 0x00010007, 2048, 2048 + len(data), 1) + '\0' * 140, 512, '\xff') +
'\0' * 1024)
return header
gen_dev_header = {
"emmc": gen_emmc_header,
"sf": gen_sf_header
}
def gen_preloader(chip_ver, flash_type, data):
gfh_info = gfh_infos[chip_ver]
gfh_info = gfh_info[0:32] + struct.pack("1I", len(data)+len(gfh_info)+32) + gfh_info[36:len(gfh_info)]
gfh_hash = hashlib.sha256(gfh_info + data).digest()
data = align(gfh_info + data + gfh_hash, 512, '\xff')
header = gen_dev_header[flash_type](data)
return header + data
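# Illustrative layout note (not from the original source): for both 'emmc' and
# 'sf' the output is a 2048-byte device header (two 512-byte identification
# blocks plus 1024 zero bytes) followed by the GFH-wrapped payload, its SHA-256
# digest, and 0xff padding up to the next 512-byte boundary.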
def main(argv):
if len(argv) != 5:
print "Usage: %s <chip> <flash_type> <input_file> <output_file>" % argv[0]
print "\t flash_type: emmc/sf"
print "\t chip : mt8173"
exit(1)
write(argv[4], gen_preloader(argv[1], argv[2], read(argv[3])))
if __name__ == "__main__":
main(sys.argv)
| librecore-org/librecore | util/mtkheader/gen-bl-img.py | Python | gpl-2.0 | 2,886 |
import chainer
from chainer.functions.array import broadcast
from chainer.functions.array import reshape
def scale(x, y, axis=1):
"""Elementwise product with broadcasting.
Computes a elementwise product of two input variables, with the shape of
the latter variable broadcasted to match the shape of the former. ``axis``
is the first axis of the first variable along which the second variable is
applied.
The term "broadcasting" here comes from Caffe's scale layer so the
"broadcasting" with the following arguments::
x : 100 x 3 x 40 x 60
y : 3 x 40
axis : 1
is equivalent to the following numpy broadcasting::
x : 100 x 3 x 40 x 60
y : 1 x 3 x 40 x 1
Note that how the ``axis`` indicates to which axis of ``x`` we apply ``y``.
Args:
x (~chainer.Variable): Input variable to be scaled.
y (~chainer.Variable): Input variable to scale, broadcasted.
axis (int): The first axis of ``x`` along which ``y`` is applied.
Returns:
~chainer.Variable: Output variable.
"""
x_shape = x.shape
y_shape = y.shape
if chainer.is_debug():
assert x_shape[axis:axis + len(y_shape)] == y_shape
y1_shape = tuple([1] * axis + list(y_shape) +
[1] * (len(x_shape) - axis - len(y_shape)))
y1 = reshape.reshape(y, y1_shape)
y2 = broadcast.broadcast_to(y1, x_shape)
return x * y2
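# Minimal usage sketch (illustrative only, not part of the original module);
# assumes NumPy is available and mirrors the shapes from the docstring above.
if __name__ == '__main__':
    import numpy as np
    x = chainer.Variable(np.ones((100, 3, 40, 60), dtype=np.float32))
    y = chainer.Variable(np.full((3, 40), 2.0, dtype=np.float32))
    z = scale(x, y, axis=1)
    assert z.shape == (100, 3, 40, 60)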
| aonotas/chainer | chainer/functions/math/scale.py | Python | mit | 1,448 |
(S'f2d5decc4813e22fef85936300f74439'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
(dp6
tp7
sS'_import_info'
p8
(ihappydoclib.parseinfo.imports
ImportInfo
p9
(dp10
S'_named_imports'
p11
(dp12
sS'_straight_imports'
p13
(lp14
S'_pysssr'
p15
aS'sys'
p16
asbsS'_filename'
p17
S'../python/frowns/extensions/pysssr/test_sssr.py'
p18
sS'_docstring'
p19
S''
sS'_name'
p20
S'test_sssr'
p21
sS'_parent'
p22
NsS'_comment_info'
p23
(dp24
sS'_configuration_values'
p25
(dp26
S'include_comments'
p27
I1
sS'cacheFilePrefix'
p28
S'.happydoc.'
p29
sS'useCache'
p30
I1
sS'docStringFormat'
p31
S'StructuredText'
p32
ssS'_class_info'
p33
g5
sS'_function_info'
p34
g6
sS'_comments'
p35
S''
sbt. | tuffery/Frog2 | frowns/extensions/pysssr/.happydoc.test_sssr.py | Python | gpl-3.0 | 708 |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Application', 'AppException']
import sys, types
from .node_data import NodeData
from .app_environment import AppException
class Application(object):
'''base class defining interface for 3D applications
a possbility to created a distributed maya server that will not be
run on the web server'''
def __init__(self, init=True):
self.buffer_flag = False
self.buffer = None
self.verbose = False
def message(self, message):
print(message)
def start_buffer(self):
'''starts a buffer which redirects commands to a buffer'''
self.buffer_flag = True
self.buffer = []
def end_buffer(self):
self.buffer_flag = False
def get_buffer(self):
return self.buffer
def set_verbose(self, flag=True):
self.verbose = flag
def cleanup(self):
'''Note: necessary in Houdini'''
pass
def get_node_data(self, node_name):
return NodeData(node_name)
# Common operations
# These abstract and interface to appplication version so that
# implementations can made. As few basic operations as possible
# into the application are defined to simplify porting.
def is_tactic_node(self, node):
raise AppException("Function: 'is_tactic_node()' must be overriden")
def new_session(self):
'''clear the session'''
raise AppException("Function: 'new()' must be overriden")
def set_project(self, project_dir):
raise AppException("Function: 'set_project()' must be overriden")
def get_project(self):
raise AppException("Function: 'get_project()' must be overriden")
def get_var(self):
raise AppException("Function: 'get_var()' must be overriden")
def get_node_type(self, node_name):
raise AppException("Function: 'get_node_type()' must be overriden")
def get_parent(self, node_name):
pass
def get_children(self, node_name):
pass
def get_frame_range(self):
pass
def has_flex_range(self):
'''has flexible file range for file sequence definition'''
return True
# action functions
def set_attr(self, node, attr, value, attr_type=""):
pass
def select(self, node):
raise AppException("Function: 'select()' must be overriden")
def select_add(self, node):
pass
def select_none(self):
pass
def select_restore(self, nodes):
pass
def select_hierarchy(self, node):
pass
# interaction with files
def import_file(self, path, namespace=":"):
'''import a file into the session'''
pass
def import_reference(self, path, namespace=":"):
'''load using renferences'''
pass
def replace_reference(self, node_name, path, top_reference=True):
'''load using renferences'''
pass
def is_reference(self, instance):
'''detect whether an instance is a reference or not'''
pass
def is_keyed(self, node_name, attr):
'''detect whether an instance is keyed or not'''
pass
def import_static(self, buffer, node_name):
'''import unkeyed values'''
pass
def import_anim(self, path, namespace=":"):
'''load in animation'''
pass
def export_anim(self, path, namespace=":"):
'''export the animation'''
def save(self, path, file_type=None):
'''save the file'''
raise AppException("Function: 'save()' must be overriden")
def load(self, path):
'''load the file'''
raise AppException("Function: 'load()' must be overriden")
def rename(self, path):
'''rename the file'''
# this is harmless if it doesn't do anything
pass
def export_node(self, node_name, context, dir=None, filename="", preserve_ref=None):
'''export a top node'''
pass
def save_node(self, node_name, dir=None):
'''use file/save for a particular node (saves whole file instead
of using export'''
pass
def get_file_path(self):
'''returns the path of the last save filename'''
raise AppException("Function: 'get_file_path()' must be overriden")
# namespace commands
def set_namespace(self, namespace=":"):
pass
def add_namespace(self, namespace):
pass
def remove_namespace(self, namespace):
pass
def namespace_exists(self, namespace):
pass
def get_namespace_info(self):
pass
def get_namespace_contents(self):
'''retrieves the contents of the current namespace'''
return []
def get_all_namespaces(self):
return []
def rename_node(self, node_name, new_name):
pass
# set functions
def get_sets(self):
return []
def is_set(self, node_name):
if node_name in self.get_sets():
return True
else:
return False
def create_set(self, node_name):
pass
def add_to_set(self, set_name, node_name):
pass
def get_nodes_in_set(self, set_name):
pass
# information retrieval functions. Requires an open Maya session
def node_exists(self,node):
pass
def get_nodes_by_type(self, type):
pass
def get_selected_node(self):
nodes = self.get_selected_nodes()
if nodes:
return nodes[0]
else:
return None
def get_selected_nodes(self):
pass
def get_selected_top_nodes(self):
raise AppException("Function: 'get_selected_top_nodes()' must be overriden")
def get_top_nodes(self):
raise AppException("Function: 'get_top_nodes()' must be overriden")
def get_tactice_nodes(self, top_node=None):
'''method to find all of the tactic nodes in a session'''
return []
def get_reference_nodes(self, top_node, recursive=False):
'''gets all of the references under a single dag node'''
return []
def get_reference_path(self, node):
pass
def add_node(self, type, node_name, unique=False):
'''return name of node added'''
pass
# attributes
def add_attr(self, node, attribute, type="long"):
pass
def attr_exists(self, node, attribute):
'''return True or False'''
raise AppException("Function: 'attr_exists()' must be overriden")
def get_attr(self, node, attribute):
pass
def get_attr_type(self, node, attribute):
pass
def get_all_attrs(self, node):
pass
def get_attr_default(self, node, attr):
pass
# layers
def get_all_layers(self):
return []
def get_layer_nodes(self):
'''gets all of the TACTIC nodes in a layer'''
return []
def set_user_environment(self, sandbox_dir, basename):
'''gives the opportunity to let TACTIC set the user envrionment based
on the sandbox dir. It is safe not to use this for a given application
'''
pass
class AppException(Exception):
'''Used by different applications for raising exceptions'''
pass
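# Minimal backend sketch (illustrative assumption, not part of TACTIC): a
# concrete application wrapper only needs to override the methods above that
# raise AppException; everything else already has a safe default. The class
# name and return values below are made up for demonstration.
class _NullApplication(Application):
    '''Toy backend satisfying the mandatory interface with in-memory no-ops.'''
    def __init__(self, init=True):
        super(_NullApplication, self).__init__(init)
        self.project_dir = ""
    def is_tactic_node(self, node):
        return False
    def new_session(self):
        pass
    def set_project(self, project_dir):
        self.project_dir = project_dir
    def get_project(self):
        return self.project_dir
    def get_var(self):
        return ""
    def get_node_type(self, node_name):
        return "transform"
    def select(self, node):
        pass
    def save(self, path, file_type=None):
        pass
    def load(self, path):
        pass
    def get_file_path(self):
        return ""
    def get_selected_top_nodes(self):
        return []
    def get_top_nodes(self):
        return []
    def attr_exists(self, node, attribute):
        return False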
| diegocortassa/TACTIC | src/pyasm/application/common/application.py | Python | epl-1.0 | 7,585 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'server_usage')
class ServerUsageController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ServerUsageController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_server(self, server, instance):
for k in ['launched_at', 'terminated_at']:
key = "%s:%s" % (Server_usage.alias, k)
# NOTE(danms): Historically, this timestamp has been generated
# merely by grabbing str(datetime) of a TZ-naive object. The
# only way we can keep that with instance objects is to strip
# the tzinfo from the stamp and str() it.
server[key] = (instance[k].replace(tzinfo=None)
if instance[k] else None)
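    # Resulting response keys (illustrative): server['OS-SRV-USG:launched_at']
    # and server['OS-SRV-USG:terminated_at'], built from the extension alias.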
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(server, db_instance)
class Server_usage(extensions.ExtensionDescriptor):
"""Adds launched_at and terminated_at on Servers."""
name = "ServerUsage"
alias = "OS-SRV-USG"
namespace = ("http://docs.openstack.org/compute/ext/"
"server_usage/api/v1.1")
updated = "2013-04-29T00:00:00Z"
def get_controller_extensions(self):
controller = ServerUsageController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| silenceli/nova | nova/api/openstack/compute/contrib/server_usage.py | Python | apache-2.0 | 2,996 |
# -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
''' render.py - functions to render various objects, like date, time,
etc. ''' # pylint: disable=W0105
from taskcoachlib.domain import date as datemodule
from taskcoachlib.thirdparty import desktop
from taskcoachlib.i18n import _
from taskcoachlib import operating_system
import datetime
import codecs
import locale
import re
# pylint: disable=W0621
def priority(priority):
''' Render an (integer) priority '''
return str(priority)
def timeLeft(time_left, completed_task):
''' Render time left as a text string. Returns an empty string for
completed tasks and for tasks without planned due date. Otherwise it
returns the number of days, hours, and minutes left. '''
if completed_task or time_left == datemodule.TimeDelta.max:
return ''
sign = '-' if time_left.days < 0 else ''
time_left = abs(time_left)
if time_left.days > 0:
days = _('%d days') % time_left.days if time_left.days > 1 else \
_('1 day')
days += ', '
else:
days = ''
hours_and_minutes = ':'.join(str(time_left).split(':')[:-1]).split(', ')[-1]
return sign + days + hours_and_minutes
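# Illustrative renderings (assumed TimeDelta values, not from the original
# source): 2 days, 3 hours, 15 minutes left -> '2 days, 3:15'; minus 1 hour
# 30 minutes -> '-1:30'; any value with completed_task=True -> ''.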
def timeSpent(timeSpent, showSeconds=True):
''' Render time spent (of type date.TimeDelta) as
"<hours>:<minutes>:<seconds>" or "<hours>:<minutes>" '''
zero = datemodule.TimeDelta()
if timeSpent == zero:
return ''
else:
sign = '-' if timeSpent < zero else ''
hours, minutes, seconds = timeSpent.hoursMinutesSeconds()
return sign + '%d:%02d' % (hours, minutes) + \
(':%02d' % seconds if showSeconds else '')
def recurrence(recurrence):
''' Render the recurrence as a short string describing the frequency of
the recurrence. '''
if not recurrence:
return ''
if recurrence.amount > 2:
labels = [_('Every %(frequency)d days'),
_('Every %(frequency)d weeks'),
_('Every %(frequency)d months'),
_('Every %(frequency)d years')]
elif recurrence.amount == 2:
labels = [_('Every other day'), _('Every other week'),
_('Every other month'), _('Every other year')]
else:
labels = [_('Daily'), _('Weekly'), _('Monthly'), _('Yearly')]
mapping = dict(zip(['daily', 'weekly', 'monthly', 'yearly'], labels))
return mapping.get(recurrence.unit) % dict(frequency=recurrence.amount)
def budget(aBudget):
''' Render budget (of type date.TimeDelta) as
"<hours>:<minutes>:<seconds>". '''
return timeSpent(aBudget)
# Default time formatting
language_and_country = locale.getlocale()[0]
if language_and_country and ('_US' in language_and_country or
'_United States' in language_and_country):
timeFormat = '%I %p'
timeWithMinutesFormat = '%I:%M %p'
timeWithSecondsFormat = '%I:%M:%S %p'
else:
timeFormat = '%H'
timeWithMinutesFormat = '%H:%M' # %X includes seconds (see http://stackoverflow.com/questions/2507726)
timeWithSecondsFormat = '%X'
def rawTimeFunc(dt, minutes=True, seconds=False):
if seconds:
fmt = timeWithSecondsFormat
else:
if minutes:
fmt = timeWithMinutesFormat
else:
fmt = timeFormat
return datemodule.DateTime.strftime(dt, fmt)
dateFormat = '%x'
def rawDateFunc(dt=None):
return operating_system.decodeSystemString(datetime.datetime.strftime(dt, dateFormat))
def dateFunc(dt=None, humanReadable=False):
if humanReadable:
theDate = dt.date()
if theDate == datemodule.Now().date():
return _('Today')
elif theDate == datemodule.Yesterday().date():
return _('Yesterday')
elif theDate == datemodule.Tomorrow().date():
return _('Tomorrow')
return rawDateFunc(dt)
# OS-specific time formatting
if operating_system.isWindows():
import pywintypes, win32api
def rawTimeFunc(dt, minutes=True, seconds=False):
if seconds:
# You can't include seconds without minutes
flags = 0x0
else:
if minutes:
flags = 0x2
else:
flags = 0x1
return operating_system.decodeSystemString(win32api.GetTimeFormat(0x400, flags, None if dt is None else pywintypes.Time(dt), None))
def rawDateFunc(dt):
return operating_system.decodeSystemString(win32api.GetDateFormat(0x400, 0, None if dt is None else pywintypes.Time(dt), None))
elif operating_system.isMac():
import Cocoa, calendar
# We don't actually respect the 'seconds' parameter; this assumes that the short time format does
# not include them, but the medium format does.
_shortFormatter = Cocoa.NSDateFormatter.alloc().init()
_shortFormatter.setFormatterBehavior_(Cocoa.NSDateFormatterBehavior10_4)
_shortFormatter.setTimeStyle_(Cocoa.NSDateFormatterShortStyle)
_shortFormatter.setDateStyle_(Cocoa.NSDateFormatterNoStyle)
_shortFormatter.setTimeZone_(Cocoa.NSTimeZone.timeZoneForSecondsFromGMT_(0))
_mediumFormatter = Cocoa.NSDateFormatter.alloc().init()
_mediumFormatter.setFormatterBehavior_(Cocoa.NSDateFormatterBehavior10_4)
_mediumFormatter.setTimeStyle_(Cocoa.NSDateFormatterMediumStyle)
_mediumFormatter.setDateStyle_(Cocoa.NSDateFormatterNoStyle)
_mediumFormatter.setTimeZone_(Cocoa.NSTimeZone.timeZoneForSecondsFromGMT_(0))
# Special case for hour without minutes or seconds. I don't know if it is possible to get the AM/PM
# setting alone, so parse the format string instead.
# See http://www.unicode.org/reports/tr35/tr35-25.html#Date_Format_Patterns
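    # For example, a (purely illustrative) medium time format of u'h:mm:ss a'
    # leaves _hourFormat == u'h' and _ampmFormat == u'a' after the loop below.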
_state = 0
_hourFormat = u''
_ampmFormat = u''
for c in _mediumFormatter.dateFormat():
if _state == 0:
if c == u"'":
_state = 1 # After single quote
elif c in [u'h', u'H', u'k', u'K', u'j']:
_hourFormat += c
elif c == 'a':
_ampmFormat = c
elif _state == 1:
if c == u"'":
_state = 0
else:
_state = 2 # Escaped string
elif _state == 2:
if c == u"'":
_state = 0
_hourFormatter = Cocoa.NSDateFormatter.alloc().init()
_hourFormatter.setFormatterBehavior_(Cocoa.NSDateFormatterBehavior10_4)
_hourFormatter.setDateFormat_(_hourFormat + (' %s' % _ampmFormat if _ampmFormat else ''))
_hourFormatter.setTimeZone_(Cocoa.NSTimeZone.timeZoneForSecondsFromGMT_(0))
_dateFormatter = Cocoa.NSDateFormatter.alloc().init()
_dateFormatter.setFormatterBehavior_(Cocoa.NSDateFormatterBehavior10_4)
_dateFormatter.setDateStyle_(Cocoa.NSDateFormatterShortStyle)
_dateFormatter.setTimeStyle_(Cocoa.NSDateFormatterNoStyle)
_dateFormatter.setTimeZone_(Cocoa.NSTimeZone.timeZoneForSecondsFromGMT_(0))
def _applyFormatter(dt, fmt):
dt_native = Cocoa.NSDate.dateWithTimeIntervalSince1970_((dt - datetime.datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
return fmt.stringFromDate_(dt_native)
def rawTimeFunc(dt, minutes=True, seconds=False):
if minutes:
if seconds:
return _applyFormatter(dt, _mediumFormatter)
return _applyFormatter(dt, _shortFormatter)
return _applyFormatter(dt, _hourFormatter)
def rawDateFunc(dt):
return _applyFormatter(datetime.datetime.combine(dt, datetime.time(0, 0, 0, 0)), _dateFormatter)
elif desktop.get_desktop() == 'KDE4':
try:
# Import gtk first because when it's imported indirectly it generates a RuntimeWarning.
import gtk
from PyKDE4.kdecore import KGlobal, KLocale
from PyQt4.QtCore import QTime, QDate
except ImportError:
pass
else:
_localeCopy = KLocale(KGlobal.locale())
if '%p' in KGlobal.locale().timeFormat():
_localeCopy.setTimeFormat('%I %p')
else:
_localeCopy.setTimeFormat('%H')
def rawTimeFunc(dt, minutes=True, seconds=False):
qtdt = QTime(dt.hour, dt.minute, dt.second)
if minutes:
return unicode(KGlobal.locale().formatTime(qtdt, seconds))
return unicode(_localeCopy.formatTime(qtdt))
def rawDateFunc(dt):
qtdt = QDate(dt.year, dt.month, dt.day)
return unicode(KGlobal.locale().formatDate(qtdt, 0))
timeFunc = lambda dt, minutes=True, seconds=False: operating_system.decodeSystemString(rawTimeFunc(dt, minutes=minutes, seconds=seconds))
dateTimeFunc = lambda dt=None, humanReadable=False: u'%s %s' % (dateFunc(dt, humanReadable=humanReadable), timeFunc(dt))
def date(aDateTime, humanReadable=False):
''' Render a date/time as date. '''
if str(aDateTime) == '':
return ''
year = aDateTime.year
if year >= 1900:
return dateFunc(aDateTime, humanReadable=humanReadable)
else:
result = date(datemodule.DateTime(year + 1900, aDateTime.month,
aDateTime.day),
humanReadable=humanReadable)
return re.sub(str(year + 1900), str(year), result)
def dateTime(aDateTime, humanReadable=False):
if not aDateTime or aDateTime == datemodule.DateTime() or aDateTime == datemodule.DateTime.min:
return ''
timeIsMidnight = (aDateTime.hour, aDateTime.minute) in ((0, 0), (23, 59))
year = aDateTime.year
if year >= 1900:
return dateFunc(aDateTime, humanReadable=humanReadable) if timeIsMidnight else \
dateTimeFunc(aDateTime, humanReadable=humanReadable)
else:
result = dateTime(aDateTime.replace(year=year + 1900), humanReadable=humanReadable)
return re.sub(str(year + 1900), str(year), result)
def dateTimePeriod(start, stop, humanReadable=False):
if stop is None:
return '%s - %s' % (dateTime(start, humanReadable=humanReadable), _('now'))
elif start.date() == stop.date():
return '%s %s - %s' % (date(start, humanReadable=humanReadable),
time(start), time(stop))
else:
return '%s - %s' % (dateTime(start, humanReadable=humanReadable), dateTime(stop, humanReadable=humanReadable))
def time(dateTime, seconds=False, minutes=True):
try:
# strftime doesn't handle years before 1900, be prepared:
dateTime = dateTime.replace(year=2000)
except TypeError: # We got a time instead of a dateTime
dateTime = datemodule.Now().replace(hour=dateTime.hour,
minute=dateTime.minute,
second=dateTime.second)
return timeFunc(dateTime, minutes=minutes, seconds=seconds)
def month(dateTime):
return dateTime.strftime('%Y %B')
def weekNumber(dateTime):
    # Would have liked to use dateTime.strftime('%Y-%U'), but the week number
    # is off by one in 2004
return '%d-%d' % (dateTime.year, dateTime.weeknumber())
def monetaryAmount(aFloat):
''' Render a monetary amount, using the user's locale. '''
return '' if round(aFloat, 2) == 0 else \
locale.format('%.2f', aFloat, monetary=True)
def percentage(aFloat):
''' Render a percentage. '''
return '' if round(aFloat, 0) == 0 else '%.0f%%' % aFloat
def exception(exception, instance):
''' Safely render an exception, being prepared for new exceptions. '''
try:
# In this order. Python 2.6 fixed the unicode exception problem.
try:
return unicode(instance)
except UnicodeDecodeError:
# On Windows, some exceptions raised by win32all lead to this
# Hack around it
result = []
for val in instance.args:
if isinstance(val, unicode):
result.append(val.encode('UTF-8'))
else:
result.append(val)
return unicode(result)
except UnicodeEncodeError:
return '<class %s>' % str(exception)
| TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/render.py | Python | gpl-3.0 | 12,800 |
#!/usr/bin/env python3
import xml.etree.ElementTree as etree
import requests
import sys
import urllib.parse
XML_URL = 'http://www.billboard.com/rss/charts/hot-100'
ROW_TMPL = """
<tr>
<td>{rank}</td>
<td>{lastrank}</td>
<td><a href="{link}">{title}</a></td>
<td>{artist}</td>
</tr>
"""
PAGE_TMPL = """
<!DOCTYPE html>
<html>
<head>
<title>Hot 100</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
</head>
<body>
<table class="table table-striped">
<thead><tr>
<th>Rank</th>
<th>Last Rank</th>
<th>Title</th>
<th>Artist</th>
</tr></thead>
<tbody>{body}</tbody>
</table>
</body>
</html>
"""
YOUTUBE_SEARCH_URL = 'https://youtube.com/results'
def fetch():
req = requests.get(XML_URL)
return etree.fromstring(req.text)
def get(tree):
for item in tree.findall('.//item'):
artist = item.findtext('./artist')
title = item.findtext('./title')
_, title = title.split(': ', 1)
url = '{}?{}'.format(
YOUTUBE_SEARCH_URL,
urllib.parse.urlencode({
'q': '{} {}'.format(artist, title).lower(),
}),
)
yield {
'artist': artist,
'title': title,
'rank': int(item.findtext('./rank_this_week')),
'lastrank': int(item.findtext('./rank_last_week')) or '—',
'link': url,
}
def dump(items):
body_parts = []
for item in items:
body_parts.append(ROW_TMPL.format(**item))
return PAGE_TMPL.format(body=''.join(body_parts))
if __name__ == '__main__':
sys.stdout.write(dump(get(fetch())))
| sampsyo/hot100 | hot100.py | Python | mit | 1,662 |
import string
from htmllib import HTMLParser
from cgi import escape
from urlparse import urlparse
from formatter import AbstractFormatter
from htmlentitydefs import entitydefs
from xml.sax.saxutils import quoteattr
import re
ALPHABET = string.ascii_uppercase + string.ascii_lowercase + \
string.digits + '-_'
ALPHABET_REVERSE = dict((c, i) for (i, c) in enumerate(ALPHABET))
BASE = len(ALPHABET)
SIGN_CHARACTER = '$'
def num_encode(n):
if n < 0:
return SIGN_CHARACTER + num_encode(-n)
s = []
while True:
n, r = divmod(n, BASE)
s.append(ALPHABET[r])
if n == 0:
break
return ''.join(reversed(s))
def num_decode(s):
if s[0] == SIGN_CHARACTER:
return -num_decode(s[1:])
n = 0
for c in s:
n = n * BASE + ALPHABET_REVERSE[c]
return n
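# Round-trip sketch (values worked out by hand from ALPHABET above, so treat
# them as illustrative):
#   num_encode(12345) -> 'DA5'   (12345 == 3*64**2 + 0*64 + 57)
#   num_decode('DA5') -> 12345
#   num_encode(-7)    -> '$H'    (negative numbers get the SIGN_CHARACTER prefix)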
def xssescape(text):
"""Gets rid of < and > and & and, for good measure, :"""
return escape(text, quote=True).replace(':', ':')
def despam(text):
"""
Rudimentary bad word filter, to be replaced soon by something more solid
"""
return re.sub(
r'c.?[i1].?[a@].?[l1].?[i1].?[s$]|v.?[Ii1].?[a@].?gr.?[a@]|[l1].?[e3].?v.?[i!1].?t.?r.?[a@]|\
-online|4u|adipex|advicer|baccarrat|blackjack|bllogspot|booker|byob|car-rental-e-site|car-rentals-e-site|\
carisoprodol|c.?[a@].?[s$].?[i!1].?n.?[o0]|chatroom|coolhu|coolhu|credit-card-debt|credit-report|cwas|cyclen|\
benzaprine|dating-e-site|day-trading|debt-consolidation|debt-consolidation|discreetordering|\
duty-free|dutyfree|equityloans|fioricet|flowers-leading-site|freenet-shopping|freenet|gambling-|hair-loss|\
health-insurancedeals-4u|homeequityloans|homefinance|holdem|\
hotel-dealse-site|hotele-site|hotelse-site|incest|insurance-quotesdeals-4u|insurancedeals-4u|jrcreations|\
macinstruct|mortgage-4-u|mortgagequotes|online-gambling|onlinegambling-4u|ottawavalleyag|ownsthis|\
palm-texas-holdem-game \
|p.?[a@].?x.?[i1!].?[l!1]|penis|pharmacy|phentermine|poker-chip|poze|pussy|rental-car-e-site|ringtones\
|roulette |shemale|shoes|slot-machine|\
shit|fuck|damn|cunt|ass.?hole|ass.?wipe|jackass|bitch|twat|whore|cock.?sucker|faggot| \
texas-holdem|thorcarlson|top-site|top-e-site|tramadol|trim-spa|\
ultram|v.?[i1!].?[o0].?x|x.?[a@].?n.?[a@].?x|zolus' + '(?i)', r'', text
)
class XssCleaner(HTMLParser):
"""
Cross-site scripting protection, from http://code.activestate.com/recipes/496942-cross-site-scripting-xss-defense/
"""
def __init__(self, fmt=AbstractFormatter):
HTMLParser.__init__(self, fmt)
self.result = ""
self.open_tags = []
# A list of forbidden tags.
self.forbidden_tags = ['script', 'embed', 'iframe', 'frame', ]
# A list of tags that require no closing tag.
self.requires_no_close = ['img', 'br']
# A dictionary showing the only attributes allowed for particular tags.
# If a tag is not listed here, it is allowed no attributes. Adding
# "on" tags, like "onhover," would not be smart. Also be very careful
# of "background" and "style."
# <h5 style="text-align: center;"><b><i><u><font size="5" face="impact">THIS IS A TEST</font></u></i></b></h5>
# <blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><p style="text-align: center;">
# <font size="5" face="arial" color="#cc3333">of the EBS</font></p><p style="text-align: center;">
# <font size="5" face="arial"><br></font></p><p style="text-align: center;"><font size="5" face="arial">
# <sup>reddit</sup><sub>2</sub></font></p>
# <p style="text-align: center;"><font size="5" face="arial"><sub><br></sub></font></p>
# <p style="text-align: center;"><font size="5" face="arial">fiiiiiii<sub>4</sub></font></p>
# <p style="text-align: center;"><font size="5" face="arial"><sub><br></sub></font></p>
# <p style="text-align: center;"><hr><br></p><p style="text-align: center;">
# <strike>strike</strike></p></blockquote>
self.allowed_attributes =\
{'a': ['href', 'title', 'target', 'style'],
'p': ['style'],
'img': ['src', 'alt', 'border', 'style', 'align'],
'blockquote': ['type', 'style', 'align'],
'font': ['size', 'face', 'align'],
'h5': ['style'], 'h4': ['style'], 'h3': ['style'], 'h2': ['style'], 'h1': ['style'],
'table': ['border', 'width', 'height', 'style', 'align', 'bgcolor'],
'tbody': ['border', 'width', 'height', 'style', 'align', 'bgcolor'],
'tr': ['border', 'width', 'height', 'style', 'align', 'bgcolor'],
'td': ['border', 'width', 'height', 'style', 'align', 'bgcolor'],
'div': ['border', 'width', 'height', 'style', 'align', 'bgcolor'],
'span': ['border', 'width', 'height', 'style', 'align', 'bgcolor'],
}
# The only schemes allowed in URLs (for href and src attributes).
# Adding "javascript" or "vbscript" to this list would not be smart.
self.allowed_schemes = ['http', 'https', 'ftp']
def handle_data(self, data):
if data:
self.result += xssescape(data)
def handle_charref(self, ref):
if len(ref) < 7 and ref.isdigit():
self.result += '&#%s;' % ref
else:
self.result += xssescape('&#%s' % ref)
def handle_entityref(self, ref):
if ref in entitydefs:
self.result += '&%s;' % ref
else:
self.result += xssescape('&%s' % ref)
def handle_comment(self, comment):
if comment:
self.result += xssescape("<!--%s-->" % comment)
def handle_starttag(self, tag, method, attrs):
if tag in self.forbidden_tags:
self.result += xssescape("<%s>" % tag)
else:
bt = "<" + tag
if tag in self.allowed_attributes:
attrs = dict(attrs)
self.allowed_attributes_here =\
[x for x in self.allowed_attributes[tag] if x in attrs and
len(attrs[x]) > 0
]
for attribute in self.allowed_attributes_here:
if attribute in ['href', 'src', 'background']:
if self.url_is_acceptable(attrs[attribute]):
bt += ' %s="%s"' % (attribute, attrs[attribute])
else:
bt += ' %s=%s' %\
(xssescape(attribute), quoteattr(attrs[attribute]))
if bt == "<a" or bt == "<img":
return
if tag in self.requires_no_close:
bt += "/"
bt += ">"
self.result += bt
self.open_tags.insert(0, tag)
def handle_endtag(self, tag, attrs):
bracketed = "</%s>" % tag
if tag in self.forbidden_tags:
self.result += xssescape(bracketed)
elif tag in self.open_tags:
self.result += bracketed
self.open_tags.remove(tag)
def unknown_starttag(self, tag, attributes):
self.handle_starttag(tag, None, attributes)
def unknown_endtag(self, tag):
self.handle_endtag(tag, None)
def url_is_acceptable(self, url):
# Requires all URLs to be "absolute."
parsed = urlparse(url)
return parsed[0] in self.allowed_schemes and '.' in parsed[1]
def strip(self, rawstring):
"""Returns the argument stripped of potentially harmful HTML or Javascript code"""
self.result = ""
self.feed(rawstring)
for endtag in self.open_tags:
if endtag not in self.requires_no_close:
self.result += "</%s>" % endtag
return self.result
def xtags(self):
"""Returns a printable string informing the user which tags are allowed"""
self.forbidden_tags.sort()
tg = ""
for x in self.forbidden_tags:
tg += "<" + x
if x in self.allowed_attributes:
for y in self.allowed_attributes[x]:
tg += ' %s=""' % y
tg += "> "
return xssescape(tg.strip())
# end of http://code.activestate.com/recipes/496942/ }}}
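# Minimal usage sketch (Python 2, matching the imports above; the sample markup
# is made up for illustration):
if __name__ == '__main__':
    _cleaner = XssCleaner()
    _sample = '<script>alert("x")</script><p style="color:red">hello <b>world</b></p>'
    _cleaned = _cleaner.strip(_sample)
    # _cleaned keeps the permitted <p style=...> markup, escapes the forbidden
    # <script> element into harmless text, and closes any tags left open.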
| MapStory/geonode | geonode/contrib/worldmap/wm_extra/encode.py | Python | gpl-3.0 | 8,522 |
"""spaCy
Run spaCy tools and pipelines on your datasets.
Currently only includes tokenization, but this could be expanded to include
many more of spaCy's tools.
Or, if you want a different tool/pipeline, you could create your own
module type following the same approach.
""" | markgw/pimlico | src/python/pimlico/modules/spacy/__init__.py | Python | gpl-3.0 | 278 |
#!/usr/bin/env python3
import sys
import gzip
filename_nTx = sys.argv[1]
#filename_nTx = 'TKLab201907n_PELCHtx_mms9171+Heart_NoPart_nTx.fa'
filename_quant = filename_nTx.replace('_NoPart_nTx.fa', '.salmon_quant.sf')
f_nTx = open(filename_nTx, 'r')
if filename_nTx.endswith('.gz'):
f_nTx = gzip.open(filename_nTx, 'rt')
filename_quant = filename_nTx.replace('_NoPart_nTx.fa.gz', '.salmon_quant.sf')
tx2quant = dict()
f_quant = open(filename_quant, 'r')
h_quant = f_quant.readline()
for line in f_quant:
tokens = line.strip().split("\t")
tx_id = tokens[0]
tx_tpm = float(tokens[-2])
tx_reads = float(tokens[-1])
tx2quant[tx_id] = {'tpm': tx_tpm, 'reads': tx_reads}
f_quant.close()
is_print = -1
for line in f_nTx:
if line.startswith('>'):
tx_id = line.strip().lstrip('>')
if tx_id not in tx2quant:
is_print = -1
elif tx2quant[tx_id]['reads'] < 1.0:
is_print = -1
else:
is_print = 1
print(">%s tpm=%.3f;reads=%.3f" % (tx_id, tx2quant[tx_id]['tpm'], tx2quant[tx_id]['reads']))
elif is_print > 0:
print(line.strip())
f_nTx.close()
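# Hypothetical invocation (the FASTA name mirrors the commented example above;
# the matching *.salmon_quant.sf file must sit next to it):
#   ./filter-nTx-by-quant.py TKLab201907n_PELCHtx_mms9171+Heart_NoPart_nTx.fa > kept_nTx.fa
# Only transcripts with at least one estimated read (NumReads >= 1.0) are kept,
# each written with its TPM and read count appended to the FASTA header line.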
| marcottelab/NuevoTx | quant/filter-nTx-by-quant.py | Python | apache-2.0 | 1,154 |
import itertools
import pytest
from tlz import merge
np = pytest.importorskip("numpy")
import dask
import dask.array as da
from dask import config
from dask.array.slicing import (
_sanitize_index_element,
_slice_1d,
cached_cumsum,
make_block_sorted_slices,
new_blockdim,
normalize_index,
sanitize_index,
shuffle_slice,
slice_array,
slicing_plan,
take,
)
from dask.array.utils import assert_eq, same_keys
from ..chunk import getitem
def test_slice_1d():
expected = {0: slice(10, 25, 1), 1: slice(None, None, None), 2: slice(0, 1, 1)}
result = _slice_1d(100, [25] * 4, slice(10, 51, None))
assert expected == result
# x[100:12:-3]
expected = {
0: slice(-2, -8, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3),
}
result = _slice_1d(100, [20] * 5, slice(100, 12, -3))
assert expected == result
# x[102::-3]
expected = {
0: slice(-2, -21, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3),
}
result = _slice_1d(100, [20] * 5, slice(102, None, -3))
assert expected == result
# x[::-4]
expected = {
0: slice(-1, -21, -4),
1: slice(-1, -21, -4),
2: slice(-1, -21, -4),
3: slice(-1, -21, -4),
4: slice(-1, -21, -4),
}
result = _slice_1d(100, [20] * 5, slice(None, None, -4))
assert expected == result
# x[::-7]
expected = {
0: slice(-5, -21, -7),
1: slice(-4, -21, -7),
2: slice(-3, -21, -7),
3: slice(-2, -21, -7),
4: slice(-1, -21, -7),
}
result = _slice_1d(100, [20] * 5, slice(None, None, -7))
assert expected == result
# x=range(115)
# x[::-7]
expected = {
0: slice(-7, -24, -7),
1: slice(-2, -24, -7),
2: slice(-4, -24, -7),
3: slice(-6, -24, -7),
4: slice(-1, -24, -7),
}
result = _slice_1d(115, [23] * 5, slice(None, None, -7))
assert expected == result
# x[79::-3]
expected = {
0: slice(-1, -21, -3),
1: slice(-3, -21, -3),
2: slice(-2, -21, -3),
3: slice(-1, -21, -3),
}
result = _slice_1d(100, [20] * 5, slice(79, None, -3))
assert expected == result
# x[-1:-8:-1]
expected = {4: slice(-1, -8, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(-1, 92, -1))
assert expected == result
# x[20:0:-1]
expected = {0: slice(-1, -20, -1), 1: slice(-20, -21, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(20, 0, -1))
assert expected == result
# x[:0]
expected = {}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(0))
assert result
# x=range(99)
expected = {
0: slice(-3, -21, -3),
1: slice(-2, -21, -3),
2: slice(-1, -21, -3),
3: slice(-2, -20, -3),
4: slice(-1, -21, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(99, [20, 20, 20, 19, 20], slice(100, None, -3))
assert expected == result
# x=range(104)
# x[::-3]
expected = {
0: slice(-1, -21, -3),
1: slice(-3, -24, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, None, -3))
assert expected == result
# x=range(104)
# x[:27:-3]
expected = {
1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, 27, -3))
assert expected == result
# x=range(104)
# x[100:27:-3]
expected = {
1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-4, -22, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(100, 27, -3))
assert expected == result
# x=range(1000000000000)
# x[1000:]
expected = {0: slice(1000, 1000000000, 1)}
expected.update({ii: slice(None, None, None) for ii in range(1, 1000)})
# This array is large
result = _slice_1d(1000000000000, [1000000000] * 1000, slice(1000, None, None))
assert expected == result
def test_slice_singleton_value_on_boundary():
assert _slice_1d(15, [5, 5, 5], 10) == {2: 0}
assert _slice_1d(30, (5, 5, 5, 5, 5, 5), 10) == {2: 0}
def test_slice_array_1d():
# x[24::2]
expected = {
("y", 0): (getitem, ("x", 0), (slice(24, 25, 2),)),
("y", 1): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 2): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 3): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [[25] * 4], [slice(24, None, 2)], 8)
assert expected == result
# x[26::2]
expected = {
("y", 0): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 1): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 2): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [[25] * 4], [slice(26, None, 2)], 8)
assert expected == result
# x[24::2]
expected = {
("y", 0): (getitem, ("x", 0), (slice(24, 25, 2),)),
("y", 1): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 2): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 3): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [(25,) * 4], (slice(24, None, 2),), 8)
assert expected == result
# x[26::2]
expected = {
("y", 0): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 1): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 2): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [(25,) * 4], (slice(26, None, 2),), 8)
assert expected == result
def test_slice_array_2d():
# 2d slices: x[13::2,10::1]
expected = {
("y", 0, 0): (getitem, ("x", 0, 0), (slice(13, 20, 2), slice(10, 20, 1))),
("y", 0, 1): (
getitem,
("x", 0, 1),
(slice(13, 20, 2), slice(None, None, None)),
),
("y", 0, 2): (
getitem,
("x", 0, 2),
(slice(13, 20, 2), slice(None, None, None)),
),
}
result, chunks = slice_array(
"y",
"x",
[[20], [20, 20, 5]],
[slice(13, None, 2), slice(10, None, 1)],
itemsize=8,
)
assert expected == result
# 2d slices with one dimension: x[5,10::1]
expected = {
("y", 0): (getitem, ("x", 0, 0), (5, slice(10, 20, 1))),
("y", 1): (getitem, ("x", 0, 1), (5, slice(None, None, None))),
("y", 2): (getitem, ("x", 0, 2), (5, slice(None, None, None))),
}
result, chunks = slice_array(
"y", "x", ([20], [20, 20, 5]), [5, slice(10, None, 1)], 8
)
assert expected == result
def test_slice_optimizations():
# bar[:]
expected = {("foo", 0): ("bar", 0)}
result, chunks = slice_array("foo", "bar", [[100]], (slice(None, None, None),), 8)
assert expected == result
# bar[:,:,:]
expected = {("foo", 0): ("bar", 0), ("foo", 1): ("bar", 1), ("foo", 2): ("bar", 2)}
result, chunks = slice_array(
"foo",
"bar",
[(100, 1000, 10000)],
(slice(None, None, None), slice(None, None, None), slice(None, None, None)),
itemsize=8,
)
assert expected == result
def test_slicing_with_singleton_indices():
result, chunks = slice_array(
"y", "x", ([5, 5], [5, 5]), (slice(0, 5), 8), itemsize=8
)
expected = {("y", 0): (getitem, ("x", 0, 1), (slice(None, None, None), 3))}
assert expected == result
def test_slicing_with_newaxis():
result, chunks = slice_array(
"y",
"x",
([5, 5], [5, 5]),
(slice(0, 3), None, slice(None, None, None)),
itemsize=8,
)
expected = {
("y", 0, 0, 0): (
getitem,
("x", 0, 0),
(slice(0, 3, 1), None, slice(None, None, None)),
),
("y", 0, 0, 1): (
getitem,
("x", 0, 1),
(slice(0, 3, 1), None, slice(None, None, None)),
),
}
assert expected == result
assert chunks == ((3,), (1,), (5, 5))
def test_take():
chunks, dsk = take("y", "x", [(20, 20, 20, 20)], [5, 1, 47, 3], itemsize=8, axis=0)
expected = {
("y", 0): (getitem, ("x", 0), (np.array([5, 1]),)),
("y", 1): (getitem, ("x", 2), (np.array([7]),)),
("y", 2): (getitem, ("x", 0), (np.array([3]),)),
}
np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))
assert chunks == ((2, 1, 1),)
chunks, dsk = take(
"y", "x", [(20, 20, 20, 20), (20, 20)], [5, 1, 47, 3], itemsize=8, axis=0
)
expected = {
("y", 0, 0): (
getitem,
("x", 0, 0),
(np.array([5, 1]), slice(None, None, None)),
),
("y", 0, 1): (
getitem,
("x", 0, 1),
(np.array([5, 1]), slice(None, None, None)),
),
("y", 1, 0): (getitem, ("x", 2, 0), (np.array([7]), slice(None, None, None))),
("y", 1, 1): (getitem, ("x", 2, 1), (np.array([7]), slice(None, None, None))),
("y", 2, 0): (getitem, ("x", 0, 0), (np.array([3]), slice(None, None, None))),
("y", 2, 1): (getitem, ("x", 0, 1), (np.array([3]), slice(None, None, None))),
}
np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))
assert chunks == ((2, 1, 1), (20, 20))
def test_take_sorted():
chunks, dsk = take("y", "x", [(20, 20, 20, 20)], [1, 3, 5, 47], itemsize=8, axis=0)
expected = {
("y", 0): (getitem, ("x", 0), ([1, 3, 5],)),
("y", 1): (getitem, ("x", 2), ([7],)),
}
np.testing.assert_equal(dsk, expected)
assert chunks == ((3, 1),)
chunks, dsk = take(
"y", "x", [(20, 20, 20, 20), (20, 20)], [1, 3, 5, 37], itemsize=8, axis=1
)
expected = merge(
{
("y", i, 0): (getitem, ("x", i, 0), (slice(None, None, None), [1, 3, 5]))
for i in range(4)
},
{
("y", i, 1): (getitem, ("x", i, 1), (slice(None, None, None), [17]))
for i in range(4)
},
)
np.testing.assert_equal(dsk, expected)
assert chunks == ((20, 20, 20, 20), (3, 1))
def test_slicing_chunks():
result, chunks = slice_array(
"y", "x", ([5, 5], [5, 5]), (1, np.array([2, 0, 3])), itemsize=8
)
assert chunks == ((3,),)
result, chunks = slice_array(
"y", "x", ([5, 5], [5, 5]), (slice(0, 7), np.array([2, 0, 3])), itemsize=8
)
assert chunks == ((5, 2), (3,))
result, chunks = slice_array(
"y", "x", ([5, 5], [5, 5]), (slice(0, 7), 1), itemsize=8
)
assert chunks == ((5, 2),)
def test_slicing_with_numpy_arrays():
a, bd1 = slice_array(
"y",
"x",
((3, 3, 3, 1), (3, 3, 3, 1)),
(np.array([1, 2, 9]), slice(None, None, None)),
itemsize=8,
)
b, bd2 = slice_array(
"y",
"x",
((3, 3, 3, 1), (3, 3, 3, 1)),
(np.array([1, 2, 9]), slice(None, None, None)),
itemsize=8,
)
assert bd1 == bd2
np.testing.assert_equal(a, b)
i = [False, True, True, False, False, False, False, False, False, True]
index = (i, slice(None, None, None))
index = normalize_index(index, (10, 10))
c, bd3 = slice_array("y", "x", ((3, 3, 3, 1), (3, 3, 3, 1)), index, itemsize=8)
assert bd1 == bd3
np.testing.assert_equal(a, c)
def test_slicing_and_chunks():
o = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))
t = o[4:-4, 2:-2]
assert t.chunks == ((8, 8), (6, 6))
def test_slicing_identities():
a = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))
assert a is a[slice(None)]
assert a is a[:]
assert a is a[::]
assert a is a[...]
assert a is a[0:]
assert a is a[0::]
assert a is a[::1]
assert a is a[0 : len(a)]
assert a is a[0::1]
assert a is a[0 : len(a) : 1]
def test_slice_stop_0():
# from gh-125
a = da.ones(10, chunks=(10,))[:0].compute()
b = np.ones(10)[:0]
assert_eq(a, b)
def test_slice_list_then_None():
x = da.zeros(shape=(5, 5), chunks=(3, 3))
y = x[[2, 1]][None]
assert_eq(y, np.zeros((1, 2, 5)))
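# Helper for the indexing tests below: subscripting a ReturnItem instance just
# hands back the index expression itself (e.g. I[::-1] -> slice(None, None, -1)),
# which makes lists of indexers easy to write.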
class ReturnItem:
def __getitem__(self, key):
return key
@pytest.mark.skip(reason="really long test")
def test_slicing_exhaustively():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
I = ReturnItem()
# independent indexing along different axes
indexers = [0, -2, I[:], I[:5], [0, 1], [0, 1, 2], [4, 2], I[::-1], None, I[:0], []]
for i in indexers:
assert_eq(x[i], a[i]), i
for j in indexers:
assert_eq(x[i][:, j], a[i][:, j]), (i, j)
assert_eq(x[:, i][j], a[:, i][j]), (i, j)
for k in indexers:
assert_eq(x[..., i][:, j][k], a[..., i][:, j][k]), (i, j, k)
# repeated indexing along the first axis
first_indexers = [I[:], I[:5], np.arange(5), [3, 1, 4, 5, 0], np.arange(6) < 6]
second_indexers = [0, -1, 3, I[:], I[:3], I[2:-1], [2, 4], [], I[:0]]
for i in first_indexers:
for j in second_indexers:
assert_eq(x[i][j], a[i][j]), (i, j)
def test_slicing_with_negative_step_flops_keys():
x = da.arange(10, chunks=5)
y = x[:1:-1]
assert (x.name, 1) in y.dask[(y.name, 0)]
assert (x.name, 0) in y.dask[(y.name, 1)]
assert_eq(y, np.arange(10)[:1:-1])
assert y.chunks == ((5, 3),)
assert y.dask[(y.name, 0)] == (getitem, (x.name, 1), (slice(-1, -6, -1),))
assert y.dask[(y.name, 1)] == (getitem, (x.name, 0), (slice(-1, -4, -1),))
def test_empty_slice():
x = da.ones((5, 5), chunks=(2, 2), dtype="i4")
y = x[:0]
assert_eq(y, np.ones((5, 5), dtype="i4")[:0])
def test_multiple_list_slicing():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
assert_eq(x[:, [0, 1, 2]][[0, 1]], a[:, [0, 1, 2]][[0, 1]])
def test_boolean_list_slicing():
with pytest.raises(IndexError):
da.asarray(range(2))[[True]]
with pytest.raises(IndexError):
da.asarray(range(2))[[False, False, False]]
x = np.arange(5)
ind = [True, False, False, False, True]
assert_eq(da.asarray(x)[ind], x[ind])
# https://github.com/dask/dask/issues/3706
ind = [True]
assert_eq(da.asarray([0])[ind], np.arange(1)[ind])
def test_boolean_numpy_array_slicing():
with pytest.raises(IndexError):
da.asarray(range(2))[np.array([True])]
with pytest.raises(IndexError):
da.asarray(range(2))[np.array([False, False, False])]
x = np.arange(5)
ind = np.array([True, False, False, False, True])
assert_eq(da.asarray(x)[ind], x[ind])
# https://github.com/dask/dask/issues/3706
ind = np.array([True])
assert_eq(da.asarray([0])[ind], np.arange(1)[ind])
def test_empty_list():
x = np.ones((5, 5, 5), dtype="i4")
dx = da.from_array(x, chunks=2)
assert_eq(dx[[], :3, :2], x[[], :3, :2])
assert_eq(dx[:3, [], :2], x[:3, [], :2])
assert_eq(dx[:3, :2, []], x[:3, :2, []])
def test_uneven_chunks():
assert da.ones(20, chunks=5)[::2].chunks == ((3, 2, 3, 2),)
def test_new_blockdim():
assert new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2)) == [3, 2, 3, 2]
def test_slicing_consistent_names():
x = np.arange(100).reshape((10, 10))
a = da.from_array(x, chunks=(5, 5))
assert same_keys(a[0], a[0])
assert same_keys(a[:, [1, 2, 3]], a[:, [1, 2, 3]])
assert same_keys(a[:, 5:2:-1], a[:, 5:2:-1])
assert same_keys(a[0, ...], a[0, ...])
assert same_keys(a[...], a[...])
assert same_keys(a[[1, 3, 5]], a[[1, 3, 5]])
assert same_keys(a[-11:11], a[:])
assert same_keys(a[-11:-9], a[:1])
assert same_keys(a[-1], a[9])
assert same_keys(a[0::-1], a[0:-11:-1])
def test_slicing_consistent_names_after_normalization():
x = da.zeros(10, chunks=(5,))
assert same_keys(x[0:], x[:10])
assert same_keys(x[0:], x[0:10])
assert same_keys(x[0:], x[0:10:1])
assert same_keys(x[:], x[0:10:1])
def test_sanitize_index_element():
with pytest.raises(TypeError):
_sanitize_index_element("Hello!")
def test_sanitize_index():
pd = pytest.importorskip("pandas")
with pytest.raises(TypeError):
sanitize_index("Hello!")
np.testing.assert_equal(sanitize_index(pd.Series([1, 2, 3])), [1, 2, 3])
np.testing.assert_equal(sanitize_index((1, 2, 3)), [1, 2, 3])
def test_uneven_blockdims():
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30), (100,))
index = (slice(240, 270), slice(None))
dsk_out, bd_out = slice_array("in", "out", blockdims, index, itemsize=8)
sol = {
("in", 0, 0): (getitem, ("out", 7, 0), (slice(28, 31, 1), slice(None))),
("in", 1, 0): (getitem, ("out", 8, 0), (slice(0, 27, 1), slice(None))),
}
assert dsk_out == sol
assert bd_out == ((3, 27), (100,))
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30),) * 2
index = (slice(240, 270), slice(180, 230))
dsk_out, bd_out = slice_array("in", "out", blockdims, index, itemsize=8)
sol = {
("in", 0, 0): (getitem, ("out", 7, 5), (slice(28, 31, 1), slice(29, 30, 1))),
("in", 0, 1): (getitem, ("out", 7, 6), (slice(28, 31, 1), slice(None))),
("in", 0, 2): (getitem, ("out", 7, 7), (slice(28, 31, 1), slice(0, 18, 1))),
("in", 1, 0): (getitem, ("out", 8, 5), (slice(0, 27, 1), slice(29, 30, 1))),
("in", 1, 1): (getitem, ("out", 8, 6), (slice(0, 27, 1), slice(None))),
("in", 1, 2): (getitem, ("out", 8, 7), (slice(0, 27, 1), slice(0, 18, 1))),
}
assert dsk_out == sol
assert bd_out == ((3, 27), (1, 31, 18))
def test_oob_check():
x = da.ones(5, chunks=(2,))
with pytest.raises(IndexError):
x[6]
with pytest.raises(IndexError):
x[[6]]
with pytest.raises(IndexError):
x[-10]
with pytest.raises(IndexError):
x[[-10]]
with pytest.raises(IndexError):
x[0, 0]
@pytest.mark.parametrize("idx_chunks", [None, 3, 2, 1])
@pytest.mark.parametrize("x_chunks", [None, (3, 5), (2, 3), (1, 2), (1, 1)])
def test_index_with_int_dask_array(x_chunks, idx_chunks):
# test data is crafted to stress use cases:
# - pick from different chunks of x out of order
# - a chunk of x contains no matches
# - only one chunk of x
x = np.array(
[[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]
)
idx = np.array([3, 0, 1])
expect = np.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])
if x_chunks is not None:
x = da.from_array(x, chunks=x_chunks)
if idx_chunks is not None:
idx = da.from_array(idx, chunks=idx_chunks)
assert_eq(x[:, idx], expect)
assert_eq(x.T[idx, :], expect.T)
@pytest.mark.parametrize("chunks", [1, 2, 3])
def test_index_with_int_dask_array_0d(chunks):
# Slice by 0-dimensional array
x = da.from_array([[10, 20, 30], [40, 50, 60]], chunks=chunks)
idx0 = da.from_array(1, chunks=1)
assert_eq(x[idx0, :], x[1, :])
assert_eq(x[:, idx0], x[:, 1])
@pytest.mark.parametrize("chunks", [1, 2, 3, 4, 5])
def test_index_with_int_dask_array_nanchunks(chunks):
# Slice by array with nan-sized chunks
a = da.arange(-2, 3, chunks=chunks)
assert_eq(a[a.nonzero()], np.array([-2, -1, 1, 2]))
# Edge case: the nan-sized chunks resolve to size 0
a = da.zeros(5, chunks=chunks)
assert_eq(a[a.nonzero()], np.array([]))
@pytest.mark.parametrize("chunks", [2, 4])
def test_index_with_int_dask_array_negindex(chunks):
a = da.arange(4, chunks=chunks)
idx = da.from_array([-1, -4], chunks=1)
assert_eq(a[idx], np.array([3, 0]))
@pytest.mark.parametrize("chunks", [2, 4])
def test_index_with_int_dask_array_indexerror(chunks):
a = da.arange(4, chunks=chunks)
idx = da.from_array([4], chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
idx = da.from_array([-5], chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"]
)
def test_index_with_int_dask_array_dtypes(dtype):
a = da.from_array([10, 20, 30, 40], chunks=-1)
idx = da.from_array(np.array([1, 2]).astype(dtype), chunks=1)
assert_eq(a[idx], np.array([20, 30]))
def test_index_with_int_dask_array_nocompute():
"""Test that when the indices are a dask array
they are not accidentally computed
"""
def crash():
raise NotImplementedError()
x = da.arange(5, chunks=-1)
idx = da.Array({("x", 0): (crash,)}, name="x", chunks=((2,),), dtype=np.int64)
result = x[idx]
with pytest.raises(NotImplementedError):
result.compute()
def test_index_with_bool_dask_array():
x = np.arange(36).reshape((6, 6))
d = da.from_array(x, chunks=(3, 3))
ind = np.asarray([True, True, False, True, False, False], dtype=bool)
ind = da.from_array(ind, chunks=2)
for index in [ind, (slice(1, 9, 2), ind), (ind, slice(2, 8, 1))]:
x_index = dask.compute(index)[0]
assert_eq(x[x_index], d[index])
def test_index_with_bool_dask_array_2():
x = np.random.random((10, 10, 10))
ind = np.random.random(10) > 0.5
d = da.from_array(x, chunks=(3, 4, 5))
dind = da.from_array(ind, chunks=4)
index = [slice(1, 9, 1), slice(None)]
for i in range(x.ndim):
index2 = index[:]
index2.insert(i, dind)
index3 = index[:]
index3.insert(i, ind)
assert_eq(x[tuple(index3)], d[tuple(index2)])
@pytest.mark.xfail
def test_cull():
x = da.ones(1000, chunks=(10,))
for slc in [1, slice(0, 30), slice(0, None, 100)]:
y = x[slc]
assert len(y.dask) < len(x.dask)
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 5)])
@pytest.mark.parametrize(
"index", [(Ellipsis,), (None, Ellipsis), (Ellipsis, None), (None, Ellipsis, None)]
)
def test_slicing_with_Nones(shape, index):
x = np.random.random(shape)
d = da.from_array(x, chunks=shape)
assert_eq(x[index], d[index])
indexers = [Ellipsis, slice(2), 0, 1, -2, -1, slice(-2, None), None]
"""
# We comment this out because it is 4096 tests
@pytest.mark.parametrize('a', indexers)
@pytest.mark.parametrize('b', indexers)
@pytest.mark.parametrize('c', indexers)
@pytest.mark.parametrize('d', indexers)
def test_slicing_none_int_ellipses(a, b, c, d):
if (a, b, c, d).count(Ellipsis) > 1:
return
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
"""
def test_slicing_integer_no_warnings():
# https://github.com/dask/dask/pull/2457/
X = da.random.random((100, 2), (2, 2))
idx = np.array([0, 0, 1, 1])
with pytest.warns(None) as rec:
X[idx].compute()
assert len(rec) == 0
@pytest.mark.slow
def test_slicing_none_int_ellipes():
shape = (2, 3, 5, 7, 11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
for ind in itertools.product(indexers, indexers, indexers, indexers):
if ind.count(Ellipsis) > 1:
continue
assert_eq(x[ind], y[ind])
def test_None_overlap_int():
a, b, c, d = (0, slice(None, 2, None), None, Ellipsis)
shape = (2, 3, 5, 7, 11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
def test_negative_n_slicing():
assert_eq(da.ones(2, chunks=2)[-2], np.ones(2)[-2])
def test_negative_list_slicing():
x = np.arange(5)
dx = da.from_array(x, chunks=2)
assert_eq(dx[[0, -5]], x[[0, -5]])
assert_eq(dx[[4, -1]], x[[4, -1]])
def test_permit_oob_slices():
x = np.arange(5)
dx = da.from_array(x, chunks=2)
assert_eq(x[-102:], dx[-102:])
assert_eq(x[102:], dx[102:])
assert_eq(x[:102], dx[:102])
assert_eq(x[:-102], dx[:-102])
def test_normalize_index():
assert normalize_index((Ellipsis, None), (10,)) == (slice(None), None)
assert normalize_index(5, (np.nan,)) == (5,)
assert normalize_index(-5, (np.nan,)) == (-5,)
(result,) = normalize_index([-5, -2, 1], (np.nan,))
assert result.tolist() == [-5, -2, 1]
assert normalize_index(slice(-5, -2), (np.nan,)) == (slice(-5, -2),)
def test_take_semi_sorted():
x = da.ones(10, chunks=(5,))
index = np.arange(15) % 10
y = x[index]
assert y.chunks == ((5, 5, 5),)
@pytest.mark.parametrize(
"chunks,index,expected",
[
((5, 5, 5), np.arange(5, 15) % 10, [(1, np.arange(5)), (0, np.arange(5))]),
(
(5, 5, 5, 5),
np.arange(20) // 2,
[(0, np.arange(10) // 2), (1, np.arange(10) // 2)],
),
((10, 10), [15, 2, 3, 15], [(1, [5]), (0, [2, 3]), (1, [5])]),
],
)
def test_slicing_plan(chunks, index, expected):
plan = slicing_plan(chunks, index=index)
assert len(plan) == len(expected)
for (i, x), (j, y) in zip(plan, expected):
assert i == j
assert len(x) == len(y)
assert (x == y).all()
def test_getitem_avoids_large_chunks():
with dask.config.set({"array.chunk-size": "0.1Mb"}):
a = np.arange(2 * 128 * 128, dtype="int64").reshape(2, 128, 128)
arr = da.from_array(a, chunks=(1, 128, 128))
indexer = [0] + [1] * 11
expected = a[indexer]
# By default, we warn
with pytest.warns(da.PerformanceWarning):
result = arr[indexer]
assert_eq(result, expected)
assert result.chunks == ((1, 11), (128,), (128,))
# Users can silence the warning
with dask.config.set({"array.slicing.split-large-chunks": False}):
with pytest.warns(None) as e:
result = arr[indexer]
assert len(e) == 0
assert_eq(result, expected)
        # Users can also request that large chunks be split, which avoids the warning too
with dask.config.set({"array.slicing.split-large-chunks": True}):
with pytest.warns(None) as e:
result = arr[indexer]
            assert len(e) == 0  # no warning raised
assert_eq(result, expected)
assert result.chunks == ((1,) * 12, (128,), (128,))
@pytest.mark.parametrize(
"chunks",
[
((1, 1, 1, 1), (np.nan,), (np.nan,)),
pytest.param(
((np.nan, np.nan, np.nan, np.nan), (500,), (500,)),
marks=pytest.mark.xfail(reason="https://github.com/dask/dask/issues/6586"),
),
],
)
def test_getitem_avoids_large_chunks_missing(chunks):
# We cannot apply the "avoid large chunks" optimization when
# the chunks have unknown sizes.
with dask.config.set({"array.slicing.split-large-chunks": True}):
a = np.arange(4 * 500 * 500).reshape(4, 500, 500)
arr = da.from_array(a, chunks=(1, 500, 500))
arr._chunks = chunks
indexer = [0, 1] + [2] * 100 + [3]
expected = a[indexer]
result = arr[indexer]
assert_eq(result, expected)
def test_take_avoids_large_chunks():
# unit test for https://github.com/dask/dask/issues/6270
with dask.config.set({"array.slicing.split-large-chunks": True}):
chunks = ((1, 1, 1, 1), (500,), (500,))
itemsize = 8
index = np.array([0, 1] + [2] * 101 + [3])
chunks2, dsk = take("a", "b", chunks, index, itemsize)
assert chunks2 == ((1, 1, 51, 50, 1), (500,), (500,))
assert len(dsk) == 5
index = np.array([0] * 101 + [1, 2, 3])
chunks2, dsk = take("a", "b", chunks, index, itemsize)
assert chunks2 == ((51, 50, 1, 1, 1), (500,), (500,))
assert len(dsk) == 5
index = np.array([0, 1, 2] + [3] * 101)
chunks2, dsk = take("a", "b", chunks, index, itemsize)
assert chunks2 == ((1, 1, 1, 51, 50), (500,), (500,))
assert len(dsk) == 5
chunks = ((500,), (1, 1, 1, 1), (500,))
index = np.array([0, 1, 2] + [3] * 101)
chunks2, dsk = take("a", "b", chunks, index, itemsize, axis=1)
assert chunks2 == ((500,), (1, 1, 1, 51, 50), (500,))
assert len(dsk) == 5
def test_take_uses_config():
with dask.config.set({"array.slicing.split-large-chunks": True}):
chunks = ((1, 1, 1, 1), (500,), (500,))
index = np.array([0, 1] + [2] * 101 + [3])
itemsize = 8
with config.set({"array.chunk-size": "10GB"}):
chunks2, dsk = take("a", "b", chunks, index, itemsize)
assert chunks2 == ((1, 1, 101, 1), (500,), (500,))
assert len(dsk) == 4
def test_pathological_unsorted_slicing():
x = da.ones(100, chunks=10)
# [0, 10, 20, ... 90, 1, 11, 21, ... 91, ...]
index = np.arange(100).reshape(10, 10).ravel(order="F")
with pytest.warns(da.PerformanceWarning) as info:
x[index]
assert "10" in str(info.list[0])
assert "out-of-order" in str(info.list[0])
def test_cached_cumsum():
a = (1, 2, 3, 4)
x = cached_cumsum(a)
y = cached_cumsum(a, initial_zero=True)
assert x == (1, 3, 6, 10)
assert y == (0, 1, 3, 6, 10)
def test_cached_cumsum_nan():
a = (1, np.nan, 3)
x = cached_cumsum(a)
y = cached_cumsum(a, initial_zero=True)
np.testing.assert_equal(x, (1, np.nan, np.nan))
np.testing.assert_equal(y, (0, 1, np.nan, np.nan))
def test_cached_cumsum_non_tuple():
a = [1, 2, 3]
assert cached_cumsum(a) == (1, 3, 6)
a[1] = 4
assert cached_cumsum(a) == (1, 5, 8)
@pytest.mark.parametrize("params", [(2, 2, 1), (5, 3, 2)])
def test_setitem_with_different_chunks_preserves_shape(params):
"""Reproducer for https://github.com/dask/dask/issues/3730.
Mutating based on an array with different chunks can cause new chunks to be
used. We need to ensure those new chunk sizes are applied to the mutated
array, otherwise the array won't generate the correct keys.
"""
array_size, chunk_size1, chunk_size2 = params
x = da.zeros(array_size, chunks=chunk_size1)
mask = da.zeros(array_size, chunks=chunk_size2)
x[mask] = 1
result = x.compute()
assert x.shape == result.shape
def test_gh3579():
assert_eq(np.arange(10)[0::-1], da.arange(10, chunks=3)[0::-1])
assert_eq(np.arange(10)[::-1], da.arange(10, chunks=3)[::-1])
def test_make_blockwise_sorted_slice():
x = da.arange(8, chunks=4)
index = np.array([6, 0, 4, 2, 7, 1, 5, 3])
a, b = make_block_sorted_slices(index, x.chunks)
index2 = np.array([0, 2, 4, 6, 1, 3, 5, 7])
index3 = np.array([3, 0, 2, 1, 7, 4, 6, 5])
np.testing.assert_array_equal(a, index2)
np.testing.assert_array_equal(b, index3)
@pytest.mark.filterwarnings("ignore:Slicing:dask.array.core.PerformanceWarning")
@pytest.mark.parametrize(
"size, chunks", [((100, 2), (50, 2)), ((100, 2), (37, 1)), ((100,), (55,))]
)
def test_shuffle_slice(size, chunks):
x = da.random.randint(0, 1000, size=size, chunks=chunks)
index = np.arange(len(x))
np.random.shuffle(index)
a = x[index]
b = shuffle_slice(x, index)
assert_eq(a, b)
@pytest.mark.parametrize("lock", [True, False])
@pytest.mark.parametrize("asarray", [True, False])
@pytest.mark.parametrize("fancy", [True, False])
def test_gh4043(lock, asarray, fancy):
a1 = da.from_array(np.zeros(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)
a2 = da.from_array(np.ones(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)
al = da.stack([a1, a2])
assert_eq(al, al)
def test_slice_array_3d_with_bool_numpy_array():
# https://github.com/dask/dask/issues/6089
array = da.arange(0, 24).reshape((4, 3, 2))
mask = np.arange(0, 24).reshape((4, 3, 2)) > 12
actual = array[mask].compute()
expected = np.arange(13, 24)
assert_eq(actual, expected)
| jakirkham/dask | dask/array/tests/test_slicing.py | Python | bsd-3-clause | 32,269 |
# -*- coding:utf-8 -*-
import logging
import warnings
from flypwd.config import config
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
log = logging.getLogger(__name__)
def check_key(keyfile):
"""
checks the RSA key file
raises ValueError if not valid
"""
with open(keyfile, 'r') as f:
return RSA.importKey(f.read(), passphrase="")
def gen_key():
return RSA.generate(config.getint('keys', 'dimension'))
def encrypt_with_pub(pwd, pub):
cipher = PKCS1_v1_5.new(pub)
return cipher.encrypt(pwd.encode('utf-8'))
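# Minimal sketch of how these helpers fit together (assumes the flypwd config
# defines a [keys] section with a 'dimension' entry, e.g. 2048):
if __name__ == '__main__':
    key = gen_key()                                   # fresh RSA key pair
    ciphertext = encrypt_with_pub('s3cret', key.publickey())
    # Decryption is outside this module's scope; it would use the matching
    # PKCS1_v1_5 cipher built from the private key.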
| giupo/flypwd | flypwd/keys.py | Python | bsd-3-clause | 658 |
from RMPY.rig.biped.rig import arm
from RMPY.rig.biped.rig import rigSpine
from RMPY.rig.biped.rig import hand
from RMPY.rig import rigFK
from RMPY.rig import rigWorld
from RMPY.rig.biped.rig import neckHead
from RMPY.rig.biped.rig import rigIKFKLegFeet
from RMPY.rig import rigBase
from RMPY.rig import rigProp
from RMPY.rig.biped.rig import armSpaceSwitch
from RMPY.rig.biped.rig import legSpaceSwitch
from RMPY.rig.quadruped.rigs import rigIKQuadLegFeet
class RigBypedModel(rigBase.BaseModel):
def __init__(self, **kwargs):
super(RigBypedModel, self).__init__(**kwargs)
self.l_arm = arm.Arm()
self.r_arm = arm.Arm()
self.l_leg = rigIKQuadLegFeet.RigIKQuadLegFeet()
self.r_leg = rigIKQuadLegFeet.RigIKQuadLegFeet()
self.l_hand = hand.Hand()
self.r_hand = hand.Hand()
self.neck_head = neckHead.NeckHead()
self.spine = rigSpine.RigSpine()
self.hip = rigFK.RigFK()
self.cog = rigProp.RigProp()
self.jaw = rigFK.RigFK()
self.rig_world = rigWorld.RigWorld()
self.l_arm_space_switch = armSpaceSwitch.ArmSpaceSwitch()
self.r_arm_space_switch = armSpaceSwitch.ArmSpaceSwitch()
self.l_leg_space_switch = legSpaceSwitch.LegSpaceSwitch()
self.r_leg_space_switch = legSpaceSwitch.LegSpaceSwitch()
class RigByped(rigBase.RigBase):
def __init__(self, *args, **kwargs):
super(RigByped, self).__init__(*args, **kwargs)
self._model = RigBypedModel()
self.arm_root = [u'{}_clavicle01_reference_pnt', u'{}_shoulder01_reference_pnt', u'{}_elbow01_reference_pnt',
u'{}_wrist01_reference_pnt']
self.leg_root = [u'{}_backLeg00_reference_pnt', u'{}_backLeg01_reference_pnt',
u'{}_backLeg02_reference_pnt', u'{}_paw00_reference_pnt']
self.feet_root = [u'{}_pawRoll00_reference_pnt', u'{}_pawToe00_reference_pnt',
u'{}_footLimitBack00_reference_pnt', u'{}_footLimitOuter00_reference_pnt',
u'{}_footLimitInner00_reference_pnt']
self.fingers = [u'{}_leg01_reference_pnt', u'{}_Knee01_reference_pnt', u'{}_ankle01_reference_pnt',
u'{}_ankleFeet01_reference_pnt']
self.hand_root = [u'{}_palm01_reference_pnt']
self.hip_root = [u'C_Hip00_reference_pnt', u'C_Hip01_reference_pnt']
self.jaw_root = [u'C_jaw01_reference_pnt', u'C_jawTip01_reference_pnt']
self.spine_root = [u'C_Spine01_reference_pnt', u'C_Spine02_reference_pnt', u'C_Spine03_reference_pnt',
u'C_Spine04_reference_pnt', u'C_Spine05_reference_pnt']
self.COG_root = u'C_COG_reference_pnt'
self.neck_root = [u'C_neck00_reference_pnt', u'C_head00_reference_pnt', u'C_headTip00_reference_pnt']
@property
def neck_head(self):
return self._model.neck_head
@property
def spine(self):
return self._model.spine
@property
def l_arm(self):
return self._model.l_arm
@property
def r_arm(self):
return self._model.r_arm
@property
def l_leg(self):
return self._model.l_leg
@property
def r_leg(self):
return self._model.r_leg
@property
def l_hand(self):
return self._model.l_hand
@property
def r_hand(self):
return self._model.r_hand
@property
def hip(self):
return self._model.hip
@property
def cog(self):
return self._model.cog
@property
def rig_world(self):
return self._model.rig_world
@property
def l_arm_space_switch(self):
return self._model.l_arm_space_switch
@property
def r_arm_space_switch(self):
        return self._model.r_arm_space_switch
@property
def l_leg_space_switch(self):
return self._model.l_leg_space_switch
@property
def r_leg_space_switch(self):
return self._model.r_leg_space_switch
@property
def jaw(self):
return self._model.jaw
def build(self):
self.spine.create_point_base(*self.spine_root)
self.hip.create_point_base(*self.hip_root, name='hip')
self.cog.create_point_base(self.COG_root, name='cog')
self.l_arm.create_point_base(*[each.format('L') for each in self.arm_root])
self.l_arm.set_parent(self.spine)
self.r_arm.create_point_base(*[each.format('R') for each in self.arm_root])
self.r_arm.set_parent(self.spine)
self.l_hand.create_point_base(*[each.format('L') for each in self.hand_root])
self.l_hand.set_parent(self.l_arm)
self.l_arm_space_switch.build(self.l_arm, self.rig_world)
self.r_hand.create_point_base(*[each.format('R') for each in self.hand_root])
self.r_hand.set_parent(self.r_arm)
self.r_arm_space_switch.build(self.r_arm, self.rig_world)
l_root_points = [each.format('L') for each in self.leg_root]
l_root_points.extend([each.format('L') for each in self.feet_root])
self.l_leg.create_point_base(*l_root_points)
self.l_leg_space_switch.build(self.l_leg, self.rig_world)
r_root_points = [each.format('R') for each in self.leg_root]
r_root_points.extend([each.format('R') for each in self.feet_root])
self.r_leg.create_point_base(*r_root_points)
self.r_leg_space_switch.build(self.r_leg, self.rig_world)
self.neck_head.create_point_base(*self.neck_root)
self.jaw.create_point_base(*self.jaw_root)
self.neck_head.set_parent(self.spine)
self.jaw.set_parent(self.neck_head)
self.cog.set_parent(self.rig_world)
self.spine.set_parent(self.cog)
self.hip.set_parent(self.cog)
self.l_leg.set_parent(self.hip)
self.r_leg.set_parent(self.hip)
# setup as skinned joints
self.jaw.rename_as_skinned_joints()
self.spine.rename_as_skinned_joints()
self.hip.rename_as_skinned_joints()
self.l_arm.rename_as_skinned_joints()
self.r_arm.rename_as_skinned_joints()
self.l_hand.rename_as_skinned_joints()
self.r_hand.rename_as_skinned_joints()
self.l_leg.rename_as_skinned_joints()
self.r_leg.rename_as_skinned_joints()
self.neck_head.rename_as_skinned_joints()
if __name__ == '__main__':
rig_biped = RigByped()
rig_biped.build()
| rendermotion/RMPY | rig/quadruped/rigQuadruped.py | Python | lgpl-3.0 | 6,548 |
"""The tests for the REST switch platform."""
import asyncio
import aiohttp
import homeassistant.components.rest.switch as rest
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
CONF_HEADERS,
CONF_NAME,
CONF_PARAMS,
CONF_PLATFORM,
CONF_RESOURCE,
CONTENT_TYPE_JSON,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_NOT_FOUND,
HTTP_OK,
)
from homeassistant.helpers.template import Template
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
"""Tests for setting up the REST switch platform."""
NAME = "foo"
METHOD = "post"
RESOURCE = "http://localhost/"
STATE_RESOURCE = RESOURCE
HEADERS = {"Content-type": CONTENT_TYPE_JSON}
AUTH = None
PARAMS = None
async def test_setup_missing_config(hass):
"""Test setup with configuration missing required entries."""
assert not await rest.async_setup_platform(hass, {CONF_PLATFORM: rest.DOMAIN}, None)
async def test_setup_missing_schema(hass):
"""Test setup with resource missing schema."""
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "localhost"},
None,
)
async def test_setup_failed_connect(hass, aioclient_mock):
"""Test setup when connection error occurs."""
aioclient_mock.get("http://localhost", exc=aiohttp.ClientError)
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "http://localhost"},
None,
)
async def test_setup_timeout(hass, aioclient_mock):
"""Test setup when connection timeout occurs."""
aioclient_mock.get("http://localhost", exc=asyncio.TimeoutError())
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "http://localhost"},
None,
)
async def test_setup_minimum(hass, aioclient_mock):
"""Test setup with minimum configuration."""
aioclient_mock.get("http://localhost", status=HTTP_OK)
with assert_setup_component(1, SWITCH_DOMAIN):
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_RESOURCE: "http://localhost",
}
},
)
assert aioclient_mock.call_count == 1
async def test_setup_query_params(hass, aioclient_mock):
"""Test setup with query params."""
aioclient_mock.get("http://localhost/?search=something", status=HTTP_OK)
with assert_setup_component(1, SWITCH_DOMAIN):
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_RESOURCE: "http://localhost",
CONF_PARAMS: {"search": "something"},
}
},
)
assert aioclient_mock.call_count == 1
async def test_setup(hass, aioclient_mock):
"""Test setup with valid configuration."""
aioclient_mock.get("http://localhost", status=HTTP_OK)
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_NAME: "foo",
CONF_RESOURCE: "http://localhost",
CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
rest.CONF_BODY_ON: "custom on text",
rest.CONF_BODY_OFF: "custom off text",
}
},
)
assert aioclient_mock.call_count == 1
assert_setup_component(1, SWITCH_DOMAIN)
async def test_setup_with_state_resource(hass, aioclient_mock):
"""Test setup with valid configuration."""
aioclient_mock.get("http://localhost", status=HTTP_NOT_FOUND)
aioclient_mock.get("http://localhost/state", status=HTTP_OK)
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_NAME: "foo",
CONF_RESOURCE: "http://localhost",
rest.CONF_STATE_RESOURCE: "http://localhost/state",
CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
rest.CONF_BODY_ON: "custom on text",
rest.CONF_BODY_OFF: "custom off text",
}
},
)
assert aioclient_mock.call_count == 1
assert_setup_component(1, SWITCH_DOMAIN)
"""Tests for REST switch platform."""
def _setup_test_switch(hass):
body_on = Template("on", hass)
body_off = Template("off", hass)
switch = rest.RestSwitch(
NAME,
RESOURCE,
STATE_RESOURCE,
METHOD,
HEADERS,
PARAMS,
AUTH,
body_on,
body_off,
None,
10,
True,
)
switch.hass = hass
return switch, body_on, body_off
def test_name(hass):
"""Test the name."""
switch, body_on, body_off = _setup_test_switch(hass)
assert NAME == switch.name
def test_is_on_before_update(hass):
"""Test is_on in initial state."""
switch, body_on, body_off = _setup_test_switch(hass)
assert switch.is_on is None
async def test_turn_on_success(hass, aioclient_mock):
"""Test turn_on."""
aioclient_mock.post(RESOURCE, status=HTTP_OK)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on
async def test_turn_on_status_not_ok(hass, aioclient_mock):
"""Test turn_on when error status returned."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on is None
async def test_turn_on_timeout(hass, aioclient_mock):
"""Test turn_on when timeout occurs."""
    aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert switch.is_on is None
async def test_turn_off_success(hass, aioclient_mock):
"""Test turn_off."""
aioclient_mock.post(RESOURCE, status=HTTP_OK)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_off()
assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
assert not switch.is_on
async def test_turn_off_status_not_ok(hass, aioclient_mock):
"""Test turn_off when error status returned."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_off()
assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on is None
async def test_turn_off_timeout(hass, aioclient_mock):
"""Test turn_off when timeout occurs."""
aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_off()
assert switch.is_on is None
async def test_update_when_on(hass, aioclient_mock):
"""Test update when switch is on."""
switch, body_on, body_off = _setup_test_switch(hass)
aioclient_mock.get(RESOURCE, text=body_on.template)
await switch.async_update()
assert switch.is_on
async def test_update_when_off(hass, aioclient_mock):
"""Test update when switch is off."""
switch, body_on, body_off = _setup_test_switch(hass)
aioclient_mock.get(RESOURCE, text=body_off.template)
await switch.async_update()
assert not switch.is_on
async def test_update_when_unknown(hass, aioclient_mock):
"""Test update when unknown status returned."""
aioclient_mock.get(RESOURCE, text="unknown status")
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_update()
assert switch.is_on is None
async def test_update_timeout(hass, aioclient_mock):
"""Test update when timeout occurs."""
aioclient_mock.get(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_update()
assert switch.is_on is None
| tboyce021/home-assistant | tests/components/rest/test_switch.py | Python | apache-2.0 | 8,431 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IPRule(Model):
"""IP rule with specific IP or IP range in CIDR format.
:param ip_address_or_range: Specifies the IP or IP range in CIDR format.
Only IPV4 address is allowed.
:type ip_address_or_range: str
:param action: The action of IP ACL rule. Possible values include:
'Allow'. Default value: "Allow" .
:type action: str or ~azure.mgmt.storage.v2017_10_01.models.Action
"""
_validation = {
'ip_address_or_range': {'required': True},
}
_attribute_map = {
'ip_address_or_range': {'key': 'value', 'type': 'str'},
'action': {'key': 'action', 'type': 'Action'},
}
def __init__(self, ip_address_or_range, action="Allow"):
super(IPRule, self).__init__()
self.ip_address_or_range = ip_address_or_range
self.action = action
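# Hedged usage sketch (not part of the generated model): construct a rule and
# inspect its fields. The CIDR value below is an illustrative placeholder.
if __name__ == '__main__':
    example_rule = IPRule('203.0.113.0/24')  # action defaults to "Allow"
    print(example_rule.ip_address_or_range, example_rule.action)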
| lmazuel/azure-sdk-for-python | azure-mgmt-storage/azure/mgmt/storage/v2017_10_01/models/ip_rule.py | Python | mit | 1,345 |
import rospy
import time
from collections import deque
class Publisher(object):
def __init__(self):
self.publishers = {}
self.queue = deque()
def add_publisher(self, alias, publisher):
self.publishers[alias] = publisher
def publish(self):
while len(self.queue) > 0:
alias, msg = self.queue.popleft()
print "publishing " + alias + ":" + str(msg)
self.publishers[alias].publish(msg)
def append(self, alias, msg):
self.queue.append((alias, msg))
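# Hedged usage sketch: a stand-in publisher object is used so the demo needs
# nothing beyond the rospy import at the top (no running ROS master); the
# alias and message below are illustrative.
if __name__ == '__main__':
    class _FakeRosPublisher(object):
        def publish(self, msg):
            pass
    demo = Publisher()
    demo.add_publisher('status', _FakeRosPublisher())
    demo.append('status', 'ready')
    demo.publish()  # prints and forwards each queued message in FIFO order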
| jgrizou/robot_omniwheel | catkin_ws/src/roslego/scripts/publisher.py | Python | gpl-3.0 | 541 |
#!/usr/bin/python3
#-*- coding: utf-8 -*-
name = 'ITU Turkish NLP Pipeline Caller'
__copyright__ = 'Copyright 2015-2018 Maintainers'
__license__ = 'GPLv2\n\
This program is free software; you can redistribute it and/or \
modify it under the terms of the GNU General Public license version 2 \
as published by the Free Software Foundation. \
This program is distributed in the hope that it will be useful, \
but WITHOUT ANY WARRANTY; without even the implied warranty of \
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \
GNU General Public license for more details. \
You should have received a copy of the GNU General Public license \
along with this program. If not, see <http://www.gnu.org/licenses/>.'
author0 = ('Ferit Tunçer', 'ferit@cryptolab.net')
author1 = ('Ülgen Sarıkavak', 'work@ulgens.me')
website = 'https://github.com/ferittuncer/ITU-Turkish-NLP-Pipeline-Caller'
version = '3.0.0'
import argparse
import locale
import os
import re
import time
import urllib.parse
import urllib.request
TOKEN_PATH = "pipeline.token"
TOKEN_ENVVAR = "pipeline_token"
DEFAULT_ENCODING = locale.getpreferredencoding(False)
DEFAULT_OUTPUT_DIR = 'output'
class PipelineCaller(object):
API_URL = 'http://tools.nlp.itu.edu.tr/SimpleApi'
PIPELINE_ENCODING = 'UTF-8'
    DEFAULT_SENTENCE_SPLIT_DELIMITER_CLASS = r'[\.\?:;!]'
def __init__(self, tool='pipelineNoisy', text='example', token='invalid', processing_type='whole'):
self.tool = tool
self.text = text
self.token = token
self.processing_type = processing_type
self.sentences = []
self.words = []
def call(self):
if self.processing_type == 'whole':
params = self.encode_parameters(self.text)
return self.request(params)
if self.processing_type == 'sentence':
results = []
self.parse_sentences()
for sentence in self.sentences:
params = self.encode_parameters(sentence)
results.append(self.request(params))
return "\n".join(results)
if self.processing_type == 'word':
results = []
self.parse_words()
for word in self.words:
params = self.encode_parameters(word)
results.append(self.request(params))
return "\n".join(results)
def parse_sentences(self):
r = re.compile(r'(?<=(?:{}))\s+'.format(PipelineCaller.DEFAULT_SENTENCE_SPLIT_DELIMITER_CLASS))
self.sentences = r.split(self.text)
        if re.match(r'^\s*$', self.sentences[-1]):
self.sentences.pop(-1)
def parse_words(self):
self.parse_sentences()
for sentence in self.sentences:
for word in sentence.split():
self.words.append(word)
def encode_parameters(self, text):
return urllib.parse.urlencode({'tool': self.tool, 'input': text, 'token': self.token}).encode(self.PIPELINE_ENCODING)
def request(self, params):
response = urllib.request.urlopen(self.API_URL, params)
return response.read().decode(self.PIPELINE_ENCODING)
def get_token(filename=TOKEN_PATH, envvar=TOKEN_ENVVAR):
"""
Returns pipeline_token for API
Tries local file first, then env variable
"""
if os.path.isfile(filename):
with open(filename) as token_file:
token = token_file.readline().strip()
else:
token = os.environ.get(envvar)
if not token:
raise ValueError("No token found.\n"
"{} file doesn't exist.\n{} environment variable is not set.".format(filename, envvar))
return token
def get_output_path(output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
filepath = os.path.join(output_dir, 'output{0:.0f}'.format(time.time()))
return filepath
def conditional_info(to_be_printed, quiet):
if quiet == 0:
print(to_be_printed)
def parse_arguments():
# epilog section is free now
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ITU Turkish NLP Pipeline Caller v{}\n\n Maintained by:\n {} {}\n {} {}\n\n {}'.format(version, *author0, *author1, website),
add_help=True
)
parser.add_argument('filename', help='relative input filepath')
parser.add_argument('-p', '--processing-type', dest='processing_type', choices=['word', 'sentence', 'whole'], default='whole', help='Switches processing type, default is whole text at once. Alternatively, word by word or sentence by sentence processing can be selected.')
parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help='no info during process')
parser.add_argument('--tool', dest='tool', default='pipelineNoisy', choices=['ner', 'morphanalyzer', 'isturkish', 'morphgenerator', 'tokenizer', 'normalize', 'deasciifier', 'Vowelizer', 'DepParserFormal', 'DepParserNoisy', 'spellcheck', 'disambiguator', 'pipelineFormal', 'pipelineNoisy'], help='Switches pipeline tool which is \'pipelineNoisy\' by default')
parser.add_argument('-e', '--encoding', dest='encoding', metavar='E', default=DEFAULT_ENCODING, help='force I/O to use given encoding, instead of default locale')
parser.add_argument('-o', '--output', metavar='O', dest='output_dir', default=DEFAULT_OUTPUT_DIR, help='change output directory, \'{}\' by default'.format(DEFAULT_OUTPUT_DIR))
parser.add_argument('--version', action='version', version='{} {}'.format(name, version), help='version information')
parser.add_argument('--license', action='version', version='{}'.format(__license__), help='license information')
return parser.parse_args()
def main():
args = parse_arguments()
with open(args.filename, encoding=args.encoding) as input_file:
text = input_file.read()
output_path = get_output_path(args.output_dir)
token = get_token()
conditional_info('[INFO] Pipeline tool: {}'.format(args.tool), args.quiet)
conditional_info('[INFO] File I/O encoding: {}'.format(args.encoding), args.quiet)
conditional_info('[INFO] Output destination: .{}{}'.format(os.sep, output_path), args.quiet)
start_time = time.time()
caller = PipelineCaller(args.tool, text, token, args.processing_type)
with open(output_path, 'w', encoding=args.encoding) as output_file:
output_file.write('{}\n'.format(caller.call()))
process_time = time.time() - start_time
print("[DONE] It took {0:.0f} seconds to process whole text.".format(process_time))
if __name__ == '__main__':
main()
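# Hedged usage sketch: the pipeline can also be driven programmatically rather
# than through the CLI above. The input text is a placeholder; a real token is
# read from the pipeline.token file or the pipeline_token environment variable.
#
#   caller = PipelineCaller(tool='pipelineNoisy', text='Merhaba dunya.',
#                           token=get_token(), processing_type='sentence')
#   print(caller.call())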
| ferittuncer/ITU-Turkish-NLP-Pipeline-Caller | pipeline_caller.py | Python | gpl-2.0 | 6,662 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from measurements import draw_properties
import page_sets
@benchmark.Disabled() # http://crbug.com/463111
class DrawPropertiesToughScrolling(benchmark.Benchmark):
test = draw_properties.DrawProperties
page_set = page_sets.ToughScrollingCasesPageSet
@classmethod
def Name(cls):
return 'draw_properties.tough_scrolling'
@benchmark.Disabled() # http://crbug.com/463111
class DrawPropertiesTop25(benchmark.Benchmark):
"""Measures the relative performance of CalcDrawProperties vs computing draw
properties from property trees.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
test = draw_properties.DrawProperties
page_set = page_sets.Top25SmoothPageSet
@classmethod
def Name(cls):
return 'draw_properties.top_25'
| sgraham/nope | tools/perf/benchmarks/draw_properties.py | Python | bsd-3-clause | 974 |
"""The Garages Amsterdam integration."""
from datetime import timedelta
import logging
import async_timeout
import garages_amsterdam
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN
PLATFORMS = ["binary_sensor", "sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Garages Amsterdam from a config entry."""
await get_coordinator(hass)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Garages Amsterdam config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if len(hass.config_entries.async_entries(DOMAIN)) == 1:
hass.data.pop(DOMAIN)
return unload_ok
async def get_coordinator(
hass: HomeAssistant,
) -> DataUpdateCoordinator:
"""Get the data update coordinator."""
if DOMAIN in hass.data:
return hass.data[DOMAIN]
async def async_get_garages():
async with async_timeout.timeout(10):
return {
garage.garage_name: garage
for garage in await garages_amsterdam.get_garages(
aiohttp_client.async_get_clientsession(hass)
)
}
coordinator = DataUpdateCoordinator(
hass,
logging.getLogger(__name__),
name=DOMAIN,
update_method=async_get_garages,
update_interval=timedelta(minutes=10),
)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN] = coordinator
return coordinator
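# Hedged usage sketch: a platform's setup code would typically await
# get_coordinator() and read the parsed garages from coordinator.data, e.g.
#
#   coordinator = await get_coordinator(hass)
#   for name, garage in coordinator.data.items():
#       ...  # build one entity per garage
#
# Entity construction details are assumptions and live in the platform modules.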
| jawilson/home-assistant | homeassistant/components/garages_amsterdam/__init__.py | Python | apache-2.0 | 1,819 |
from django.apps import AppConfig
class LandingConfig(AppConfig):
name = 'landing'
| duncanwp/cis_esp | web/landing/apps.py | Python | lgpl-3.0 | 89 |
"""
This module will be available in templates as ``u``.
This module is also used to lookup custom template context providers, i.e. functions
following a special naming convention which are called to update the template context
before rendering resource's detail or index views.
"""
import re
import itertools
import collections
from sqlalchemy import func, desc, text
from sqlalchemy.orm import joinedload
from clld import RESOURCES
from clld.web.util.helpers import get_referents
from clld.web.util.htmllib import HTML
from clld.db.meta import DBSession
from clld.db.models.common import (
Contributor, ValueSet, Contribution, ContributionContributor, Language, Parameter, Value,
)
from clld_glottologfamily_plugin.models import Family
from clld.web.util.multiselect import CombinationMultiSelect
from clld.web.util import glottolog # used in templates!
from clld_phylogeny_plugin.models import Phylogeny
from clldutils.misc import slug
from markdown import markdown
from grambank.models import GrambankLanguage, DatapointContributor, Datapoint
COLORS = [
# red yellow
"00ff00", "ff0000", "ffff00", "0000ff", "ff00ff", "00ffff", "000000",
]
assert markdown
def process_markdown(text, req, section=None):
from markdown import markdown
md, in_section, current_section = [], False, None
in_example = False
for i, line in enumerate(text.strip().split('\n'), start=1):
if line.startswith('##'):
current_section = line[2:].strip()
line = '##' + line
if current_section.startswith('Patron'):
break
if section and current_section != section:
in_section = False
if i == 1 and line.startswith('##'):
continue
if line.startswith('```'):
in_example = not in_example
elif in_example:
line = line.lstrip()
if (not section) or in_section:
md.append(line)
if section and current_section == section:
in_section = True
html = markdown('\n'.join(md), extensions=['tables', 'fenced_code', 'toc'])
wiki_url_pattern = re.compile('https://github.com/grambank/[gG]rambank/wiki/(?P<id>GB[0-9]{3})')
html = wiki_url_pattern.sub(lambda m: req.route_url('parameter', id=m.group('id')), html)
return html.replace('<code>', '').replace('</code>', '').replace('<table>', '<table class="table table-nonfluid">')
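# Hedged illustration of the link rewriting above: a wiki URL such as
# https://github.com/grambank/grambank/wiki/GB020 appearing in the rendered
# HTML is replaced with the app-local route req.route_url('parameter',
# id='GB020'); 'GB020' is an illustrative feature id, not taken from this file.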
def contributor_index_html(request=None, context=None, **kw):
contribs = DBSession.query(Contributor).all()
ndatapoint = {r[0]: r[1] for r in DBSession.query(
DatapointContributor.contributor_pk, func.count(DatapointContributor.datapoint_pk)).group_by(DatapointContributor.contributor_pk)}
nlangs = {r[0]: r[1] for r in DBSession.query(
ContributionContributor.contributor_pk, func.count(ContributionContributor.contribution_pk)).group_by(ContributionContributor.contributor_pk)}
res = []
for role in [
'Project leader',
'Project coordinator',
'Database manager',
'Patron',
'Node leader',
'Coder',
'Methods-team',
'Senior advisor',
]:
cs = [c for c in contribs if role in c.jsondata['roles']]
iter_ = iter(reversed(cs) if role == 'Project leader' else cs)
people = list(itertools.zip_longest(iter_, iter_, iter_, iter_))
res.append((role, slug(role), people))
return dict(contribs=res, ndatapoint=ndatapoint, nlangs=nlangs)
def family_detail_html(request=None, context=None, **kw):
return {
'features': DBSession.query(Parameter).all(),
'feature': Parameter.get(request.params['feature']) if request.params.get('feature') else None,
'phylogeny': Phylogeny.get(context.id, default=None),
}
def phylogeny_detail_html(request=None, context=None, **kw):
return {
'ms': CombinationMultiSelect,
}
def source_detail_html(context=None, request=None, **kw):
return dict(referents=get_referents(context, exclude=[
'sentence',
'contribution',
'valueset',
]))
def contributor_detail_html(context=None, request=None, **kw):
counts = {r[0]: r[1] for r in DBSession.query(Datapoint.language_pk, func.count(DatapointContributor.pk))\
.join(DatapointContributor)\
.filter(DatapointContributor.contributor_pk == context.pk)\
.group_by(Datapoint.language_pk)}
languages = []
for lang in DBSession.query(Language) \
.outerjoin(Family) \
.join(ValueSet) \
.join(Contribution) \
.join(ContributionContributor) \
.filter(Language.pk.in_(list(counts.keys()))) \
.order_by(Family.name, Language.name) \
.options(joinedload(GrambankLanguage.family)):
languages.append((lang, counts[lang.pk]))
return {'languages': languages}
def dataset_detail_html(context=None, request=None, **kw):
contribs = DBSession.query(Contributor.name, func.count(ValueSet.id).label('c'))\
.join(
Contributor.contribution_assocs,
ContributionContributor.contribution,
Contribution.valuesets)\
.group_by(Contributor.name)\
.order_by(desc(text('c')))
return dict(
contribs=contribs,
stats=context.get_stats(
[rsc for rsc in RESOURCES if rsc.name in ['language', 'parameter', 'value']]),
nzvalues=DBSession.query(Value).filter(Value.name != '?').count(),
)
| clld/grambank | grambank/util.py | Python | apache-2.0 | 5,510 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .._common_conversion import _to_str
class Container(object):
'''
Blob container class.
:ivar str name:
The name of the container.
:ivar metadata:
A dict containing name-value pairs associated with the container as metadata.
This var is set to None unless the include=metadata param was included
for the list containers operation. If this parameter was specified but the
container has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict mapping str to str
:ivar ContainerProperties properties:
System properties for the container.
'''
def __init__(self, name=None, props=None, metadata=None):
self.name = name
self.properties = props or ContainerProperties()
self.metadata = metadata
class ContainerProperties(object):
'''
Blob container's properties class.
:ivar datetime last_modified:
A datetime object representing the last time the container was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar LeaseProperties lease:
Stores all the lease information for the container.
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.lease = LeaseProperties()
class Blob(object):
'''
Blob class.
:ivar str name:
Name of blob.
:ivar str snapshot:
A DateTime value that uniquely identifies the snapshot. The value of
this header indicates the snapshot version, and may be used in
subsequent requests to access the snapshot.
:ivar content:
Blob content.
:vartype content: str or bytes
:ivar BlobProperties properties:
Stores all the system properties for the blob.
:ivar metadata:
Name-value pairs associated with the blob as metadata.
'''
def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None):
self.name = name
self.snapshot = snapshot
self.content = content
self.properties = props or BlobProperties()
self.metadata = metadata
class BlobProperties(object):
'''
Blob Properties
:ivar str blob_type:
String indicating this blob's type.
:ivar datetime last_modified:
A datetime object representing the last time the blob was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int content_length:
The length of the content returned. If the entire blob was requested,
the length of blob in bytes. If a subset of the blob was requested, the
length of the returned subset.
:ivar str content_range:
Indicates the range of bytes returned in the event that the client
requested a subset of the blob.
:ivar int append_blob_committed_block_count:
(For Append Blobs) Number of committed blocks in the blob.
:ivar int page_blob_sequence_number:
(For Page Blobs) Sequence number for page blob used for coordinating
concurrent writes.
:ivar ~azure.storage.blob.models.CopyProperties copy:
Stores all the copy properties for the blob.
:ivar ~azure.storage.blob.models.ContentSettings content_settings:
Stores all the content settings for the blob.
:ivar ~azure.storage.blob.models.LeaseProperties lease:
Stores all the lease information for the blob.
'''
def __init__(self):
self.blob_type = None
self.last_modified = None
self.etag = None
self.content_length = None
self.content_range = None
self.append_blob_committed_block_count = None
self.page_blob_sequence_number = None
self.copy = CopyProperties()
self.content_settings = ContentSettings()
self.lease = LeaseProperties()
class ContentSettings(object):
'''
Used to store the content settings of a blob.
:ivar str content_type:
The content type specified for the blob. If no content type was
specified, the default content type is application/octet-stream.
:ivar str content_encoding:
If the content_encoding has previously been set
for the blob, that value is stored.
:ivar str content_language:
If the content_language has previously been set
for the blob, that value is stored.
:ivar str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the blob, that value is stored.
:ivar str cache_control:
If the cache_control has previously been set for
the blob, that value is stored.
:ivar str content_md5:
If the content_md5 has been set for the blob, this response
header is stored so that the client can check for message content
integrity.
'''
def __init__(
self, content_type=None, content_encoding=None,
content_language=None, content_disposition=None,
cache_control=None, content_md5=None):
self.content_type = content_type
self.content_encoding = content_encoding
self.content_language = content_language
self.content_disposition = content_disposition
self.cache_control = cache_control
self.content_md5 = content_md5
def _to_headers(self):
return {
'x-ms-blob-cache-control': _to_str(self.cache_control),
'x-ms-blob-content-type': _to_str(self.content_type),
'x-ms-blob-content-disposition': _to_str(self.content_disposition),
'x-ms-blob-content-md5': _to_str(self.content_md5),
'x-ms-blob-content-encoding': _to_str(self.content_encoding),
'x-ms-blob-content-language': _to_str(self.content_language),
}
class CopyProperties(object):
'''
Blob Copy Properties.
:ivar str id:
String identifier for the last attempted Copy Blob operation where this blob
was the destination blob. This header does not appear if this blob has never
been the destination in a Copy Blob operation, or if this blob has been
modified after a concluded Copy Blob operation using Set Blob Properties,
Put Blob, or Put Block List.
:ivar str source:
URL up to 2 KB in length that specifies the source blob used in the last attempted
Copy Blob operation where this blob was the destination blob. This header does not
appear if this blob has never been the destination in a Copy Blob operation, or if
this blob has been modified after a concluded Copy Blob operation using
Set Blob Properties, Put Blob, or Put Block List.
:ivar str status:
State of the copy operation identified by Copy ID, with these values:
success:
Copy completed successfully.
pending:
Copy is in progress. Check copy_status_description if intermittent,
non-fatal errors impede copy progress but don’t cause failure.
aborted:
Copy was ended by Abort Copy Blob.
failed:
Copy failed. See copy_status_description for failure details.
:ivar str progress:
Contains the number of bytes copied and the total bytes in the source in the last
attempted Copy Blob operation where this blob was the destination blob. Can show
between 0 and Content-Length bytes copied.
:ivar datetime completion_time:
Conclusion time of the last attempted Copy Blob operation where this blob was the
destination blob. This value can specify the time of a completed, aborted, or
failed copy attempt.
:ivar str status_description:
only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
or non-fatal copy operation failure.
'''
def __init__(self):
self.id = None
self.source = None
self.status = None
self.progress = None
self.completion_time = None
self.status_description = None
class LeaseProperties(object):
'''
Blob Lease Properties.
:ivar str status:
The lease status of the blob.
:ivar str state:
Lease state of the blob.
        Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a blob is leased, specifies whether the lease is of infinite or fixed duration.
'''
def __init__(self):
self.status = None
self.state = None
self.duration = None
class BlobPrefix(object):
'''
BlobPrefix objects may potentially returned in the blob list when
:func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is
used with a delimiter. Prefixes can be thought of as virtual blob directories.
:ivar str name: The name of the blob prefix.
'''
def __init__(self):
self.name = None
class BlobBlockState(object):
'''Block blob block types.'''
Committed = 'Committed'
'''Committed blocks.'''
Latest = 'Latest'
'''Latest blocks.'''
Uncommitted = 'Uncommitted'
'''Uncommitted blocks.'''
class BlobBlock(object):
'''
BlockBlob Block class.
:ivar str id:
Block id.
:ivar str state:
Block state.
        Possible values: committed|uncommitted
:ivar int size:
Block size in bytes.
'''
def __init__(self, id=None, state=BlobBlockState.Latest):
self.id = id
self.state = state
def _set_size(self, size):
self.size = size
class BlobBlockList(object):
'''
Blob Block List class.
:ivar committed_blocks:
List of committed blocks.
:vartype committed_blocks: list of :class:`BlobBlock`
:ivar uncommitted_blocks:
List of uncommitted blocks.
:vartype uncommitted_blocks: list of :class:`BlobBlock`
'''
def __init__(self):
self.committed_blocks = list()
self.uncommitted_blocks = list()
class PageRange(object):
'''
Page Range for page blob.
:ivar int start:
Start of page range in bytes.
:ivar int end:
End of page range in bytes.
:ivar bool is_cleared:
Indicates if a page range is cleared or not. Only applicable
for get_page_range_diff API.
'''
def __init__(self, start=None, end=None, is_cleared=False):
self.start = start
self.end = end
self.is_cleared = is_cleared
class ResourceProperties(object):
'''
Base response for a resource request.
:ivar str etag:
Opaque etag value that can be used to check if resource
has been modified.
:ivar datetime last_modified:
Datetime for last time resource was modified.
'''
def __init__(self):
self.last_modified = None
self.etag = None
class AppendBlockProperties(ResourceProperties):
'''
Response for an append block request.
:ivar int append_offset:
Position to start next append.
:ivar int committed_block_count:
Number of committed append blocks.
'''
def __init__(self):
        super(AppendBlockProperties, self).__init__()
self.append_offset = None
self.committed_block_count = None
class PageBlobProperties(ResourceProperties):
'''
Response for a page request.
:ivar int sequence_number:
        Identifier for page blobs to help handle concurrent writes.
'''
def __init__(self):
        super(PageBlobProperties, self).__init__()
self.sequence_number = None
class PublicAccess(object):
'''
Specifies whether data in the container may be accessed publicly and the level of access.
'''
Blob = 'blob'
'''
Specifies public read access for blobs. Blob data within this container can be read
via anonymous request, but container data is not available. Clients cannot enumerate
blobs within the container via anonymous request.
'''
Container = 'container'
'''
Specifies full public read access for container and blob data. Clients can enumerate
blobs within the container via anonymous request, but cannot enumerate containers
within the storage account.
'''
class DeleteSnapshot(object):
'''
Required if the blob has associated snapshots. Specifies how to handle the snapshots.
'''
Include = 'include'
'''
Delete the base blob and all of its snapshots.
'''
Only = 'only'
'''
Delete only the blob's snapshots and not the blob itself.
'''
class BlockListType(object):
'''
Specifies whether to return the list of committed blocks, the list of uncommitted
blocks, or both lists together.
'''
All = 'all'
'''Both committed and uncommitted blocks.'''
Committed = 'committed'
'''Committed blocks.'''
Uncommitted = 'uncommitted'
'''Uncommitted blocks.'''
class SequenceNumberAction(object):
'''Sequence number actions.'''
Increment = 'increment'
'''
Increments the value of the sequence number by 1. If specifying this option,
do not include the x-ms-blob-sequence-number header.
'''
Max = 'max'
'''
Sets the sequence number to be the higher of the value included with the
request and the value currently stored for the blob.
'''
Update = 'update'
'''Sets the sequence number to the value included with the request.'''
class _LeaseActions(object):
'''Actions for a lease.'''
Acquire = 'acquire'
'''Acquire the lease.'''
Break = 'break'
'''Break the lease.'''
Change = 'change'
'''Change the lease ID.'''
Release = 'release'
'''Release the lease.'''
Renew = 'renew'
'''Renew the lease.'''
class _BlobTypes(object):
'''Blob type options.'''
AppendBlob = 'AppendBlob'
'''Append blob type.'''
BlockBlob = 'BlockBlob'
'''Block blob type.'''
PageBlob = 'PageBlob'
'''Page blob type.'''
class Include(object):
'''
Specifies the datasets to include in the blob list response.
:ivar ~azure.storage.blob.models.Include Include.COPY:
Specifies that metadata related to any current or previous Copy Blob operation
should be included in the response.
:ivar ~azure.storage.blob.models.Include Include.METADATA:
Specifies that metadata be returned in the response.
:ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS:
Specifies that snapshots should be included in the enumeration.
:ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS:
Specifies that blobs for which blocks have been uploaded, but which have not
been committed using Put Block List, be included in the response.
'''
def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False,
copy=False, _str=None):
'''
:param bool snapshots:
Specifies that snapshots should be included in the enumeration.
:param bool metadata:
Specifies that metadata be returned in the response.
:param bool uncommitted_blobs:
Specifies that blobs for which blocks have been uploaded, but which have
not been committed using Put Block List, be included in the response.
:param bool copy:
Specifies that metadata related to any current or previous Copy Blob
operation should be included in the response.
:param str _str:
A string representing the includes.
'''
if not _str:
_str = ''
components = _str.split(',')
self.snapshots = snapshots or ('snapshots' in components)
self.metadata = metadata or ('metadata' in components)
self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
self.copy = copy or ('copy' in components)
def __or__(self, other):
return Include(_str=str(self) + str(other))
def __add__(self, other):
return Include(_str=str(self) + str(other))
def __str__(self):
include = (('snapshots,' if self.snapshots else '') +
('metadata,' if self.metadata else '') +
('uncommittedblobs,' if self.uncommitted_blobs else '') +
('copy,' if self.copy else ''))
return include.rstrip(',')
Include.COPY = Include(copy=True)
Include.METADATA = Include(metadata=True)
Include.SNAPSHOTS = Include(snapshots=True)
Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
class BlobPermissions(object):
'''
BlobPermissions class to be used with
:func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
:ivar BlobPermissions BlobPermissions.ADD:
Add a block to an append blob.
:ivar BlobPermissions BlobPermissions.CREATE:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:ivar BlobPermissions BlobPermissions.DELETE:
Delete the blob.
:ivar BlobPermissions BlobPermissions.READ:
Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
:ivar BlobPermissions BlobPermissions.WRITE:
Create or write content, properties, metadata, or block list. Snapshot or lease
the blob. Resize the blob (page blob only). Use the blob as the destination of a
copy operation within the same account.
'''
def __init__(self, read=False, add=False, create=False, write=False,
delete=False, _str=None):
'''
:param bool read:
Read the content, properties, metadata and block list. Use the blob as
the source of a copy operation.
:param bool add:
Add a block to an append blob.
:param bool create:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:param bool write:
Create or write content, properties, metadata, or block list. Snapshot
or lease the blob. Resize the blob (page blob only). Use the blob as the
destination of a copy operation within the same account.
:param bool delete:
Delete the blob.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.add = add or ('a' in _str)
self.create = create or ('c' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
def __or__(self, other):
return BlobPermissions(_str=str(self) + str(other))
def __add__(self, other):
return BlobPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('a' if self.add else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else ''))
BlobPermissions.ADD = BlobPermissions(add=True)
BlobPermissions.CREATE = BlobPermissions(create=True)
BlobPermissions.DELETE = BlobPermissions(delete=True)
BlobPermissions.READ = BlobPermissions(read=True)
BlobPermissions.WRITE = BlobPermissions(write=True)
class ContainerPermissions(object):
'''
ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`.
:ivar ContainerPermissions ContainerPermissions.DELETE:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:ivar ContainerPermissions ContainerPermissions.LIST:
List blobs in the container.
:ivar ContainerPermissions ContainerPermissions.READ:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:ivar ContainerPermissions ContainerPermissions.WRITE:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
'''
def __init__(self, read=False, write=False, delete=False, list=False,
_str=None):
'''
:param bool read:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:param bool write:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
:param bool delete:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:param bool list:
List blobs in the container.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
self.list = list or ('l' in _str)
def __or__(self, other):
return ContainerPermissions(_str=str(self) + str(other))
def __add__(self, other):
return ContainerPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else ''))
ContainerPermissions.DELETE = ContainerPermissions(delete=True)
ContainerPermissions.LIST = ContainerPermissions(list=True)
ContainerPermissions.READ = ContainerPermissions(read=True)
ContainerPermissions.WRITE = ContainerPermissions(write=True)
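# Hedged usage sketch: permission objects compose with '|' (or '+') and render
# to the short permission string passed to the SAS-generation helpers, e.g.
#
#   str(ContainerPermissions.READ | ContainerPermissions.LIST)   # -> 'rl'
#   str(BlobPermissions(read=True, write=True))                  # -> 'rw'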
| emgerner-msft/azure-storage-python | azure/storage/blob/models.py | Python | apache-2.0 | 23,827 |
'''
Flowchart based particle parser.
'''
import re
from .instrgeom import RayBundle, ParticleStory, ParticleCompGroup, ParticleState
from .flowchart import FCNTerminal, FCNDecisionBool, FCNDecisionMulti, FCNProcess, FlowChartControl
# terminal nodes implementation
def t_begin(args):
print("starting particle parsing")
def t_end(args):
print("ended particle parsing")
def t_error(args):
print(args['linegetter'].current())
raise Exception("error")
def t_error2(args):
print(args['linegetter'].current())
raise Exception("error2")
def t_error3(args):
print(args['linegetter'].current())
raise Exception("error3")
def t_error4(args):
print(args['linegetter'].current())
raise Exception("error4")
def t_error5(args):
print(args['linegetter'].current())
raise Exception("error5")
# decision nodes implementation
def d_isenter(args):
m = re.match('ENTER:', args['linegetter'].current())
return m is not None
def d_done(args):
return args['linegetter'].isempty()
def d_isstate(args):
m = re.match('STATE:', args['linegetter'].current())
return m is not None
def d_isscatter(args):
m = re.match('SCATTER:', args['linegetter'].current())
return m is not None
def d_iscomp(args):
m = re.match('COMP:', args['linegetter'].current())
return m is not None
def d_isleave(args):
m = re.match('LEAVE:', args['linegetter'].current())
return m is not None
def d_iskeywd(args):
line = args['linegetter'].current()
m0 = re.match('COMP:', line)
if m0:
return 0
m1 = re.match('SCATTER:', line)
if m1:
return 1
m2 = re.match('ABSORB:', line)
if m2:
return 2
m3 = re.match('LEAVE:', line)
if m3:
return 3
m4 = re.match('STATE:', line)
if m4:
return 4
raise Exception("wrong line: %s" % line)
# process nodes implementation --- NOTE: all process nodes increment line idx by one
def p_newparticle(args):
args['weaver'].new_story()
args['linegetter'].inc()
def p_ignoreline(args):
args['linegetter'].inc()
def p_addcomp(args):
linegetter = args['linegetter']
weaver = args['weaver']
weaver.close_comp()
weaver.new_comp(_get_compname(linegetter.current()))
linegetter.inc()
def p_addpoint(args):
linegetter = args['linegetter']
args['weaver'].new_point(_get_strcoords(linegetter.current()))
linegetter.inc()
def p_addpointclose(args):
linegetter = args['linegetter']
args['weaver'].new_point(_get_strcoords(linegetter.current()))
weaver = args['weaver']
weaver.close_comp()
weaver.close_story()
linegetter.inc()
# helper functions implementation
def _get_strcoords(line):
    m = re.match(r'\w+: ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+), ([\d\.\+\-e]+)', line)
return [m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6), m.group(7), m.group(8), m.group(9), m.group(10), m.group(11)]
def _get_compname(line):
m = re.match('COMP: \"(\w+)\"', line)
return m.group(1)
# action code helper classes
class ParticleBundleWeaver(object):
''' creates the particle ray data structure by aid of a bunch of functions that can be called in succession '''
def __init__(self):
self._rays = []
self._bundle = RayBundle(self._rays)
self._compgroup = None
self._story = None
def new_story(self):
if self._story is not None:
raise Exception("Close the current story before adding a new one.")
self._story = ParticleStory()
self._rays.append(self._story)
def new_comp(self, compname):
if self._story is None:
raise Exception("You must add a particle story before adding a compgroup.")
if self._compgroup is not None:
raise Exception("Close the current compgroup before adding a new one.")
self._compgroup = ParticleCompGroup(compname)
self._story.add_group(self._compgroup)
def new_point(self, point_str):
if self._compgroup is None:
raise Exception("You must add a compgroup before adding points.")
point = ParticleState(point_str)
self._compgroup.add_event(point)
def close_comp(self):
self._compgroup = None
def close_story(self):
self._story = None
def get_particles(self):
if self._story == None and self._compgroup == None:
return self._bundle
else:
raise Exception("Close compgroup and story before fetching the particle bundle.")
class LineGetter(object):
def __init__(self, text):
self.lines = text.splitlines()
self.idx = 0
def current(self):
if not self.idx >= len(self.lines):
return self.lines[self.idx]
def prev(self):
if not self.idx == 0:
return self.lines[self.idx-1]
def next(self):
if not self.idx == len(self.lines) - 1:
return self.lines[self.idx+1]
def inc(self):
self.idx += 1
def isempty(self):
return self.idx >= len(self.lines)
# flowchart assembly and execution
class FlowChartParticleTraceParser(object):
def __init__(self):
# terminal nodes
t1 = FCNTerminal(key="begin", fct=t_begin)
t2 = FCNTerminal(key="end", fct=t_end)
t3 = FCNTerminal(key="error", fct=t_error)
t4 = FCNTerminal(key="error2", fct=t_error2)
t5 = FCNTerminal(key="error3", fct=t_error3)
t6 = FCNTerminal(key="error4", fct=t_error4)
t7 = FCNTerminal(key="error5", fct=t_error5)
# decision nodes
d0 = FCNDecisionBool(fct=d_done)
d1 = FCNDecisionBool(fct=d_isenter)
d2 = FCNDecisionBool(fct=d_isstate)
d3 = FCNDecisionMulti(fct=d_iskeywd)
d4 = FCNDecisionBool(fct=d_isstate)
d5 = FCNDecisionBool(fct=d_isstate)
d5_b = FCNDecisionBool(fct=d_isscatter)
d5_c = FCNDecisionBool(fct=d_iscomp)
d6 = FCNDecisionBool(fct=d_isstate)
d7 = FCNDecisionBool(fct=d_isstate)
d8 = FCNDecisionBool(fct=d_isleave)
# process nodes
p1 = FCNProcess(fct=p_newparticle)
p2 = FCNProcess(fct=p_ignoreline)
p3 = FCNProcess(fct=p_addcomp)
p4 = FCNProcess(fct=p_addpoint)
p5 = FCNProcess(fct=p_addpoint)
p6 = FCNProcess(fct=p_ignoreline)
p7 = FCNProcess(fct=p_ignoreline)
p8 = FCNProcess(fct=p_addpoint)
p9 = FCNProcess(fct=p_ignoreline)
p10 = FCNProcess(fct=p_addpointclose)
p12 = FCNProcess(fct=p_ignoreline)
# assemble the flowchart from top
t1.set_nodenext(node_next=d0)
d0.set_nodes(node_T=t2, node_F=d1)
d1.set_nodes(node_T=p1, node_F=t3)
p1.set_nodenext(node_next=d2)
d2.set_nodes(node_T=p2, node_F=t4)
p2.set_nodenext(node_next=d3)
d3.set_node_lst(node_lst=[p3, p5, p7, p9, p12])
p3.set_nodenext(node_next=d4)
d4.set_nodes(node_T=p4, node_F=t5)
p4.set_nodenext(node_next=d3)
p5.set_nodenext(node_next=d5)
d5.set_nodes(node_T=p6, node_F=d5_b)
d5_b.set_nodes(node_T=p5, node_F=d5_c)
d5_c.set_nodes(node_T=d3, node_F=t6)
p6.set_nodenext(node_next=d3)
p7.set_nodenext(node_next=d6)
d6.set_nodes(node_T=p8, node_F=d3)
p8.set_nodenext(node_next=d8) # <-- this event rarely happens for most instruments!
d8.set_nodes(node_T=p7, node_F=d3)
p9.set_nodenext(node_next=d7)
d7.set_nodes(node_T=p10, node_F=t7)
p10.set_nodenext(node_next=d0)
p12.set_nodenext(node_next=d3)
self.rootnode = t1
def execute(self, text):
# set args according to the above implementation and execute the flowchart
args = {}
args['linegetter'] = LineGetter(text)
weaver = ParticleBundleWeaver()
args['weaver'] = weaver
flowchart = FlowChartControl(self.rootnode)
flowchart.process(args)
return weaver.get_particles()
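# Hedged usage sketch: 'trace_text' stands for the captured particle-trace
# output of a simulation run; it is not defined in this module.
#
#   parser = FlowChartParticleTraceParser()
#   bundle = parser.execute(trace_text)   # RayBundle built by the weaver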
| markusappel/McCode | tools/Python/mccodelib/fcparticleparser.py | Python | gpl-2.0 | 8,291 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements commands for running/interacting with Fuchsia on an emulator."""
import pkg_repo
import boot_data
import logging
import os
import subprocess
import sys
import target
import tempfile
class EmuTarget(target.Target):
def __init__(self, out_dir, target_cpu, logs_dir):
"""out_dir: The directory which will contain the files that are
generated to support the emulator deployment.
target_cpu: The emulated target CPU architecture.
Can be 'x64' or 'arm64'."""
super(EmuTarget, self).__init__(out_dir, target_cpu, logs_dir)
self._emu_process = None
self._pkg_repo = None
def __enter__(self):
return self
def _BuildCommand(self):
"""Build the command that will be run to start Fuchsia in the emulator."""
pass
def _SetEnv(self):
return os.environ.copy()
def Start(self):
emu_command = self._BuildCommand()
# We pass a separate stdin stream. Sharing stdin across processes
# leads to flakiness due to the OS prematurely killing the stream and the
# Python script panicking and aborting.
# The precise root cause is still nebulous, but this fix works.
# See crbug.com/741194.
logging.debug('Launching %s.' % (self.EMULATOR_NAME))
logging.debug(' '.join(emu_command))
# Zircon sends debug logs to serial port (see kernel.serial=legacy flag
# above). Serial port is redirected to a file through emulator stdout.
# If runner_logs are not enabled, we output the kernel serial log
# to a temporary file, and print that out if we are unable to connect to
# the emulator guest, to make it easier to diagnose connectivity issues.
temporary_log_file = None
if self._log_manager.IsLoggingEnabled():
stdout = self._log_manager.Open('serial_log')
else:
temporary_log_file = tempfile.NamedTemporaryFile('w')
stdout = temporary_log_file
self.LogProcessStatistics('proc_stat_start_log')
self.LogSystemStatistics('system_statistics_start_log')
self._emu_process = subprocess.Popen(emu_command,
stdin=open(os.devnull),
stdout=stdout,
stderr=subprocess.STDOUT,
env=self._SetEnv())
try:
self._WaitUntilReady()
self.LogProcessStatistics('proc_stat_ready_log')
except target.FuchsiaTargetException:
if temporary_log_file:
logging.info('Kernel logs:\n' +
open(temporary_log_file.name, 'r').read())
raise
def Stop(self):
try:
super(EmuTarget, self).Stop()
finally:
self.Shutdown()
def GetPkgRepo(self):
if not self._pkg_repo:
self._pkg_repo = pkg_repo.ManagedPkgRepo(self)
return self._pkg_repo
def Shutdown(self):
if not self._emu_process:
logging.error('%s did not start' % (self.EMULATOR_NAME))
return
returncode = self._emu_process.poll()
if returncode == None:
logging.info('Shutting down %s' % (self.EMULATOR_NAME))
self._emu_process.kill()
elif returncode == 0:
logging.info('%s quit unexpectedly without errors' % self.EMULATOR_NAME)
elif returncode < 0:
logging.error('%s was terminated by signal %d' %
(self.EMULATOR_NAME, -returncode))
else:
logging.error('%s quit unexpectedly with exit code %d' %
(self.EMULATOR_NAME, returncode))
self.LogProcessStatistics('proc_stat_end_log')
self.LogSystemStatistics('system_statistics_end_log')
def _IsEmuStillRunning(self):
if not self._emu_process:
return False
return os.waitpid(self._emu_process.pid, os.WNOHANG)[0] == 0
def _GetEndpoint(self):
if not self._IsEmuStillRunning():
raise Exception('%s quit unexpectedly.' % (self.EMULATOR_NAME))
return ('localhost', self._host_ssh_port)
def _GetSshConfigPath(self):
return boot_data.GetSSHConfigPath()
def LogSystemStatistics(self, log_file_name):
self._LaunchSubprocessWithLogs(['top', '-b', '-n', '1'], log_file_name)
self._LaunchSubprocessWithLogs(['ps', '-ax'], log_file_name)
def LogProcessStatistics(self, log_file_name):
self._LaunchSubprocessWithLogs(['cat', '/proc/stat'], log_file_name)
def _LaunchSubprocessWithLogs(self, command, log_file_name):
"""Launch a subprocess and redirect stdout and stderr to log_file_name.
Command will not be run if logging directory is not set."""
if not self._log_manager.IsLoggingEnabled():
return
log = self._log_manager.Open(log_file_name)
subprocess.call(command,
stdin=open(os.devnull),
stdout=log,
stderr=subprocess.STDOUT)
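# Hedged usage sketch: concrete emulator targets subclass EmuTarget and supply
# the launch command; the class below is purely illustrative and does not
# correspond to a real target in this directory.
#
#   class FakeEmuTarget(EmuTarget):
#       EMULATOR_NAME = 'fake-emu'
#       def _BuildCommand(self):
#           return ['fake-emu', '--memory', '2048']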
| nwjs/chromium.src | build/fuchsia/emu_target.py | Python | bsd-3-clause | 4,940 |
import sys, os, time, atexit
from signal import SIGTERM
class BaseDaemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
logfile = None
stdin = "/dev/null"
stdout = "/dev/null"
stderr = "/dev/null"
chdir = None
pidfile = None
pid_rm_on_stop = True ## Remove pid file on stop
def __init__(self, pidfile=None, stdin=None, stdout=None, stderr=None, chdir=None, ):
self.stdin = stdin or self.stdin
self.stdout = stdout or self.stdout
self.stderr = stderr or self.stderr
self.pidfile = pidfile or self.pidfile
self.chdir = chdir or self.chdir
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
if self.chdir is not None: os.chdir(self.chdir)
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if self.logfile is not None:
self.stdout = self.logfile
self.stderr = self.logfile
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
self.pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % self.pid)
def delpid(self):
if not self.pid_rm_on_stop:
file(self.pidfile,'w+').write("")
return None
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
##except IOError:
except Exception as err:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
##except IOError:
except Exception as err:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
sys.stdout.flush()
sys.stderr.flush()
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile) and self.pid_rm_on_stop:
self.delpid()
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| procool/mygw | globals/utils/daemon/basedaemon.py | Python | bsd-2-clause | 3,715 |
#!/usr/bin/env python
"""Tests client actions related to administrating the client."""
import os
import StringIO
import psutil
from grr.client import actions
from grr.client import comms
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
class ConfigActionTest(test_lib.EmptyActionTest):
"""Tests the client actions UpdateConfiguration and GetConfiguration."""
def testUpdateConfiguration(self):
"""Test that we can update the config."""
# Make sure the config file is not already there
try:
os.unlink(self.config_file)
except OSError:
pass
# In a real client, the writeback location should be set to something real,
# but for this test we make it the same as the config file..
config_lib.CONFIG.SetWriteBack(self.config_file)
# Make sure the file is gone
self.assertRaises(IOError, open, self.config_file)
location = "http://www.example.com"
request = rdfvalue.Dict()
request["Client.location"] = location
request["Client.foreman_check_frequency"] = 3600
result = self.RunAction("UpdateConfiguration", request)
self.assertEqual(result, [])
self.assertEqual(config_lib.CONFIG["Client.foreman_check_frequency"], 3600)
# Test the config file got written.
data = open(self.config_file).read()
self.assert_("location: {0}".format(location) in data)
# Now test that our location was actually updated.
def FakeUrlOpen(req):
self.fake_url = req.get_full_url()
return StringIO.StringIO()
comms.urllib2.urlopen = FakeUrlOpen
client_context = comms.GRRHTTPClient()
client_context.MakeRequest("", comms.Status())
self.assertTrue(self.fake_url.startswith(location))
def testUpdateConfigBlacklist(self):
"""Tests that disallowed fields are not getting updated."""
config_lib.CONFIG.Set("Client.location", "http://something.com/")
config_lib.CONFIG.Set("Client.server_serial_number", 1)
location = "http://www.example.com"
request = rdfvalue.Dict()
request["Client.location"] = location
request["Client.server_serial_number"] = 10
self.RunAction("UpdateConfiguration", request)
# Location can be set.
self.assertEqual(config_lib.CONFIG["Client.location"], location)
# But the server serial number can not be updated.
self.assertEqual(config_lib.CONFIG["Client.server_serial_number"], 1)
def testGetConfig(self):
"""Check GetConfig client action works."""
# Use UpdateConfig to generate a config.
location = "http://example.com"
request = rdfvalue.Dict()
request["Client.location"] = location
request["Client.foreman_check_frequency"] = 3600
self.RunAction("UpdateConfiguration", request)
# Check that our GetConfig actually gets the real data.
self.RunAction("GetConfiguration")
self.assertEqual(config_lib.CONFIG["Client.foreman_check_frequency"], 3600)
self.assertEqual(config_lib.CONFIG["Client.location"], location)
def VerifyResponse(self, response, bytes_received, bytes_sent):
self.assertEqual(response.bytes_received, bytes_received)
self.assertEqual(response.bytes_sent, bytes_sent)
self.assertEqual(len(response.cpu_samples), 2)
self.assertAlmostEqual(response.cpu_samples[1].user_cpu_time, 0.1)
self.assertAlmostEqual(response.cpu_samples[1].system_cpu_time, 0.2)
self.assertAlmostEqual(response.cpu_samples[1].cpu_percent, 15.0)
self.assertEqual(len(response.io_samples), 2)
self.assertEqual(response.io_samples[0].read_bytes, 100)
self.assertEqual(response.io_samples[1].read_bytes, 200)
self.assertEqual(response.io_samples[1].write_bytes, 200)
self.assertEqual(response.boot_time, long(100 * 1e6))
def testGetClientStatsAuto(self):
"""Checks that stats collection works."""
class MockCollector(object):
cpu_samples = [(100, 0.1, 0.1, 10.0), (110, 0.1, 0.2, 15.0)]
io_samples = [(100, 100, 100), (110, 200, 200)]
class MockContext(object):
def __init__(self):
self.stats_collector = MockCollector()
old_boot_time = psutil.BOOT_TIME
psutil.BOOT_TIME = 100
try:
stats.STATS.IncrementCounter("grr_client_received_bytes", 1566)
received_bytes = stats.STATS.GetMetricValue("grr_client_received_bytes")
stats.STATS.IncrementCounter("grr_client_sent_bytes", 2000)
sent_bytes = stats.STATS.GetMetricValue("grr_client_sent_bytes")
action_cls = actions.ActionPlugin.classes.get(
"GetClientStatsAuto", actions.ActionPlugin)
action = action_cls(None, grr_worker=self)
action.grr_worker = MockContext()
action.Send = lambda r: self.VerifyResponse(r, received_bytes, sent_bytes)
action.Run(None)
finally:
psutil.BOOT_TIME = old_boot_time
| MiniSEC/GRR_clone | client/client_actions/admin_test.py | Python | apache-2.0 | 4,821 |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import boundingBox
from . import imageGradients
from . import rawData
view_extensions = [
# Set show=True if extension should be shown by default
# in the 'Select Visualization Method' dialog. These defaults
# can be changed by editing DIGITS config option
# 'view_extension_list'
{'class': boundingBox.Visualization, 'show': True},
{'class': imageGradients.Visualization, 'show': False},
{'class': rawData.Visualization, 'show': True},
]
def get_default_extension():
"""
return the default view extension
"""
return rawData.Visualization
def get_extensions(show_all=False):
"""
return set of data data extensions
"""
return [extension['class']
for extension in view_extensions
if show_all or extension['show']]
def get_extension(extension_id):
"""
return extension associated with specified extension_id
"""
for extension in view_extensions:
extension_class = extension['class']
if extension_class.get_id() == extension_id:
return extension_class
return None
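# Hedged usage sketch: extension ids come from each class's get_id(), e.g.
#
#   ext_cls = get_extension(rawData.Visualization.get_id())
#   assert ext_cls is rawData.Visualization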
| dongjoon-hyun/DIGITS | digits/extensions/view/__init__.py | Python | bsd-3-clause | 1,210 |
def extractChuunihimeWordpressCom(item):
'''
Parser for 'chuunihime.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractChuunihimeWordpressCom.py | Python | bsd-3-clause | 560 |
""" pimms.py Entry point for PI.M.M.S application. Does all the web rendering.
Copyright 2013 Will Bickerstaff <will.bickerstaff@gmail.com>
This file is part of Pi.M.M.S.
Pi.M.M.S is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pi.M.M.S is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pi.M.M.S. If not, see <http://www.gnu.org/licenses/>.
"""
import sqlite3 as sqlite
import datetime, time, os, json
from collections import namedtuple
from flask import Flask, request
from jinja2 import Environment, FileSystemLoader
from www.appjson import JSONTemps
import datefuncs.dt as dt
templatedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', 'templates')
env = Environment(loader = FileSystemLoader(templatedir))
app = Flask(__name__)
app.config['DEBUG'] = True
def reading_extents_offset():
""" Get the timedelta from now of the earliest and most recent readings
returns a namedtuple with start and end
start being a timedelta difference from now to the earliest reading
end being a timedelta difference from now to the most recent reading
"""
Deltatype = namedtuple('Deltatype', 'start end')
n = dt.date_now()
with sqlite.connect(os.environ['PIMMS_DB']) as con:
cur = con.cursor()
cur.execute("SELECT MIN(timestamp) FROM readings;")
starttime = datetime.date.fromtimestamp(cur.fetchone()[0])
cur.execute("SELECT MAX(timestamp) FROM readings;")
endtime = datetime.date.fromtimestamp(cur.fetchone()[0])
return (Deltatype(n - starttime, n - endtime if endtime < n else n - n))
def get_readings(day):
""" Get all readings for the given day
@see make_day
"""
res = []
plotdate = (time.mktime(day.start.timetuple()),
time.mktime(day.end.timetuple()))
with sqlite.connect(os.environ['PIMMS_DB']) as con:
cur = con.cursor()
cur.execute("SELECT * FROM readings "
"WHERE timestamp > {start:f} AND timestamp < {end:f} "
"ORDER BY timestamp ASC;".format(start=plotdate[0],
end=plotdate[1]))
res = cur.fetchall()
return res
def render_page(template, template_args):
template = env.get_template(template)
return template.render(args=template_args)
def requested_plot(request):
plotdate = datetime.date.fromtimestamp(dt.now())
if (request.method == 'POST' and 'dateselected' in request.form and
len(request.form['dateselected'].split('/')) == 3):
pdate = time.strptime(request.form['dateselected'], '%m/%d/%Y')
if dt.valid_date(pdate.tm_year, pdate.tm_mon, pdate.tm_mday):
plotdate = datetime.date.fromtimestamp(time.mktime(pdate))
return plotdate
def doplot(plotdate):
day = dt.make_day(plotdate) # datetime for start and end of day
# Time from now to the 1st and last available reading
# Used to limit the range available in the datepicker
deltas = reading_extents_offset()
template_args = {"start": 0 - deltas.start.days,
"end": deltas.end.days,
"day": day}
template = 'today.html'
if not dt.is_today(plotdate):
template = 'default.html'
temps = JSONTemps.dbval2json(get_readings(day))
template_args["readings"] = json.dumps(temps)
return template, template_args
@app.route('/', methods=["GET", "POST"])
def index():
template, template_args = doplot(requested_plot(request))
return render_page(template, template_args)
if __name__ == '__main__':
app.run()
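# Illustrative launch sketch (the path is an assumption; the handlers above
# only require that the PIMMS_DB environment variable point at a readings
# SQLite database):
#
#   PIMMS_DB=/path/to/readings.db python web.py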
| WillBickerstaff/PiMMS | www/web.py | Python | gpl-3.0 | 4,054 |
"""Unit tests for feedgrabber"""
from django.test import TestCase
| Fantomas42/django-feedgrabber | feedgrabber/tests.py | Python | bsd-3-clause | 67 |
#$Id$#
from books.model.Bill import Bill
from books.model.LineItem import LineItem
from books.model.Address import Address
from books.model.PaymentAndCredit import PaymentAndCredit
from books.model.BillPayment import BillPayment
from books.service.ZohoBooks import ZohoBooks
zoho_books = ZohoBooks("{auth_token}", "{organization_id}")
bill_api = zoho_books.get_bills_api()
bill_id = bill_api.get_bills().get_bills()[1].get_bill_id()
vendor_api = zoho_books.get_vendor_payments_api()
vendor_id = vendor_api.get_vendor_payments().get_vendor_payments()[0].get_vendor_id()
param = {'filter_by': 'AccountType.Expense'}
chart_of_accounts_api = zoho_books.get_chart_of_accounts_api()
account_id = chart_of_accounts_api.get_chart_of_accounts(param).get_chartofaccounts()[1].get_account_id()
attachment = '/{file_directory}/emp.pdf'
# List bills
parameter = {'status':'open'}
print bill_api.get_bills()
print bill_api.get_bills(parameter)
# Get a bill
print bill_api.get(bill_id)
# Create a bill
bill = Bill()
bill.set_vendor_id(vendor_id)
bill.set_bill_number('38')
bill.set_date('2014-05-12')
bill.set_due_date('2014-05-13')
bill.set_exchange_rate(1)
bill.set_reference_number("2")
line_items = LineItem()
line_items.set_account_id(account_id)
line_items.set_description('table')
line_items.set_rate("1000.0")
line_items.set_quantity("4")
line_items.set_item_order("0")
bill.set_line_items(line_items)
bill.set_notes("before due")
bill.set_terms('conditions Apply')
print bill_api.create(bill)
#print bill_api.create(bill,attachment)
# Update a bill
bill = Bill()
bill.set_vendor_id(vendor_id)
bill.set_bill_number('38')
bill.set_date('2014-05-12')
bill.set_due_date('2014-05-13')
bill.set_exchange_rate(1)
bill.set_reference_number("2")
line_items = LineItem()
line_items.set_account_id(account_id)
line_items.set_description('table')
line_items.set_rate("1000.0")
line_items.set_quantity("4")
line_items.set_tax_id("")
line_items.set_item_order("0")
bill.set_line_items(line_items)
bill.set_notes("before due")
bill.set_terms('conditions Apply')
#print bill_api.update(bill_id,bill)
print bill_api.update(bill_id,bill,attachment)
# Delete a bill
print bill_api.delete(bill_id)
# Void a bill
print bill_api.void_a_bill(bill_id)
# Mark a bill as open
print bill_api.mark_a_bill_as_open(bill_id)
# Update billing_ address
billing_address=Address()
billing_address.set_address('no. 2 kumaran streeet')
billing_address.set_city('chennai')
billing_address.set_state('Tamilnadu')
billing_address.set_zip('908')
billing_address.set_country('India')
print bill_api.update_billing_address(bill_id,billing_address)
'''
'''
param = {'status': 'paid'}
bill_id = bill_api.get_bills(param).get_bills()[0].get_bill_id()
payment_id = bill_api.list_bill_payments(bill_id)[0].get_payment_id()
bill_payment_id = bill_api.list_bill_payments(bill_id)[0].get_bill_payment_id()
# List bill payment
print bill_api.list_bill_payments(bill_id)
# Apply credits
bill_payments = BillPayment()
bill_payments.set_payment_id(payment_id)
bill_payments.set_amount_applied(0.0)
print bill_api.apply_credits(bill_id,[bill_payments])
# Delete a payment
print bill_api.delete_a_payment(bill_id,bill_payment_id)
'''
# Get a bill attachment
'''
print bill_api.get_a_bill_attachment(bill_id)
#print bill_api.get_a_bill_attachment("71127000000080017",True)
# Add attachment to a bill
attachment='/{file_directory}/download.jpg'
print bill_api.add_attachments_to_a_bill(bill_id,attachment)
# Delete an attachment
print bill_api.delete_an_attachment(bill_id)
# List bill comments and history
comment_id = bill_api.list_bill_comments_and_history(bill_id).get_comments()[0].get_comment_id()
print bill_api.list_bill_comments_and_history(bill_id)
# Add Comment
description="Welcome"
print bill_api.add_comment(bill_id,description)
# Delete a comment
comment_id = "71127000000204442"
print bill_api.delete_a_comment(bill_id,comment_id)
| zoho/books-python-wrappers | test/BillTest.py | Python | mit | 3,928 |
from django.conf.urls import url,include
from django.contrib import admin
from webchat import views
urlpatterns = [
url(r'^$', views.dashboard,name='chat_dashboard'),
url(r'^msg_send/$', views.send_msg,name='send_msg'),
url(r'^new_msgs/$', views.get_new_msgs,name='get_new_msgs'),
]
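# Illustrative note: with the names above, views and templates can resolve
# these routes with reverse(); the resulting paths assume this URLconf is
# included at the site root:
#
#   from django.core.urlresolvers import reverse
#   reverse('chat_dashboard')  # -> '/'
#   reverse('send_msg')        # -> '/msg_send/'
#   reverse('get_new_msgs')    # -> '/new_msgs/'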
| XiaJieCom/change | stu103151/days21/project/webchat/urls.py | Python | lgpl-2.1 | 296 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
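  # For illustration, _GetEnv expects the environment-block layout described
  # above: NUL-separated KEY=VALUE pairs ending in a double NUL. The values
  # here are made up:
  #
  #   'PATH=C:\\tools\x00TMP=C:\\temp\x00\x00'
  #       -> {'PATH': 'C:\\tools', 'TMP': 'C:\\temp'}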
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen(args,
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefix = 'Processing '
processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
for line in lines:
if not line.startswith(prefix) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| t3wz/mtasa-blue | vendor/google-breakpad/src/tools/gyp/pylib/gyp/win_tool.py | Python | gpl-3.0 | 12,634 |
from itertools import product
from rpython.rlib import jit, rarithmetic, rstring
from rpython.rlib.rbigint import rbigint
import debug
class KernelValue(object):
simple = True
def __init__(self, source_pos=None):
self.source_pos = source_pos
def equal(self, other):
return other is self
def tostring(self):
return str(self)
def todisplay(self):
return self.tostring()
def interpret(self, env, cont):
assert self.simple, "expected simple value"
return cont.plug_reduce(self.interpret_simple(env))
def interpret_simple(self, env):
return self
def combine(self, operands, env, cont):
signal_type_error(Combiner, self)
#XXX: Unicode
class String(KernelValue):
type_name = 'string'
_immutable_fields_ = ['strval']
def __init__(self, value, source_pos=None):
assert isinstance(value, str), "wrong value for String: %s" % value
self.strval = value
self.source_pos = source_pos
def tostring(self):
return '"%s"' % self.strval
def todisplay(self):
return self.strval
def equal(self, other):
return isinstance(other, String) and other.strval == self.strval
class Number(KernelValue):
type_name = 'number'
def lteq(self, other):
return self.lt(other) or self.equal(other)
def gt(self, other):
return other.lt(self)
def gteq(self, other):
return self.gt(other) or self.equal(other)
def sub(self, other):
return self.add(other.neg())
class Infinity(Number):
def divide(self, other):
# divide primitive already discards error cases.
return zero
def mod(self, other):
return other
def divmod(self, other):
return Pair(zero, Pair(other, nil))
class ExactPositiveInfinity(Infinity):
def tostring(self):
return "#e+infinity"
def equal(self, other):
return isinstance(other, ExactPositiveInfinity)
def lt(self, other):
return False
def add(self, other):
if isinstance(other, ExactNegativeInfinity):
signal_add_positive_to_negative_infinity(self, other)
else:
return self
def mul(self, other):
if zero.lt(other):
return self
elif other.lt(zero):
return e_neg_inf
else:
signal_multiply_infinity_by_zero(self, other)
def neg(self):
return e_neg_inf
e_pos_inf = ExactPositiveInfinity()
class ExactNegativeInfinity(Infinity):
def tostring(self):
return "#e-infinity"
def equal(self, other):
return isinstance(other, ExactNegativeInfinity)
def lt(self, other):
return not isinstance(other, ExactNegativeInfinity)
def add(self, other):
if isinstance(other, ExactPositiveInfinity):
signal_add_positive_to_negative_infinity(other, self)
else:
return self
def mul(self, other):
if zero.lt(other):
return self
elif other.lt(zero):
return e_pos_inf
else:
signal_multiply_infinity_by_zero(self, other)
def neg(self):
return e_pos_inf
e_neg_inf = ExactNegativeInfinity()
class Fixnum(Number):
_immutable_fields_ = ['fixval']
def __init__(self, fixval, source_pos=None):
assert isinstance(fixval, int)
self.fixval = fixval
self.source_pos = source_pos
def tostring(self):
return str(self.fixval)
def equal(self, other):
return isinstance(other, Fixnum) and other.fixval == self.fixval
def lt(self, other):
if isinstance(other, Fixnum):
return self.fixval < other.fixval
elif isinstance(other, ExactNegativeInfinity):
return False
else:
return True
def add(self, other):
if isinstance(other, Fixnum):
try:
res = rarithmetic.ovfcheck(other.fixval + self.fixval)
return Fixnum(res)
except OverflowError:
return Bignum(rbigint.fromint(self.fixval).add(rbigint.fromint(other.fixval)))
else:
assert isinstance(other, Number)
return other.add(self)
def mul(self, other):
if isinstance(other, Fixnum):
try:
res = rarithmetic.ovfcheck(other.fixval * self.fixval)
return Fixnum(res)
except OverflowError:
return Bignum(rbigint.fromint(self.fixval).mul(rbigint.fromint(other.fixval)))
else:
assert isinstance(other, Number)
return other.mul(self)
def divide_by(self, other):
if isinstance(other, Fixnum):
return Fixnum(self.fixval // other.fixval)
else:
return other.divide(self)
def mod_by(self, other):
if isinstance(other, Fixnum):
return Fixnum(self.fixval % other.fixval)
else:
return other.mod(self)
def divmod_by(self, other):
if isinstance(other, Fixnum):
# builtin divmod doesn't seem to work in RPython.
s = self.fixval
o = other.fixval
return Pair(Fixnum(s // o), Pair(Fixnum(s % o), nil))
else:
return other.divmod(self)
def neg(self):
try:
return Fixnum(-self.fixval)
except OverflowError:
return Bignum(rbigint.fromint(self.fixval).neg())
zero = Fixnum(0)
one = Fixnum(1)
class Bignum(Number):
_immutable_fields_ = ['bigval']
def __init__(self, bigval, source_pos=None):
assert isinstance(bigval, rbigint)
self.bigval = bigval
self.source_pos = source_pos
def tostring(self):
return str(self.bigval)
def equal(self, other):
return isinstance(other, Bignum) and other.bigval.eq(self.bigval)
#XXX: refactor out duplication once we have them all implemented.
def add(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.add(self)
return try_and_make_fixnum(self.bigval.add(otherval))
def mul(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
            return other.mul(self)
return try_and_make_fixnum(self.bigval.mul(otherval))
def divide(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.divide_by(self)
return try_and_make_fixnum(otherval.floordiv(self.bigval))
def divide_by(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.divide(self)
return try_and_make_fixnum(self.bigval.floordiv(otherval))
def mod(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.mod_by(self)
return try_and_make_fixnum(otherval.mod(self.bigval))
def mod_by(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.mod(self)
return try_and_make_fixnum(self.bigval.mod(otherval))
def divmod(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.divmod_by(self)
d, m = otherval.divmod(self.bigval)
return Pair(try_and_make_fixnum(d),
Pair(try_and_make_fixnum(m),
nil))
def divmod_by(self, other):
if isinstance(other, Bignum):
otherval = other.bigval
elif isinstance(other, Fixnum):
otherval = rbigint.fromint(other.fixval)
else:
assert isinstance(other, Number)
return other.divmod(self)
d, m = self.bigval.divmod(otherval)
return Pair(try_and_make_fixnum(d),
Pair(try_and_make_fixnum(m),
nil))
def neg(self):
return try_and_make_fixnum(self.bigval.neg())
def try_and_make_fixnum(bi):
try:
num = bi.toint()
return Fixnum(num)
except OverflowError:
return Bignum(bi)
#XXX: Unicode
class Symbol(KernelValue):
type_name = 'symbol'
_immutable_fields_ = ['symval']
def __init__(self, value, source_pos=None):
assert isinstance(value, str), "wrong value for Symbol: %s" % value
self.symval = value
self.source_pos = source_pos
def tostring(self):
return self.symval
def interpret_simple(self, env):
return env.lookup(self)
def equal(self, other):
return isinstance(other, Symbol) and other.symval == self.symval
_symbol_table = {}
def get_interned(name):
try:
return _symbol_table[name]
except KeyError:
ret = _symbol_table[name] = Symbol(name)
return ret
class List(KernelValue):
pass
class Null(List):
type_name = 'null'
def tostring(self):
return "()"
def equal(self, other):
return isinstance(other, Null)
nil = Null()
def is_nil(kv):
return isinstance(kv, Null)
class Ignore(KernelValue):
type_name = 'ignore'
def tostring(self):
return '#ignore'
def equal(self, other):
return isinstance(other, Ignore)
ignore = Ignore()
def is_ignore(kv):
return ignore.equal(kv)
class Inert(KernelValue):
type_name = 'inert'
def tostring(self):
return '#inert'
def equal(self, other):
return isinstance(other, Inert)
inert = Inert()
def is_inert(kv):
return inert.equal(kv)
class Boolean(KernelValue):
type_name = 'boolean'
    _immutable_fields_ = ['bval']
def __init__(self, value, source_pos=None):
assert isinstance(value, bool), "wrong value for Boolean: %s" % value
self.bval = value
self.source_pos = source_pos
def tostring(self):
return '#t' if self.bval else '#f'
def equal(self, other):
return isinstance(other, Boolean) and other.bval == self.bval
true = Boolean(True)
false = Boolean(False)
def is_true(kv):
return true.equal(kv)
def is_false(kv):
return false.equal(kv)
def kernel_boolean(python_boolean):
return true if python_boolean else false
class Pair(List):
type_name = 'pair'
_immutable_fields_ = ['car', 'cdr']
simple = False
def __init__(self, car, cdr, source_pos=None):
assert isinstance(car, KernelValue), "non-KernelValue car: %s" % car
assert isinstance(cdr, KernelValue), "non-KernelValue cdr: %s" % cdr
self.car = car
self.cdr = cdr
self.source_pos = source_pos
def tostring(self):
s = rstring.StringBuilder()
s.append("(")
pair = self
while True:
assert isinstance(pair, Pair), "not a pair: %s" % pair
s.append(pair.car.tostring())
if isinstance(pair.cdr, Pair):
pair = pair.cdr
s.append(" ")
else:
if not is_nil(pair.cdr):
s.append(" . ")
s.append(pair.cdr.tostring())
break
s.append(")")
return s.build()
def interpret(self, env, cont):
if cont.source_pos is None:
cont.source_pos = self.source_pos
return self.car, env, CombineCont(self.cdr,
env,
cont,
source_pos=self.car.source_pos)
def equal(self, other):
return (isinstance(other, Pair)
and self.car.equal(other.car)
and self.cdr.equal(other.cdr))
class Combiner(KernelValue):
type_name = 'combiner'
def combine(self, operands, env, cont):
raise NotImplementedError
class Operative(Combiner):
type_name = 'operative'
name = None
class CompoundOperative(Operative):
def __init__(self, formals, eformal, exprs, static_env, source_pos=None, name=None):
self.formals = formals
self.eformal = eformal
self.exprs = exprs
self.static_env = static_env
self.source_pos = source_pos
self.name = name
def combine(self, operands, env, cont):
eval_env = Environment([self.static_env])
match_parameter_tree(self.formals, operands, eval_env)
match_parameter_tree(self.eformal, env, eval_env)
return sequence(self.exprs, eval_env, cont)
def tostring(self):
if self.name is None:
return str(self)
else:
return "<operative '%s'>" % self.name
class Primitive(Operative):
def __init__(self, code, name):
self.code = code
self.source_pos = None
self.name = name
def combine(self, operands, env, cont):
return self.code(operands, env, cont)
def tostring(self):
return "<primitive '%s'>" % self.name
class SimplePrimitive(Operative):
def __init__(self, code, name):
self.code = code
self.source_pos = None
self.name = name
def combine(self, operands, env, cont):
return cont.plug_reduce(self.code(operands))
def tostring(self):
return "<primitive '%s'>" % self.name
class ContWrapper(Operative):
def __init__(self, cont, source_pos=None):
self.cont = cont
self.source_pos = source_pos
self.name = None
def combine(self, operands, env, cont):
return abnormally_pass(operands, cont, self.cont)
def abnormally_pass(operands, src_cont, dst_cont):
dst_cont.mark(True)
exiting = select_interceptors(src_cont, InnerGuardCont)
dst_cont.mark(False)
src_cont.mark(True)
entering = select_interceptors(dst_cont, OuterGuardCont)
src_cont.mark(False)
cont = dst_cont
for outer, interceptor in entering:
cont = InterceptCont(interceptor, cont, outer)
for outer, interceptor in reversed(exiting):
cont = InterceptCont(interceptor, cont, outer)
debug.on_abnormal_pass(operands, src_cont, dst_cont, exiting, entering)
return pass_to_next(operands, cont)
def pass_to_next(operands, cont):
if isinstance(cont, InterceptCont):
outer = cont.prev
return cont.interceptor.combine(
Pair(operands, Pair(Applicative(ContWrapper(outer)), nil)),
outer.env,
cont)
else:
return cont.plug_reduce(operands)
def select_interceptors(cont, cls):
ls = []
while cont is not None and not cont.marked:
if isinstance(cont, cls):
for guard in iter_list(cont.guards):
selector, interceptor = pythonify_list(guard)
if selector.marked:
outer_cont = cont if isinstance(cont, OuterGuardCont) else cont.prev
ls.append((outer_cont, interceptor))
break
cont = cont.prev
return ls
class Applicative(Combiner):
type_name = 'applicative'
def __init__(self, combiner, source_pos=None):
assert isinstance(combiner, Combiner), "wrong type to wrap: %s" % combiner
self.wrapped_combiner = combiner
self.source_pos = source_pos
def combine(self, operands, env, cont):
return evaluate_arguments(operands,
env,
ApplyCont(self.wrapped_combiner, env, cont))
def equal(self, other):
return (isinstance(other, Applicative)
and other.wrapped_combiner.equal(self.wrapped_combiner))
def tostring(self):
return "<applicative %s>" % self.wrapped_combiner.tostring()
class Environment(KernelValue):
type_name = 'environment'
def __init__(self, parents, bindings=None, source_pos=None):
for each in parents:
check_type(each, Environment)
assert isinstance(each, Environment)
self.parents = parents
self.bindings = bindings or {}
self.source_pos = source_pos
def set(self, symbol, value):
assert isinstance(symbol, Symbol), "setting non-symbol: %s" % symbol
self.bindings[symbol.symval] = value
def lookup(self, symbol):
ret = self.lookup_unchecked(symbol)
if ret is None:
signal_symbol_not_found(symbol)
else:
return ret
def lookup_unchecked(self, symbol):
assert isinstance(symbol, Symbol), "looking up non-symbol: %s" % symbol
ret = self.bindings.get(symbol.symval, None)
if ret is not None:
return ret
for parent in self.parents:
ret = parent.lookup_unchecked(symbol)
if ret is not None:
return ret
return None
class EncapsulationType(KernelValue):
def create_methods(self, source_pos=None):
constructor = Applicative(EncapsulationConstructor(self, source_pos))
predicate = Applicative(EncapsulationPredicate(self, source_pos))
accessor = Applicative(EncapsulationAccessor(self, source_pos))
return Pair(constructor, Pair(predicate, Pair(accessor, nil)))
class EncapsulatedObject(KernelValue):
# Abusing terminology; this is actually a union of types.
type_name = 'encapsulated-object'
def __init__(self, val, encapsulation_type, source_pos=None):
self.val = val
self.encapsulation_type = encapsulation_type
self.source_pos = source_pos
class EncapsulationMethod(Operative):
def __init__(self, encapsulation_type, source_pos=None):
self.encapsulation_type = encapsulation_type
self.source_pos = source_pos
self.name = None
class EncapsulationConstructor(EncapsulationMethod):
def combine(self, operands, env, cont):
to_wrap, = pythonify_list(operands, 1)
return cont.plug_reduce(EncapsulatedObject(to_wrap,
self.encapsulation_type))
class EncapsulationPredicate(EncapsulationMethod):
def combine(self, operands, env, cont):
for val in iter_list(operands):
if not isinstance(val, EncapsulatedObject):
return cont.plug_reduce(false)
if val.encapsulation_type is not self.encapsulation_type:
return cont.plug_reduce(false)
return cont.plug_reduce(true)
class EncapsulationAccessor(EncapsulationMethod):
def combine(self, operands, env, cont):
wrapped, = pythonify_list(operands, 1)
check_type(wrapped, EncapsulatedObject)
assert isinstance(wrapped, EncapsulatedObject)
if wrapped.encapsulation_type is self.encapsulation_type:
return cont.plug_reduce(wrapped.val)
else:
signal_encapsulation_type_error(self, wrapped)
# Copying a trick from the reference implementation in the R-1RK.
#
# This is a member of promises so it can be overwritten altogether when
# a promise results in another that must be resolved immediately.
#
# If env is None then val is the result of this promise, otherwise val is the
# expression that we need to evaluate in env.
class PromiseData(object):
def __init__(self, val, env):
self.val = val
self.env = env
class Promise(KernelValue):
type_name = 'promise'
def __init__(self, val, env, source_pos=None):
self.data = PromiseData(val, env)
self.source_pos = source_pos
def force(self, cont):
if self.data.env is None:
return cont.plug_reduce(self.data.val)
else:
return (self.data.val,
self.data.env,
HandlePromiseResultCont(self, cont))
class KeyedDynamicBinder(Operative):
def combine(self, operands, env, cont):
value, thunk = pythonify_list(operands, 2)
return thunk.combine(nil,
Environment([]),
KeyedDynamicCont(self, value, cont))
class KeyedDynamicAccessor(Operative):
def __init__(self, binder, source_pos=None):
self.binder = binder
self.source_pos = source_pos
def combine(self, operands, env, cont):
pythonify_list(operands, 0)
prev = cont
while prev is not None:
if (isinstance(prev, KeyedDynamicCont)
and prev.binder is self.binder):
return cont.plug_reduce(prev.value)
prev = prev.prev
signal_unbound_dynamic_key(self)
class KeyedStaticBinder(Operative):
def combine(self, operands, env, cont):
value, env = pythonify_list(operands, 2)
check_type(env, Environment)
assert isinstance(env, Environment)
return cont.plug_reduce(KeyedEnvironment(self, value, env))
class KeyedEnvironment(Environment):
def __init__(self, binder, value, parent, source_pos=None):
Environment.__init__(self, [parent], {}, source_pos)
self.binder = binder
self.value = value
class KeyedStaticAccessor(Operative):
def __init__(self, binder, source_pos=None):
self.binder = binder
self.source_pos = source_pos
def combine(self, operands, env, cont):
pythonify_list(operands, 0)
ret = self.find_binding(env)
if ret is None:
signal_unbound_static_key(self)
else:
return cont.plug_reduce(ret)
def find_binding(self, env):
if (isinstance(env, KeyedEnvironment)
and env.binder is self.binder):
return env.value
for parent in env.parents:
ret = self.find_binding(parent)
if ret is not None:
return ret
return None
class Continuation(KernelValue):
type_name = 'continuation'
    _immutable_fields_ = ['prev']
def __init__(self, prev, source_pos=None):
self.prev = prev
self.marked = False
self.source_pos = source_pos
def plug_reduce(self, val):
debug.on_plug_reduce(val, self)
return self._plug_reduce(val)
def _plug_reduce(self, val):
return self.prev.plug_reduce(val)
def mark(self, boolean):
self.marked = boolean
if self.prev is not None:
self.prev.mark(boolean)
class RootCont(Continuation):
def __init__(self):
Continuation.__init__(self, None)
self.source_pos = None
def _plug_reduce(self, val):
raise KernelExit
class BaseErrorCont(Continuation):
def _plug_reduce(self, val):
if not isinstance(val, ErrorObject):
print "*** ERROR ***:",
print val.todisplay()
return Continuation._plug_reduce(self, val)
def evaluate_arguments(vals, env, cont):
if is_nil(vals):
return cont.plug_reduce(nil)
elif isinstance(vals, Pair):
if is_nil(vals.cdr):
return vals.car, env, NoMoreArgsCont(cont, vals.car.source_pos)
else:
return vals.car, env, EvalArgsCont(vals.cdr, env, cont, vals.car.source_pos)
else:
# XXX: if the arguments are an improper list, this only prints the last
# cdr.
signal_combine_with_non_list_operands(Pair(vals, nil))
# XXX: DRY
def s_andp(vals, env, cont):
if is_nil(vals):
return cont.plug_reduce(true)
elif isinstance(vals, Pair):
return vals.car, env, AndCont(vals.cdr, env, cont, vals.car.source_pos)
else:
# XXX: if the arguments are an improper list, this only prints the last
# cdr.
signal_value_error("$and? with non-list arguments", Pair(vals, nil))
class AndCont(Continuation):
def __init__(self, exprs, env, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.exprs = exprs
self.env = env
def _plug_reduce(self, val):
check_type(val, Boolean)
if true.equal(val):
return s_andp(self.exprs, self.env, self.prev)
else:
return self.prev.plug_reduce(false)
# XXX: DRY
def s_orp(vals, env, cont):
if is_nil(vals):
return cont.plug_reduce(false)
elif isinstance(vals, Pair):
return vals.car, env, OrCont(vals.cdr, env, cont, vals.car.source_pos)
else:
# XXX: if the arguments are an improper list, this only prints the last
# cdr.
signal_value_error("$or? with non-list arguments", Pair(vals, nil))
class OrCont(Continuation):
def __init__(self, exprs, env, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.exprs = exprs
self.env = env
def _plug_reduce(self, val):
check_type(val, Boolean)
if true.equal(val):
return self.prev.plug_reduce(true)
else:
return s_orp(self.exprs, self.env, self.prev)
# XXX: refactor to extract common pattern with evaluate_arguments.
# this one happens to work on a Python list because we just
# happen to build one for transposing the list arguments to
# map.
def map_(combiner, transposed_lists, index, env, cont):
if not transposed_lists:
return cont.plug_reduce(nil)
if index == len(transposed_lists) - 1:
return combiner.combine(transposed_lists[index],
env, NoMoreArgsCont(cont))
else:
return combiner.combine(transposed_lists[index],
env,
MapCont(combiner,
transposed_lists,
index+1,
env,
cont))
class MapCont(Continuation):
def __init__(self, combiner, lists, index, env, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.combiner = combiner
self.lists = lists
self.index = index
self.env = env
def _plug_reduce(self, val):
return map_(self.combiner,
self.lists,
self.index,
self.env,
GatherArgsCont(val, self.prev))
class EvalArgsCont(Continuation):
def __init__(self, exprs, env, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.exprs = exprs
self.env = env
def _plug_reduce(self, val):
return evaluate_arguments(self.exprs,
self.env,
GatherArgsCont(val, self.prev))
class NoMoreArgsCont(Continuation):
def _plug_reduce(self, val):
return self.prev.plug_reduce(Pair(val, nil))
class GatherArgsCont(Continuation):
def __init__(self, val, prev, source_pos=None):
Continuation.__init__(self, prev)
self.val = val
self.source_pos = source_pos
def _plug_reduce(self, val):
return self.prev.plug_reduce(Pair(self.val, val))
class ApplyCont(Continuation):
def __init__(self, combiner, env, prev, source_pos=None):
Continuation.__init__(self, prev)
self.combiner = combiner
self.env = env
self.source_pos = source_pos
def _plug_reduce(self, args):
return self.combiner.combine(args, self.env, self.prev)
class CombineCont(Continuation):
def __init__(self, operands, env, prev, source_pos=None):
Continuation.__init__(self, prev)
self.operands = operands
self.env = env
self.source_pos = source_pos
def _plug_reduce(self, val):
return val.combine(self.operands, self.env, self.prev)
class GuardCont(Continuation):
def __init__(self, guards, env, prev, source_pos=None):
Continuation.__init__(self, prev)
self.guards = guards
self.env = env
self.source_pos = source_pos
class SequenceCont(Continuation):
def __init__(self, exprs, env, prev, source_pos=None):
Continuation.__init__(self, prev)
self.exprs = exprs
self.env = env
self.source_pos = source_pos
def _plug_reduce(self, val):
return sequence(self.exprs, self.env, self.prev)
def sequence(exprs, env, cont):
if is_nil(exprs):
return cont.plug_reduce(inert)
assert isinstance(exprs, Pair), "non-pair sequence: %s" % exprs
if is_nil(exprs.cdr):
# The whole function can be made shorter and simpler if we don't treat
# this as a special case, but then we'd be creating an extra
# continuation for the last element of a list. Avoiding that should be
# a significant savings since every list has a last element.
#
# This optimization was taken from Queinnec's LiSP (see README).
        # I haven't actually measured yet how worthwhile it is when compiling
        # with JIT enabled.
return exprs.car, env, cont
else:
return exprs.car, env, SequenceCont(exprs.cdr, env, cont)
class IfCont(Continuation):
def __init__(self, consequent, alternative, env, prev):
Continuation.__init__(self, prev)
self.consequent = consequent
self.alternative = alternative
self.env = env
def _plug_reduce(self, val):
if is_true(val):
return self.consequent, self.env, self.prev
elif is_false(val):
return self.alternative, self.env, self.prev
else:
signal_type_error(Boolean, val)
class CondCont(Continuation):
def __init__(self, clauses, env, prev, source_pos=None):
Continuation.__init__(self, prev)
self.clauses = clauses
self.env = env
self.prev = prev
self.source_pos = source_pos
def _plug_reduce(self, val):
if is_true(val):
return sequence(cdar(self.clauses), self.env, self.prev)
else:
return cond(cdr(self.clauses), self.env, self.prev)
def cond(vals, env, cont):
if is_nil(vals):
return cont.plug_reduce(inert)
return caar(vals), env, CondCont(vals, env, cont)
class DefineCont(Continuation):
def __init__(self, definiend, env, prev, source_pos=None):
Continuation.__init__(self, prev)
self.definiend = definiend
self.env = env
self.source_pos = source_pos
def _plug_reduce(self, val):
match_parameter_tree(self.definiend, val, self.env)
return self.prev.plug_reduce(inert)
def match_parameter_tree(param_tree, operand_tree, env):
if isinstance(param_tree, Symbol):
op = operand_tree
while isinstance(op, Applicative):
op = op.wrapped_combiner
if isinstance(op, Operative) and op.name is None:
op.name = param_tree.symval
env.set(param_tree, operand_tree)
elif is_ignore(param_tree):
pass
elif is_nil(param_tree):
if not is_nil(operand_tree):
# XXX: this only shows the tail of the mismatch
signal_operand_mismatch(param_tree, operand_tree)
elif isinstance(param_tree, Pair):
if not isinstance(operand_tree, Pair):
# XXX: this only shows the tail of the mismatch
signal_operand_mismatch(param_tree, operand_tree)
match_parameter_tree(param_tree.car, operand_tree.car, env)
match_parameter_tree(param_tree.cdr, operand_tree.cdr, env)
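# Small illustrative example (the symbol names are arbitrary): matching the
# parameter tree (x y) against the operand tree (1 2) binds x to 1 and y to 2.
#
#   env = Environment([])
#   params = Pair(Symbol('x'), Pair(Symbol('y'), nil))
#   operands = Pair(Fixnum(1), Pair(Fixnum(2), nil))
#   match_parameter_tree(params, operands, env)
#   env.lookup(Symbol('x'))  # -> Fixnum(1)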
class InnerGuardCont(GuardCont):
pass
class OuterGuardCont(GuardCont):
pass
class InterceptCont(Continuation):
def __init__(self, interceptor, next_cont, outer_cont, source_pos=None):
# The outer continuation is the parent of this one for the purposes of
# abnormal passes, but normal return from this continuation goes to the
# next interceptor (or to the destination, if this is the last one)
# instead.
Continuation.__init__(self, outer_cont, source_pos)
self.next_cont = next_cont
assert isinstance(interceptor, Applicative), "non-applicative interceptor: %s" % interceptor
self.interceptor = interceptor.wrapped_combiner
def _plug_reduce(self, val):
return pass_to_next(val, self.next_cont)
class ExtendCont(Continuation):
def __init__(self, receiver, env, cont_to_extend, source_pos=None):
Continuation.__init__(self, cont_to_extend)
assert isinstance(receiver, Applicative), "non-applicative receiver: %s" % receiver
self.receiver = receiver.wrapped_combiner
self.env = env
self.source_pos = source_pos
def _plug_reduce(self, val):
return self.receiver.combine(val, self.env, self.prev)
class HandlePromiseResultCont(Continuation):
def __init__(self, promise, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.promise = promise
def plug_reduce(self, val):
if self.promise.data.env is None:
return self.prev.plug_reduce(self.promise.data.val)
if isinstance(val, Promise):
self.promise.data = val.data
return self.promise.force(self.prev)
else:
self.promise.data.val = val
self.promise.data.env = None
return self.prev.plug_reduce(val)
class KeyedDynamicCont(Continuation):
def __init__(self, binder, value, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.binder = binder
self.value = value
class DebugErrorCont(Continuation):
def plug_reduce(self, val):
return debug.on_error(val)
class ConstantCont(Continuation):
"""Ignore the value passed to me; just pass on the one provided in the
constructor."""
def __init__(self, val, prev):
Continuation.__init__(self, prev)
self.val = val
def _plug_reduce(self, val):
return self.prev.plug_reduce(self.val)
class BindsCont(Continuation):
def __init__(self, pyvals, prev, source_pos=None):
Continuation.__init__(self, prev, source_pos)
self.pyvals = pyvals
def _plug_reduce(self, env):
check_type(env, Environment)
assert isinstance(env, Environment)
for symbol in self.pyvals[1:]:
if env.lookup_unchecked(symbol) is None:
return self.prev.plug_reduce(false)
return self.prev.plug_reduce(true)
def car(val):
assert isinstance(val, Pair), "car on non-pair: %s" % val
return val.car
def cdr(val):
assert isinstance(val, Pair), "cdr on non-pair: %s" % val
return val.cdr
# caar, cadr, ..., caadr, ..., cdddddr.
for length in range(2, 6):
for adsoup in product('ad', repeat=length):
exec("def c%sr(val): return %sval%s"
% (''.join(adsoup),
''.join('c%sr(' % each for each in adsoup),
''.join(')' for each in adsoup)))
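# For illustration, the loop above generates composed accessors equivalent to
# hand-written definitions such as:
#
#   def cadr(val): return car(cdr(val))   # second element of a proper list
#   def cddr(val): return cdr(cdr(val))   # list minus its first two elements
#
# so, e.g., cadr(Pair(zero, Pair(one, nil))) evaluates to one.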
# XXX: these don't feel like they belong in a kernel type module, but placing
# them in primitive.py would create a cyclic dependency.
root_cont = RootCont()
debug_error_cont = DebugErrorCont(root_cont)
error_cont = BaseErrorCont(debug_error_cont)
system_error_cont = Continuation(error_cont)
user_error_cont = Continuation(error_cont)
file_not_found_cont = Continuation(user_error_cont)
parse_error_cont = Continuation(user_error_cont)
type_error_cont = Continuation(user_error_cont)
value_error_cont = Continuation(user_error_cont)
combine_with_non_list_operands_cont = Continuation(value_error_cont)
encapsulation_type_error_cont = Continuation(type_error_cont)
operand_mismatch_cont = Continuation(type_error_cont)
arity_mismatch_cont = Continuation(operand_mismatch_cont)
symbol_not_found_cont = Continuation(user_error_cont)
unbound_dynamic_key_cont = Continuation(user_error_cont)
unbound_static_key_cont = Continuation(user_error_cont)
arithmetic_error_cont = Continuation(user_error_cont)
add_positive_to_negative_infinity_cont = Continuation(arithmetic_error_cont)
multiply_infinity_by_zero_cont = Continuation(arithmetic_error_cont)
divide_infinity_cont = Continuation(arithmetic_error_cont)
divide_by_zero_cont = Continuation(arithmetic_error_cont)
class ErrorObject(KernelValue):
type_name = 'error-object'
def __init__(self, dest_cont, message, irritants):
self.dest_cont = dest_cont
assert isinstance(message, str)
if not is_nil(irritants):
check_type(irritants, Pair)
self.message = String(message)
self.irritants = irritants
# Filled in by the evaluator.
self.val = None
self.env = None
self.src_cont = None
def todisplay(self):
return "*** ERROR ***: %s" % self.message.todisplay()
def raise_(*args):
raise KernelException(ErrorObject(*args))
def signal_file_not_found(filename):
raise_(file_not_found_cont,
("file '%s' not found" % filename),
Pair(String(filename), nil))
def signal_parse_error(error_string, source_filename):
raise_(parse_error_cont,
error_string,
Pair(String(source_filename), nil))
def signal_symbol_not_found(symbol):
raise_(symbol_not_found_cont,
"Symbol '%s' not found" % symbol.todisplay(),
Pair(symbol, nil))
def signal_unbound_dynamic_key(accessor):
raise_(unbound_dynamic_key_cont,
"Binder '%s' not in dynamic extent" % accessor.binder.todisplay(),
Pair(accessor, nil))
def signal_unbound_static_key(accessor):
raise_(unbound_static_key_cont,
"Binder '%s' not in scope" % accessor.binder.todisplay(),
Pair(accessor, nil))
def signal_type_error(expected_type, actual_value):
raise_(type_error_cont,
"Expected object of type %s, but got %s instead"
% (expected_type.type_name,
actual_value.tostring()),
Pair(String(expected_type.type_name),
Pair(actual_value, nil)))
def signal_value_error(msg, irritants):
raise_(value_error_cont, msg, irritants)
def signal_combine_with_non_list_operands(irritants):
raise_(combine_with_non_list_operands_cont,
("Combine with non-list operands: %s" % irritants.tostring()),
irritants)
def signal_encapsulation_type_error(expected_type, actual_value):
raise_(encapsulation_type_error_cont,
"Expected encapsulated object of type %s, but got %s instead"
% (expected_type.tostring(),
actual_value.tostring()),
Pair(expected_type, Pair(actual_value, nil)))
def check_type(val, type_):
if not isinstance(val, type_):
signal_type_error(type_, val)
def signal_operand_mismatch(expected_params, actual_operands):
raise_(operand_mismatch_cont,
"%s doesn't match expected param tree %s"
% (actual_operands.tostring(),
expected_params.tostring()),
Pair(expected_params, Pair(actual_operands, nil)))
def signal_arity_mismatch(expected_arity, actual_arguments):
raise_(arity_mismatch_cont,
"expected %s arguments but got %s"
% (expected_arity,
actual_arguments.tostring()),
Pair(String(expected_arity), Pair(actual_arguments, nil)))
def signal_add_positive_to_negative_infinity(pos, neg):
raise_(add_positive_to_negative_infinity_cont,
"Tried to add positive to negative infinity",
Pair(pos, Pair(neg, nil)))
def signal_multiply_infinity_by_zero(inf, zero):
raise_(multiply_infinity_by_zero_cont,
"Tried to multiply infinity by zero",
Pair(inf, Pair(zero, nil)))
def signal_divide_infinity(inf, divisor):
raise_(divide_infinity_cont,
"Tried to divide infinity",
Pair(inf, Pair(divisor, nil)))
def signal_divide_by_zero(dividend, zero):
raise_(divide_by_zero_cont,
"Tried to divide by zero",
Pair(dividend, Pair(zero, nil)))
# Not actual kernel type.
class KernelExit(Exception):
pass
# We need to wrap ErrorObjects in these because we want them to be KernelValues
# and rpython doesn't allow raising non-Exceptions nor multiple inheritance.
class KernelException(Exception):
def __init__(self, val):
assert isinstance(val, ErrorObject)
self.val = val
def __str__(self):
return self.val.todisplay()
class NonNullListTail(Exception):
def __init__(self, val):
self.val = val
def iter_list(vals):
while isinstance(vals, Pair):
yield vals.car
vals = vals.cdr
if not is_nil(vals):
raise NonNullListTail(vals)
def pythonify_list(vals, check_arity=-1):
ret = []
for item in iter_list(vals):
ret.append(item)
if check_arity != -1 and len(ret) != check_arity:
signal_arity_mismatch(str(check_arity), vals)
return ret
def kernelify_list(ls):
ret = nil
for x in reversed(ls):
ret = Pair(x, ret)
return ret
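# Rough round-trip sketch of the helpers above (the values are arbitrary):
#
#   pythonify_list(Pair(one, Pair(zero, nil)))   # -> [one, zero]
#   kernelify_list([one, zero]).tostring()       # -> '(1 0)'
#
# iter_list raises NonNullListTail for improper lists, and pythonify_list
# signals an arity mismatch when check_arity is given and does not match.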
| euccastro/icbink | kernel_type.py | Python | mit | 41,697 |
import unittest
from common import gtk
class TreeViewTest(unittest.TestCase):
# Check for #350252
    # Comment out this test until we get a response
# on bug #546005 and #498010
#
#def test_default_attributes(self):
# model = gtk.ListStore(str)
# treeview = gtk.TreeView(model)
# treeview.set_cursor(1)
# Bug #347273
def testTreeSelectionForeach(self):
model = gtk.ListStore(str)
treeview = gtk.TreeView(model)
sel = treeview.get_selection()
iter_ = model.append()
model.set_value(iter_, 0, "foo")
sel.select_path((0,))
list_ = []
sel.selected_foreach(lambda model, path, iter: list_.append(iter))
list_ = [model.get_value(iter_, 0) for iter_ in list_]
self.assertEqual(list_, ["foo"])
if __name__ == '__main__':
unittest.main()
| GNOME/pygtk | tests/test_treeview.py | Python | lgpl-2.1 | 864 |
import json
from six.moves.urllib.parse import parse_qs
from xml.dom.minidom import parseString
class XeroException(Exception):
def __init__(self, response, msg=None):
self.response = response
super(XeroException, self).__init__(msg)
class XeroNotVerified(Exception):
# Credentials haven't been verified
pass
class XeroBadRequest(XeroException):
# HTTP 400: Bad Request
def __init__(self, response):
if response.headers['content-type'].startswith('application/json'):
data = json.loads(response.text)
msg = "%s: %s" % (data['Type'], data['Message'])
self.errors = [err['Message']
for elem in data.get('Elements', [])
for err in elem.get('ValidationErrors', [])
]
self.problem = self.errors[0] if len(self.errors) > 0 else None
super(XeroBadRequest, self).__init__(response, msg=msg)
elif response.headers['content-type'].startswith('text/html'):
payload = parse_qs(response.text)
self.errors = [payload['oauth_problem'][0]]
self.problem = self.errors[0]
super(XeroBadRequest, self).__init__(response, payload['oauth_problem_advice'][0])
else:
# Extract the messages from the text.
# parseString takes byte content, not unicode.
dom = parseString(response.text.encode(response.encoding))
messages = dom.getElementsByTagName('Message')
msg = messages[0].childNodes[0].data
self.errors = [
m.childNodes[0].data for m in messages[1:]
]
self.problem = self.errors[0]
super(XeroBadRequest, self).__init__(response, msg)
class XeroUnauthorized(XeroException):
# HTTP 401: Unauthorized
def __init__(self, response):
payload = parse_qs(response.text)
self.errors = [payload['oauth_problem'][0]]
self.problem = self.errors[0]
super(XeroUnauthorized, self).__init__(response, payload['oauth_problem_advice'][0])
class XeroForbidden(XeroException):
# HTTP 403: Forbidden
def __init__(self, response):
super(XeroForbidden, self).__init__(response, response.text)
class XeroNotFound(XeroException):
# HTTP 404: Not Found
def __init__(self, response):
super(XeroNotFound, self).__init__(response, response.text)
class XeroUnsupportedMediaType(XeroException):
# HTTP 415: UnsupportedMediaType
def __init__(self, response):
super(XeroUnsupportedMediaType, self).__init__(response, response.text)
class XeroInternalError(XeroException):
# HTTP 500: Internal Error
def __init__(self, response):
super(XeroInternalError, self).__init__(response, response.text)
class XeroNotImplemented(XeroException):
# HTTP 501
def __init__(self, response):
# Extract the useful error message from the text.
# parseString takes byte content, not unicode.
dom = parseString(response.text.encode(response.encoding))
messages = dom.getElementsByTagName('Message')
msg = messages[0].childNodes[0].data
super(XeroNotImplemented, self).__init__(response, msg)
class XeroRateLimitExceeded(XeroException):
# HTTP 503 - Rate limit exceeded
def __init__(self, response, payload):
self.errors = [payload['oauth_problem'][0]]
self.problem = self.errors[0]
super(XeroRateLimitExceeded, self).__init__(response, payload['oauth_problem_advice'][0])
class XeroNotAvailable(XeroException):
# HTTP 503 - Not available
def __init__(self, response):
super(XeroNotAvailable, self).__init__(response, response.text)
class XeroExceptionUnknown(XeroException):
# Any other exception.
pass
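# Hedged illustration (not part of this module's public API) of how a caller
# might map an HTTP response onto the exception classes above. The status-code
# table is an assumption drawn from the per-class comments, and
# XeroRateLimitExceeded is omitted because it also requires the parsed oauth
# payload as a second argument.
def _raise_for_response_example(response):
    status_to_exception = {
        400: XeroBadRequest,
        401: XeroUnauthorized,
        403: XeroForbidden,
        404: XeroNotFound,
        415: XeroUnsupportedMediaType,
        500: XeroInternalError,
        501: XeroNotImplemented,
        503: XeroNotAvailable,
    }
    exception_class = status_to_exception.get(response.status_code, XeroExceptionUnknown)
    raise exception_class(response)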
| unomena/pyxeropos | xero/exceptions.py | Python | bsd-3-clause | 3,809 |
#-*- coding: utf-8 -*-
"""
Certificates Tests.
"""
import itertools
import json
import ddt
import mock
import six
from django.conf import settings
from django.test.utils import override_settings
from opaque_keys.edx.keys import AssetKey
from six.moves import range
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import get_lms_link_for_certificate_web_view, reverse_course_url
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.util.testing import EventTestMixin, UrlResetMixin
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from ..certificates import CERTIFICATE_SCHEMA_VERSION, CertificateManager
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
CERTIFICATE_JSON = {
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'version': CERTIFICATE_SCHEMA_VERSION,
}
CERTIFICATE_JSON_WITH_SIGNATORIES = {
u'name': u'Test certificate',
u'description': u'Test description',
u'version': CERTIFICATE_SCHEMA_VERSION,
u'course_title': 'Course Title Override',
u'is_active': True,
u'signatories': [
{
"name": "Bob Smith",
"title": "The DEAN.",
"signature_image_path": "/c4x/test/CSS101/asset/Signature.png"
}
]
}
C4X_SIGNATORY_PATH = '/c4x/test/CSS101/asset/Signature{}.png'
SIGNATORY_PATH = 'asset-v1:test+CSS101+SP2017+type@asset+block@Signature{}.png'
# pylint: disable=no-member
class HelperMethods(object):
"""
Mixin that provides useful methods for certificate configuration tests.
"""
def _create_fake_images(self, asset_keys):
"""
Creates fake image files for a list of asset_keys.
"""
for asset_key_string in asset_keys:
asset_key = AssetKey.from_string(asset_key_string)
content = StaticContent(
asset_key, "Fake asset", "image/png", "data",
)
contentstore().save(content)
def _add_course_certificates(self, count=1, signatory_count=0, is_active=False,
asset_path_format=C4X_SIGNATORY_PATH):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Name ' + str(i),
'title': 'Title ' + str(i),
'signature_image_path': asset_path_format.format(i),
'id': i
} for i in range(signatory_count)
]
# create images for signatory signatures except the last signatory
self._create_fake_images(signatory['signature_image_path'] for signatory in signatories[:-1])
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'signatories': signatories,
'version': CERTIFICATE_SCHEMA_VERSION,
'is_active': is_active
} for i in range(count)
]
self.course.certificates = {'certificates': certificates}
self.save_course()
# pylint: disable=no-member
class CertificatesBaseTestCase(object):
"""
Mixin with base test cases for the certificates.
"""
def _remove_ids(self, content):
"""
Remove ids from the response. We cannot predict IDs, because they're
generated randomly.
We use this method to clean up response when creating new certificate.
"""
certificate_id = content.pop("id")
return certificate_id
def test_required_fields_are_absent(self):
"""
Test required fields are absent.
"""
bad_jsons = [
# must have name of the certificate
{
u'description': 'Test description',
u'version': CERTIFICATE_SCHEMA_VERSION
},
# an empty json
{},
]
for bad_json in bad_jsons:
response = self.client.post(
self._url(),
data=json.dumps(bad_json),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_invalid_json(self):
"""
Test invalid json handling.
"""
# Invalid JSON.
invalid_json = u"{u'name': 'Test Name', u'description': 'Test description'," \
u" u'version': " + str(CERTIFICATE_SCHEMA_VERSION) + ", []}"
response = self.client.post(
self._url(),
data=invalid_json,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_certificate_data_validation(self):
#Test certificate schema version
json_data_1 = {
u'version': 100,
u'name': u'Test certificate',
u'description': u'Test description'
}
with self.assertRaises(Exception) as context:
CertificateManager.validate(json_data_1)
self.assertIn(
"Unsupported certificate schema version: 100. Expected version: 1.",
str(context.exception)
)
#Test certificate name is missing
json_data_2 = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'description': u'Test description'
}
with self.assertRaises(Exception) as context:
CertificateManager.validate(json_data_2)
self.assertIn('must have name of the certificate', str(context.exception))
@ddt.ddt
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class CertificatesListHandlerTestCase(
EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods, UrlResetMixin
):
"""
Test cases for certificates_list_handler.
"""
def setUp(self): # lint-amnesty, pylint: disable=arguments-differ
"""
Set up CertificatesListHandlerTestCase.
"""
super(CertificatesListHandlerTestCase, self).setUp('cms.djangoapps.contentstore.views.certificates.tracker') # lint-amnesty, pylint: disable=super-with-arguments
self.reset_urls()
def _url(self):
"""
Return url for the handler.
"""
return reverse_course_url('certificates_list_handler', self.course.id)
def test_can_create_certificate(self):
"""
Test that you can create a certificate.
"""
expected = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'signatories': []
}
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertEqual(response.status_code, 201)
self.assertIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
certificate_id = self._remove_ids(content)
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.created',
course_id=six.text_type(self.course.id),
configuration_id=certificate_id,
)
def test_cannot_create_certificate_if_user_has_no_write_permissions(self):
"""
        Tests that a user without write permissions on the course cannot create a certificate
"""
user = UserFactory()
self.client.login(username=user.username, password='test')
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertEqual(response.status_code, 403)
@override_settings(LMS_BASE=None)
def test_no_lms_base_for_certificate_web_view_link(self):
test_link = get_lms_link_for_certificate_web_view(
course_key=self.course.id,
mode='honor'
)
self.assertEqual(test_link, None)
@override_settings(LMS_BASE="lms_base_url")
def test_lms_link_for_certificate_web_view(self):
test_url = "//lms_base_url/certificates/" \
"course/" + six.text_type(self.course.id) + '?preview=honor'
link = get_lms_link_for_certificate_web_view(
course_key=self.course.id,
mode='honor'
)
self.assertEqual(link, test_url)
@mock.patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_certificate_info_in_response(self):
"""
Test that certificate has been created and rendered properly with non-audit course mode.
"""
CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON_WITH_SIGNATORIES
)
self.assertEqual(response.status_code, 201)
# in html response
result = self.client.get_html(self._url())
self.assertContains(result, 'Test certificate')
self.assertContains(result, 'Test description')
# in JSON response
response = self.client.get_json(self._url())
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['name'], 'Test certificate')
self.assertEqual(data[0]['description'], 'Test description')
self.assertEqual(data[0]['version'], CERTIFICATE_SCHEMA_VERSION)
@mock.patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_certificate_info_not_in_response(self):
"""
        Test that the certificate is not rendered for an audit-only course mode.
"""
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON_WITH_SIGNATORIES
)
self.assertEqual(response.status_code, 201)
# in html response
result = self.client.get_html(self._url())
self.assertNotContains(result, 'Test certificate')
def test_unsupported_http_accept_header(self):
"""
Test if not allowed header present in request.
"""
response = self.client.get(
self._url(),
HTTP_ACCEPT="text/plain",
)
self.assertEqual(response.status_code, 406)
def test_certificate_unsupported_method(self):
"""
Unit Test: test_certificate_unsupported_method
"""
resp = self.client.put(self._url())
self.assertEqual(resp.status_code, 405)
def test_not_permitted(self):
"""
        Test that a permission denied error is raised when the user has no read access to the course.
"""
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
response = test_user_client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertContains(response, "error", status_code=403)
def test_audit_course_mode_is_skipped(self):
"""
Tests audit course mode is skipped when rendering certificates page.
"""
CourseModeFactory.create(course_id=self.course.id)
CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
response = self.client.get_html(
self._url(),
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'verified')
self.assertNotContains(response, 'audit')
def test_audit_only_disables_cert(self):
"""
        Tests that the certificates page is disabled when the course only offers the audit mode.
"""
CourseModeFactory.create(course_id=self.course.id, mode_slug='audit')
response = self.client.get_html(
self._url(),
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This course does not use a mode that offers certificates.')
self.assertNotContains(response, 'This module is not enabled.')
self.assertNotContains(response, 'Loading')
@ddt.data(
['audit', 'verified'],
['verified'],
['audit', 'verified', 'credit'],
['verified', 'credit'],
['professional']
)
def test_non_audit_enables_cert(self, slugs):
"""
        Tests that the certificates page is enabled when the course offers at least one non-audit mode.
"""
for slug in slugs:
CourseModeFactory.create(course_id=self.course.id, mode_slug=slug)
response = self.client.get_html(
self._url(),
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'This course does not use a mode that offers certificates.')
self.assertNotContains(response, 'This module is not enabled.')
self.assertContains(response, 'Loading')
def test_assign_unique_identifier_to_certificates(self):
"""
Test certificates have unique ids
"""
self._add_course_certificates(count=2)
json_data = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'is_active': True,
u'signatories': []
}
response = self.client.post(
self._url(),
data=json.dumps(json_data),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
new_certificate = json.loads(response.content.decode('utf-8'))
for prev_certificate in self.course.certificates['certificates']:
self.assertNotEqual(new_certificate.get('id'), prev_certificate.get('id'))
@ddt.ddt
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class CertificatesDetailHandlerTestCase(
EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods, UrlResetMixin
):
"""
Test cases for CertificatesDetailHandlerTestCase.
"""
_id = 0
def setUp(self): # pylint: disable=arguments-differ
"""
Set up CertificatesDetailHandlerTestCase.
"""
super(CertificatesDetailHandlerTestCase, self).setUp('cms.djangoapps.contentstore.views.certificates.tracker') # lint-amnesty, pylint: disable=super-with-arguments
self.reset_urls()
def _url(self, cid=-1):
"""
Return url for the handler.
"""
cid = cid if cid > 0 else self._id
return reverse_course_url(
'certificates_detail_handler',
self.course.id,
kwargs={'certificate_id': cid},
)
def test_can_create_new_certificate_if_it_does_not_exist(self):
"""
PUT/POST new certificate.
"""
expected = {
u'id': 666,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'course_title': u'Course Title Override',
u'signatories': []
}
response = self.client.put(
self._url(cid=666),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.created',
course_id=six.text_type(self.course.id),
configuration_id=666,
)
def test_can_edit_certificate(self):
"""
Edit certificate, check its id and modified fields.
"""
self._add_course_certificates(count=2)
expected = {
u'id': 1,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'is_active': True,
u'course_title': u'Course Title Override',
u'signatories': []
}
response = self.client.put(
self._url(cid=1),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.modified',
course_id=six.text_type(self.course.id),
configuration_id=1,
)
self.reload_course()
# Verify that certificate is properly updated in the course.
course_certificates = self.course.certificates['certificates']
self.assertEqual(len(course_certificates), 2)
self.assertEqual(course_certificates[1].get('name'), u'New test certificate')
self.assertEqual(course_certificates[1].get('description'), 'New test description')
def test_can_edit_certificate_without_is_active(self):
"""
        Tests that a user can edit a certificate even when the is_active attribute is not present
        for the given certificate. Old courses might not have the is_active attribute in their certificate data.
"""
certificates = [
{
'id': 1,
'name': 'certificate with is_active',
'description': 'Description ',
'signatories': [],
'version': CERTIFICATE_SCHEMA_VERSION,
}
]
self.course.certificates = {'certificates': certificates}
self.save_course()
expected = {
u'id': 1,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'is_active': True,
u'course_title': u'Course Title Override',
u'signatories': []
}
response = self.client.post(
self._url(cid=1),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 201)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_can_delete_certificate_with_signatories(self, signatory_path):
"""
Delete certificate
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.assert_event_emitted(
'edx.certificate.configuration.deleted',
course_id=six.text_type(self.course.id),
configuration_id='1',
)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
def test_can_delete_certificate_with_slash_prefix_signatory(self):
"""
Delete certificate
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format="/" + SIGNATORY_PATH)
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.assert_event_emitted(
'edx.certificate.configuration.deleted',
course_id=six.text_type(self.course.id),
configuration_id='1',
)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
@ddt.data("not_a_valid_asset_key{}.png", "/not_a_valid_asset_key{}.png")
def test_can_delete_certificate_with_invalid_signatory(self, signatory_path):
"""
Delete certificate
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.assert_event_emitted(
'edx.certificate.configuration.deleted',
course_id=six.text_type(self.course.id),
configuration_id='1',
)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_delete_certificate_without_write_permissions(self, signatory_path):
"""
Tests certificate deletion without write permission on course.
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
user = UserFactory()
self.client.login(username=user.username, password='test')
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_delete_certificate_without_global_staff_permissions(self, signatory_path):
"""
Tests deletion of an active certificate without global staff permission on course.
"""
self._add_course_certificates(count=2, signatory_count=1, is_active=True, asset_path_format=signatory_path)
user = UserFactory()
for role in [CourseInstructorRole, CourseStaffRole]:
role(self.course.id).add_users(user)
self.client.login(username=user.username, password='test')
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_update_active_certificate_without_global_staff_permissions(self, signatory_path):
"""
Tests update of an active certificate without global staff permission on course.
"""
self._add_course_certificates(count=2, signatory_count=1, is_active=True, asset_path_format=signatory_path)
cert_data = {
u'id': 1,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'course_title': u'Course Title Override',
u'org_logo_path': '',
u'is_active': False,
u'signatories': []
}
user = UserFactory()
for role in [CourseInstructorRole, CourseStaffRole]:
role(self.course.id).add_users(user)
self.client.login(username=user.username, password='test')
response = self.client.put(
self._url(cid=1),
data=json.dumps(cert_data),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
def test_delete_non_existing_certificate(self):
"""
Try to delete a non existing certificate. It should return status code 404 Not found.
"""
self._add_course_certificates(count=2)
response = self.client.delete(
self._url(cid=100),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_can_delete_signatory(self, signatory_path):
"""
Delete an existing certificate signatory
"""
self._add_course_certificates(count=2, signatory_count=3, asset_path_format=signatory_path)
certificates = self.course.certificates['certificates']
signatory = certificates[1].get("signatories")[1]
image_asset_location = AssetKey.from_string(signatory['signature_image_path'])
content = contentstore().find(image_asset_location)
self.assertIsNotNone(content)
test_url = '{}/signatories/1'.format(self._url(cid=1))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates[1].get("signatories")), 2)
# make sure signatory signature image is deleted too
self.assertRaises(NotFoundError, contentstore().find, image_asset_location)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_deleting_signatory_without_signature(self, signatory_path):
"""
        Delete a signatory whose signature image has already been removed or does not exist
"""
self._add_course_certificates(count=2, signatory_count=4, asset_path_format=signatory_path)
test_url = '{}/signatories/3'.format(self._url(cid=1))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
def test_delete_signatory_non_existing_certificate(self):
"""
Try to delete a non existing certificate signatory. It should return status code 404 Not found.
"""
self._add_course_certificates(count=2)
test_url = '{}/signatories/1'.format(self._url(cid=100))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_certificate_activation_success(self, signatory_path):
"""
Activate and Deactivate the course certificate
"""
test_url = reverse_course_url('certificate_activation_handler', self.course.id)
self._add_course_certificates(count=1, signatory_count=2, asset_path_format=signatory_path)
is_active = True
for i in range(2):
if i == 1:
is_active = not is_active
response = self.client.post(
test_url,
data=json.dumps({"is_active": is_active}),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
course = self.store.get_course(self.course.id)
certificates = course.certificates['certificates']
self.assertEqual(certificates[0].get('is_active'), is_active)
cert_event_type = 'activated' if is_active else 'deactivated'
self.assert_event_emitted(
'.'.join(['edx.certificate.configuration', cert_event_type]),
course_id=six.text_type(self.course.id),
)
@ddt.data(*itertools.product([True, False], [C4X_SIGNATORY_PATH, SIGNATORY_PATH]))
@ddt.unpack
def test_certificate_activation_without_write_permissions(self, activate, signatory_path):
"""
        Tests that certificate activation and deactivation are not allowed if the user
        does not have write permissions on the course.
"""
test_url = reverse_course_url('certificate_activation_handler', self.course.id)
self._add_course_certificates(count=1, signatory_count=2, asset_path_format=signatory_path)
user = UserFactory()
self.client.login(username=user.username, password='test')
response = self.client.post(
test_url,
data=json.dumps({"is_active": activate}),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 403)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_certificate_activation_failure(self, signatory_path):
"""
        Certificate activation should fail with a permission denied error when the user
        has no read access to the course.
"""
test_url = reverse_course_url('certificate_activation_handler', self.course.id)
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
self._add_course_certificates(count=1, signatory_count=2, asset_path_format=signatory_path)
response = test_user_client.post(
test_url,
data=json.dumps({"is_active": True}),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
course = self.store.get_course(self.course.id)
certificates = course.certificates['certificates']
self.assertEqual(certificates[0].get('is_active'), False)
| stvstnfrd/edx-platform | cms/djangoapps/contentstore/views/tests/test_certificates.py | Python | agpl-3.0 | 31,931 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import openerp
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
if to_install_ids:
ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return None
class res_config_configurable(osv.osv_memory):
    ''' Base class for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
# reload the client; open the first available root menu
menu_obj = self.pool.get('ir.ui.menu')
menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids and menu_ids[0] or False},
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
        executed instead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
res_config_configurable()
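# Hedged illustration of the contract described in the docstring of
# res_config_configurable: a new-style configuration item only has to inherit
# res.config and implement execute() (cancel() is optional). The model name and
# the behaviour of execute() are invented for this sketch; a real item would
# also provide a view inheriting from res_config_view_base.
class example_configuration_item(osv.osv_memory):
    _name = 'example.configuration.item'
    _inherit = 'res.config'
    def execute(self, cr, uid, ids, context=None):
        # returning None lets the framework move on to the next todo item
        return None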
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['project_mrp'],
}
will install both ``sale_crm`` and ``project_mrp`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
    install. Returning nothing (or any "falsy" value such as None or an
    empty collection) simply adds no additional addons.
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
Again, setup your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
hooks depending on them *are guaranteed to execute*. Setup
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's browse_record
:returns: a list of all installed modules in this installer
:rtype: [browse_record]
"""
modules = self.pool.get('ir.module.module')
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns[module_name]) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
res_config_installer()
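# Hedged illustration of the installer pattern documented above: the model
# name, the boolean columns, the _install_if mapping and the _if_sale hook are
# all invented for this sketch. Checking 'sale' and 'crm' together would also
# select 'sale_crm' through _install_if, and the hook may return extra addon
# names at runtime.
class example_application_installer(osv.osv_memory):
    _name = 'example.application.installer'
    _inherit = 'res.config.installer'
    _columns = {
        'sale': fields.boolean('Sales Management'),
        'crm': fields.boolean('Customer Relationship Management'),
        'project': fields.boolean('Project Management'),
    }
    _install_if = {
        ('sale', 'crm'): ['sale_crm'],
    }
    def _if_sale(self, cr, uid, ids, context=None):
        # hook executed only when the 'sale' field is checked
        return ['delivery']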
DEPRECATION_MESSAGE = 'You are using an addon using old-style configuration '\
'wizards (ir.actions.configuration.wizard). Old-style configuration '\
'wizards have been deprecated.\n'\
'The addon should be migrated to res.config objects.'
class ir_actions_configuration_wizard(osv.osv_memory):
''' Compatibility configuration wizard
The old configuration wizard has been replaced by res.config, but in order
not to break existing but not-yet-migrated addons, the old wizard was
reintegrated and gutted.
'''
_name='ir.actions.configuration.wizard'
_inherit = 'res.config'
def _next_action_note(self, cr, uid, ids, context=None):
next = self._next_action(cr, uid)
if next:
# if the next one is also an old-style extension, you never know...
if next.note:
return next.note
return _("Click 'Continue' to configure the next addon...")
return _("Your database is now fully configured.\n\n"\
"Click 'Continue' and enjoy your OpenERP experience...")
_columns = {
'note': fields.text('Next Wizard', readonly=True),
}
_defaults = {
'note': _next_action_note,
}
def execute(self, cr, uid, ids, context=None):
_logger.warning(DEPRECATION_MESSAGE)
ir_actions_configuration_wizard()
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', browse_group, browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool.get('ir.model.data')
ir_module = self.pool.get('ir.module.module')
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_group = getattr(field, 'group', 'base.group_user')
groups.append((name, ref(field_group), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool.get('ir.values')
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
# defaults: take the corresponding default value they set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
# groups: which groups are implied by the group Employee
for name, group, implied_group in classified['group']:
res[name] = implied_group in group.implied_ids
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
ir_module = self.pool.get('ir.module.module')
classified = self._get_classified_fields(cr, uid, context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, uid, model, field, config[name])
# group fields: modify group / implied groups
for name, group, implied_group in classified['group']:
if config[name]:
group.write({'implied_ids': [(4, implied_group.id)]})
else:
group.write({'implied_ids': [(3, implied_group.id)]})
implied_group.write({'users': [(3, u.id) for u in group.users]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool.get('ir.actions.act_window')
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool.get('ir.actions.act_window')
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
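# Hedged illustration of the 'other field' hooks described in the class
# docstring above: fields that do not follow the default_/group_/module_
# naming conventions can be read through a get_default_* method and applied
# through a set_* method. The model name, the field and the
# ir.config_parameter key are invented for this sketch.
class example_other_settings(osv.osv_memory):
    _name = 'example.other.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'other_field': fields.char('Example value', size=64),
    }
    def get_default_other_field(self, cr, uid, fields, context=None):
        param = self.pool.get('ir.config_parameter')
        return {'other_field': param.get_param(cr, uid, 'example.other_field', '')}
    def set_other_field(self, cr, uid, ids, context=None):
        config = self.browse(cr, uid, ids[0], context=context)
        param = self.pool.get('ir.config_parameter')
        param.set_param(cr, uid, 'example.other_field', config.other_field or '')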
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| titasakgm/brc-stock | openerp/addons/base/res/res_config.py | Python | agpl-3.0 | 25,514 |
import os
import tempfile
import urllib
from imposm.parser import OSMParser
from .importer import GraphImporter
def parse_file(filename, parse_direction=False, **kwargs):
"""
Return an OSM networkx graph from the input OSM file
Only works with OSM xml, xml.bz2 and pbf files. This function cannot take
OSM QA tile files. Use parse_qa_tile() for QA tiles.
>>> graph = parse_file(filename)
"""
importer, parser = make_importer_parser(OSMParser, **kwargs)
parser.parse(filename)
return importer.get_graph(parse_direction=parse_direction)
def parse_data(data, type, **kwargs):
"""
Return an OSM networkx graph from the input OSM data
Parameters
----------
data : string
type : string ('xml' or 'pbf')
>>> graph = parse_data(data, 'xml')
"""
suffixes = {
'xml': '.osm',
'pbf': '.pbf',
}
try:
suffix = suffixes[type]
except KeyError:
raise ValueError('Unknown data type "%s"' % type)
fd, filename = tempfile.mkstemp(suffix=suffix)
try:
os.write(fd, data)
os.close(fd)
return parse_file(filename, **kwargs)
finally:
os.remove(filename)
def parse_qa_tile(x, y, zoom, data, parse_direction=False, **kwargs):
"""
Return an OSM networkx graph from the input OSM QA tile data
Parameters
----------
data : string
x : int
tile's x coordinate
y : int
tile's y coordinate
zoom : int
tile's zoom level
    >>> graph = parse_qa_tile(1239, 1514, 12, data)
"""
import osmqa
importer, parser = make_importer_parser(osmqa.QATileParser, **kwargs)
parser.parse_data(x, y, zoom, data)
return importer.get_graph(parse_direction=parse_direction)
def parse_bbox(bbox, **kwargs):
"""
Download OSM data from a bounding box and parse into a graph
Parameters
----------
bbox : (west, south, east, north) tuple of 4 floats
>>> graph = parse_bbox([-71.06643, 42.36051, -71.06253, 42.36358])
"""
    data = _download_osm_bbox(bbox)
return parse_data(data, 'xml', **kwargs)
def make_importer_parser(parser_class, **kwargs):
gi = GraphImporter()
if 'ways_tag_filter' not in kwargs:
kwargs['ways_tag_filter'] = default_ways_tag_filter
parser = parser_class(
coords_callback=gi.coords_callback,
nodes_callback=gi.nodes_callback,
ways_callback=gi.ways_callback,
**kwargs
)
return gi, parser
def default_ways_tag_filter(tags):
if 'highway' not in tags:
tags.clear()
def _download_osm_bbox(bbox):
bbox_arg = urllib.urlencode({'bbox': ','.join(str(x) for x in bbox)})
url = 'http://www.openstreetmap.org/api/0.6/map?' + bbox_arg
response = urllib.urlopen(url)
if response.code != 200:
error = response.headers.getheader('Error')
raise ValueError('Received %s from OSM with error: %s' % (response.code, error))
content = response.read()
response.close()
return content
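# Hedged usage sketch (not part of the library API): download a small bounding
# box and report the size of the resulting networkx graph. The bounding box is
# the one used in the parse_bbox docstring above.
if __name__ == '__main__':
    example_graph = parse_bbox((-71.06643, 42.36051, -71.06253, 42.36358))
    print('parsed %d nodes and %d edges' % (
        example_graph.number_of_nodes(), example_graph.number_of_edges()))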
| Mapkin/osmgraph | osmgraph/main.py | Python | mit | 3,041 |
# pylint: disable=too-few-public-methods
"""State which governs the SANS compatibility mode. This is not part of the reduction itself and should be removed
once the transition to the new reducer is satisfactory and complete. This feature allows users to have the
two reduction approaches produce the exact same results. If the results are different then that is a hint
that we are dealing with a bug
"""
from __future__ import (absolute_import, division, print_function)
import copy
from sans.state.state_base import (StateBase, rename_descriptor_names, BoolParameter, StringParameter)
from sans.state.automatic_setters import (automatic_setters)
from sans.common.enums import SANSFacility
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
@rename_descriptor_names
class StateCompatibility(StateBase):
use_compatibility_mode = BoolParameter()
time_rebin_string = StringParameter()
def __init__(self):
super(StateCompatibility, self).__init__()
self.use_compatibility_mode = False
self.time_rebin_string = ""
def validate(self):
pass
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
class StateCompatibilityBuilder(object):
@automatic_setters(StateCompatibility)
def __init__(self):
super(StateCompatibilityBuilder, self).__init__()
self.state = StateCompatibility()
def build(self):
self.state.validate()
return copy.copy(self.state)
def get_compatibility_builder(data_info):
facility = data_info.facility
if facility is SANSFacility.ISIS:
return StateCompatibilityBuilder()
else:
raise NotImplementedError("StateCompatibilityBuilder: Could not find any valid compatibility builder for the "
"specified StateData object {0}".format(str(data_info)))
| ScreamingUdder/mantid | scripts/SANS/sans/state/compatibility.py | Python | gpl-3.0 | 2,226 |
import autocomplete_light
from django.conf.urls import patterns, include, url
from django.contrib import admin
from SCIng import settings
autocomplete_light.autodiscover()
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'SCIng.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^',include('main.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^autocomplete/', include('autocomplete_light.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT,'show_indexes': True}),) | jesusgp22/scing | SCIng/urls.py | Python | gpl-2.0 | 710 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-30 19:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bot', '0013_alertausuario_frecuencia'),
]
operations = [
migrations.RemoveField(
model_name='alertausuario',
name='frecuencia',
),
]
| foxcarlos/decimemijobot | bot/respaldo/0014_remove_alertausuario_frecuencia.py | Python | gpl-3.0 | 406 |
# -*- coding: utf-8 -*-
"""
Authors: Gonzalo E. Espinoza-Dávalos
Contact: g.espinoza@un-ihe.org, gespinoza@utexas.edu
Repository: https://github.com/gespinoza/davgis
Module: davgis
Description:
This module is a python wrapper to simplify scripting and automation of common
GIS workflows used in water resources.
"""
from __future__ import division
import os
import math
import tempfile
import warnings
import ogr
import osr
import gdal
import pandas as pd
import netCDF4
from scipy.interpolate import griddata
np = pd.np
def Buffer(input_shp, output_shp, distance):
"""
Creates a buffer of the input shapefile by a given distance
"""
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_lyr_defn = inp_lyr.GetLayerDefn()
inp_srs = inp_lyr.GetSpatialRef()
# Output
out_name = os.path.splitext(os.path.basename(output_shp))[0]
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(output_shp):
out_driver.DeleteDataSource(output_shp)
out_source = out_driver.CreateDataSource(output_shp)
out_lyr = out_source.CreateLayer(out_name, inp_srs, ogr.wkbPolygon)
out_lyr_defn = out_lyr.GetLayerDefn()
# Add fields
for i in range(inp_lyr_defn.GetFieldCount()):
field_defn = inp_lyr_defn.GetFieldDefn(i)
out_lyr.CreateField(field_defn)
# Add features
for i in range(inp_lyr.GetFeatureCount()):
feature_inp = inp_lyr.GetNextFeature()
geometry = feature_inp.geometry()
feature_out = ogr.Feature(out_lyr_defn)
for j in range(0, out_lyr_defn.GetFieldCount()):
feature_out.SetField(out_lyr_defn.GetFieldDefn(j).GetNameRef(),
feature_inp.GetField(j))
feature_out.SetGeometry(geometry.Buffer(distance))
out_lyr.CreateFeature(feature_out)
feature_out = None
# Save and/or close the data sources
inp_source = None
out_source = None
# Return
return output_shp
def Feature_to_Raster(input_shp, output_tiff,
cellsize, field_name=False, NoData_value=-9999):
"""
Converts a shapefile into a raster
"""
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
# Extent
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
x_ncells = int((x_max - x_min) / cellsize)
y_ncells = int((y_max - y_min) / cellsize)
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal.GDT_Int16)
out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
out_source.SetProjection(inp_srs.ExportToWkt())
out_lyr = out_source.GetRasterBand(1)
out_lyr.SetNoDataValue(NoData_value)
# Rasterize
if field_name:
gdal.RasterizeLayer(out_source, [1], inp_lyr,
options=["ATTRIBUTE={0}".format(field_name)])
else:
gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
# Save and/or close the data sources
inp_source = None
out_source = None
# Return
return output_tiff
def List_Fields(input_lyr):
"""
Lists the field names of input layer
"""
# Input
if isinstance(input_lyr, str):
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_lyr, 0)
inp_lyr = inp_source.GetLayer()
inp_lyr_defn = inp_lyr.GetLayerDefn()
elif isinstance(input_lyr, ogr.Layer):
inp_lyr_defn = input_lyr.GetLayerDefn()
# List
names_ls = []
# Loop
for j in range(0, inp_lyr_defn.GetFieldCount()):
field_defn = inp_lyr_defn.GetFieldDefn(j)
names_ls.append(field_defn.GetName())
# Save and/or close the data sources
inp_source = None
# Return
return names_ls
def Raster_to_Array(input_tiff, ll_corner, x_ncells, y_ncells,
values_type='float32'):
"""
Loads a raster into a numpy array
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_data_type = inp_band.DataType
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
ll_x = ll_corner[0]
ll_y = ll_corner[1]
top_left_x = ll_x
top_left_y = ll_y - cellsize_y*y_ncells
# Change start point
temp_path = tempfile.mkdtemp()
temp_driver = gdal.GetDriverByName('GTiff')
temp_tiff = os.path.join(temp_path, os.path.basename(input_tiff))
temp_source = temp_driver.Create(temp_tiff, x_ncells, y_ncells,
1, inp_data_type)
temp_source.GetRasterBand(1).SetNoDataValue(NoData_value)
temp_source.SetGeoTransform((top_left_x, cellsize_x, rot_1,
top_left_y, rot_2, cellsize_y))
temp_source.SetProjection(inp_srs)
# Snap
gdal.ReprojectImage(inp_lyr, temp_source, inp_srs, inp_srs,
gdal.GRA_Bilinear)
temp_source = None
# Read array
d_type = pd.np.dtype(values_type)
out_lyr = gdal.Open(temp_tiff)
array = out_lyr.ReadAsArray(0, 0, out_lyr.RasterXSize,
out_lyr.RasterYSize).astype(d_type)
array[pd.np.isclose(array, NoData_value)] = pd.np.nan
out_lyr = None
return array
def Resample(input_tiff, output_tiff, cellsize, method=None,
NoData_value=-9999):
"""
Resamples a raster to a different spatial resolution
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
# NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
x_ncells = int(math.floor(x_tot_n * (cellsize_x/cellsize)))
y_ncells = int(math.floor(y_tot_n * (-cellsize_y/cellsize)))
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_source.GetRasterBand(1).SetNoDataValue(NoData_value)
out_source.SetGeoTransform((top_left_x, cellsize, rot_1,
top_left_y, rot_2, -cellsize))
out_source.SetProjection(inp_srs)
# Resampling
method_dict = {'NearestNeighbour': gdal.GRA_NearestNeighbour,
'Bilinear': gdal.GRA_Bilinear,
'Cubic': gdal.GRA_Cubic,
'CubicSpline': gdal.GRA_CubicSpline,
'Lanczos': gdal.GRA_Lanczos,
'Average': gdal.GRA_Average,
'Mode': gdal.GRA_Mode}
    if method in range(7):
method_sel = method
elif method in method_dict.keys():
method_sel = method_dict[method]
else:
warnings.warn('Using default interpolation method: Nearest Neighbour')
method_sel = 0
gdal.ReprojectImage(inp_lyr, out_source, inp_srs, inp_srs, method_sel)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
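# Illustrative usage sketch (paths and cell size are placeholders): resample a
# raster onto a 250-unit grid with bilinear interpolation.
def _example_resample_usage():
    return Resample("dem.tif", "dem_250m.tif", cellsize=250, method='Bilinear')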
def Array_to_Raster(input_array, output_tiff, ll_corner, cellsize,
srs_wkt):
"""
Saves an array into a raster file
"""
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
y_ncells, x_ncells = input_array.shape
gdal_datatype = gdaltype_from_dtype(input_array.dtype)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal_datatype)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(-9999)
out_top_left_x = ll_corner[0]
out_top_left_y = ll_corner[1] + cellsize*y_ncells
out_source.SetGeoTransform((out_top_left_x, cellsize, 0,
out_top_left_y, 0, -cellsize))
out_source.SetProjection(str(srs_wkt))
out_band.WriteArray(input_array)
# Save and/or close the data sources
out_source = None
# Return
return output_tiff
def Clip(input_tiff, output_tiff, bbox):
"""
Clips a raster given a bounding box
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
# Bounding box
xmin, ymin, xmax, ymax = bbox
# Get indices, number of cells, and top left corner
x1 = max([0, int(math.floor((xmin - top_left_x)/cellsize_x))])
x2 = min([x_tot_n, int(math.ceil((xmax - top_left_x)/cellsize_x))])
y1 = max([0, int(math.floor((ymax - top_left_y)/cellsize_y))])
y2 = min([y_tot_n, int(math.ceil((ymin - top_left_y)/cellsize_y))])
x_ncells = x2 - x1
y_ncells = y2 - y1
out_top_left_x = top_left_x + x1*cellsize_x
out_top_left_y = top_left_y + y1*cellsize_y
# Output
out_array = inp_array[y1:y2, x1:x2]
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform((out_top_left_x, cellsize_x, rot_1,
out_top_left_y, rot_2, cellsize_y))
out_source.SetProjection(inp_srs)
out_band.WriteArray(out_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Raster_to_Points(input_tiff, output_shp):
"""
Converts a raster to a point shapefile
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
top_left_x = transform[0]
cellsize_x = transform[1]
top_left_y = transform[3]
cellsize_y = transform[5]
NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
top_left_x_center = top_left_x + cellsize_x/2.0
top_left_y_center = top_left_y + cellsize_y/2.0
# Read array
array = inp_lyr.ReadAsArray(0, 0, x_tot_n, y_tot_n) # .astype(pd.np.float)
array[pd.np.isclose(array, NoData_value)] = pd.np.nan
# Output
out_srs = osr.SpatialReference()
out_srs.ImportFromWkt(inp_srs)
out_name = os.path.splitext(os.path.basename(output_shp))[0]
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(output_shp):
out_driver.DeleteDataSource(output_shp)
out_source = out_driver.CreateDataSource(output_shp)
out_lyr = out_source.CreateLayer(out_name, out_srs, ogr.wkbPoint)
ogr_field_type = ogrtype_from_dtype(array.dtype)
Add_Field(out_lyr, "RASTERVALU", ogr_field_type)
out_lyr_defn = out_lyr.GetLayerDefn()
# Add features
for xi in range(x_tot_n):
for yi in range(y_tot_n):
value = array[yi, xi]
if ~pd.np.isnan(value):
feature_out = ogr.Feature(out_lyr_defn)
feature_out.SetField2(0, value)
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(top_left_x_center + xi*cellsize_x,
top_left_y_center + yi*cellsize_y)
feature_out.SetGeometry(point)
out_lyr.CreateFeature(feature_out)
feature_out = None
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_shp
def Add_Field(input_lyr, field_name, ogr_field_type):
"""
Add a field to a layer using the following ogr field types:
0 = ogr.OFTInteger
1 = ogr.OFTIntegerList
2 = ogr.OFTReal
3 = ogr.OFTRealList
4 = ogr.OFTString
5 = ogr.OFTStringList
6 = ogr.OFTWideString
7 = ogr.OFTWideStringList
8 = ogr.OFTBinary
9 = ogr.OFTDate
10 = ogr.OFTTime
11 = ogr.OFTDateTime
"""
# List fields
fields_ls = List_Fields(input_lyr)
# Check if field exist
if field_name in fields_ls:
raise Exception('Field: "{0}" already exists'.format(field_name))
# Create field
inp_field = ogr.FieldDefn(field_name, ogr_field_type)
input_lyr.CreateField(inp_field)
return inp_field
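# Illustrative usage sketch: add a floating-point field (OGR type 2 = OFTReal)
# to an already opened layer; the layer argument is a placeholder.
def _example_add_field_usage(point_lyr):
    return Add_Field(point_lyr, "DEPTH", 2)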
def Spatial_Reference(epsg, return_string=True):
"""
Obtain a spatial reference from the EPSG parameter
"""
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
if return_string:
return srs.ExportToWkt()
else:
return srs
def List_Datasets(path, ext):
"""
List the data sets in a folder
"""
    datasets_ls = []
    for f in os.listdir(path):
        if os.path.splitext(f)[1][1:] == ext:
            datasets_ls.append(f)
    return datasets_ls
def NetCDF_to_Raster(input_nc, output_tiff, ras_variable,
x_variable='longitude', y_variable='latitude',
crs={'variable': 'crs', 'wkt': 'crs_wkt'}, time=None):
"""
Extract a layer from a netCDF file and save it as a raster file.
For temporal netcdf files, use the 'time' parameter as:
t = {'variable': 'time_variable', 'value': '30/06/2017'}
"""
# Input
inp_nc = netCDF4.Dataset(input_nc, 'r')
inp_values = inp_nc.variables[ras_variable]
x_index = inp_values.dimensions.index(x_variable)
y_index = inp_values.dimensions.index(y_variable)
if not time:
inp_array = inp_values[:]
else:
time_variable = time['variable']
time_value = time['value']
t_index = inp_values.dimensions.index(time_variable)
time_index = list(inp_nc.variables[time_variable][:]).index(time_value)
if t_index == 0:
inp_array = inp_values[time_index, :, :]
elif t_index == 1:
inp_array = inp_values[:, time_index, :]
elif t_index == 2:
inp_array = inp_values[:, :, time_index]
else:
raise Exception("The array has more dimensions than expected")
# Transpose array if necessary
if y_index > x_index:
inp_array = pd.np.transpose(inp_array)
# Additional parameters
gdal_datatype = gdaltype_from_dtype(inp_array.dtype)
NoData_value = inp_nc.variables[ras_variable]._FillValue
if type(crs) == str:
srs_wkt = crs
else:
crs_variable = crs['variable']
crs_wkt = crs['wkt']
        srs_wkt = str(getattr(inp_nc.variables[crs_variable], crs_wkt))
inp_x = inp_nc.variables[x_variable]
inp_y = inp_nc.variables[y_variable]
cellsize_x = abs(pd.np.mean([inp_x[i] - inp_x[i-1]
for i in range(1, len(inp_x))]))
cellsize_y = -abs(pd.np.mean([inp_y[i] - inp_y[i-1]
for i in range(1, len(inp_y))]))
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
y_ncells, x_ncells = inp_array.shape
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal_datatype)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(pd.np.asscalar(NoData_value))
out_top_left_x = inp_x[0] - cellsize_x/2.0
if inp_y[-1] > inp_y[0]:
out_top_left_y = inp_y[-1] - cellsize_y/2.0
inp_array = pd.np.flipud(inp_array)
else:
out_top_left_y = inp_y[0] - cellsize_y/2.0
out_source.SetGeoTransform((out_top_left_x, cellsize_x, 0,
out_top_left_y, 0, cellsize_y))
out_source.SetProjection(srs_wkt)
out_band.WriteArray(inp_array)
out_band.ComputeStatistics(True)
# Save and/or close the data sources
inp_nc.close()
out_source = None
# Return
return output_tiff
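# Illustrative usage sketch (file, variable names and date are placeholders):
# extract a single time slice of a temporal netCDF variable into a GeoTIFF.
def _example_netcdf_to_raster_usage():
    t = {'variable': 'time', 'value': '30/06/2017'}
    return NetCDF_to_Raster('precipitation.nc', 'precip_20170630.tif',
                            ras_variable='P', time=t)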
def Apply_Filter(input_tiff, output_tiff, number_of_passes):
"""
Smooth a raster by replacing cell value by the average value of the
surrounding cells
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
x_ncells = inp_lyr.RasterXSize
y_ncells = inp_lyr.RasterYSize
# Filter
inp_array[inp_array == NoData_value] = pd.np.nan
out_array = array_filter(inp_array, number_of_passes)
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform((top_left_x, cellsize_x, rot_1,
top_left_y, rot_2, cellsize_y))
out_source.SetProjection(inp_srs)
out_band.WriteArray(out_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Extract_Band(input_tiff, output_tiff, band_number=1):
"""
Extract and save a raster band into a new raster
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(band_number)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
NoData_value = inp_band.GetNoDataValue()
x_ncells = inp_lyr.RasterXSize
y_ncells = inp_lyr.RasterYSize
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform(inp_transform)
out_source.SetProjection(inp_srs)
out_band.WriteArray(inp_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Get_Extent(input_lyr):
"""
Obtain the input layer extent (xmin, ymin, xmax, ymax)
"""
# Input
filename, ext = os.path.splitext(input_lyr)
if ext.lower() == '.shp':
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_lyr)
inp_lyr = inp_source.GetLayer()
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
inp_lyr = None
inp_source = None
elif ext.lower() == '.tif':
inp_lyr = gdal.Open(input_lyr)
inp_transform = inp_lyr.GetGeoTransform()
x_min = inp_transform[0]
x_max = x_min + inp_transform[1] * inp_lyr.RasterXSize
y_max = inp_transform[3]
y_min = y_max + inp_transform[5] * inp_lyr.RasterYSize
inp_lyr = None
else:
raise Exception('The input data type is not recognized')
return (x_min, y_min, x_max, y_max)
def Interpolation_Default(input_shp, field_name, output_tiff,
method='nearest', cellsize=None):
'''
Interpolate point data into a raster
Available methods: 'nearest', 'linear', 'cubic'
'''
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
inp_wkt = inp_srs.ExportToWkt()
# Extent
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
ll_corner = [x_min, y_min]
if not cellsize:
cellsize = min(x_max - x_min, y_max - y_min)/25.0
x_ncells = int((x_max - x_min) / cellsize)
y_ncells = int((y_max - y_min) / cellsize)
# Feature points
x = []
y = []
z = []
for i in range(inp_lyr.GetFeatureCount()):
feature_inp = inp_lyr.GetNextFeature()
point_inp = feature_inp.geometry().GetPoint()
x.append(point_inp[0])
y.append(point_inp[1])
z.append(feature_inp.GetField(field_name))
x = pd.np.array(x)
y = pd.np.array(y)
z = pd.np.array(z)
# Grid
X, Y = pd.np.meshgrid(pd.np.linspace(x_min + cellsize/2.0,
x_max - cellsize/2.0,
x_ncells),
pd.np.linspace(y_min + cellsize/2.0,
y_max - cellsize/2.0,
y_ncells))
# Interpolate
out_array = griddata((x, y), z, (X, Y), method=method)
out_array = pd.np.flipud(out_array)
# Save raster
Array_to_Raster(out_array, output_tiff, ll_corner, cellsize, inp_wkt)
# Return
return output_tiff
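# Illustrative usage sketch (paths, field name and cell size are placeholders):
# interpolate the "RAIN" attribute of a point shapefile onto a raster grid.
def _example_interpolation_usage():
    return Interpolation_Default("gauges.shp", "RAIN", "rain_linear.tif",
                                 method='linear', cellsize=1000)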
def Kriging_Interpolation_Points(input_shp, field_name, output_tiff, cellsize,
bbox=None):
"""
Interpolate point data using Ordinary Kriging
Reference: https://cran.r-project.org/web/packages/automap/automap.pdf
"""
# Spatial reference
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
srs_wkt = inp_srs.ExportToWkt()
inp_source = None
# Temp folder
temp_dir = tempfile.mkdtemp()
temp_points_tiff = os.path.join(temp_dir, 'points_ras.tif')
# Points to raster
Feature_to_Raster(input_shp, temp_points_tiff,
cellsize, field_name, -9999)
# Raster extent
if bbox:
xmin, ymin, xmax, ymax = bbox
ll_corner = [xmin, ymin]
x_ncells = int(math.ceil((xmax - xmin)/cellsize))
y_ncells = int(math.ceil((ymax - ymin)/cellsize))
    else:
        # GDAL datasets expose a geotransform rather than an OGR-style
        # GetExtent(); derive the bounding box from it so that xmin/ymin/
        # xmax/ymax are defined for the kriging grid below.
        temp_lyr = gdal.Open(temp_points_tiff)
        temp_transform = temp_lyr.GetGeoTransform()
        x_ncells = temp_lyr.RasterXSize
        y_ncells = temp_lyr.RasterYSize
        xmin = temp_transform[0]
        ymax = temp_transform[3]
        xmax = xmin + temp_transform[1] * x_ncells
        ymin = ymax + temp_transform[5] * y_ncells
        ll_corner = [xmin, ymin]
        temp_lyr = None
# Raster to array
points_array = Raster_to_Array(temp_points_tiff, ll_corner,
x_ncells, y_ncells, values_type='float32')
# Run kriging
x_vector = np.arange(xmin + cellsize/2, xmax + cellsize/2, cellsize)
y_vector = np.arange(ymin + cellsize/2, ymax + cellsize/2, cellsize)
out_array = Kriging_Interpolation_Array(points_array, x_vector, y_vector)
# Save array as raster
Array_to_Raster(out_array, output_tiff, ll_corner, cellsize, srs_wkt)
# Return
return output_tiff
def Kriging_Interpolation_Array(input_array, x_vector, y_vector):
"""
Interpolate data in an array using Ordinary Kriging
Reference: https://cran.r-project.org/web/packages/automap/automap.pdf
"""
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
# Total values in array
n_values = np.isfinite(input_array).sum()
# Load function
pandas2ri.activate()
robjects.r('''
library(gstat)
library(sp)
library(automap)
kriging_interpolation <- function(x_vec, y_vec, values_arr,
n_values){
# Parameters
shape <- dim(values_arr)
counter <- 1
df <- data.frame(X=numeric(n_values),
Y=numeric(n_values),
INFZ=numeric(n_values))
# Save values into a data frame
for (i in seq(shape[2])) {
for (j in seq(shape[1])) {
if (is.finite(values_arr[j, i])) {
df[counter,] <- c(x_vec[i], y_vec[j], values_arr[j, i])
counter <- counter + 1
}
}
}
# Grid
coordinates(df) = ~X+Y
int_grid <- expand.grid(x_vec, y_vec)
names(int_grid) <- c("X", "Y")
coordinates(int_grid) = ~X+Y
gridded(int_grid) = TRUE
# Kriging
krig_output <- autoKrige(INFZ~1, df, int_grid)
# Array
values_out <- matrix(krig_output$krige_output$var1.pred,
nrow=length(y_vec),
ncol=length(x_vec),
byrow = TRUE)
return(values_out)
}
''')
kriging_interpolation = robjects.r['kriging_interpolation']
# Execute kriging function and get array
r_array = kriging_interpolation(x_vector, y_vector, input_array, n_values)
array_out = np.array(r_array)
# Return
return array_out
def get_neighbors(x, y, nx, ny, cells=1):
"""
Get a list of neighboring cells
"""
neighbors_ls = [(xi, yi)
for xi in range(x - 1 - cells + 1, x + 2 + cells - 1)
for yi in range(y - 1 - cells + 1, y + 2 + cells - 1)
if (-1 < x <= nx - 1 and -1 < y <= ny - 1 and
(x != xi or y != yi) and
(0 <= xi <= nx - 1) and (0 <= yi <= ny - 1))]
return neighbors_ls
def get_mean_neighbors(array, index, include_cell=False):
"""
Get the mean value of neighboring cells
"""
xi, yi = index
nx, ny = array.shape
stay = True
cells = 1
while stay:
neighbors_ls = get_neighbors(xi, yi, nx, ny, cells)
if include_cell:
neighbors_ls = neighbors_ls + [(xi, yi)]
values_ls = [array[i] for i in neighbors_ls]
if pd.np.isnan(values_ls).all():
cells += 1
else:
value = pd.np.nanmean(values_ls)
stay = False
return value
def array_filter(array, number_of_passes=1):
"""
Smooth cell values by replacing each cell value by the average value of the
surrounding cells
"""
while number_of_passes >= 1:
ny, nx = array.shape
arrayf = pd.np.empty(array.shape)
arrayf[:] = pd.np.nan
for j in range(ny):
for i in range(nx):
arrayf[j, i] = get_mean_neighbors(array, (j, i), True)
array[:] = arrayf[:]
number_of_passes -= 1
return arrayf
def ogrtype_from_dtype(d_type):
"""
Return the ogr data type from the numpy dtype
"""
# ogr field type
if 'float' in d_type.name:
ogr_data_type = 2
elif 'int' in d_type.name:
ogr_data_type = 0
elif 'string' in d_type.name:
ogr_data_type = 4
elif 'bool' in d_type.name:
ogr_data_type = 8
else:
raise Exception('"{0}" is not recognized'.format(d_type))
return ogr_data_type
def gdaltype_from_dtype(d_type):
"""
Return the gdal data type from the numpy dtype
"""
# gdal field type
if 'int8' == d_type.name:
gdal_data_type = 1
elif 'uint16' == d_type.name:
gdal_data_type = 2
elif 'int16' == d_type.name:
gdal_data_type = 3
elif 'uint32' == d_type.name:
gdal_data_type = 4
elif 'int32' == d_type.name:
gdal_data_type = 5
elif 'float32' == d_type.name:
gdal_data_type = 6
elif 'float64' == d_type.name:
gdal_data_type = 7
elif 'bool' in d_type.name:
gdal_data_type = 1
elif 'int' in d_type.name:
gdal_data_type = 5
elif 'float' in d_type.name:
gdal_data_type = 7
elif 'complex' == d_type.name:
gdal_data_type = 11
else:
warnings.warn('"{0}" is not recognized. '
'"Unknown" data type used'.format(d_type))
gdal_data_type = 0
return gdal_data_type
| wateraccounting/SEBAL | hants_old/wa_gdal/davgis/functions.py | Python | apache-2.0 | 29,458 |
from django.test import TestCase
from django.test.utils import override_settings
from rest_framework import status
from rest_framework.test import APIClient
from axes.signals import user_locked_out
import json
import time
from family_tree.models.family import Family
from family_tree.models.person import Person
from custom_user.models import User
@override_settings(SECURE_SSL_REDIRECT=False, AXES_BEHIND_REVERSE_PROXY=False)
class JWTAuthTest(TestCase):
'''
Tests JWT auth
'''
def setUp(self):
self.family = Family()
self.family.save()
self.user = User.objects.create_user(email='gracehopper@example.com',
password='compiler',
name='Grace Hopper',
family_id = self.family.id)
self.person = Person(name='Grace Hopper',
gender='F',
email='gracehopper@example.com',
family_id=self.family.id,
language='en',
user_id=self.user.id)
self.person.save()
def test_jwt_auth_and_refresh_token_created_on_correct_auth_details(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
auth_details = {
'email': 'gracehopper@example.com',
'password': 'compiler'
}
response = client.post('/api/auth/obtain_token/', auth_details, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
access_token = json.loads(response.content)["access"]
refresh_token = json.loads(response.content)["refresh"]
auth_token = {
'refresh': refresh_token
}
# Sleep to ensure new token is different
time.sleep(1)
refresh_response = client.post('/api/auth/refresh_token/', auth_token, format='json')
refresh_token = json.loads(refresh_response.content)["access"]
self.assertEqual(refresh_response.status_code, status.HTTP_200_OK)
self.assertNotEqual(refresh_token, access_token)
# Check verify token
        new_auth_token = {
'token': refresh_token
}
verify_new_token_response = client.post('/api/auth/verify_token/', new_auth_token, format='json')
self.assertEqual(verify_new_token_response.status_code, status.HTTP_200_OK)
# Check ip not locked
locked_response = client.get('/api/auth/is_locked/', format='json')
self.assertEqual(b'false', locked_response.content)
self.assertEqual(locked_response.status_code, status.HTTP_200_OK)
def test_jwt_fails_on_auth_incorrect_password(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
payload = {
'email': 'gracehopper@example.com',
'password': 'COBOL'
}
response = client.post('/api/auth/obtain_token/', payload, format='json')
self.assertNotEqual(response.status_code, status.HTTP_200_OK)
def test_verify_fails_on_invalid_token(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
        invalid_auth_token = {
'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImp0aSI6IjM1ODU0ODc3LWQyZjQtNDIxZS04ZDI5LWY3YTgxNTk3NzdhYyIsImlhdCI6MTU1NDM4NzU4NCwiZXhwIjoxNTU0MzkxMTg0fQ.yIr0TMbalatx7alU1TMGIxxaelqquMJfz3m4H7AA9v4'
}
verify_old_token_response = client.post('/api/auth/verify_token/', invalid_auth_token, format='json')
self.assertNotEqual(verify_old_token_response.status_code, status.HTTP_200_OK)
def test_account_locks_out_on_multiple_invalid_login_attempts(self):
user = User.objects.create_user(email='adelegoldberg@example.com',
password='smalltalk',
name='Adele Goldberg',
family_id = self.family.id)
person = Person(name='Adele Goldberg',
gender='F',
email='adelegoldberg@example.com',
family_id=self.family.id,
language='en',
user_id=user.id)
person.save()
# 127.0.0.1 is whitelisted
client = APIClient(HTTP_X_REAL_IP='127.0.0.2')
wrong_auth_details = {
'email': 'adelegoldberg@example.com',
'password': 'compiler'
}
for x in range(0, 6):
response = client.post('/api/auth/obtain_token/', wrong_auth_details, format='json')
correct_auth_details = {
'email': 'adelegoldberg@example.com',
'password': 'smalltalk'
}
final_response = client.post('/api/auth/obtain_token/', correct_auth_details, format='json')
self.assertNotEqual(final_response.status_code, status.HTTP_200_OK)
# Check ip locked
locked_response = client.get('/api/auth/is_locked/', format='json')
self.assertNotEqual(b'false', locked_response.content)
def test_api_docs_loads(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
client.force_authenticate(user=self.user)
response = client.get('/api/docs/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_schema_loads(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
client.force_authenticate(user=self.user)
response = client.get('/api/schema/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
| JustinWingChungHui/MyFamilyRoot | auth_api/tests.py | Python | gpl-2.0 | 5,613 |
"""Argument parsing."""
import argparse
from encarne.stats import show_stats, clean_movies
# Specifying commands
parser = argparse.ArgumentParser(
description='Encarne reencoder')
parser.add_argument(
'-d', '--directory', type=str,
help='Directory that should be explored for video container to be encoded.')
parser.add_argument(
'-s', '--size', type=str,
    help='Specify minimum encoding file size (11GB, 100MB, ...).')
# Encoding stuff
parser.add_argument(
'-c', '--crf', type=int, choices=range(0, 51),
help='Constant rate factor for ffmpeg.')
preset_values = ['ultrafast', 'superfast', 'veryfast',
'faster', 'fast', 'medium', 'slow', 'slower',
'veryslow', 'placebo']
parser.add_argument(
'-p', '--preset', type=str, choices=preset_values,
help='Compression preset for ffmpeg.')
audio_values = ['aac', 'flac', 'None']
parser.add_argument(
'-a', '--audio', type=str, choices=audio_values,
help='Audio encoding for ffmpeg.')
parser.add_argument(
'-ba', '--kbitrate-audio', type=str,
help='Audio encoding bitrate (e.g. 128k or not specified for flac).')
parser.add_argument(
'-t', '--threads', type=int,
help='The threads used for encoding.')
# Initialize subparsers
subparsers = parser.add_subparsers(
    title='Subcommands', description='Various client subcommands')
# Status
stat_subcommand = subparsers.add_parser(
'stat', help='Show some statistics.',
)
stat_subcommand.set_defaults(func=show_stats)
# clean
clean_subcommand = subparsers.add_parser(
'clean', help='Check if any movies have been removed.',
)
clean_subcommand.set_defaults(func=clean_movies)
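# Illustrative invocations, assuming the parser above is exposed through an
# "encarne" console script (paths and values are placeholders):
#   encarne -d /path/to/videos -s 700MB -c 18 -p slow -a aac -ba 128k -t 4
#   encarne stat
#   encarne clean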
| Nukesor/encarne | encarne/argument_parser.py | Python | mit | 1,664 |
# Copyright 2011-12 Michael Thomas
#
# See www.whatang.org for more information.
#
# This file is part of DrumBurp.
#
# DrumBurp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DrumBurp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DrumBurp. If not, see <http://www.gnu.org/licenses/>
'''
Created on 5 Jan 2011
@author: Mike Thomas
'''
from PyQt4 import QtGui, QtCore
from Data.NotePosition import NotePosition
from GUI.DBFSMEvents import MeasureLineContext
class QMeasureLine(QtGui.QGraphicsItem):
def __init__(self, qScore, lastMeasure, nextMeasure, index,
staffIndex, parent=None):
super(QMeasureLine, self).__init__(parent)
self._qStaff = parent
self._qScore = qScore
self._props = qScore.displayProperties
self._height = None
self._rect = QtCore.QRectF(0, 0, 0, 0)
self._lastMeasure = lastMeasure
self._nextMeasure = nextMeasure
self._painter = None
self._setPainter()
self._index = index
self._staffIndex = staffIndex
self.setDimensions()
self.setAcceptHoverEvents(True)
def boundingRect(self):
return self._rect
def _setPainter(self):
self._painter = PAINTER_FACTORY(self._lastMeasure, self._nextMeasure)
def paint(self, painter, dummyOption, dummyWidget=None):
scheme = self._qScore.parent().colourScheme
try:
painter.save()
painter.setPen(QtCore.Qt.SolidLine)
painter.setPen(QtGui.QPen(scheme.text.borderColour))
painter.setBrush(scheme.text.borderColour)
self._painter(self, painter, scheme.text.borderColour,
self._qScore.scale)
finally:
painter.restore()
def _setHeight(self):
if self._props.emptyLinesVisible:
self._height = self._qScore.ySpacing * self._qScore.kitSize
else:
score = self._qScore.score
self._height = (self._qScore.ySpacing *
score.numVisibleLines(self._staffIndex))
def setDimensions(self):
self.prepareGeometryChange()
self._setHeight()
self._rect.setBottomRight(QtCore.QPointF(self._qScore.xSpacing,
self._height))
def xSpacingChanged(self):
self.prepareGeometryChange()
self._rect.setRight(self._qScore.xSpacing)
def ySpacingChanged(self):
self._setHeight()
self.prepareGeometryChange()
self._rect.setBottom(self._height)
def height(self):
return self._height
def width(self):
return self._qScore.xSpacing
def _getEndNotePosition(self):
if self._index == 0:
return None
np = NotePosition(measureIndex=self._index - 1)
return self._qStaff.augmentNotePosition(np)
def _getStartNotePosition(self):
np = NotePosition(measureIndex=self._index)
return self._qStaff.augmentNotePosition(np)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
event.accept()
fsmEvent = MeasureLineContext(self._lastMeasure, self._nextMeasure,
self._getEndNotePosition(),
self._getStartNotePosition(),
event.screenPos())
self._qScore.sendFsmEvent(fsmEvent)
else:
event.ignore()
def hoverEnterEvent(self, event):
self._qScore.setStatusMessage("Right-click for barline options.")
event.accept()
def hoverLeaveEvent(self, event):
self._qScore.setStatusMessage()
event.accept()
class BarLinePainter(object):
THICK_LINE_WIDTH = 3
THICK_LINE_OFFSET = 1
EXTRA_LINE_OFFSET = 3
DOT_OFFSET = 5
DOT_RADIUS = 2
def __call__(self, qMeasureLine, painter, colour,
dummyScale):
raise NotImplementedError()
@classmethod
def _drawThickLine(cls, painter, xCenter, height, colour, scale):
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(colour)
painter.drawRect(xCenter - cls.THICK_LINE_OFFSET * scale, 0,
cls.THICK_LINE_WIDTH * scale, height + 1)
@classmethod
def _drawDot(cls, painter, x, y, colour, scale):
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(colour)
painter.drawEllipse(QtCore.QPointF(x, y),
cls.DOT_RADIUS * scale, cls.DOT_RADIUS * scale)
@classmethod
def _drawExtraLineBefore(cls, painter, xCenter, height, colour, scale):
painter.setPen(QtCore.Qt.SolidLine)
painter.setPen(QtGui.QPen(colour))
painter.drawLine(xCenter - cls.EXTRA_LINE_OFFSET * scale, 0,
xCenter - cls.EXTRA_LINE_OFFSET * scale, height)
@classmethod
def _drawExtraLineAfter(cls, painter, xCenter, height, colour, scale):
painter.setPen(QtCore.Qt.SolidLine)
painter.setPen(QtGui.QPen(colour))
painter.drawLine(xCenter + cls.EXTRA_LINE_OFFSET * scale, 0,
xCenter + cls.EXTRA_LINE_OFFSET * scale, height)
@classmethod
def _drawRepeatBefore(cls, painter, xCenter, height, colour, scale):
cls._drawExtraLineBefore(painter, xCenter, height, colour, scale)
y = height / 3
cls._drawDot(painter,
xCenter - cls.DOT_OFFSET * scale, y, colour, scale)
cls._drawDot(painter,
xCenter - cls.DOT_OFFSET * scale, 2 * y, colour, scale)
@classmethod
def _drawRepeatAfter(cls, painter, xCenter, height, colour, scale):
cls._drawExtraLineAfter(painter, xCenter, height, colour, scale)
y = height / 3
cls._drawDot(painter,
xCenter + cls.DOT_OFFSET * scale, y, colour, scale)
cls._drawDot(painter,
xCenter + cls.DOT_OFFSET * scale, 2 * y, colour, scale)
class NormalBarLinePainter(BarLinePainter):
def __call__(self, qMeasureLine, painter,
colour, dummyScale):
painter.setPen(QtCore.Qt.SolidLine)
painter.setPen(QtGui.QPen(colour))
x = qMeasureLine.width() / 2
painter.drawLine(x, 0, x, qMeasureLine.height())
class RepeatStartLinePainter(BarLinePainter):
def __call__(self, qMeasureLine, painter,
colour, scale):
x = qMeasureLine.width() / 2
self._drawThickLine(painter, x, qMeasureLine.height(), colour, scale)
self._drawRepeatAfter(painter, x, qMeasureLine.height(), colour, scale)
class RepeatEndLinePainter(BarLinePainter):
def __call__(self, qMeasureLine, painter,
colour, scale):
x = qMeasureLine.width() / 2
self._drawThickLine(painter, x, qMeasureLine.height(), colour, scale)
self._drawRepeatBefore(painter, x,
qMeasureLine.height(), colour, scale)
class RepeatStartEndLinePainter(BarLinePainter):
def __call__(self, qMeasureLine, painter,
colour, scale):
x = qMeasureLine.width() / 2
self._drawThickLine(painter, x, qMeasureLine.height(), colour, scale)
self._drawRepeatBefore(painter, x,
qMeasureLine.height(), colour, scale)
self._drawRepeatAfter(painter, x,
qMeasureLine.height(), colour, scale)
class SectionEndLinePainter(BarLinePainter):
def __call__(self, qMeasureLine, painter,
colour, scale):
x = qMeasureLine.width() / 2
self._drawThickLine(painter, x, qMeasureLine.height(), colour, scale)
self._drawExtraLineBefore(painter, x,
qMeasureLine.height(), colour, scale)
class BarLinePainterFactory(object):
def __init__(self):
self._painterCache = {}
self._normalLinePainter = NormalBarLinePainter()
self._painterCache[(True, False, False)] = RepeatStartLinePainter()
self._painterCache[(False, True, False)] = RepeatEndLinePainter()
self._painterCache[(True, True, False)] = RepeatStartEndLinePainter()
self._painterCache[(False, False, True)] = SectionEndLinePainter()
self._painterCache[(False, True, True)] = RepeatEndLinePainter()
@staticmethod
def _pairKey(lastMeasure, nextMeasure):
key = (nextMeasure is not None and nextMeasure.isRepeatStart(),
lastMeasure is not None and lastMeasure.isRepeatEnd(),
lastMeasure is not None and lastMeasure.isSectionEnd())
return key
def __call__(self, lastMeasure, nextMeasure):
pairKey = self._pairKey(lastMeasure, nextMeasure)
return self._painterCache.get(pairKey, self._normalLinePainter)
PAINTER_FACTORY = BarLinePainterFactory()
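# Note: QMeasureLine._setPainter() calls PAINTER_FACTORY(lastMeasure,
# nextMeasure); the factory keys on (next measure starts a repeat, last measure
# ends a repeat, last measure ends a section) and falls back to the plain
# NormalBarLinePainter for ordinary barlines.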
| Whatang/DrumBurp | src/GUI/QMeasureLine.py | Python | gpl-3.0 | 9,326 |
from django.contrib import admin
from .models import (
CatalogoTipoAlumno,
TablaAlumno,
CatalogoEstadoSolicitud,
CatalogoTipoDocumento,
TablaSolicitud,
TablaCambioSolicitud
)
@admin.register(CatalogoTipoAlumno)
class AdminCatalogoTipoAlumno(admin.ModelAdmin):
list_display = ('id','tipo',)
@admin.register(TablaAlumno)
class AdminTablaAlumno(admin.ModelAdmin):
list_display = ('id','nombre', 'apellidoP', 'apellidoM', 'boleta', 'tipo','codigo','correo','nacimiento')
list_filter = ('boleta',)
@admin.register(CatalogoEstadoSolicitud)
class AdminCatalogoEstadoSolicitud(admin.ModelAdmin):
list_display = ('estado',)
@admin.register(CatalogoTipoDocumento)
class AdminCatalogoTipoDocumento(admin.ModelAdmin):
list_display = ('tipo',)
@admin.register(TablaSolicitud)
class AdminTablaSolicitud(admin.ModelAdmin):
list_display = ('id','fecha','estado', 'documento', 'fecha', 'folio','trabajador','alumno')
list_filter = ('folio',)
@admin.register(TablaCambioSolicitud)
class AdminTablaCambioSolicitud(admin.ModelAdmin):
list_display = ('id','solicitud','trabajador', 'fecha')
| CallmeTorre/Idalia | ESCOM/Ventanilla/admin.py | Python | apache-2.0 | 1,129 |
#!/usr/bin/env python
import os, time, requests, argparse, datetime
def main():
parser = argparse.ArgumentParser(description='Download all PrairieLearn course data as JSON via the API')
parser.add_argument('-t', '--token', required=True, help='the API token from PrairieLearn')
parser.add_argument('-i', '--course-instance-id', required=True, help='the course instance ID to download')
parser.add_argument('-o', '--output-dir', required=True, help='the output directory to store JSON into (will be created if necessary)')
parser.add_argument('-s', '--server', help='the server API address', default='https://www.prairielearn.org/pl/api/v1')
args = parser.parse_args()
print(f'ensure that {args.output_dir} directory exists...')
os.makedirs(args.output_dir, exist_ok=True)
print(f'successfully ensured directory existence')
logfilename = os.path.join(args.output_dir, 'download_log.txt')
print(f'opening log file {logfilename} ...')
with open(logfilename, 'wt') as logfile:
print(f'successfully opened log file')
download_course_instance(args, logfile)
def download_course_instance(args, logfile):
log(logfile, f'starting download at {local_iso_time()} ...')
start_time = time.time()
course_instance_path = f'/course_instances/{args.course_instance_id}'
course_instance_info = get_and_save_json(course_instance_path, 'course_instance_info', args, logfile)
gradebook = get_and_save_json(f'{course_instance_path}/gradebook', 'gradebook', args, logfile)
course_instance_access_rules = get_and_save_json(f'{course_instance_path}/course_instance_access_rules', 'course_instance_access_rules', args, logfile)
assessments = get_and_save_json(f'{course_instance_path}/assessments', 'assessments', args, logfile)
for assessment in assessments:
assessment_instances = get_and_save_json(
f'{course_instance_path}/assessments/{assessment["assessment_id"]}/assessment_instances',
f'assessment_{assessment["assessment_id"]}_instances',
args, logfile)
assessment_access_rules = get_and_save_json(
f'{course_instance_path}/assessments/{assessment["assessment_id"]}/assessment_access_rules',
f'assessment_{assessment["assessment_id"]}_access_rules',
args, logfile)
for assessment_instance in assessment_instances:
instance_questions = get_and_save_json(
f'{course_instance_path}/assessment_instances/{assessment_instance["assessment_instance_id"]}/instance_questions',
f'assessment_instance_{assessment_instance["assessment_instance_id"]}_instance_questions',
args, logfile)
submissions = get_and_save_json(
f'{course_instance_path}/assessment_instances/{assessment_instance["assessment_instance_id"]}/submissions',
f'assessment_instance_{assessment_instance["assessment_instance_id"]}_submissions',
args, logfile)
submission_log = get_and_save_json(
f'{course_instance_path}/assessment_instances/{assessment_instance["assessment_instance_id"]}/log',
f'assessment_instance_{assessment_instance["assessment_instance_id"]}_log',
args, logfile)
end_time = time.time()
log(logfile, f'successfully completed downloaded at {local_iso_time()}')
log(logfile, f'total time elapsed: {end_time - start_time} seconds')
def get_and_save_json(endpoint, filename, args, logfile):
url = args.server + endpoint
headers = {'Private-Token': args.token}
log(logfile, f'downloading {url} ...')
start_time = time.time()
retry_502_max = 30
retry_502_i = 0
while True:
r = requests.get(url, headers=headers)
if r.status_code == 200:
break
elif r.status_code == 502:
retry_502_i += 1
if retry_502_i >= retry_502_max:
raise Exception(f'Maximum number of retries reached on 502 Bad Gateway Error for {url}')
else:
log(logfile, f'Bad Gateway Error encountered for {url}, retrying in 10 seconds')
time.sleep(10)
continue
else:
raise Exception(f'Invalid status returned for {url}: {r.status_code}')
end_time = time.time()
log(logfile, f'successfully downloaded {r.headers["content-length"]} bytes in {end_time - start_time} seconds')
full_filename = os.path.join(args.output_dir, filename + '.json')
log(logfile, f'saving data to {full_filename} ...')
with open(full_filename, 'wt') as out_f:
out_f.write(r.text)
log(logfile, f'successfully wrote data')
log(logfile, f'parsing data as JSON...')
data = r.json()
log(logfile, f'successfully parsed JSON')
return data
def log(logfile, message):
logfile.write(message + '\n')
logfile.flush()
print(message)
def local_iso_time():
utc_dt = datetime.datetime.now(datetime.timezone.utc)
dt = utc_dt.astimezone()
return dt.isoformat()
if __name__ == '__main__':
main()
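# Illustrative invocation (the token and course instance ID are placeholders):
#   python api_download.py --token MY_API_TOKEN --course-instance-id 12345 \
#       --output-dir ./pl_course_data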
| PrairieLearn/PrairieLearn | tools/api_download.py | Python | agpl-3.0 | 5,246 |
#!/usr/bin/python
"""
.. module:: shellscribe
Shell-Scribe run.py
@author: Keith E. Miller <keithmiller@umass.edu>
Expected issues:
- cd command is shell-scribe specific so commands that use cd in a non-trivial
way might break the cd command
"""
import cmd
import os
import sys
import argparse as ap
import datetime
import json
from twilio.rest import TwilioRestClient
## Set to false to get rid of debug print statements
DEBUG = False
### PASTE FUNCTION DEFINITIONS HERE
def bashinator_9000(filename):
dic={}
inc=1
title = ''
author = ''
date = datetime.datetime.now()
title = raw_input("What is the title: ")
author = raw_input("Who is the author: ")
dic['welcome']= raw_input("Input a description for the lesson: ")
date = datetime.datetime.now()
if title =="": title = 'lesson'
if author=="": author = 'N/A'
dic["title"] = title
dic["author"] = author
with open(filename,'r') as file:
for row in file:
print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
comment=raw_input('- ')
tempDic = {'comment':comment,'command':row}
dic.update({inc:tempDic})
inc+=1
print('\033[0m')
dic['command_count'] = inc - 1
with open(title+'.json','w') as file:
json.dump(dic,file)
def bashinator_10000(filename): #need sleeeeeep
#fname = filename.readFile() #attempting to have json file read-in
with open(filename, 'r') as f:
json_dict = json.load(f)
print json_dict
inc=1
# Welcomes them to Hell
print json_dict["welcome"], "\n"
for x in range(json_dict["command_count"]):
x = x + 1
print '\033[91m' +"Line: ", x,'\n'
print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n'
print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1]
outfile = os.popen(json_dict[str(x)]["command"])
output = outfile.read()
return_val = outfile.close()
if return_val != None:
            Shell_Scribe().send_call()
print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m'
raw_input("-Press Enter-\n")
#not sure what to do with the rest of this code. whether or not it is even necessary
#with open('test.sh','r') as file:
# for row in file:
# print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
# comment=raw_input('- ')
# tempDic = {'comment':comment,'command':row}
# dic.update({inc:tempDic})
# inc+=1
#dic['welcome']="""This is a welcome message"""
#print('\033[0m')
#with open(title+'.json','w') as file:
# json.dump(dic,file)
class Shell_Scribe(cmd.Cmd):
"""
Shell_Scribe is a commandline interface that automatically saves a history
of what commands were typed to a text file as well as creating a shell
script for them.
"""
## Return value for each command (None == 0)
return_value = None
## The prompt to the user
prompt = '\033[96m'+'S'+'\033[33m'+'hell-'+'\033[96m'+'S'+'\033[33m'+ \
'cribe>'+'\033[0m'
## Set to True for Working Directory as prompt"
location_prompt = False
## This is a list of commands that will not be stored by Shell-Scribe
storage_blacklist = ["ls", "pwd", ""]
## Config File Name
config_filename = "config.json"
## Twilio Attributes
TWILIO = False
ACCOUNT_SID = None
AUTH_TOKEN = None
message_recipient = None
message_sender = None
call_url = None
alert_type = None
## Properties
script_filename = "shell-scribe.sh"
script = None
def bashinator_9000(self, filename):
dic={}
inc=1
title = ''
author = ''
date = datetime.datetime.now()
title = raw_input("What is the title: ")
author = raw_input("Who is the author: ")
dic['welcome']= raw_input("Input a description for the lesson: ")
date = datetime.datetime.now()
if title =="": title = 'lesson'
if author=="": author = 'N/A'
dic["title"] = title
dic["author"] = author
with open(filename,'r') as file:
for row in file:
print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
comment=raw_input('- ')
tempDic = {'comment':comment,'command':row}
dic.update({inc:tempDic})
inc+=1
print('\033[0m')
dic['command_count'] = inc - 1
with open(title+'.json','w') as file:
json.dump(dic,file)
def bashinator_10000(self, filename): #need sleeeeeep
#fname = filename.readFile() #attempting to have json file read-in
with open(filename, 'r') as f:
json_dict = json.load(f)
print json_dict
inc=1
# Welcomes them to Hell
print json_dict["welcome"], "\n"
for x in range(json_dict["command_count"]):
x = x + 1
print '\033[91m' +"Line: ", x,'\n'
print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n'
print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1]
outfile = os.popen(json_dict[str(x)]["command"])
output = outfile.read()
return_val = outfile.close()
if return_val != None:
self.send_call()
print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m'
raw_input("-Press Enter-\n")
## File Editing Methods
def store_to_script(self, line):
"""
Stores the shell command to the script
"""
self.script.write(line + "\n")
def load_config_json(self):
"""
Configures Shell-Scribe based on the JSON configuration file
"""
with open(self.config_filename, 'r') as f:
json_dict = json.load(f)
#print "Dict from Json:", json_dict
self.TWILIO = (1 == json_dict["twilio"]["TWILIO"])
if self.TWILIO:
self.ACCOUNT_SID = json_dict["twilio"]["ACCOUNT_SID"]
self.AUTH_TOKEN = json_dict["twilio"]["AUTH_TOKEN"]
self.message_recipient = json_dict["twilio"]["TO"]
self.message_sender = json_dict["twilio"]["FROM"]
if json_dict["twilio"]["ALERT_TYPE"].lower() == "call":
self.alert_type = json_dict["twilio"]["ALERT_TYPE"].lower()
self.call_url = json_dict["twilio"]["CALL_URL"]
if json_dict["appearance"]["prompt"].lower() == 'location':
self.location_prompt = True
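    # Illustrative config.json layout, inferred from the keys read above; all
    # values are placeholders:
    # {
    #     "twilio": {
    #         "TWILIO": 1,
    #         "ACCOUNT_SID": "ACxxxxxxxxxxxxxxxx",
    #         "AUTH_TOKEN": "your_auth_token",
    #         "TO": "+15551234567",
    #         "FROM": "+15557654321",
    #         "ALERT_TYPE": "call",
    #         "CALL_URL": "http://example.com/twiml.xml"
    #     },
    #     "appearance": {"prompt": "location"}
    # }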
def no_config_subroutine(self):
"""
Method that is called when there is no config found
"""
        gen_config = raw_input("Generate Default Config File? (Y/n)")
        if gen_config == "": gen_config = "Y"
        if gen_config.lower() == 'y':
            self.generate_config()
            self.load_config_json()
        else:
            print "No Configuration File. Running basic mode"
## Send text via Twilio
def send_text(self, line):
"""
Sends a text message via Twilio
"""
client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
client.messages.create(to=self.message_recipient,
from_=self.message_sender,
body="Failed on command: " + line)
def send_call(self):
"""
Sends said call via Twilio
"""
print "Calling"
client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
call = client.calls.create(to=self.message_recipient,
from_=self.message_sender,
url=self.call_url,
method="GET",
fallback_method="GET",
status_callback_method="GET",
record="false")
print call.sid
## Explicit Shell-Scribe Commands
def do_cd(self, line):
"""
Runs the cd equivalent
"""
if os.path.isdir(line):
os.chdir(line)
else:
print "Directory ", line, " does not exist"
def do_exit(self, line):
"""
Exits Shell-Scribe
"""
os.system("chmod +x %s" % self.script_filename)
sys.exit()
def do_quit(self, line):
"""
Exits Shell Scribe
"""
os.system("chmod +x %s" % self.script_filename)
sys.exit()
## Misc. Functions
def command_not_blank(self, line):
"""
Checks to make sure the command is not all space characters
"""
print "line:",line
for char in line:
if char != " ":
return True
return False
## CMD Overloads
def do_EOF(self, line):
"""
Method that is called at the end of a batch job.
"""
return True
def precmd(self, line):
"""
Method that is run just before the shell command is run
"""
return line
def emptyline(self):
"""
Controls what happens if the user enters an empty line. This is addded
to because without overloading this method it defaults to rerunning
the command which is not what we are looking for.
"""
return ""
def postcmd(self, stop, line):
"""
Method that is called after each of command is run
"""
if self.location_prompt:
self.prompt = os.getcwd() + " >"
if self.return_value == None:
if (line not in self.storage_blacklist) and self.command_not_blank(line):
self.store_to_script(line)
print "Stored!"
def default(self, line):
"""
This is the default method that is called if the shell command is not
a specific shell command (a do_ method_)
"""
cmd_file = os.popen(line)
output = cmd_file.read()
self.return_value = cmd_file.close()
if self.return_value != None:
if self.alert_type == 'text':
self.send_text(line)
if self.alert_type == 'call':
self.send_call()
if self.command_not_blank(line):
print output
def preloop(self):
"""
Method that is called before the CMD loop begins
"""
if self.location_prompt:
self.prompt = os.getcwd() + " >"
if os.path.isfile(self.script_filename):
pass
self.script = open(self.script_filename, 'a')
if __name__ == '__main__':
parser = ap.ArgumentParser(description="Documents Shell-Commands")
parser.add_argument('--location-prompt', action='store_true')
parser.add_argument('-config',
help="The name of the configuration JSON file")
parser.add_argument('-create-lesson',
help="The name of the script that we are building \
a lesson for")
parser.add_argument('-run-lesson',
help="The name of the lesson (JSON file) that we are \
running in shell-scribe")
args = parser.parse_args()
ss = Shell_Scribe()
ss.location_prompt = args.location_prompt
if args.config is not None:
if os.path.isfile(args.config):
print "Using configuration from file ", args.config
ss.config_filename = args.config
ss.load_config_json()
else:
print "Config does not exist"
            ss.no_config_subroutine()
elif os.path.isfile("config.json"):
print "Found config.json"
ss.load_config_json()
else:
ss.no_config_subroutine()
if DEBUG: print args
if args.create_lesson != None:
ss.bashinator_9000(args.create_lesson)
print "RUNNING CREATE LESSON BLOCK"
elif args.run_lesson != None:
# Run Lesson Function
ss.bashinator_10000(args.run_lesson)
else:
ss.cmdloop()
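# Illustrative invocations, assuming this module is saved as run.py as the
# header suggests (file names are placeholders):
#   python run.py --location-prompt
#   python run.py -config config.json
#   python run.py -create-lesson demo.sh
#   python run.py -run-lesson lesson.json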
| keithemiller/shell-scribe | shell-scribe.py | Python | apache-2.0 | 12,528 |
# ext/preprocessors.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup"""
import re
def convert_comments(text):
"""preprocess old style comments.
example:
from mako.ext.preprocessors import convert_comments
    t = Template(..., preprocessor=convert_comments)"""
return re.sub(r'(?<=\n)\s*#[^#]', "##", text)
| alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/mako/ext/preprocessors.py | Python | agpl-3.0 | 586 |
import logging
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
logger = logging.getLogger(__name__)
class NoNewUsersAccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
"""
Checks whether or not the site is open for signups.
Next to simply returning True/False you can also intervene the
regular flow by raising an ImmediateHttpResponse
(Comment reproduced from the overridden method.)
"""
return False
class LoggingSocialAccountAdapter(DefaultSocialAccountAdapter):
"""
Exactly the same as the DefaultSocialAccountAdapter, but logs authentication
errors to the default log.
"""
def authentication_error(
self,
request,
provider_id,
error=None,
exception=None,
extra_context=None,
):
"""
Log errors to authenticating. This method in the parent class is
left blank and exists for overriding, so we can do what we want here.
"""
logger.error(
"Error logging in with provider '{}' with error '{}' ({})".format(
provider_id, error, exception
)
)
| DemocracyClub/yournextrepresentative | ynr/account_adapter.py | Python | agpl-3.0 | 1,278 |
from django.conf.urls import patterns, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = patterns('',
url(r'^$', 'charts.views.chart_index', name='chart_index'),
url(r'^total_incomes/$', 'charts.views.total_incomes', name='total_incomes'),
url(r'^incomes/(?P<year>\d{4})/(?P<scope>\S+)$', 'charts.views.incomes_by_year_and_scope', name='incomes_by_year_and_scope'),
url(r'^incomes/(?P<year>\d{4})$', 'charts.views.incomes_by_year', name='incomes_by_year'),
url(r'^incomes_by_project/$', 'charts.views.incomes_by_project_index', name='incomes_by_project_index'),
url(r'^incomes_by_project/(?P<project_slug>\S+)$', 'charts.views.incomes_by_project', name='incomes_by_project'),
)
urlpatterns += staticfiles_urlpatterns()
| OscarPDR/projects_morelab | charts/urls.py | Python | gpl-3.0 | 781 |
# -*- coding: utf-8 -*-
"""
Test file question type.
"""
from tests.pyxform_test_case import PyxformTestCase
class FileWidgetTest(PyxformTestCase):
"""
Test file widget class.
"""
def test_file_type(self):
"""
Test file question type.
"""
self.assertPyxformXform(
name="data",
md="""
| survey | | | |
| | type | name | label |
| | file | file | Attach a file |
""",
xml__contains=['<upload mediatype="application/*"'],
)
| XLSForm/pyxform | tests/test_file.py | Python | bsd-2-clause | 622 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The kernel density estimators.
"""
from __future__ import (division, print_function, absolute_import, unicode_literals)
import numpy as np
import numpy.ma as ma
from scipy import linalg as la
from scipy.cluster.vq import kmeans, vq
try:
from scipy.special import logsumexp
except ImportError: # scipy < 1.0.0
from scipy.misc import logsumexp
# Avoid log(0) warnings when weights go to 0
np.seterr(divide='ignore')
def optimized_kde(data, pool=None, kde=None, max_samples=None, **kwargs):
"""
Iteratively run a k-means clustering algorithm, estimating the distibution of each identified
cluster with an independent kernel density estimate. Starting with ``k = 1``, the distribution
is estimated and the Bayes Information criterion (BIC) is calculated. `k` is increased until
the BIC stops increasing.
:param data:
An `(N, ndim)`-shaped array, containing `N` samples from the target distribution.
:param pool: (optional)
A pool of processes with a :func:`map` function to use.
:param kde: (optional)
An old KDE to inherit samples from.
:param max_samples: (optional)
The maximum number of samples to use for constructing or updating the KDE. If a KDE is
supplied and adding the samples from it will go over this, old samples are thinned by
factors of two until under the limit.
:param kwargs: (optional)
Keyword arguments to pass to :class:`ClusteredKDE`.
:returns: :meth:`ClusteredKDE` that maximizes the BIC.
"""
# Trim data if too many samples were given
n_new = len(data)
if kde is None and n_new == 0:
return None
if max_samples is not None and max_samples <= n_new:
data = data[:max_samples]
else:
# Combine data, thinning old data if we need room
if kde is not None:
old_data = kde.data
if max_samples is not None:
nsamps = len(old_data) + n_new
while nsamps > max_samples:
old_data = old_data[::2]
nsamps = len(old_data) + n_new
if n_new == 0:
# If there's no new data, just use the old
data = old_data
else:
# Otherwise combine the old and the new
data = np.concatenate((old_data, data))
best_bic = -np.inf
best_kde = None
k = 1
while True:
try:
kde = ClusteredKDE(data, k, **kwargs)
bic = kde.bic(pool=pool)
except la.LinAlgError:
bic = -np.inf
if bic > best_bic:
best_kde = kde
best_bic = bic
else:
break
k += 1
return best_kde
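# Illustrative sketch (not part of the original module): build a BIC-selected
# clustered KDE from synthetic samples, then draw and evaluate new points.
def _example_optimized_kde():
    samples = np.random.randn(1000, 2)   # fake 2-D samples
    kde = optimized_kde(samples)         # k chosen by maximizing the BIC
    new_draws = kde.draw(100)            # sample from the estimated density
    logprob = kde.logpdf(new_draws)      # evaluate their log-density
    return new_draws, logprob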
class ClusteredKDE(object):
"""
Run a k-means clustering algorithm, estimating the distibution of each identified cluster with
an independent kernel density estimate. The full distibution is then estimated by combining the
individual KDE's, weighted by the fraction of samples assigned to each cluster.
:param data:
An `(N, ndim)`-shaped array, containing `N` samples from the target distribution.
:param k:
The number of clusters for k-means clustering.
"""
def __init__(self, data, k=1):
self._data = data
self._nclusters = k
self._mean = np.mean(data, axis=0)
self._std = np.std(data, axis=0)
# Cluster data that's mean 0 and scaled to unit width in each parameter independently
white_data = self._whiten(data)
self._centroids, _ = kmeans(white_data, k)
self._assignments, _ = vq(white_data, self.centroids)
self._kdes = [KDE(self.data[self.assignments == c]) for c in range(k)]
self._logweights = np.log([np.count_nonzero(self.assignments == c)/self.size
for c in range(k)])
def draw(self, size=1):
"""Draw `size` samples from the KDE."""
# Pick clusters randomly with the assigned weights
cumulative_weights = np.cumsum(np.exp(self._logweights))
clusters = np.searchsorted(cumulative_weights, np.random.rand(size))
draws = np.empty((size, self.ndim))
for cluster in range(self.nclusters):
sel = clusters == cluster
draws[sel] = self._kdes[cluster].draw(np.count_nonzero(sel))
return draws
def logpdf(self, pts, pool=None):
"""Evaluate the logpdf of the KDE at `pts`."""
logpdfs = [logweight + kde(pts, pool=pool)
for logweight, kde in zip(self._logweights, self._kdes)]
if len(pts.shape) == 1:
return logsumexp(logpdfs)
else:
return logsumexp(logpdfs, axis=0)
def _whiten(self, data):
"""Whiten `data`, probably before running k-means."""
return (data - self._mean)/self._std
def _color(self, data):
"""Recolor `data`, reversing :meth:`_whiten`."""
return data * self._std + self._mean
def bic(self, pool=None):
r"""
Evaluate Bayes Information Criterion for the KDE's estimate of the distribution
.. math::
\mathrm{BIC} = \mathrm{ln}\mathcal{L}_\mathrm{max} - \frac{d_m}{2} \mathrm{ln} N
where :math:`d_m` is the number of dimensions of the KDE model (:math:`n_\mathrm{clusters}
d` centroid location parameters, :math:`n_\mathrm{clusters} - 1` normalized weights, and
        :math:`n_\mathrm{clusters} d (d + 1)/2` kernel covariance parameters, one matrix for each of
:math:`n_\mathrm{clusters}` clusters), and :math:`N` is the number of samples used to build
the KDE.
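        Summing these contributions,
        .. math::
            d_m = n_\mathrm{clusters} d + (n_\mathrm{clusters} - 1) + \frac{n_\mathrm{clusters} d (d + 1)}{2}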
"""
log_l = np.sum(self.logpdf(self.data, pool=pool))
# Determine the total number of parameters in clustered-KDE
# Account for centroid locations
nparams = self.nclusters * self.ndim
# One for each cluster, minus one for constraint that all sum to unity
nparams += self.nclusters - 1
# Separate kernel covariances for each cluster
nparams += self.nclusters * (self.ndim + 1) * self.ndim/2
return log_l - nparams/2 * np.log(self.size)
@property
def data(self):
"""Samples used to build the KDE."""
return self._data
@property
def nclusters(self):
"""The number of clusters used for k-means."""
return self._nclusters
@property
def assignments(self):
"""Cluster assignments from k-means."""
return self._assignments
@property
def centroids(self):
"""Cluster centroids from k-means."""
return self._centroids
@property
def ndim(self):
"""The number of dimensions of the KDE."""
return self.data.shape[1]
@property
def size(self):
"""The number of samples used to build the KDE."""
return self.data.shape[0]
__call__ = logpdf
__len__ = size
class KDE(object):
"""
A Gaussian kernel density estimator that provides a means for evaluating the estimated
probability density function, and drawing additional samples from the estimated distribution.
    Cholesky decomposition of the covariance matrix makes this class a bit more stable than
    :mod:`scipy`'s Gaussian KDE.
:param data:
An `(N, ndim)`-shaped array, containing `N` samples from the target distribution.
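    Illustrative usage (a sketch; ``samples`` is assumed to be an `(N, ndim)`-shaped array)::
        kde = KDE(samples)
        draws = kde.draw(10)    # new samples drawn about randomly chosen kernels
        logpdfs = kde(draws)    # log-pdf evaluated at each drawn point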
"""
def __init__(self, data):
self._data = np.atleast_2d(data)
self._mean = np.mean(data, axis=0)
self._cov = None
if self.data.shape[0] > 1:
try:
self._cov = np.cov(data.T)
# Try factoring now to see if regularization is needed
la.cho_factor(self._cov)
except la.LinAlgError:
self._cov = oas_cov(data)
self._set_bandwidth()
# store transformation variables for drawing random values
alphas = np.std(data, axis=0)
ms = 1./alphas
m_i, m_j = np.meshgrid(ms, ms)
ms = m_i * m_j
self._draw_cov = ms * self._kernel_cov
self._scale_fac = alphas
def __enter__(self):
return self
def _set_bandwidth(self):
r"""
Use Scott's rule to set the kernel bandwidth:
.. math::
\mathcal{K} = n^{-1/(d+4)} \Sigma^{1/2}
Also store Cholesky decomposition for later.
"""
if self.size > 0 and self._cov is not None:
self._kernel_cov = self._cov * self.size ** (-2/(self.ndim + 4))
# Used to evaluate PDF with cho_solve()
self._cho_factor = la.cho_factor(self._kernel_cov)
# Make sure the estimated PDF integrates to 1.0
self._lognorm = self.ndim/2 * np.log(2*np.pi) + np.log(self.size) +\
np.sum(np.log(np.diag(self._cho_factor[0])))
else:
self._lognorm = -np.inf
def draw(self, size=1):
"""
Draw samples from the estimated distribution.
"""
# Return nothing if this is an empty KDE
if self.size == 0:
return []
# Draw vanilla samples from a zero-mean multivariate Gaussian
draws = self._scale_fac * np.random.multivariate_normal(np.zeros(self.ndim),
self._draw_cov, size=size)
# Pick N random kernels as means
kernels = np.random.randint(0, self.size, size)
# Shift vanilla draws to be about chosen kernels
return self.data[kernels] + draws
def logpdf(self, pts, pool=None):
"""Evaluate the logpdf at `pts` as estimated by the KDE."""
pts = np.atleast_2d(pts)
npts, ndim = pts.shape
assert ndim == self.ndim
# Apply across the pool if it exists
if pool:
this_map = pool.map
else:
this_map = map
# Return -inf if this is an empty KDE
if np.isinf(self._lognorm):
results = np.zeros(npts) - np.inf
else:
args = [(pt, self.data, self._cho_factor) for pt in pts]
results = list(this_map(_evaluate_point_logpdf, args))
# Normalize and return
return np.array(results) - self._lognorm
@property
def data(self):
"""Samples used to build the KDE."""
return self._data
@property
def ndim(self):
"""The number of dimensions of the KDE."""
return self.data.shape[1]
@property
def size(self):
"""The number of samples used to build the KDE."""
return self.data.shape[0]
__len__ = size
__call__ = logpdf
def unique_spaces(mask):
"""
Determine the unique sets of dimensions based on a mask. Inverted 1D masks are returned to use
as selectors.
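    For example (illustrative; assumes ``numpy`` is imported as ``np``)::
        mask = np.array([[False, False, True],
                         [False, False, True],
                         [False, True, False]])
        unique_spaces(mask)   # two selectors: dimensions (0, 1) and dimensions (0, 2)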
"""
ncols = mask.shape[1]
# Do some magic with views so `np.unique` can be used to find the unique sets of dimensions.
dtype = mask.dtype.descr * ncols
struct = mask.view(dtype)
uniq = np.unique(struct)
uniq = uniq.view(mask.dtype).reshape(-1, ncols)
return ~uniq
class TransdimensionalKDE(object):
"""
A generalized Gaussian kernel density estimator that reads masked arrays, constructs a
:class:`ClusteredKDE` using :func:`optimized_kde` for each unique parameter space, then weighs
the KDEs based on the number of samples in each parameter space.
:param data:
        An `(N, max_dim)`-shaped masked array, containing `N` samples from the target distribution.
:param kde: (optional)
An old trans-dimensional KDE to inherit samples from.
:param max_samples: (optional)
        The maximum number of samples to use for constructing or updating the KDE in each unique
parameter space. If a KDE is supplied and adding the samples from `data` will go over this,
old samples are thinned by factors of two until under the limit in each parameter space.
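    Illustrative usage (a sketch; assumes ``numpy.ma`` is imported as ``ma`` and ``samples`` is
    an `(N, max_dim)` array with NaNs marking dimensions unused by a given sample)::
        data = ma.masked_invalid(samples)
        kde = TransdimensionalKDE(data)
        draws = kde.draw(100)     # masked array of new samples
        logpdf = kde(draws[0])    # log-pdf of a single (masked) point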
"""
def __init__(self, data, kde=None, max_samples=None, pool=None):
npts_new, max_ndim = data.shape
self._max_ndim = max_ndim
if kde is None:
# Save an (inverted) mask for each unique set of dimensions
self._spaces = unique_spaces(data.mask)
else:
# Inherit old space definitions, in case the new sample has no points in a subspace
self._spaces = kde.spaces
# Construct a separate clustered-KDE for each parameter space
weights = []
self._kdes = []
for space_id, space in enumerate(self.spaces):
# Construct a selector for the samples from this space
subspace = np.all(~data.mask == space, axis=1)
# Determine weights from only the new samples
npts_subspace = np.count_nonzero(subspace)
weight = npts_subspace/npts_new
weights.append(weight)
fixd_data = data[subspace]
if npts_subspace > 0:
fixd_data = np.asarray(fixd_data[~fixd_data.mask].reshape((npts_subspace, -1)))
old_kde = None
if kde is not None:
old_kde = kde.kdes[space_id]
self._kdes.append(optimized_kde(fixd_data, pool, old_kde, max_samples))
self._logweights = np.log(np.array(weights))
def draw(self, size=1, spaces=None):
"""
Draw samples from the transdimensional distribution.
"""
if spaces is not None:
if len(spaces) != size:
raise ValueError('Sample size inconsistent with number of spaces saved')
space_inds = np.empty(size)
for space_id, space in enumerate(self.spaces):
subspace = np.all(spaces == space, axis=1)
space_inds[subspace] = space_id
else:
            # Draw spaces randomly with the assigned weights
cumulative_weights = np.cumsum(np.exp(self._logweights))
space_inds = np.searchsorted(cumulative_weights, np.random.rand(size))
draws = ma.masked_all((size, self._max_ndim))
for space_id in range(len(self.spaces)):
sel = space_inds == space_id
n_fixedd = np.count_nonzero(sel)
if n_fixedd > 0:
# Populate only the valid entries for this parameter space
draws[np.ix_(sel, self._spaces[space_id])] = self.kdes[space_id].draw(n_fixedd)
return draws
def logpdf(self, pts, pool=None):
"""Evaluate the log-transdimensional-pdf at `pts` as estimated by the KDE."""
logpdfs = []
for logweight, space, kde in zip(self._logweights,
self.spaces,
self.kdes):
# Calculate the probability for each parameter space individually
if np.all(space == ~pts.mask) and np.isfinite(logweight):
logpdfs.append(logweight + kde(pts[space], pool=pool))
return logsumexp(logpdfs, axis=0)
@property
def kdes(self):
"""List of fixed-dimension :meth:`ClusteredKDE` s"""
return self._kdes
@property
def spaces(self):
"""Unique sets of dimensions, usable as selectors."""
return self._spaces
__call__ = logpdf
def _evaluate_point_logpdf(args):
"""
    Evaluate the Gaussian KDE at a given point `p`. This lives outside the KDE method to allow for
    parallelization using :mod:`multiprocessing`. Since :func:`map` only allows single-argument
    functions, the following arguments need to be packed into a single tuple.
:param p:
The point to evaluate the KDE at.
:param data:
The `(N, ndim)`-shaped array of data used to construct the KDE.
:param cho_factor:
A Cholesky decomposition of the kernel covariance matrix.
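    The returned value is the unnormalized log-density
    .. math::
        \ln \sum_i \exp(-(x_i - p)^T \mathcal{K}^{-1} (x_i - p) / 2)
    where the :math:`x_i` are the KDE samples and :math:`\mathcal{K}` is the kernel covariance.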
"""
point, data, cho_factor = args
# Use Cholesky decomposition to avoid direct inversion of covariance matrix
diff = data - point
tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T
diff *= tdiff
# Work in the log to avoid large numbers
return logsumexp(-np.sum(diff, axis=1)/2)
def oas_cov(pts):
r"""
Estimate the covariance matrix using the Oracle Approximating Shrinkage algorithm
.. math::
(1 - s)\Sigma + s \mu \mathcal{I}_d
where :math:`\mu = \mathrm{tr}(\Sigma) / d`. This ensures the covariance matrix estimate is
well behaved for small sample sizes.
:param pts:
An `(N, ndim)`-shaped array, containing `N` samples from the target distribution.
This follows the implementation in `scikit-learn
<https://github.com/scikit-learn/scikit-learn/blob/31c5497/
sklearn/covariance/shrunk_covariance_.py>`_.
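    Illustrative usage (a sketch; assumes ``numpy`` is imported as ``np``)::
        pts = np.random.randn(5, 3)   # only a handful of samples in 3 dimensions
        cov = oas_cov(pts)            # well-conditioned (3, 3) covariance estimate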
"""
pts = np.atleast_2d(pts)
npts, ndim = pts.shape
    empirical_cov = np.cov(pts.T)
    mean = np.trace(empirical_cov) / ndim
    alpha = np.mean(empirical_cov * empirical_cov)
num = alpha + mean * mean
den = (npts + 1) * (alpha - (mean * mean) / ndim)
shrinkage = min(num / den, 1)
    shrunk_cov = (1 - shrinkage) * empirical_cov
shrunk_cov[np.diag_indices(ndim)] += shrinkage * mean
return shrunk_cov
| bfarr/kombine | kombine/clustered_kde.py | Python | mit | 17,254 |
#!/usr/bin/python
#
#Copyright (C) 2011 by Venkata Pingali (pingali@gmail.com) & TCS
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
"""
Simplest possible Python client
"""
import logging
import sys, os, os.path
from config import Config
import simplejson as json
def findpath(path):
return os.path.abspath(os.path.join(os.path.dirname(__file__),path))
log = logging.getLogger("SampleClient")
from AadhaarAuth.request import AuthRequest
from AadhaarAuth.data import AuthData
from AadhaarAuth.command import AuthConfig
from AadhaarAuth.response import AuthResponse
__author__ = "Venkata Pingali"
__copyright__ = "Copyright 2011,Venkata Pingali and TCS"
__credits__ = ["UIDAI", "MindTree", "GeoDesic", "Viral Shah"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Venkata Pingali"
__email__ = "pingali@gmail.com"
__status__ = "Pre-release"
if __name__ == '__main__':
cmd = AuthConfig()
cfg = cmd.update_config()
logging.getLogger().setLevel(cfg.common.loglevel)
logging.basicConfig()
# This is a simple client. Force use of name
cfg.request.demographics = ["Pi"]
cfg.request.biometrics = []
# => Gather the data from the (simulated) client
data = AuthData(cfg=cfg)
data.generate_client_xml()
exported_data = data.export_request_data()
# Create the request object and execute
req = AuthRequest(cfg)
req.import_request_data(exported_data)
req.execute()
# Load the response
data = json.loads(req.export_response_data())
res = AuthResponse(cfg=cfg, uid=cfg.request.uid)
res.load_string(data['xml'])
# Find all the attributes set
bits = res.lookup_usage_bits()
print "[%.3f] (%s) -> %s " % (data['latency'], bits, data['ret'])
if data['err'] is not None and data['err'] != -1:
print "Err %s: %s "% ( data['err'], data['err_message'])
| pingali/pyAadhaarAuth | bin/aadhaar-sample-client.py | Python | mit | 2,890 |
"""674. Longest Continuous Increasing Subsequence
https://leetcode.com/problems/longest-continuous-increasing-subsequence/
Given an unsorted array of integers nums, return the length of the longest
continuous increasing subsequence (i.e. subarray). The subsequence must be
strictly increasing.
A continuous increasing subsequence is defined by two indices l and r (l < r)
such that it is [nums[l], nums[l + 1], ..., nums[r - 1], nums[r]] and for each
l <= i < r, nums[i] < nums[i + 1].
Example 1:
Input: nums = [1,3,5,4,7]
Output: 3
Explanation: The longest continuous increasing subsequence is [1,3,5] with length 3.
Even though [1,3,5,7] is an increasing subsequence, it is not continuous as elements 5 and 7 are separated by element
4.
Example 2:
Input: nums = [2,2,2,2,2]
Output: 1
Explanation: The longest continuous increasing subsequence is [2] with length 1. Note that it must be strictly
increasing.
Constraints:
0 <= nums.length <= 10^4
-10^9 <= nums[i] <= 10^9
"""
from typing import List
class Solution:
def find_length_of_lcis(self, nums: List[int]) -> int:
n = len(nums)
if n == 0:
return 0
cnt, tmp = 1, 1
for i in range(n - 1):
if nums[i] < nums[i + 1]:
tmp += 1
else:
cnt = max(cnt, tmp)
tmp = 1
cnt = max(cnt, tmp)
return cnt
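# Minimal self-check (illustrative addition, not part of the original solution):
if __name__ == "__main__":
    solution = Solution()
    assert solution.find_length_of_lcis([1, 3, 5, 4, 7]) == 3  # [1, 3, 5]
    assert solution.find_length_of_lcis([2, 2, 2, 2, 2]) == 1  # must be strictly increasing
    assert solution.find_length_of_lcis([]) == 0               # empty input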
| isudox/leetcode-solution | python-algorithm/leetcode/problem_674.py | Python | mit | 1,398 |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['ros2opencv'],
package_dir={'': 'src'},
)
setup(**setup_args) | vytasrgl/pi_vision | ros2opencv/setup.py | Python | gpl-2.0 | 309 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS
from openerp.exceptions import UserError, AccessError
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'transient': fields.boolean(string="Transient Model"),
'modules': fields.function(_in_modules, type='char', string='In Apps', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': 'manual',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise UserError(_("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
if 'model' in vals:
raise UserError(_('Field "Model" cannot be modified on models.'))
if 'state' in vals:
raise UserError(_('Field "Type" cannot be modified on models.'))
if 'transient' in vals:
raise UserError(_('Field "Transient Model" cannot be modified on models.'))
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','manual')=='manual':
# setup models; this automatically adds model in registry
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context, update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, transient, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
_transient = bool(transient)
CustomModel._build_model(self.pool, cr)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Field Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True, translate=True),
'help': fields.text('Field Help', translate=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'copy': fields.boolean('Copied', help="Whether the value is copied when duplicating a record."),
'related': fields.char('Related Field', help="The corresponding related field, if any. This must be a dot-separated list of field names."),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'index': fields.boolean('Indexed'),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Apps', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
'relation_table': fields.char("Relation Table", help="Used for custom many2many fields to define a custom relation table name"),
'column1': fields.char("Column 1", help="Column referring to the record in the model table"),
'column2': fields.char("Column 2", help="Column referring to the record in the comodel table"),
'compute': fields.text("Compute", help="Code to compute the value of the field.\n"
"Iterate on the recordset 'self' and assign the field's value:\n\n"
" for record in self:\n"
" record['size'] = len(record.name)\n\n"
"Modules time, datetime, dateutil are available."),
'depends': fields.char("Dependencies", help="Dependencies of compute method; "
"a list of comma-separated field names, like\n\n"
" name, partner_id.name"),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': 'manual',
'on_delete': 'set null',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.info('Invalid selection list definition for fields.selection', exc_info=True)
raise UserError(_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise UserError(_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _related_field(self):
""" Return the ``Field`` instance corresponding to ``self.related``. """
names = self.related.split(".")
last = len(names) - 1
model = self.env[self.model or self.model_id.model]
for index, name in enumerate(names):
field = model._fields.get(name)
if field is None:
raise UserError(_("Unknown field name '%s' in related field '%s'") % (name, self.related))
if index < last and not field.relational:
raise UserError(_("Non-relational field name '%s' in related field '%s'") % (name, self.related))
model = model[name]
return field
@api.one
@api.constrains('related')
def _check_related(self):
if self.state == 'manual' and self.related:
field = self._related_field()
if field.type != self.ttype:
raise UserError(_("Related field '%s' does not have type '%s'") % (self.related, self.ttype))
if field.relational and field.comodel_name != self.relation:
raise UserError(_("Related field '%s' does not have comodel '%s'") % (self.related, self.relation))
@api.onchange('related')
def _onchange_related(self):
if self.related:
try:
field = self._related_field()
except UserError as e:
return {'warning': {'title': _("Warning"), 'message': e.message}}
self.ttype = field.type
self.relation = field.comodel_name
self.readonly = True
self.copy = False
@api.onchange('compute')
def _onchange_compute(self):
if self.compute:
self.readonly = True
self.copy = False
@api.one
@api.constrains('relation_table')
def _check_relation_table(self):
models.check_pg_name(self.relation_table)
@api.model
def _custom_many2many_names(self, model_name, comodel_name):
""" Return default names for the table and columns of a custom many2many field. """
rel1 = self.env[model_name]._table
rel2 = self.env[comodel_name]._table
table = 'x_%s_%s_rel' % tuple(sorted([rel1, rel2]))
if rel1 == rel2:
return (table, 'id1', 'id2')
else:
return (table, '%s_id' % rel1, '%s_id' % rel2)
@api.onchange('ttype', 'model_id', 'relation')
def _onchange_ttype(self):
self.copy = (self.ttype != 'one2many')
if self.ttype == 'many2many' and self.model_id and self.relation:
names = self._custom_many2many_names(self.model_id.model, self.relation)
self.relation_table, self.column1, self.column2 = names
else:
self.relation_table = False
self.column1 = False
self.column2 = False
@api.onchange('relation_table')
def _onchange_relation_table(self):
if self.relation_table:
# check whether other fields use the same table
others = self.search([('ttype', '=', 'many2many'),
('relation_table', '=', self.relation_table),
('id', 'not in', self._origin.ids)])
if others:
for other in others:
if (other.model, other.relation) == (self.relation, self.model):
# other is a candidate inverse field
self.column1 = other.column2
self.column2 = other.column1
return
return {'warning':{
'title': _("Warning"),
'message': _("The table %r if used for other, possibly incompatible fields.") % self.relation_table,
}}
def _drop_column(self, cr, uid, ids, context=None):
tables_to_drop = set()
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('SELECT relkind FROM pg_class WHERE relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("""SELECT column_name FROM information_schema.columns
WHERE table_name=%s AND column_name=%s""",
(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = field.relation_table or model._fields[field.name].relation
tables_to_drop.add(rel_name)
model._pop_field(cr, uid, field.name, context=context)
if tables_to_drop:
# drop the relation tables that are not used by other fields
cr.execute("""SELECT relation_table FROM ir_model_fields
WHERE relation_table IN %s AND id NOT IN %s""",
(tuple(tables_to_drop), tuple(ids)))
tables_to_keep = set(row[0] for row in cr.fetchall())
for rel_name in tables_to_drop - tables_to_keep:
cr.execute('DROP TABLE "%s"' % rel_name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise UserError(_("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# The field we just deleted might be inherited, and the registry is
# inconsistent in this case; therefore we reload the registry.
cr.commit()
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise UserError(_('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','manual') == 'manual':
if not vals['name'].startswith('x_'):
raise UserError(_("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise UserError(_("Model %s does not exist!") % vals['relation'])
if vals.get('ttype', False) == 'one2many':
if not self.search(cr, user, [('model_id','=',vals['relation']), ('name','=',vals['relation_field']), ('ttype','=','many2one')]):
raise UserError(_("Many2one %s on model %s does not exist!") % (vals['relation_field'], vals['relation']))
self.pool.clear_manual_fields()
if vals['model'] in self.pool:
# setup models; this re-initializes model in registry
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context, update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise UserError(_('Changing the storing system for field "%s" is not allowed.') % field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise UserError(_('Renaming sparse field "%s" is not allowed') % field.name)
# if set, *one* column can be renamed here
column_rename = None
# names of the models to patch
patched_models = set()
if vals and ids:
# check selection if given
if vals.get('selection'):
self._check_selection(cr, user, vals['selection'], context=context)
for item in self.browse(cr, user, ids, context=context):
if item.state != 'manual':
raise UserError(_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if vals.get('model_id', item.model_id.id) != item.model_id.id:
raise UserError(_("Changing the model of a field is forbidden!"))
if vals.get('ttype', item.ttype) != item.ttype:
raise UserError(_("Changing the type of a field is not yet supported. "
"Please drop it and create it again!"))
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if vals.get('name', item.name) != item.name:
# We need to rename the column
if column_rename:
raise UserError(_('Can only rename one field at a time!'))
if vals['name'] in obj._fields:
raise UserError(_('Cannot rename field to %s, because that field already exists!') % vals['name'])
if vals.get('state', 'manual') == 'manual' and not vals['name'].startswith('x_'):
raise UserError(_('New field name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj._table, item.name, vals['name'], item.index)
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
patched_models.add(obj._name)
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
self.pool.clear_manual_fields()
if column_rename:
# rename column in database, and its corresponding index if present
table, oldname, newname, index = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (table, oldname, newname))
if index:
cr.execute('ALTER INDEX "%s_%s_index" RENAME TO "%s_%s_index"' % (table, oldname, table, newname))
if column_rename or patched_models:
# setup models, this will reload all manual fields in registry
self.pool.setup_models(cr, partial=(not self.pool.ready))
if patched_models:
# update the database schema of the models to patch
ctx = dict(context, update_custom_fields=True)
for model_name in patched_models:
obj = self.pool[model_name]
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or patched_models:
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'definition': fields.char('Definition', help="PostgreSQL constraint definition"),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise AccessError(_('Administrator access is required to uninstall a module'))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise AccessError(_('Administrator access is required to uninstall a module'))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('transient','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
# The context parameter is useful when the method translates error messages.
# But as the method raises an exception in that case, the key 'lang' might
# not be really necessary as a cache key, unless the `ormcache_context`
# decorator catches the exception (it does not at the moment.)
@tools.ormcache_context('uid', 'model', 'mode', 'raise_exception', keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# User root have all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
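    For example, looking up a record by its external identifier (an illustrative sketch
    using the old-style API; ``base.user_root`` ships with the base module)::
        data_obj = self.pool['ir.model.data']
        model, res_id = data_obj.xmlid_to_res_model_res_id(cr, uid, 'base.user_root')
        record = data_obj.xmlid_to_object(cr, uid, 'base.user_root')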
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True),
'module': fields.char('Module', required=True),
'res_id': fields.integer('Record ID', help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_model_data_module_name_uniq_index'")
if not cr.fetchone():
cr.execute('CREATE UNIQUE INDEX ir_model_data_module_name_uniq_index ON ir_model_data (module, name)')
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_model_data_model_res_id_index'")
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_model_res_id_index ON ir_model_data (model, res_id)')
# NEW V8 API
@tools.ormcache('xmlid')
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise AccessError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
        If not found, raise a ValueError.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
id = False
try:
# One step to check the ID is defined and the record actually exists
record = self.get_object(cr, uid, module, xml_id)
if record:
id = record.id
self.loads[(module,xml_id)] = (model,id)
for table, inherit_field in self.pool[model]._inherits.iteritems():
parent_id = record[inherit_field].id
parent_xid = '%s_%s' % (xml_id, table.replace('.', '_'))
self.loads[(module, parent_xid)] = (table, parent_id)
except Exception:
pass
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
# In update mode, do not update a record if it's ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, SUPERUSER_ID, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
inherit_xml_ids = []
for table, field_name in model_obj._inherits.items():
xml_ids = self.pool['ir.model.data'].search(cr, uid, [
('module', '=', module),
('name', '=', xml_id + '_' + table.replace('.', '_')),
], context=context)
# XML ID found in the database, try to recover an existing record
if xml_ids:
found_xml_id = self.pool['ir.model.data'].browse(cr, uid, xml_ids[0], context=context)
record = self.pool[found_xml_id.model].browse(cr, uid, [found_xml_id.res_id], context=context)[0]
# The record exists, store the id and don't recreate the XML ID
if record.exists():
inherit_xml_ids.append(found_xml_id.model)
values[field_name] = found_xml_id.res_id
# Orphan XML ID, delete it
else:
found_xml_id.unlink()
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
if table in inherit_xml_ids:
continue
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
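    # --- Illustrative usage sketch (not part of the original module) ---
    # _update() is the low-level entry point typically used by the XML/CSV data
    # loaders; a hypothetical direct call (all names below are examples only)
    # could look like:
    #
    #   imd_obj = self.pool['ir.model.data']
    #   imd_obj._update(cr, uid, 'res.partner', 'my_module',
    #                   {'name': 'Acme'}, xml_id='partner_acme',
    #                   mode='init', noupdate=False)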
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise AccessError(_('Administrator access is required to uninstall a module'))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
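    # --- Illustrative call sketch (not part of the original module) ---
    # Hypothetical invocation during a full module uninstall (the module name
    # below is an example only):
    #
    #   self.pool['ir.model.data']._module_data_uninstall(cr, uid, ['my_module'])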
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules or config.get('import_partial'):
return True
bad_imd_ids = []
context = {MODULE_UNINSTALL_FLAG: True}
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
""", (tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module, name) not in self.loads:
if model in self.pool:
_logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
if self.pool[model].exists(cr, uid, [res_id], context=context):
self.pool[model].unlink(cr, uid, [res_id], context=context)
else:
bad_imd_ids.append(id)
if bad_imd_ids:
self.unlink(cr, uid, bad_imd_ids, context=context)
self.loads.clear()
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,)
}, context)
return {'type':'ir.actions.act_window_close'}
| Fl0rianFischer/sme_odoo | openerp/addons/base/ir/ir_model.py | Python | gpl-3.0 | 63,424 |
# -*- coding: utf-8 -*-
"""Create an application instance."""
from flask.helpers import get_debug_flag
from authmgr.app import create_app
#from authmgr.settings import DevConfig, ProdConfig
#CONFIG = DevConfig if get_debug_flag() else ProdConfig
app = create_app()
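# --- Illustrative local-run sketch (not part of the original file) ---
# A minimal, hypothetical entry point for running the app directly instead of
# through the Flask CLI; get_debug_flag() reads the FLASK_DEBUG environment
# variable.
if __name__ == '__main__':
    app.run(debug=get_debug_flag())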
| bcarroll/authmgr | authmgr/autoapp.py | Python | bsd-3-clause | 268 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import six
from hachoir_core.error import HachoirError
from hachoir_metadata import extractMetadata
from hachoir_parser import createParser
try:
from girder.utility.model_importer import ModelImporter
except ImportError:
ModelImporter = None
class MetadataExtractor(object):
def __init__(self, path, itemId):
"""
Initialize the metadata extractor.
:param path: path of file from which to extract metadata on client or
server
:param itemId: item ID of item containing file on server
"""
self.itemId = itemId
self.path = path
self.metadata = None
def extractMetadata(self):
"""
Extract metadata from file on client or server and attach to item on
server.
"""
self._extractMetadata()
if self.metadata is not None:
self._setMetadata()
def _extractMetadata(self):
"""
Extract metadata from file on client or server using hachoir-metadata.
"""
try:
parser = createParser(six.text_type(self.path),
six.binary_type(self.path))
if parser is None:
raise HachoirError
extractor = extractMetadata(parser)
if extractor is None:
raise HachoirError
self.metadata = dict()
for data in sorted(extractor):
if not data.values:
continue
key = data.description
value = ', '.join([item.text for item in data.values])
self.metadata[key] = value
except HachoirError:
self.metadata = None
def _setMetadata(self):
"""
Attach metadata to item on server.
"""
pass
class ClientMetadataExtractor(MetadataExtractor):
def __init__(self, client, path, itemId):
"""
Initialize client metadata extractor.
:param client: client instance
:param path: path of file from which to extract metadata on remote
client
:param itemId: item ID of item containing file on server
"""
super(ClientMetadataExtractor, self).__init__(path, itemId)
self.client = client
def _setMetadata(self):
"""
Attach metadata to item on server.
"""
super(ClientMetadataExtractor, self)._setMetadata()
self.client.addMetadataToItem(str(self.itemId), self.metadata)
class ServerMetadataExtractor(MetadataExtractor, ModelImporter):
def __init__(self, assetstore, uploadedFile):
"""
Initialize server metadata extractor.
:param assetstore: asset store containing file
:param uploadedFile: file from which to extract metadata
"""
path = os.path.join(assetstore['root'], uploadedFile['path'])
super(ServerMetadataExtractor, self).__init__(path,
uploadedFile['itemId'])
self.userId = uploadedFile['creatorId']
def _setMetadata(self):
"""
Attach metadata to item on server.
"""
super(ServerMetadataExtractor, self)._setMetadata()
item = self.model('item').load(self.itemId, force=True)
self.model('item').setMetadata(item, self.metadata)
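# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical client-side run; it assumes a reachable Girder server, the
# girder_client package, and an existing item. Every name below (URL,
# credentials, file path, item ID) is an example only.
if __name__ == '__main__':
    from girder_client import GirderClient
    client = GirderClient(apiUrl='http://localhost:8080/api/v1')  # example server
    client.authenticate('admin', 'password')                      # placeholder credentials
    extractor = ClientMetadataExtractor(client, '/tmp/photo.jpg', 'ITEM_ID')
    extractor.extractMetadata()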
| opadron/girder | plugins/metadata_extractor/server/metadata_extractor.py | Python | apache-2.0 | 4,146 |
"""
Django settings for gateway project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Note: the trailing commas keep these settings tuples (a bare parenthesised
# string would otherwise be treated as a plain string).
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, "gateway/views"),
)
FIXTURE_DIRS = (
    os.path.join(BASE_DIR, "gateway/fixture"),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8510tc3uzde_3d_4(*97f73*(za7&&7@&%137-@eg2%+*2=3b4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.core.mail',
'registration',
'widget_tweaks',
'gateway',
'haystack'
)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'configuration.urls'
WSGI_APPLICATION = 'configuration.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'gateway',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '',
'OPTIONS': {
# 'init_command' : 'SET storage_engine=MyISAM',
}
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Phoenix'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "bootstrap/dist"),
)
STATIC_URL = 'static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'gateway/static')
# Email Server Settings --can change
SITE_ID = 'gatewaygame.com'
ACCOUNT_ACTIVATION_DAYS = 4
LOGIN_REDIRECT_URL = '/accounts/login'
# Email Server Settings --dont change
EMAIL_HOST = 'mail.gatewaygame.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_USE_SSL = True
EMAIL_HOST_USER = 'registration@gatewaygame.com'
EMAIL_HOST_PASSWORD = 'TeamHEntrepreneurship'
DEFAULT_FROM_EMAIL = 'registration@gatewaygame.com'
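# --- Illustrative usage note (not part of the original file) ---
# A hypothetical way to point Django at this settings module when running the
# development server (the module path is an assumption based on ROOT_URLCONF):
#
#   DJANGO_SETTINGS_MODULE=configuration.settings python manage.py runserver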
| davisk/gateway | configuration/settings.py | Python | mit | 3,183 |
"""
Get Information about the fields in a excel table
"""
def get_columns_position(sheet_object, cols_name):
"""
    Given the column names, find each column's position in the sheet
cols_position = {
column_name: position_index, ...
}
"""
from gasp import goToList
cols_name = goToList(cols_name)
if not cols_name:
raise ValueError('cols_name should be a string or a list')
cols_position = {}
for col in range(sheet_object.ncols):
name = str(sheet_object.cell(0, col).value)
if name in cols_name:
cols_position[name] = col
return cols_position
def get_columns_position_outside_options(sheet_obj, cols_name):
"""
Return a dict with the name and position of cols not in cols_name
cols_position = {
column_name: position_index, ...
}
"""
cols_position = {}
for col in range(sheet_obj.ncols):
name = str(sheet_obj.cell(0, col).value)
if name not in cols_name:
cols_position[name] = col
if u'' in cols_position.keys():
del cols_position[u'']
if '' in cols_position.keys():
del cols_position['']
return cols_position
def columns_by_order(xls_file, sheet_name=None, sheet_index=None):
import xlrd
from gasp.xls.sheet import get_sheet_obj
try:
xls = xlrd.open_workbook(xls_file)
sheet = get_sheet_obj(xls_file, name=sheet_name, index=sheet_index)
except:
sheet = xls_file
colname = [sheet.cell(0, x).value for x in range(sheet.ncols)]
try:
xls.release_resources()
except:
pass
return colname
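# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical quick check; 'table.xls' and the column names are examples only,
# and xlrd is the same dependency already used by columns_by_order().
if __name__ == '__main__':
    import xlrd
    sheet = xlrd.open_workbook('table.xls').sheet_by_index(0)
    print(get_columns_position(sheet, ['id', 'name']))   # e.g. {'id': 0, 'name': 2}
    print(columns_by_order('table.xls', sheet_index=0))  # all header names, in order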
| JoaquimPatriarca/senpy-for-gis | gasp/xls/fields.py | Python | gpl-3.0 | 1,736 |
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from repanier.const import PRODUCT_ORDER_UNIT_DEPOSIT
from repanier.models.product import Product
from rest_framework import serializers
from rest_framework.fields import DecimalField
class ProductSerializer(serializers.ModelSerializer):
producer_name = serializers.ReadOnlyField(source="producer.short_profile_name")
department_for_customer = serializers.ReadOnlyField(
source="department_for_customer.short_name_v2"
)
label = serializers.StringRelatedField(
source="production_mode", read_only=True, many=True
)
customer_unit_price_amount = DecimalField(
max_digits=8,
decimal_places=2,
source="customer_unit_price.amount",
read_only=True,
)
unit_deposit_amount = DecimalField(
max_digits=8, decimal_places=2, source="unit_deposit.amount", read_only=True
)
class Meta:
model = Product
fields = (
"reference",
"is_active",
"is_into_offer",
"producer_name",
"long_name",
"department_for_customer",
"order_unit",
"get_order_unit_display",
"order_average_weight",
"customer_unit_price_amount",
"unit_deposit_amount",
"vat_level",
"get_vat_level_display",
"customer_minimum_order_quantity",
"customer_increment_order_quantity",
"wrapped",
"stock",
"label",
"picture2",
"offer_description_v2",
)
@csrf_exempt
def products_rest(request, producer_short_profile_name):
"""
    List all products of the given producer (read-only endpoint).
"""
if request.method == "GET":
products = Product.objects.filter(
producer__short_profile_name=producer_short_profile_name.decode(
"unicode-escape"
),
order_unit__lte=PRODUCT_ORDER_UNIT_DEPOSIT,
)
serializer = ProductSerializer(products, many=True)
return JsonResponse(serializer.data)
return HttpResponse(status=400)
@csrf_exempt
def product_rest(request, producer_short_profile_name, reference):
"""
    Retrieve a single product of the given producer by its reference (read-only endpoint).
"""
if request.method == "GET":
product = Product.objects.filter(
reference=reference,
producer__short_profile_name=producer_short_profile_name.decode(
"unicode-escape"
),
order_unit__lte=PRODUCT_ORDER_UNIT_DEPOSIT,
).first()
if product is not None:
serializer = ProductSerializer(product)
return JsonResponse(serializer.data)
return HttpResponse(status=404)
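# --- Illustrative wiring sketch (not part of the original module) ---
# Hypothetical urls.py entries showing how these two views could be exposed;
# the routes actually used by repanier may differ.
#
#   from django.urls import re_path
#   from repanier.rest.product import product_rest, products_rest
#
#   urlpatterns = [
#       re_path(r'^rest/products/(?P<producer_short_profile_name>[^/]+)/$', products_rest),
#       re_path(r'^rest/product/(?P<producer_short_profile_name>[^/]+)/(?P<reference>[^/]+)/$',
#               product_rest),
#   ]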
| pcolmant/repanier | repanier/rest/product.py | Python | gpl-3.0 | 2,827 |
#!/usr/bin/env python3
#
# This is a VERY simple, non-elegant Python script to scrape a Twitter account's
# followers.
# You'll need your OWN Twitter OAuth keys; log in to http://apps.twitter.com
# to set this up. You'll ALSO need to install the Python tweepy module,
# because we use tweepy to interact with the Twitter API.
#
#
#
# A couple of caveats: Twitter's API throttles certain requests, so when using
# this script on a user with lots of followers you WILL hit the "rate limit".
# At this point the script will stop and wait 15 minutes for the next "window".
# Please DO NOT quit the script; just let it do its thing... it WILL resume
# and continue scraping.
#
#
#
import sys  # needed for sys.exit() below
import time
import tweepy
# Replace mykey and mysecret with your application's consumer key and secret.
mykey = 'your key'
mysecret = 'your secret'
print ("Authenticating")
auth = tweepy.AppAuthHandler(mykey, mysecret)
api = tweepy.API(auth, wait_on_rate_limit=True,wait_on_rate_limit_notify=True)
if (not api):
print ("Can't Authenticate")
sys.exit(-1)
userINP = input ('Enter the Twitter screen name: ')
#This writes a file named 'USERNAME_YOU_ENTERED.txt' to the same directory you
# have the script in - you can give it a path, if you like!
with open('{}.txt'.format(userINP),'w') as list:
# Login
if(api.verify_credentials):
print ('We are logged in!')
user = tweepy.Cursor(api.followers, screen_name=userINP).items()
x=1
while True:
try:
u = next(user, False)
list.write('https://twitter.com/intent/user?user_id={} -- screen name is @{}\n'.format(u.id,u.screen_name))
print ('Scraped user #{}'.format(x))
print ('Follower twitter.com/{}'.format(u.screen_name))
x+=1
except KeyboardInterrupt:
print (' <--- You pressed Ctrl-C - Program terminated. Incomplete list written to file')
quit()
except tweepy.error.TweepError:
print ('Houston, We have a problem! We may have reached the Twatter API rate limit')
print ('If you get this right away - you can\'t scrape this user- Try another! \n')
time.sleep(15*60)
#continue
except:
if u is False:
print ('Done! Now pastebin the contents of the list in this directory!')
print ('Post the pastebin url in #opISIS-Targets')
print ('for addition to the botnet...')
print ('Have a coffee and do it again!')
print ('Don\'t run the script too many times consecutively')
print ('or you\'ll run into rate limit problems.. 4 or 5 times an hour!')
quit()
| m00cat/twatterScripts | twatFollowersScraper.py | Python | gpl-3.0 | 2,733 |