code | repo_name | path | language | license | size |
---|---|---|---|---|---|
# stdlib
from collections import defaultdict
from datetime import datetime, timedelta
from itertools import islice
from urlparse import urljoin
# project
from checks import AgentCheck
# 3p
import requests
class ConsulCheck(AgentCheck):
CONSUL_CHECK = 'consul.up'
HEALTH_CHECK = 'consul.check'
CONSUL_CATALOG_CHECK = 'consul.catalog'
SOURCE_TYPE_NAME = 'consul'
MAX_CONFIG_TTL = 300 # seconds
MAX_SERVICES = 50 # cap on distinct Consul ServiceIDs to interrogate
STATUS_SC = {
'up': AgentCheck.OK,
'passing': AgentCheck.OK,
'warning': AgentCheck.WARNING,
'critical': AgentCheck.CRITICAL,
}
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
if instances is not None and len(instances) > 1:
raise Exception("Consul check only supports one configured instance.")
self._local_config = None
self._last_config_fetch_time = None
self._last_known_leader = None
def consul_request(self, instance, endpoint):
url = urljoin(instance.get('url'), endpoint)
try:
clientcertfile = instance.get('client_cert_file', self.init_config.get('client_cert_file', False))
privatekeyfile = instance.get('private_key_file', self.init_config.get('private_key_file', False))
cabundlefile = instance.get('ca_bundle_file', self.init_config.get('ca_bundle_file', True))
if clientcertfile:
if privatekeyfile:
resp = requests.get(url, cert=(clientcertfile,privatekeyfile), verify=cabundlefile)
else:
resp = requests.get(url, cert=clientcertfile, verify=cabundlefile)
else:
resp = requests.get(url, verify=cabundlefile)
except requests.exceptions.Timeout:
self.log.exception('Consul request to {0} timed out'.format(url))
raise
resp.raise_for_status()
return resp.json()
### Consul Config Accessors
def _get_local_config(self, instance):
if not self._local_config or datetime.now() - self._last_config_fetch_time > timedelta(seconds=self.MAX_CONFIG_TTL):
self._local_config = self.consul_request(instance, '/v1/agent/self')
self._last_config_fetch_time = datetime.now()
return self._local_config
def _get_cluster_leader(self, instance):
return self.consul_request(instance, '/v1/status/leader')
def _get_agent_url(self, instance):
self.log.debug("Starting _get_agent_url")
local_config = self._get_local_config(instance)
agent_addr = local_config.get('Config', {}).get('AdvertiseAddr')
agent_port = local_config.get('Config', {}).get('Ports', {}).get('Server')
agent_url = "{0}:{1}".format(agent_addr, agent_port)
self.log.debug("Agent url is %s" % agent_url)
return agent_url
def _get_agent_datacenter(self, instance):
local_config = self._get_local_config(instance)
agent_dc = local_config.get('Config', {}).get('Datacenter')
return agent_dc
### Consul Leader Checks
def _is_instance_leader(self, instance):
try:
agent_url = self._get_agent_url(instance)
leader = self._last_known_leader or self._get_cluster_leader(instance)
self.log.debug("Consul agent lives at %s . Consul Leader lives at %s" % (agent_url,leader))
return agent_url == leader
except Exception:
return False
def _check_for_leader_change(self, instance):
agent_dc = self._get_agent_datacenter(instance)
leader = self._get_cluster_leader(instance)
if not leader:
# A few things could be happening here.
# 1. Consul Agent is Down
# 2. The cluster is in the midst of a leader election
# 3. The Datadog agent is not able to reach the Consul instance (network partition et al.)
self.log.warn('Consul Leader information is not available!')
return
if not self._last_known_leader:
# We have no state preserved, store some and return
self._last_known_leader = leader
return
if leader != self._last_known_leader:
self.log.info(('Leader change from {0} to {1}. Sending new leader event').format(
self._last_known_leader, leader))
self.event({
"timestamp": int(datetime.now().strftime("%s")),
"event_type": "consul.new_leader",
"source_type_name": self.SOURCE_TYPE_NAME,
"msg_title": "New Consul Leader Elected in consul_datacenter:{0}".format(agent_dc),
"aggregation_key": "consul.new_leader",
"msg_text": "The Node at {0} is the new leader of the consul datacenter {1}".format(
leader,
agent_dc
),
"tags": ["prev_consul_leader:{0}".format(self._last_known_leader),
"curr_consul_leader:{0}".format(leader),
"consul_datacenter:{0}".format(agent_dc)]
})
self._last_known_leader = leader
### Consul Catalog Accessors
def get_peers_in_cluster(self, instance):
return self.consul_request(instance, '/v1/status/peers')
def get_services_in_cluster(self, instance):
return self.consul_request(instance, '/v1/catalog/services')
def get_nodes_with_service(self, instance, service):
consul_request_url = '/v1/health/service/{0}'.format(service)
return self.consul_request(instance, consul_request_url)
def _cull_services_list(self, services, service_whitelist):
if service_whitelist:
if len(service_whitelist) > self.MAX_SERVICES:
self.warning('More than %d services in whitelist. Service list will be truncated.' % self.MAX_SERVICES)
services = [s for s in services if s in service_whitelist][:self.MAX_SERVICES]
else:
if len(services) <= self.MAX_SERVICES:
self.warning('Consul service whitelist not defined. Agent will poll for all %d services found' % len(services))
else:
self.warning('Consul service whitelist not defined. Agent will poll for at most %d services' % self.MAX_SERVICES)
services = list(islice(services.iterkeys(), 0, self.MAX_SERVICES))
return services
def check(self, instance):
perform_new_leader_checks = instance.get('new_leader_checks',
self.init_config.get('new_leader_checks', False))
if perform_new_leader_checks:
self._check_for_leader_change(instance)
peers = self.get_peers_in_cluster(instance)
main_tags = []
agent_dc = self._get_agent_datacenter(instance)
if agent_dc is not None:
main_tags.append('consul_datacenter:{0}'.format(agent_dc))
if not self._is_instance_leader(instance):
self.gauge("consul.peers", len(peers), tags=main_tags + ["mode:follower"])
self.log.debug("This consul agent is not the cluster leader." +
"Skipping service and catalog checks for this instance")
return
else:
self.gauge("consul.peers", len(peers), tags=main_tags + ["mode:leader"])
service_check_tags = ['consul_url:{0}'.format(instance.get('url'))]
perform_catalog_checks = instance.get('catalog_checks',
self.init_config.get('catalog_checks'))
try:
# Make service checks from health checks for all services in catalog
health_state = self.consul_request(instance, '/v1/health/state/any')
for check in health_state:
status = self.STATUS_SC.get(check['Status'])
if status is None:
continue
tags = ["check:{0}".format(check["CheckID"])]
if check["ServiceName"]:
tags.append("service:{0}".format(check["ServiceName"]))
if check["ServiceID"]:
tags.append("consul_service_id:{0}".format(check["ServiceID"]))
self.service_check(self.HEALTH_CHECK, status, tags=main_tags+tags)
except Exception as e:
self.log.error(e)
self.service_check(self.CONSUL_CHECK, AgentCheck.CRITICAL,
tags=service_check_tags)
else:
self.service_check(self.CONSUL_CHECK, AgentCheck.OK,
tags=service_check_tags)
if perform_catalog_checks:
# Collect node by service, and service by node counts for a whitelist of services
services = self.get_services_in_cluster(instance)
service_whitelist = instance.get('service_whitelist',
self.init_config.get('service_whitelist', []))
services = self._cull_services_list(services, service_whitelist)
# {node_id: {"up: 0, "passing": 0, "warning": 0, "critical": 0}
nodes_to_service_status = defaultdict(lambda: defaultdict(int))
for service in services:
# For every service in the cluster,
# Gauge the following:
# `consul.catalog.nodes_up` : # of Nodes registered with that service
# `consul.catalog.nodes_passing` : # of Nodes with service status `passing` from those registered
# `consul.catalog.nodes_warning` : # of Nodes with service status `warning` from those registered
# `consul.catalog.nodes_critical` : # of Nodes with service status `critical` from those registered
service_tags = ['consul_service_id:{0}'.format(service)]
nodes_with_service = self.get_nodes_with_service(instance, service)
# {'up': 0, 'passing': 0, 'warning': 0, 'critical': 0}
node_status = defaultdict(int)
for node in nodes_with_service:
# The node_id is node['Node']['Node']
node_id = node.get('Node', {}).get("Node")
# An additional service is registered on this node. Bump up the counter
nodes_to_service_status[node_id]["up"] += 1
# If there is no Check for the node then Consul and dd-agent consider it up
if 'Checks' not in node:
node_status['passing'] += 1
node_status['up'] += 1
else:
found_critical = False
found_warning = False
found_serf_health = False
for check in node['Checks']:
if check['CheckID'] == 'serfHealth':
found_serf_health = True
# For backwards compatibility, the "up" node_status is computed
# based on the total # of nodes 'running' as part of the service.
# If the serfHealth is `critical` it means the Consul agent isn't even responding,
# and we don't register the node as `up`
if check['Status'] != 'critical':
node_status["up"] += 1
continue
if check['Status'] == 'critical':
found_critical = True
break
elif check['Status'] == 'warning':
found_warning = True
# Keep looping in case there is a critical status
# Increment the counters based on what was found in Checks
# `critical` checks override `warning`s, and if neither are found, register the node as `passing`
if found_critical:
node_status['critical'] += 1
nodes_to_service_status[node_id]["critical"] += 1
elif found_warning:
node_status['warning'] += 1
nodes_to_service_status[node_id]["warning"] += 1
else:
if not found_serf_health:
# We have not found a serfHealth check for this node, which is unexpected
# If we get here assume this node's status is "up", since we register it as 'passing'
node_status['up'] += 1
node_status['passing'] += 1
nodes_to_service_status[node_id]["passing"] += 1
for status_key in self.STATUS_SC:
status_value = node_status[status_key]
self.gauge(
'{0}.nodes_{1}'.format(self.CONSUL_CATALOG_CHECK, status_key),
status_value,
tags=main_tags+service_tags
)
for node, service_status in nodes_to_service_status.iteritems():
# For every node discovered for whitelisted services, gauge the following:
# `consul.catalog.services_up` : Total services registered on node
# `consul.catalog.services_passing` : Total passing services on node
# `consul.catalog.services_warning` : Total warning services on node
# `consul.catalog.services_critical` : Total critical services on node
node_tags = ['consul_node_id:{0}'.format(node)]
self.gauge('{0}.services_up'.format(self.CONSUL_CATALOG_CHECK),
len(services),
tags=main_tags+node_tags)
for status_key in self.STATUS_SC:
status_value = service_status[status_key]
self.gauge(
'{0}.services_{1}'.format(self.CONSUL_CATALOG_CHECK, status_key),
status_value,
tags=main_tags+node_tags
)
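# A minimal, self-contained sketch of how the pieces above behave, assuming
# AgentCheck can be instantiated with empty init_config/agentConfig dicts
# (which may not hold for every agent version); the URL and service names
# below are illustrative only.
if __name__ == '__main__':
    check = ConsulCheck('consul', {}, {}, [{'url': 'http://localhost:8500'}])
    services = {'web': [], 'db': [], 'cache': []}
    # With no whitelist, every service is kept, up to MAX_SERVICES.
    print(check._cull_services_list(services, []))
    # With a whitelist, only whitelisted services survive the cull.
    print(check._cull_services_list(services, ['web']))
    # Consul health statuses map onto Datadog service check statuses.
    print(ConsulCheck.STATUS_SC['passing'] == AgentCheck.OK)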
| huhongbo/dd-agent | checks.d/consul.py | Python | bsd-3-clause | 14,551 |
source = '''# line 1
'A module docstring.'
import sys, inspect
# line 5
# line 7
def spam(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h):
eggs(b + d, c + f)
# line 11
def eggs(x, y):
"A docstring."
global fr, st
fr = inspect.currentframe()
st = inspect.stack()
p = x
q = y / 0
# line 20
class StupidGit:
"""A longer,
indented
docstring."""
# line 27
def abuse(self, a, b, c):
"""Another
\tdocstring
containing
\ttabs
\t
"""
self.argue(a, b, c)
# line 40
def argue(self, a, b, c):
try:
spam(a, b, c)
except:
self.ex = sys.exc_info()
self.tr = inspect.trace()
# line 48
class MalodorousPervert(StupidGit):
pass
class ParrotDroppings:
pass
class FesteringGob(MalodorousPervert, ParrotDroppings):
pass
'''
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, getmembers, getdoc, getfile, getmodule,
# getsourcefile, getcomments, getsource, getclasstree, getargspec,
# getargvalues, formatargspec, formatargvalues, currentframe, stack, trace
# isdatadescriptor
from test.test_support import TestFailed, TESTFN
import sys, imp, os, string
def test(assertion, message, *args):
if not assertion:
raise TestFailed, message % args
import inspect
file = open(TESTFN, 'w')
file.write(source)
file.close()
# Note that load_source creates file TESTFN+'c' or TESTFN+'o'.
mod = imp.load_source('testmod', TESTFN)
files_to_clean_up = [TESTFN, TESTFN + 'c', TESTFN + 'o']
def istest(func, exp):
obj = eval(exp)
test(func(obj), '%s(%s)' % (func.__name__, exp))
for other in [inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback]:
if other is not func:
test(not other(obj), 'not %s(%s)' % (other.__name__, exp))
git = mod.StupidGit()
try:
1/0
except:
tb = sys.exc_traceback
istest(inspect.isbuiltin, 'sys.exit')
istest(inspect.isbuiltin, '[].append')
istest(inspect.isclass, 'mod.StupidGit')
istest(inspect.iscode, 'mod.spam.func_code')
istest(inspect.isframe, 'tb.tb_frame')
istest(inspect.isfunction, 'mod.spam')
istest(inspect.ismethod, 'mod.StupidGit.abuse')
istest(inspect.ismethod, 'git.argue')
istest(inspect.ismodule, 'mod')
istest(inspect.istraceback, 'tb')
import __builtin__
istest(inspect.isdatadescriptor, '__builtin__.file.closed')
istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
test(inspect.isroutine(mod.spam), 'isroutine(mod.spam)')
test(inspect.isroutine([].count), 'isroutine([].count)')
classes = inspect.getmembers(mod, inspect.isclass)
test(classes ==
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)], 'class list')
tree = inspect.getclasstree(map(lambda x: x[1], classes), 1)
test(tree ==
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert, mod.ParrotDroppings))
]
]
], 'class tree')
functions = inspect.getmembers(mod, inspect.isfunction)
test(functions == [('eggs', mod.eggs), ('spam', mod.spam)], 'function list')
test(inspect.getdoc(mod) == 'A module docstring.', 'getdoc(mod)')
test(inspect.getcomments(mod) == '# line 1\n', 'getcomments(mod)')
test(inspect.getmodule(mod.StupidGit) == mod, 'getmodule(mod.StupidGit)')
test(inspect.getfile(mod.StupidGit) == TESTFN, 'getfile(mod.StupidGit)')
test(inspect.getsourcefile(mod.spam) == TESTFN, 'getsourcefile(mod.spam)')
test(inspect.getsourcefile(git.abuse) == TESTFN, 'getsourcefile(git.abuse)')
def sourcerange(top, bottom):
lines = string.split(source, '\n')
return string.join(lines[top-1:bottom], '\n') + '\n'
test(inspect.getsource(git.abuse) == sourcerange(29, 39),
'getsource(git.abuse)')
test(inspect.getsource(mod.StupidGit) == sourcerange(21, 46),
'getsource(mod.StupidGit)')
test(inspect.getdoc(mod.StupidGit) ==
'A longer,\n\nindented\n\ndocstring.', 'getdoc(mod.StupidGit)')
test(inspect.getdoc(git.abuse) ==
'Another\n\ndocstring\n\ncontaining\n\ntabs', 'getdoc(git.abuse)')
test(inspect.getcomments(mod.StupidGit) == '# line 20\n',
'getcomments(mod.StupidGit)')
git.abuse(7, 8, 9)
istest(inspect.istraceback, 'git.ex[2]')
istest(inspect.isframe, 'mod.fr')
test(len(git.tr) == 3, 'trace() length')
test(git.tr[0][1:] == (TESTFN, 46, 'argue',
[' self.tr = inspect.trace()\n'], 0),
'trace() row 1')
test(git.tr[1][1:] == (TESTFN, 9, 'spam', [' eggs(b + d, c + f)\n'], 0),
'trace() row 2')
test(git.tr[2][1:] == (TESTFN, 18, 'eggs', [' q = y / 0\n'], 0),
'trace() row 3')
test(len(mod.st) >= 5, 'stack() length')
test(mod.st[0][1:] ==
(TESTFN, 16, 'eggs', [' st = inspect.stack()\n'], 0),
'stack() row 1')
test(mod.st[1][1:] ==
(TESTFN, 9, 'spam', [' eggs(b + d, c + f)\n'], 0),
'stack() row 2')
test(mod.st[2][1:] ==
(TESTFN, 43, 'argue', [' spam(a, b, c)\n'], 0),
'stack() row 3')
test(mod.st[3][1:] ==
(TESTFN, 39, 'abuse', [' self.argue(a, b, c)\n'], 0),
'stack() row 4')
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
test(args == ['x', 'y'], 'mod.fr args')
test(varargs == None, 'mod.fr varargs')
test(varkw == None, 'mod.fr varkw')
test(locals == {'x': 11, 'p': 11, 'y': 14}, 'mod.fr locals')
test(inspect.formatargvalues(args, varargs, varkw, locals) ==
'(x=11, y=14)', 'mod.fr formatted argvalues')
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.fr.f_back args')
test(varargs == 'g', 'mod.fr.f_back varargs')
test(varkw == 'h', 'mod.fr.f_back varkw')
test(inspect.formatargvalues(args, varargs, varkw, locals) ==
'(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})',
'mod.fr.f_back formatted argvalues')
for fname in files_to_clean_up:
try:
os.unlink(fname)
except:
pass
# Test classic-class method resolution order.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
test(expected == got, "expected %r mro, got %r", expected, got)
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
test(expected == got, "expected %r mro, got %r", expected, got)
# Test classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class A:
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', A) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', C) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', D) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# Repeat all that, but w/ new-style classes.
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', A) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', C) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', D) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
args, varargs, varkw, defaults = inspect.getargspec(mod.eggs)
test(args == ['x', 'y'], 'mod.eggs args')
test(varargs == None, 'mod.eggs varargs')
test(varkw == None, 'mod.eggs varkw')
test(defaults == None, 'mod.eggs defaults')
test(inspect.formatargspec(args, varargs, varkw, defaults) ==
'(x, y)', 'mod.eggs formatted argspec')
args, varargs, varkw, defaults = inspect.getargspec(mod.spam)
test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.spam args')
test(varargs == 'g', 'mod.spam varargs')
test(varkw == 'h', 'mod.spam varkw')
test(defaults == (3, (4, (5,))), 'mod.spam defaults')
test(inspect.formatargspec(args, varargs, varkw, defaults) ==
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)',
'mod.spam formatted argspec')
args, varargs, varkw, defaults = inspect.getargspec(A.m)
test(args == ['self'], 'A.m args')
test(varargs is None, 'A.m varargs')
test(varkw is None, 'A.m varkw')
test(defaults is None, 'A.m defaults')
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/test/test_inspect.py | Python | mit | 11,556 |
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from pysped.xml_sped import (ABERTURA, NAMESPACE_NFE, Signature, TagCaracter,
TagData, TagDecimal, TagHora, TagInteiro, XMLNFe)
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_1 as ESQUEMA_ATUAL
import os
DIRNAME = os.path.dirname(__file__)
class ISSQN(XMLNFe):
def __init__(self):
super(ISSQN, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='U02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN')
self.vAliq = TagDecimal(nome='vAliq' , codigo='U03', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN')
self.vISSQN = TagDecimal(nome='vISSQN' , codigo='U04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN')
self.cMunFG = TagInteiro(nome='cMunFG' , codigo='U05', tamanho=[7, 7, 7], raiz='//det/imposto/ISSQN')
self.cListServ = TagInteiro(nome='cListServ', codigo='U06', tamanho=[3, 4] , raiz='//det/imposto/ISSQN')
def get_xml(self):
if not (self.vBC.valor or self.vAliq.valor or self.vISSQN.valor or self.cMunFG.valor or self.cListServ.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<ISSQN>'
xml += self.vBC.xml
xml += self.vAliq.xml
xml += self.vISSQN.xml
xml += self.cMunFG.xml
xml += self.cListServ.xml
xml += '</ISSQN>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBC.xml = arquivo
self.vAliq.xml = arquivo
self.vISSQN.xml = arquivo
self.cMunFG.xml = arquivo
self.cListServ.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBC.valor or self.vAliq.valor or self.vISSQN.valor or self.cMunFG.valor or self.cListServ.valor):
return ''
txt = 'U|'
txt += self.vBC.txt + '|'
txt += self.vAliq.txt + '|'
txt += self.vISSQN.txt + '|'
txt += self.cMunFG.txt + '|'
txt += self.cListServ.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class COFINSST(XMLNFe):
def __init__(self):
super(COFINSST, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='T02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/COFINS/COFINSST')
self.pCOFINS = TagDecimal(nome='pCOFINS' , codigo='T03', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//det/imposto/COFINS/COFINSST')
self.qBCProd = TagDecimal(nome='qBCProd' , codigo='T04', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/imposto/COFINS/COFINSST')
self.vAliqProd = TagDecimal(nome='vAliqProd', codigo='T05', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/imposto/COFINS/COFINSST')
self.vCOFINS = TagDecimal(nome='vCOFINS' , codigo='T06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/COFINS/COFINSST')
def get_xml(self):
if not (self.vBC.valor or self.pCOFINS.valor or self.qBCProd.valor or self.vAliqProd.valor or self.vCOFINS.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<COFINSST>'
if self.qBCProd.valor or self.vAliqProd.valor:
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
else:
xml += self.vBC.xml
xml += self.pCOFINS.xml
xml += self.vCOFINS.xml
xml += '</COFINSST>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBC.xml = arquivo
self.pCOFINS.xml = arquivo
self.qBCProd.xml = arquivo
self.vAliqProd.xml = arquivo
self.vCOFINS.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBC.valor or self.pCOFINS.valor or self.qBCProd.valor or self.vAliqProd.valor or self.vCOFINS.valor):
return ''
txt = 'T|'
txt += self.pCOFINS.txt + '|'
txt += '\n'
if self.qBCProd.valor or self.vAliqProd.valor:
txt += 'T02|'
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
else:
txt += 'T04|'
txt += self.vBC.txt + '|'
txt += self.pCOFINS.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class TagCSTCOFINS(TagCaracter):
def __init__(self, *args, **kwargs):
super(TagCSTCOFINS, self).__init__(*args, **kwargs)
self.nome = 'CST'
self.codigo = 'S06'
self.tamanho = [2, 2]
self.raiz = ''
self.grupo_cofins = None
def set_valor(self, novo_valor):
super(TagCSTCOFINS, self).set_valor(novo_valor)
if not self.grupo_cofins:
return None
#
# We mark all the tags as not required
#
self.grupo_cofins.vBC.obrigatorio = False
self.grupo_cofins.pCOFINS.obrigatorio = False
self.grupo_cofins.vCOFINS.obrigatorio = False
self.grupo_cofins.qBCProd.obrigatorio = False
self.grupo_cofins.vAliqProd.obrigatorio = False
#
# For safety, we zero out the values of the
# COFINS group tags whenever the tax situation
# code is redefined
#
self.grupo_cofins.vBC.valor = '0.00'
self.grupo_cofins.pCOFINS.valor = '0.00'
self.grupo_cofins.vCOFINS.valor = '0.00'
self.grupo_cofins.qBCProd.valor = '0.00'
self.grupo_cofins.vAliqProd.valor = '0.00'
#
# For each tax situation code, we redefine
# the root and which tags of the COFINS
# group are required
#
if self.valor in ('01', '02'):
self.grupo_cofins.nome_tag = 'COFINSAliq'
self.grupo_cofins.nome_tag_txt = 'S02'
self.grupo_cofins.raiz_tag = '//det/imposto/COFINS/COFINSAliq'
self.grupo_cofins.vBC.obrigatorio = True
self.grupo_cofins.pCOFINS.obrigatorio = True
self.grupo_cofins.vCOFINS.obrigatorio = True
#self.grupo_cofins.qBCProd.obrigatorio = True
#self.grupo_cofins.vAliqProd.obrigatorio = True
elif self.valor == '03':
self.grupo_cofins.nome_tag = 'COFINSQtde'
self.grupo_cofins.nome_tag_txt = 'S03'
self.grupo_cofins.raiz_tag = '//det/imposto/COFINS/COFINSQtde'
#self.grupo_cofins.vBC.obrigatorio = True
#self.grupo_cofins.pCOFINS.obrigatorio = True
self.grupo_cofins.vCOFINS.obrigatorio = True
self.grupo_cofins.qBCProd.obrigatorio = True
self.grupo_cofins.vAliqProd.obrigatorio = True
elif self.valor in ('04', '06', '07', '08', '09'):
self.grupo_cofins.nome_tag = 'COFINSNT'
self.grupo_cofins.nome_tag_txt = 'S04'
self.grupo_cofins.raiz_tag = '//det/imposto/COFINS/COFINSNT'
#self.grupo_cofins.vBC.obrigatorio = True
#self.grupo_cofins.pCOFINS.obrigatorio = True
#self.grupo_cofins.vCOFINS.obrigatorio = True
#self.grupo_cofins.qBCProd.obrigatorio = True
#self.grupo_cofins.vAliqProd.obrigatorio = True
else:
self.grupo_cofins.nome_tag = 'COFINSOutr'
self.grupo_cofins.nome_tag_txt = 'S05'
self.grupo_cofins.raiz_tag = '//det/imposto/COFINS/COFINSOutr'
self.grupo_cofins.vBC.obrigatorio = True
self.grupo_cofins.pCOFINS.obrigatorio = True
self.grupo_cofins.vCOFINS.obrigatorio = True
self.grupo_cofins.qBCProd.obrigatorio = True
self.grupo_cofins.vAliqProd.obrigatorio = True
#
# Redefine the root for every tag of the COFINS group
#
self.grupo_cofins.CST.raiz = self.grupo_cofins.raiz_tag
self.grupo_cofins.vBC.raiz = self.grupo_cofins.raiz_tag
self.grupo_cofins.pCOFINS.raiz = self.grupo_cofins.raiz_tag
self.grupo_cofins.vCOFINS.raiz = self.grupo_cofins.raiz_tag
self.grupo_cofins.qBCProd.raiz = self.grupo_cofins.raiz_tag
self.grupo_cofins.vAliqProd.raiz = self.grupo_cofins.raiz_tag
def get_valor(self):
return self._valor_string
valor = property(get_valor, set_valor)
class COFINS(XMLNFe):
def __init__(self):
super(COFINS, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='S07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.pCOFINS = TagDecimal(nome='pCOFINS' , codigo='S08', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vCOFINS = TagDecimal(nome='vCOFINS' , codigo='S11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.qBCProd = TagDecimal(nome='qBCProd' , codigo='S09', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='')
self.vAliqProd = TagDecimal(nome='vAliqProd', codigo='S10', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='')
self.CST = TagCSTCOFINS()
self.CST.grupo_cofins = self
self.CST.valor = '07'
self.nome_tag = 'COFINSNT'
self.nome_tag_txt = 'S04'
self.raiz_tag = '//det/imposto/COFINS/COFINSNT'
def get_xml(self):
#
# Define the tags based on the tax situation code
#
xml = XMLNFe.get_xml(self)
xml += '<COFINS>'
xml += '<' + self.nome_tag + '>'
xml += self.CST.xml
if self.CST.valor in ('01', '02'):
xml += self.vBC.xml
xml += self.pCOFINS.xml
xml += self.vCOFINS.xml
elif self.CST.valor == '03':
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
xml += self.vCOFINS.xml
elif self.CST.valor in ('04', '06', '07', '08', '09'):
pass
else:
if self.qBCProd.valor or self.vAliqProd.valor:
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
else:
xml += self.vBC.xml
xml += self.pCOFINS.xml
xml += self.vCOFINS.xml
xml += '</' + self.nome_tag + '></COFINS>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
#
# To read the COFINS correctly, we first have to find out
# which tax situation group it is in
#
if self._le_noh('//det/imposto/COFINS/COFINSAliq') is not None:
self.CST.valor = '01'
elif self._le_noh('//det/imposto/COFINS/COFINSQtde') is not None:
self.CST.valor = '03'
elif self._le_noh('//det/imposto/COFINS/COFINSNT') is not None:
self.CST.valor = '04'
else:
self.CST.valor = '99'
#
# Now we can safely read the values...
#
self.CST.xml = arquivo
self.vBC.xml = arquivo
self.pCOFINS.xml = arquivo
self.vCOFINS.xml = arquivo
self.qBCProd.xml = arquivo
self.vAliqProd.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'S|\n'
#
# Define the tags based on the tax situation code
#
txt += self.nome_tag_txt + '|'
txt += self.CST.txt + '|'
if self.CST.valor in ('01', '02'):
txt += self.vBC.txt + '|'
txt += self.pCOFINS.txt + '|'
txt += self.vCOFINS.txt + '|'
txt += '\n'
elif self.CST.valor == '03':
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
txt += self.vCOFINS.txt + '|'
txt += '\n'
elif self.CST.valor in ('04', '06', '07', '08', '09'):
txt += '\n'
else:
txt += self.vCOFINS.txt + '|'
txt += '\n'
if self.qBCProd.valor or self.vAliqProd.valor:
txt += 'S09|'
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
else:
txt += 'S07|'
txt += self.vBC.txt + '|'
txt += self.pCOFINS.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
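# A minimal usage sketch of the COFINS group, assuming the Tag* classes accept
# string values as they do elsewhere in this module; the amounts are
# illustrative only. Assigning CST.valor switches the group between
# COFINSAliq, COFINSQtde, COFINSNT and COFINSOutr (see TagCSTCOFINS.set_valor).
if __name__ == '__main__':
    cofins = COFINS()
    print(cofins.nome_tag)        # 'COFINSNT' (default CST '07')
    cofins.CST.valor = '01'       # taxed by rate -> COFINSAliq group
    cofins.vBC.valor = '100.00'
    cofins.pCOFINS.valor = '7.60'
    cofins.vCOFINS.valor = '7.60'
    print(cofins.nome_tag)        # 'COFINSAliq'
    print(cofins.xml)             # <COFINS><COFINSAliq>...</COFINSAliq></COFINS>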
class PISST(XMLNFe):
def __init__(self):
super(PISST, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='R02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/PIS/PISST')
self.pPIS = TagDecimal(nome='pPIS' , codigo='R03', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//det/imposto/PIS/PISST')
self.qBCProd = TagDecimal(nome='qBCProd' , codigo='R04', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/imposto/PIS/PISST')
self.vAliqProd = TagDecimal(nome='vAliqProd', codigo='R05', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/imposto/PIS/PISST')
self.vPIS = TagDecimal(nome='vPIS' , codigo='R06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/PIS/PISST')
def get_xml(self):
if not (self.vBC.valor or self.pPIS.valor or self.qBCProd.valor or self.vAliqProd.valor or self.vPIS.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<PISST>'
if self.qBCProd.valor or self.vAliqProd.valor:
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
else:
xml += self.vBC.xml
xml += self.pPIS.xml
xml += self.vPIS.xml
xml += '</PISST>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBC.xml = arquivo
self.pPIS.xml = arquivo
self.qBCProd.xml = arquivo
self.vAliqProd.xml = arquivo
self.vPIS.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBC.valor or self.pPIS.valor or self.qBCProd.valor or self.vAliqProd.valor or self.vPIS.valor):
return ''
txt = 'R|'
txt += self.pPIS.txt + '|'
txt += '\n'
if self.qBCProd.valor or self.vAliqProd.valor:
txt += 'R02|'
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
else:
txt += 'R04|'
txt += self.vBC.txt + '|'
txt += self.pPIS.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class TagCSTPIS(TagCaracter):
def __init__(self, *args, **kwargs):
super(TagCSTPIS, self).__init__(*args, **kwargs)
self.nome = 'CST'
self.codigo = 'Q06'
self.tamanho = [2, 2]
self.raiz = ''
self.grupo_pis = None
def set_valor(self, novo_valor):
super(TagCSTPIS, self).set_valor(novo_valor)
if not self.grupo_pis:
return None
#
# We mark all the tags as not required
#
self.grupo_pis.vBC.obrigatorio = False
self.grupo_pis.pPIS.obrigatorio = False
self.grupo_pis.vPIS.obrigatorio = False
self.grupo_pis.qBCProd.obrigatorio = False
self.grupo_pis.vAliqProd.obrigatorio = False
#
# For safety, we zero out the values of the
# PIS group tags whenever the tax situation
# code is redefined
#
self.grupo_pis.vBC.valor = '0.00'
self.grupo_pis.pPIS.valor = '0.00'
self.grupo_pis.vPIS.valor = '0.00'
self.grupo_pis.qBCProd.valor = '0.00'
self.grupo_pis.vAliqProd.valor = '0.00'
#
# For each tax situation code, we redefine
# the root and which tags of the PIS
# group are required
#
if self.valor in ('01', '02'):
self.grupo_pis.nome_tag = 'PISAliq'
self.grupo_pis.nome_tag_txt = 'Q02'
self.grupo_pis.raiz_tag = '//det/imposto/PIS/PISAliq'
self.grupo_pis.vBC.obrigatorio = True
self.grupo_pis.pPIS.obrigatorio = True
self.grupo_pis.vPIS.obrigatorio = True
#self.grupo_pis.qBCProd.obrigatorio = True
#self.grupo_pis.vAliqProd.obrigatorio = True
elif self.valor == '03':
self.grupo_pis.nome_tag = 'PISQtde'
self.grupo_pis.nome_tag_txt = 'Q03'
self.grupo_pis.raiz_tag = '//det/imposto/PIS/PISQtde'
#self.grupo_pis.vBC.obrigatorio = True
#self.grupo_pis.pPIS.obrigatorio = True
self.grupo_pis.vPIS.obrigatorio = True
self.grupo_pis.qBCProd.obrigatorio = True
self.grupo_pis.vAliqProd.obrigatorio = True
elif self.valor in ('04', '06', '07', '08', '09'):
self.grupo_pis.nome_tag = 'PISNT'
self.grupo_pis.nome_tag_txt = 'Q04'
self.grupo_pis.raiz_tag = '//det/imposto/PIS/PISNT'
#self.grupo_pis.vBC.obrigatorio = True
#self.grupo_pis.pPIS.obrigatorio = True
#self.grupo_pis.vPIS.obrigatorio = True
#self.grupo_pis.qBCProd.obrigatorio = True
#self.grupo_pis.vAliqProd.obrigatorio = True
else:
self.grupo_pis.nome_tag = 'PISOutr'
self.grupo_pis.nome_tag_txt = 'Q05'
self.grupo_pis.raiz_tag = '//det/imposto/PIS/PISOutr'
self.grupo_pis.vBC.obrigatorio = True
self.grupo_pis.pPIS.obrigatorio = True
self.grupo_pis.vPIS.obrigatorio = True
self.grupo_pis.qBCProd.obrigatorio = True
self.grupo_pis.vAliqProd.obrigatorio = True
#
# Redefine the root for every tag of the PIS group
#
self.grupo_pis.CST.raiz = self.grupo_pis.raiz_tag
self.grupo_pis.vBC.raiz = self.grupo_pis.raiz_tag
self.grupo_pis.pPIS.raiz = self.grupo_pis.raiz_tag
self.grupo_pis.vPIS.raiz = self.grupo_pis.raiz_tag
self.grupo_pis.qBCProd.raiz = self.grupo_pis.raiz_tag
self.grupo_pis.vAliqProd.raiz = self.grupo_pis.raiz_tag
def get_valor(self):
return self._valor_string
valor = property(get_valor, set_valor)
class PIS(XMLNFe):
def __init__(self):
super(PIS, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='Q07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.pPIS = TagDecimal(nome='pPIS' , codigo='Q08', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vPIS = TagDecimal(nome='vPIS' , codigo='Q09', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.qBCProd = TagDecimal(nome='qBCProd' , codigo='Q10', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='')
self.vAliqProd = TagDecimal(nome='vAliqProd', codigo='Q11', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='')
self.CST = TagCSTPIS()
self.CST.grupo_pis = self
self.CST.valor = '07'
self.nome_tag = 'PISNT'
self.nome_tag_txt = 'Q04'
self.raiz_tag = '//det/imposto/PIS/PISNT'
def get_xml(self):
#
# Define the tags based on the tax situation code
#
xml = XMLNFe.get_xml(self)
xml += '<PIS>'
xml += '<' + self.nome_tag + '>'
xml += self.CST.xml
if self.CST.valor in ('01', '02'):
xml += self.vBC.xml
xml += self.pPIS.xml
xml += self.vPIS.xml
elif self.CST.valor == '03':
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
xml += self.vPIS.xml
elif self.CST.valor in ('04', '06', '07', '08', '09'):
pass
else:
if self.qBCProd.valor or self.vAliqProd.valor:
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
else:
xml += self.vBC.xml
xml += self.pPIS.xml
xml += self.vPIS.xml
xml += '</' + self.nome_tag + '></PIS>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
#
# To read the PIS correctly, we first have to find out
# which tax situation group it is in
#
if self._le_noh('//det/imposto/PIS/PISAliq') is not None:
self.CST.valor = '01'
elif self._le_noh('//det/imposto/PIS/PISQtde') is not None:
self.CST.valor = '03'
elif self._le_noh('//det/imposto/PIS/PISNT') is not None:
self.CST.valor = '04'
else:
self.CST.valor = '99'
#
# Now we can safely read the values...
#
self.CST.xml = arquivo
self.vBC.xml = arquivo
self.pPIS.xml = arquivo
self.vPIS.xml = arquivo
self.qBCProd.xml = arquivo
self.vAliqProd.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'Q|\n'
#
# Define the tags based on the tax situation code
#
txt += self.nome_tag_txt + '|'
txt += self.CST.txt + '|'
if self.CST.valor in ('01', '02'):
txt += self.vBC.txt + '|'
txt += self.pPIS.txt + '|'
txt += self.vPIS.txt + '|'
txt += '\n'
elif self.CST.valor == '03':
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
txt += self.vPIS.txt + '|'
txt += '\n'
elif self.CST.valor in ('04', '06', '07', '08', '09'):
txt += '\n'
else:
txt += self.vPIS.txt + '|'
txt += '\n'
if self.qBCProd.valor or self.vAliqProd.valor:
txt += 'Q10|'
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
else:
txt += 'Q07|'
txt += self.vBC.txt + '|'
txt += self.pPIS.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class II(XMLNFe):
def __init__(self):
super(II, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='P02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/II')
self.vDespAdu = TagDecimal(nome='vDespAdu', codigo='P03', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/II')
self.vII = TagDecimal(nome='vII' , codigo='P04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/II')
self.vIOF = TagDecimal(nome='vIOF' , codigo='P05', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/II')
def get_xml(self):
if not (self.vBC.valor or self.vDespAdu.valor or self.vII.valor or self.vIOF.valor):
return ''
#
# Define as tags baseado no código da situação tributária
#
xml = XMLNFe.get_xml(self)
xml += '<II>'
xml += self.vBC.xml
xml += self.vDespAdu.xml
xml += self.vII.xml
xml += self.vIOF.xml
xml += '</II>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBC.xml = arquivo
self.vDespAdu.xml = arquivo
self.vII.xml = arquivo
self.vIOF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBC.valor or self.vDespAdu.valor or self.vII.valor or self.vIOF.valor):
return ''
txt = 'P|'
txt += self.vBC.txt + '|'
txt += self.vDespAdu.txt + '|'
txt += self.vII.txt + '|'
txt += self.vIOF.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class TagCSTIPI(TagCaracter):
def __init__(self, *args, **kwargs):
super(TagCSTIPI, self).__init__(*args, **kwargs)
self.nome = 'CST'
self.codigo = 'O09'
self.tamanho = [2, 2]
self.raiz = ''
self.grupo_ipi = None
def set_valor(self, novo_valor):
super(TagCSTIPI, self).set_valor(novo_valor)
if not self.grupo_ipi:
return None
#
# We mark all the tags as not required
#
self.grupo_ipi.vBC.obrigatorio = False
self.grupo_ipi.qUnid.obrigatorio = False
self.grupo_ipi.vUnid.obrigatorio = False
self.grupo_ipi.pIPI.obrigatorio = False
self.grupo_ipi.vIPI.obrigatorio = False
#
# For safety, we zero out the values of the
# IPI group tags whenever the tax situation
# code is redefined
#
self.grupo_ipi.vBC.valor = '0.00'
self.grupo_ipi.qUnid.valor = '0.00'
self.grupo_ipi.vUnid.valor = '0.00'
self.grupo_ipi.pIPI.valor = '0.00'
self.grupo_ipi.vIPI.valor = '0.00'
#
# For each tax situation code, we redefine
# the root and which tags of the IPI
# group are required
#
if self.valor in ('00', '49', '50', '99'):
self.grupo_ipi.nome_tag = 'IPITrib'
self.grupo_ipi.nome_tag_txt = 'O07'
self.grupo_ipi.raiz_tag = '//det/imposto/IPI/IPITrib'
self.grupo_ipi.vBC.obrigatorio = True
self.grupo_ipi.qUnid.obrigatorio = True
self.grupo_ipi.vUnid.obrigatorio = True
self.grupo_ipi.pIPI.obrigatorio = True
self.grupo_ipi.vIPI.obrigatorio = True
else:
self.grupo_ipi.nome_tag = 'IPINT'
self.grupo_ipi.nome_tag_txt = 'O08'
self.grupo_ipi.raiz_tag = '//det/imposto/IPI/IPINT'
#
# Redefine the root for every tag of the IPI group
#
self.grupo_ipi.CST.raiz = self.grupo_ipi.raiz_tag
self.grupo_ipi.vBC.raiz = self.grupo_ipi.raiz_tag
self.grupo_ipi.qUnid.raiz = self.grupo_ipi.raiz_tag
self.grupo_ipi.vUnid.raiz = self.grupo_ipi.raiz_tag
self.grupo_ipi.pIPI.raiz = self.grupo_ipi.raiz_tag
self.grupo_ipi.vIPI.raiz = self.grupo_ipi.raiz_tag
def get_valor(self):
return self._valor_string
valor = property(get_valor, set_valor)
class IPI(XMLNFe):
def __init__(self):
super(IPI, self).__init__()
self.clEnq = TagCaracter(nome='clEnq' , codigo='O02', tamanho=[ 5, 5], raiz='//det/imposto/IPI', obrigatorio=False)
self.CNPJProd = TagCaracter(nome='CNPJProd', codigo='O03', tamanho=[14, 14], raiz='//det/imposto/IPI', obrigatorio=False)
self.cSelo = TagCaracter(nome='cSelo' , codigo='O04', tamanho=[ 1, 60], raiz='//det/imposto/IPI', obrigatorio=False)
self.qSelo = TagInteiro(nome='qSelo' , codigo='O05', tamanho=[ 1, 12], raiz='//det/imposto/IPI', obrigatorio=False)
self.cEnq = TagCaracter(nome='cEnq' , codigo='O06', tamanho=[ 3, 3], raiz='//det/imposto/IPI', valor='999')
self.vBC = TagDecimal(nome='vBC' , codigo='O10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.qUnid = TagDecimal(nome='qUnid' , codigo='O11', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='')
self.vUnid = TagDecimal(nome='vUnid' , codigo='O12', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='')
self.pIPI = TagDecimal(nome='pIPI' , codigo='O13', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vIPI = TagDecimal(nome='vIPI' , codigo='O14', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.CST = TagCSTIPI()
self.CST.grupo_ipi = self
self.CST.valor = '52'
self.nome_tag = 'IPINT'
self.nome_tag_txt = 'O08'
self.raiz_tag = '//det/imposto/IPI/IPINT'
def get_xml(self):
if not ((self.CST.valor in ('00', '49', '50', '99')) or
(self.qUnid.valor or self.vUnid.valor or self.vBC.valor or self.pIPI.valor or self.vIPI.valor)):
return ''
#
# Define the tags based on the tax situation code
#
xml = XMLNFe.get_xml(self)
xml += '<IPI>'
xml += self.clEnq.xml
xml += self.CNPJProd.xml
xml += self.cSelo.xml
xml += self.qSelo.xml
xml += self.cEnq.xml
xml += '<' + self.nome_tag + '>'
xml += self.CST.xml
if self.CST.valor in ('00', '49', '50', '99'):
if self.qUnid.valor or self.vUnid.valor:
xml += self.qUnid.xml
xml += self.vUnid.xml
else:
xml += self.vBC.xml
xml += self.pIPI.xml
xml += self.vIPI.xml
xml += '</' + self.nome_tag + '></IPI>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
#
# To read the IPI correctly, we first have to find out
# which tax situation group it is in
#
if self._le_noh('//det/imposto/IPI/IPINT') is not None:
self.CST.valor = '01'
else:
self.CST.valor = '00'
#
# Now we can safely read the values...
#
self.CST.xml = arquivo
self.clEnq.xml = arquivo
self.CNPJProd.xml = arquivo
self.cSelo.xml = arquivo
self.qSelo.xml = arquivo
self.cEnq.xml = arquivo
self.vBC.xml = arquivo
self.qUnid.xml = arquivo
self.vUnid.xml = arquivo
self.pIPI.xml = arquivo
self.vIPI.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not ((self.CST.valor in ('00', '49', '50', '99')) or
(self.qUnid.valor or self.vUnid.valor or self.vBC.valor or self.pIPI.valor or self.vIPI.valor)):
return ''
#
# Define the tags based on the tax situation code
#
txt = 'O|\n'
txt += self.clEnq.txt + '|'
txt += self.CNPJProd.txt + '|'
txt += self.cSelo.txt + '|'
txt += self.qSelo.txt + '|'
txt += self.cEnq.txt + '|'
txt += '\n'
#
# Define the tags based on the tax situation code
#
txt += self.nome_tag_txt + '|'
txt += self.CST.txt + '|'
if self.CST.valor not in ('00', '49', '50', '99'):
txt += '\n'
else:
txt += self.vIPI.txt + '|'
txt += '\n'
if self.qUnid.valor or self.vUnid.valor:
txt += 'O10|'
txt += self.qUnid.txt + '|'
txt += self.vUnid.txt + '|'
else:
txt += 'O11|'
txt += self.vBC.txt + '|'
txt += self.pIPI.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
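# A minimal usage sketch of the IPI group, with illustrative amounts only.
# CST values outside ('00', '49', '50', '99') select the non-taxed IPINT
# group, which emits only the CST tag; a taxed CST selects IPITrib with base
# and rate (see TagCSTIPI.set_valor and get_xml above).
if __name__ == '__main__':
    ipi = IPI()
    ipi.CST.valor = '53'          # non-taxed output -> IPINT
    print(ipi.nome_tag)           # 'IPINT'
    ipi.CST.valor = '50'          # taxed output -> IPITrib
    ipi.vBC.valor = '100.00'
    ipi.pIPI.valor = '10.00'
    ipi.vIPI.valor = '10.00'
    print(ipi.xml)                # <IPI>...<IPITrib>...</IPITrib></IPI>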
class TagCSTICMS(TagCaracter):
def __init__(self, *args, **kwargs):
super(TagCSTICMS, self).__init__(*args, **kwargs)
self.nome = 'CST'
self.codigo = 'N12'
self.tamanho = [2, 2]
self.raiz = ''
self.grupo_icms = None
def set_valor(self, novo_valor):
super(TagCSTICMS, self).set_valor(novo_valor)
if not self.grupo_icms:
return None
#
# We mark all the tags as not required
#
self.grupo_icms.modBC.obrigatorio = False
self.grupo_icms.vBC.obrigatorio = False
self.grupo_icms.pRedBC.obrigatorio = False
self.grupo_icms.pICMS.obrigatorio = False
self.grupo_icms.vICMS.obrigatorio = False
self.grupo_icms.modBCST.obrigatorio = False
self.grupo_icms.pMVAST.obrigatorio = False
self.grupo_icms.pRedBCST.obrigatorio = False
self.grupo_icms.vBCST.obrigatorio = False
self.grupo_icms.pICMSST.obrigatorio = False
self.grupo_icms.vICMSST.obrigatorio = False
#
# For safety, we zero out the values of the
# ICMS group tags whenever the tax situation
# code is redefined
#
self.grupo_icms.modBC.valor = 3
self.grupo_icms.vBC.valor = '0.00'
self.grupo_icms.pRedBC.valor = '0.00'
self.grupo_icms.pICMS.valor = '0.00'
self.grupo_icms.vICMS.valor = '0.00'
self.grupo_icms.modBCST.valor = 4
self.grupo_icms.pMVAST.valor = '0.00'
self.grupo_icms.pRedBCST.valor = '0.00'
self.grupo_icms.vBCST.valor = '0.00'
self.grupo_icms.pICMSST.valor = '0.00'
self.grupo_icms.vICMSST.valor = '0.00'
#
# For each tax situation code, we redefine
# the root and which tags of the ICMS
# group are required
#
if self.valor == '00':
self.grupo_icms.nome_tag = 'ICMS00'
self.grupo_icms.nome_tag_txt = 'N02'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS00'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
elif self.valor == '10':
self.grupo_icms.nome_tag = 'ICMS10'
self.grupo_icms.nome_tag_txt = 'N03'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS10'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
elif self.valor == '20':
self.grupo_icms.nome_tag = 'ICMS20'
self.grupo_icms.nome_tag_txt = 'N04'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS20'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pRedBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
elif self.valor == '30':
self.grupo_icms.nome_tag = 'ICMS30'
self.grupo_icms.nome_tag_txt = 'N05'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS30'
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
elif self.valor in ('40', '41', '50'):
self.grupo_icms.nome_tag = 'ICMS40'
self.grupo_icms.nome_tag_txt = 'N06'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS40'
elif self.valor == '51':
self.grupo_icms.nome_tag = 'ICMS51'
self.grupo_icms.nome_tag_txt = 'N07'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS51'
elif self.valor == '60':
self.grupo_icms.nome_tag = 'ICMS60'
self.grupo_icms.nome_tag_txt = 'N08'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS60'
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
elif self.valor == '70':
self.grupo_icms.nome_tag = 'ICMS70'
self.grupo_icms.nome_tag_txt = 'N09'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS70'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pRedBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
elif self.valor == '90':
self.grupo_icms.nome_tag = 'ICMS90'
self.grupo_icms.nome_tag_txt = 'N10'
self.grupo_icms.raiz_tag = '//det/imposto/ICMS/ICMS90'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
#
# Redefine the root for every tag of the ICMS group
#
self.grupo_icms.orig.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.CST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.modBC.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vBC.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pRedBC.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pICMS.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vICMS.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.modBCST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pMVAST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pRedBCST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vBCST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pICMSST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vICMSST.raiz = self.grupo_icms.raiz_tag
def get_valor(self):
return self._valor_string
valor = property(get_valor, set_valor)
class ICMS(XMLNFe):
def __init__(self):
super(ICMS, self).__init__()
self.orig = TagInteiro(nome='orig' , codigo='N11', tamanho=[1, 1, 1], raiz='')
# codigo='N12' is the CST field
self.modBC = TagInteiro(nome='modBC' , codigo='N13', tamanho=[1, 1, 1], raiz='')
self.pRedBC = TagDecimal(nome='pRedBC' , codigo='N14', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vBC = TagDecimal(nome='vBC' , codigo='N15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.pICMS = TagDecimal(nome='pICMS' , codigo='N16', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vICMS = TagDecimal(nome='vICMS' , codigo='N17', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.modBCST = TagInteiro(nome='modBCST' , codigo='N18', tamanho=[1, 1, 1], raiz='')
self.pMVAST = TagDecimal(nome='pMVAST' , codigo='N19', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.pRedBCST = TagDecimal(nome='pRedBCST', codigo='N20', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vBCST = TagDecimal(nome='vBCST' , codigo='N21', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.pICMSST = TagDecimal(nome='pICMSST' , codigo='N22', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
self.vICMSST = TagDecimal(nome='vICMSST' , codigo='N23', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
self.CST = TagCSTICMS()
self.CST.grupo_icms = self
self.CST.valor = '40'
self.nome_tag = 'ICMS40'
self.raiz_tag = '//det/imposto/ICMS/ICMS40'
self.nome_tag_txt = 'N06'
def get_xml(self):
#
# Define the tags based on the tax situation code
#
xml = XMLNFe.get_xml(self)
xml += '<ICMS><' + self.nome_tag + '>'
xml += self.orig.xml
xml += self.CST.xml
if self.CST.valor == '00':
xml += self.modBC.xml
xml += self.vBC.xml
xml += self.pICMS.xml
xml += self.vICMS.xml
elif self.CST.valor == '10':
xml += self.modBC.xml
xml += self.vBC.xml
xml += self.pICMS.xml
xml += self.vICMS.xml
xml += self.modBCST.xml
# Only when the ST base is the value-added margin
if self.modBCST.valor == 4:
xml += self.pMVAST.xml
xml += self.pRedBCST.xml
xml += self.vBCST.xml
xml += self.pICMSST.xml
xml += self.vICMSST.xml
elif self.CST.valor == '20':
xml += self.modBC.xml
xml += self.vBC.xml
xml += self.pRedBC.xml
xml += self.pICMS.xml
xml += self.vICMS.xml
elif self.CST.valor == '30':
xml += self.modBCST.xml
# Only when the ST base is the value-added margin
if self.modBCST.valor == 4:
xml += self.pMVAST.xml
xml += self.pRedBCST.xml
xml += self.vBCST.xml
xml += self.pICMSST.xml
xml += self.vICMSST.xml
elif self.CST.valor in ('40', '41', '50'):
pass
elif self.CST.valor == '51':
xml += self.modBC.xml
xml += self.pRedBC.xml
xml += self.vBC.xml
xml += self.pICMS.xml
xml += self.vICMS.xml
elif self.CST.valor == '60':
xml += self.vBCST.xml
xml += self.vICMSST.xml
elif self.CST.valor == '70':
xml += self.modBC.xml
xml += self.vBC.xml
xml += self.pRedBC.xml
xml += self.pICMS.xml
xml += self.vICMS.xml
xml += self.modBCST.xml
# Only when the ST base is the value-added margin
if self.modBCST.valor == 4:
xml += self.pMVAST.xml
xml += self.pRedBCST.xml
xml += self.vBCST.xml
xml += self.pICMSST.xml
xml += self.vICMSST.xml
elif self.CST.valor == '90':
xml += self.modBC.xml
xml += self.vBC.xml
xml += self.pRedBC.xml
xml += self.pICMS.xml
xml += self.vICMS.xml
xml += self.modBCST.xml
# Only when the ST base is the value-added margin
if self.modBCST.valor == 4:
xml += self.pMVAST.xml
xml += self.pRedBCST.xml
xml += self.vBCST.xml
xml += self.pICMSST.xml
xml += self.vICMSST.xml
xml += '</' + self.nome_tag + '></ICMS>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
#
# To read the ICMS correctly, we first have to find out
# which tax situation group it is in
#
if self._le_noh('//det/imposto/ICMS/ICMS00') is not None:
self.CST.valor = '00'
elif self._le_noh('//det/imposto/ICMS/ICMS10') is not None:
self.CST.valor = '10'
elif self._le_noh('//det/imposto/ICMS/ICMS20') is not None:
self.CST.valor = '20'
elif self._le_noh('//det/imposto/ICMS/ICMS30') is not None:
self.CST.valor = '30'
elif self._le_noh('//det/imposto/ICMS/ICMS40') is not None:
self.CST.valor = '40'
elif self._le_noh('//det/imposto/ICMS/ICMS51') is not None:
self.CST.valor = '51'
elif self._le_noh('//det/imposto/ICMS/ICMS60') is not None:
self.CST.valor = '60'
elif self._le_noh('//det/imposto/ICMS/ICMS70') is not None:
self.CST.valor = '70'
elif self._le_noh('//det/imposto/ICMS/ICMS90') is not None:
self.CST.valor = '90'
            #
            # Now the values can be read safely...
            #
self.orig.xml = arquivo
self.CST.xml = arquivo
self.modBC.xml = arquivo
self.vBC.xml = arquivo
self.pRedBC.xml = arquivo
self.pICMS.xml = arquivo
self.vICMS.xml = arquivo
self.modBCST.xml = arquivo
self.pMVAST.xml = arquivo
self.pRedBCST.xml = arquivo
self.vBCST.xml = arquivo
self.pICMSST.xml = arquivo
self.vICMSST.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
        #
        # Emit the fields according to the tax situation code (CST)
        #
txt = 'N|\n'
txt += self.nome_tag_txt + '|'
txt += self.orig.txt + '|'
txt += self.CST.txt + '|'
if self.CST.valor == '00':
txt += self.modBC.txt + '|'
txt += self.vBC.txt + '|'
txt += self.pICMS.txt + '|'
txt += self.vICMS.txt + '|'
elif self.CST.valor == '10':
txt += self.modBC.txt + '|'
txt += self.vBC.txt + '|'
txt += self.pICMS.txt + '|'
txt += self.vICMS.txt + '|'
txt += self.modBCST.txt + '|'
            # Only when the ST base is determined by the value-added margin
if self.modBCST.valor == 4:
txt += self.pMVAST.txt + '|'
else:
txt += '|'
txt += self.pRedBCST.txt + '|'
txt += self.vBCST.txt + '|'
txt += self.pICMSST.txt + '|'
txt += self.vICMSST.txt + '|'
elif self.CST.valor == '20':
txt += self.modBC.txt + '|'
txt += self.vBC.txt + '|'
txt += self.pRedBC.txt + '|'
txt += self.pICMS.txt + '|'
txt += self.vICMS.txt + '|'
elif self.CST.valor == '30':
txt += self.modBCST.txt + '|'
            # Only when the ST base is determined by the value-added margin
if self.modBCST.valor == 4:
txt += self.pMVAST.txt + '|'
else:
txt += '|'
txt += self.pRedBCST.txt + '|'
txt += self.vBCST.txt + '|'
txt += self.pICMSST.txt + '|'
txt += self.vICMSST.txt + '|'
elif self.CST.valor in ('40', '41', '50'):
pass
elif self.CST.valor == '51':
txt += self.modBC.txt + '|'
txt += self.pRedBC.txt + '|'
txt += self.vBC.txt + '|'
txt += self.pICMS.txt + '|'
txt += self.vICMS.txt + '|'
elif self.CST.valor == '60':
txt += self.vBCST.txt + '|'
txt += self.vICMSST.txt + '|'
elif self.CST.valor == '70':
txt += self.modBC.txt + '|'
txt += self.vBC.txt + '|'
txt += self.pRedBC.txt + '|'
txt += self.pICMS.txt + '|'
txt += self.vICMS.txt + '|'
txt += self.modBCST.txt + '|'
            # Only when the ST base is determined by the value-added margin
if self.modBCST.valor == 4:
txt += self.pMVAST.txt + '|'
else:
txt += '|'
txt += self.pRedBCST.txt + '|'
txt += self.vBCST.txt + '|'
txt += self.pICMSST.txt + '|'
txt += self.vICMSST.txt + '|'
elif self.CST.valor == '90':
txt += self.modBC.txt + '|'
txt += self.vBC.txt + '|'
txt += self.pRedBC.txt + '|'
txt += self.pICMS.txt + '|'
txt += self.vICMS.txt + '|'
txt += self.modBCST.txt + '|'
            # Only when the ST base is determined by the value-added margin
if self.modBCST.valor == 4:
txt += self.pMVAST.txt + '|'
else:
txt += '|'
txt += self.pRedBCST.txt + '|'
txt += self.vBCST.txt + '|'
txt += self.pICMSST.txt + '|'
txt += self.vICMSST.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
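# Tax group <imposto> (record M): aggregates the ICMS, IPI, II, PIS, PIS ST,
# COFINS, COFINS ST and ISSQN sub-groups of a single invoice item.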
class Imposto(XMLNFe):
def __init__(self):
super(Imposto, self).__init__()
self.ICMS = ICMS()
self.IPI = IPI()
self.II = II()
self.PIS = PIS()
self.PISST = PISST()
self.COFINS = COFINS()
self.COFINSST = COFINSST()
self.ISSQN = ISSQN()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<imposto>'
xml += self.ICMS.xml
xml += self.IPI.xml
xml += self.II.xml
xml += self.PIS.xml
xml += self.PISST.xml
xml += self.COFINS.xml
xml += self.COFINSST.xml
xml += self.ISSQN.xml
xml += '</imposto>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.ICMS.xml = arquivo
self.IPI.xml = arquivo
self.II.xml = arquivo
self.PIS.xml = arquivo
self.PISST.xml = arquivo
self.COFINS.xml = arquivo
self.COFINSST.xml = arquivo
self.ISSQN.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'M|\n'
txt += self.ICMS.txt
txt += self.IPI.txt
txt += self.II.txt
txt += self.PIS.txt
txt += self.PISST.txt
txt += self.COFINS.txt
txt += self.COFINSST.txt
txt += self.ISSQN.txt
return txt
txt = property(get_txt)
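# Fuel group <ICMSCons> (record L117): ICMS ST due to the consumption state (UFcons).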
class ICMSCons(XMLNFe):
def __init__(self):
super(ICMSCons, self).__init__()
self.vBCICMSSTCons = TagDecimal(nome='vBCICMSSTCons', codigo='L118', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSCons')
self.vICMSSTCons = TagDecimal(nome='vICMSSTCons' , codigo='L119', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSCons')
self.UFcons = TagCaracter(nome='UFcons' , codigo='L120', tamanho=[2, 2], raiz='//det/prod/comb/ICMSCons')
def get_xml(self):
if not (self.vBCICMSSTCons.valor or self.vICMSSTCons.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<ICMSCons>'
xml += self.vBCICMSSTCons.xml
xml += self.vICMSSTCons.xml
xml += self.UFcons.xml
xml += '</ICMSCons>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBCICMSSTCons.xml = arquivo
self.vICMSSTCons.xml = arquivo
self.UFcons.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBCICMSSTCons.valor or self.vICMSSTCons.valor):
return ''
txt = 'L117|'
txt += self.vBCICMSSTCons.txt + '|'
txt += self.vICMSSTCons.txt + '|'
        txt += self.UFcons.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ICMSInter(XMLNFe):
def __init__(self):
super(ICMSInter, self).__init__()
self.vBCICMSSTDest = TagDecimal(nome='vBCICMSSTDest', codigo='L115', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSInter')
self.vICMSSTDest = TagDecimal(nome='vICMSSTDest' , codigo='L116', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSInter')
def get_xml(self):
if not (self.vBCICMSSTDest.valor or self.vICMSSTDest.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<ICMSInter>'
xml += self.vBCICMSSTDest.xml
xml += self.vICMSSTDest.xml
xml += '</ICMSInter>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBCICMSSTDest.xml = arquivo
self.vICMSSTDest.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBCICMSSTDest.valor or self.vICMSSTDest.valor):
return ''
txt = 'L114|'
txt += self.vBCICMSSTDest.txt + '|'
txt += self.vICMSSTDest.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ICMSComb(XMLNFe):
def __init__(self):
super(ICMSComb, self).__init__()
self.vBCICMS = TagDecimal(nome='vBCICMS' , codigo='L110', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSComb')
self.vICMS = TagDecimal(nome='vICMS' , codigo='L111', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSComb')
self.vBCICMSST = TagDecimal(nome='vBCICMSST', codigo='L112', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSComb')
self.vICMSST = TagDecimal(nome='vICMSST' , codigo='L113', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod/comb/ICMSComb')
def get_xml(self):
if not (self.vBCICMS.valor or self.vICMS.valor or self.vBCICMSST.valor or self.vICMSST.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<ICMSComb>'
xml += self.vBCICMS.xml
xml += self.vICMS.xml
xml += self.vBCICMSST.xml
xml += self.vICMSST.xml
xml += '</ICMSComb>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBCICMS.xml = arquivo
self.vICMS.xml = arquivo
self.vBCICMSST.xml = arquivo
self.vICMSST.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vBCICMS.valor or self.vICMS.valor or self.vBCICMSST.valor or self.vICMSST.valor):
return ''
txt = 'L109|'
txt += self.vBCICMS.txt + '|'
txt += self.vICMS.txt + '|'
txt += self.vBCICMSST.txt + '|'
txt += self.vICMSST.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class CIDE(XMLNFe):
def __init__(self):
super(CIDE, self).__init__()
self.qBCProd = TagDecimal(nome='qBCProd' , codigo='L106', tamanho=[1, 16] , decimais=[0, 4, 4], raiz='//det/prod/comb/CIDE')
self.vAliqProd = TagDecimal(nome='vAliqProd', codigo='L107', tamanho=[1, 15] , decimais=[0, 4, 4], raiz='//det/prod/comb/CIDE')
self.vCIDE = TagDecimal(nome='vCIDE' , codigo='L108', tamanho=[1, 15] , decimais=[0, 2, 2], raiz='//det/prod/comb/CIDE')
def get_xml(self):
if not (self.qBCProd.valor or self.vAliqProd.valor or self.vCIDE.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<CIDE>'
xml += self.qBCProd.xml
xml += self.vAliqProd.xml
xml += self.vCIDE.xml
xml += '</CIDE>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.qBCProd.xml = arquivo
self.vAliqProd.xml = arquivo
self.vCIDE.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.qBCProd.valor or self.vAliqProd.valor or self.vCIDE.valor):
return ''
txt = 'L105|'
txt += self.qBCProd.txt + '|'
txt += self.vAliqProd.txt + '|'
txt += self.vCIDE.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
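# Fuel group <comb> (record L1): fuel-specific product details, with the CIDE
# and ICMS (ICMSComb/ICMSInter/ICMSCons) sub-groups.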
class Comb(XMLNFe):
def __init__(self):
super(Comb, self).__init__()
self.cProdANP = TagInteiro(nome='cProdANP', codigo='L102', tamanho=[9, 9, 9], raiz='//det/prod/comb')
self.CODIF = TagInteiro(nome='CODIF' , codigo='L103', tamanho=[0, 21] , raiz='//det/prod/comb', obrigatorio=False)
self.qTemp = TagDecimal(nome='qTemp' , codigo='L104', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/prod/comb', obrigatorio=False)
self.CIDE = CIDE()
self.ICMSComb = ICMSComb()
self.ICMSInter = ICMSInter()
self.ICMSCons = ICMSCons()
def get_xml(self):
if not self.cProdANP.valor:
return ''
xml = XMLNFe.get_xml(self)
xml += '<comb>'
xml += self.cProdANP.xml
xml += self.CODIF.xml
xml += self.qTemp.xml
xml += self.CIDE.xml
xml += self.ICMSComb.xml
xml += self.ICMSInter.xml
xml += self.ICMSCons.xml
xml += '</comb>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.cProdANP.xml = arquivo
self.CODIF.xml = arquivo
self.qTemp.xml = arquivo
self.CIDE.xml = arquivo
self.ICMSComb.xml = arquivo
self.ICMSInter.xml = arquivo
self.ICMSCons.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not self.cProdANP.valor:
return ''
txt = 'L1|'
txt += self.cProdANP.txt + '|'
txt += self.CODIF.txt + '|'
txt += self.qTemp.txt + '|'
txt += '\n'
txt += self.CIDE.txt
txt += self.ICMSComb.txt
txt += self.ICMSInter.txt
txt += self.ICMSCons.txt
return txt
txt = property(get_txt)
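# Firearm group <arma> (record L): weapon type, serial numbers and description.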
class Arma(XMLNFe):
def __init__(self):
super(Arma, self).__init__()
self.tpArma = TagInteiro(nome='tpArma', codigo='L02', tamanho=[1, 1], raiz='//arma')
self.nSerie = TagInteiro(nome='nSerie', codigo='L03', tamanho=[1, 9], raiz='//arma')
self.nCano = TagInteiro(nome='nCano', codigo='L04', tamanho=[1, 9], raiz='//arma')
self.descr = TagCaracter(nome='descr', codigo='L05', tamanho=[1, 256], raiz='//arma')
def get_xml(self):
        if not self.nSerie.valor:
return ''
xml = XMLNFe.get_xml(self)
xml += '<arma>'
xml += self.tpArma.xml
xml += self.nSerie.xml
xml += self.nCano.xml
xml += self.descr.xml
xml += '</arma>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.tpArma.xml = arquivo
self.nSerie.xml = arquivo
self.nCano.xml = arquivo
self.descr.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
        if not self.nSerie.valor:
return ''
txt = 'L|'
txt += self.tpArma.txt + '|'
txt += self.nSerie.txt + '|'
txt += self.nCano.txt + '|'
txt += self.descr.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
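# Medicine group <med> (record K): batch number and quantity, manufacture and
# expiry dates, and the vPMC price.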
class Med(XMLNFe):
def __init__(self):
super(Med, self).__init__()
self.nLote = TagCaracter(nome='nLote', codigo='K02', tamanho=[1, 20] , raiz='//med')
self.qLote = TagDecimal(nome='qLote' , codigo='K03', tamanho=[1, 11], decimais=[0, 3, 3], raiz='//med')
self.dFab = TagData(nome='dFab' , codigo='K04' , raiz='//med')
self.dVal = TagData(nome='dVal' , codigo='K05' , raiz='//med')
self.vPMC = TagDecimal(nome='vPMC' , codigo='K06', tamanho=[1, 15], decimais=[0, 2, 2], raiz='//med')
def get_xml(self):
if not self.nLote.valor:
return ''
xml = XMLNFe.get_xml(self)
xml += '<med>'
xml += self.nLote.xml
xml += self.qLote.xml
xml += self.dFab.xml
xml += self.dVal.xml
xml += self.vPMC.xml
xml += '</med>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nLote.xml = arquivo
self.qLote.xml = arquivo
self.dFab.xml = arquivo
self.dVal.xml = arquivo
self.vPMC.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not self.nLote.valor:
return ''
txt = 'K|'
txt += self.nLote.txt + '|'
txt += self.qLote.txt + '|'
txt += self.dFab.txt + '|'
txt += self.dVal.txt + '|'
txt += self.vPMC.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
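# New-vehicle group <veicProd> (record J): chassis, colour, engine and related data.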
class VeicProd(XMLNFe):
def __init__(self):
super(VeicProd, self).__init__()
self.tpOp = TagInteiro(nome='tpOp' , codigo='J02', tamanho=[ 1, 1, 1], raiz='//det/prod/veicProd')
self.chassi = TagCaracter(nome='chassi' , codigo='J03', tamanho=[ 1, 17] , raiz='//det/prod/veicProd')
self.cCor = TagCaracter(nome='cCor' , codigo='J04', tamanho=[ 1, 4] , raiz='//det/prod/veicProd')
self.xCor = TagCaracter(nome='xCor' , codigo='J05', tamanho=[ 1, 40] , raiz='//det/prod/veicProd')
self.pot = TagCaracter(nome='pot' , codigo='J06', tamanho=[ 1, 4] , raiz='//det/prod/veicProd')
self.CM3 = TagCaracter(nome='CM3' , codigo='J07', tamanho=[ 1, 4] , raiz='//det/prod/veicProd')
self.pesoL = TagCaracter(nome='pesoL' , codigo='J08', tamanho=[ 1, 9] , raiz='//det/prod/veicProd')
self.pesoB = TagCaracter(nome='pesoB' , codigo='J09', tamanho=[ 1, 9] , raiz='//det/prod/veicProd')
self.nSerie = TagCaracter(nome='nSerie' , codigo='J10', tamanho=[ 1, 9] , raiz='//det/prod/veicProd')
self.tpComb = TagCaracter(nome='tpComb' , codigo='J11', tamanho=[ 1, 8] , raiz='//det/prod/veicProd')
self.nMotor = TagCaracter(nome='nMotor' , codigo='J12', tamanho=[ 1, 21] , raiz='//det/prod/veicProd')
self.CMKG = TagCaracter(nome='CMKG' , codigo='J13', tamanho=[ 1, 9] , raiz='//det/prod/veicProd')
self.dist = TagCaracter(nome='dist' , codigo='J14', tamanho=[ 1, 4] , raiz='//det/prod/veicProd')
self.RENAVAM = TagCaracter(nome='RENAVAM', codigo='J15', tamanho=[ 1, 9] , raiz='//det/prod/veicProd', obrigatorio=False)
self.anoMod = TagInteiro(nome='anoMod' , codigo='J16', tamanho=[ 4, 4, 4], raiz='//det/prod/veicProd')
self.anoFab = TagInteiro(nome='anoFab' , codigo='J17', tamanho=[ 4, 4, 4], raiz='//det/prod/veicProd')
self.tpPint = TagCaracter(nome='tpPint' , codigo='J18', tamanho=[ 1, 1] , raiz='//det/prod/veicProd')
self.tpVeic = TagInteiro(nome='tpVeic' , codigo='J19', tamanho=[ 2, 2, 2], raiz='//det/prod/veicProd')
self.espVeic = TagInteiro(nome='espVeic' , codigo='J20', tamanho=[ 1, 1] , raiz='//det/prod/veicProd')
self.VIN = TagCaracter(nome='VIN' , codigo='J21', tamanho=[ 1, 1] , raiz='//det/prod/veicProd')
self.condVeic = TagInteiro(nome='condVeic', codigo='J22', tamanho=[ 1, 1] , raiz='//det/prod/veicProd')
self.cMod = TagInteiro(nome='cMod' , codigo='J23', tamanho=[ 6, 6, 6], raiz='//det/prod/veicProd')
def get_xml(self):
if not self.chassi.valor:
return ''
xml = XMLNFe.get_xml(self)
xml += '<veicProd>'
xml += self.tpOp.xml
xml += self.chassi.xml
xml += self.cCor.xml
xml += self.xCor.xml
xml += self.pot.xml
xml += self.CM3.xml
xml += self.pesoL.xml
xml += self.pesoB.xml
xml += self.nSerie.xml
xml += self.tpComb.xml
xml += self.nMotor.xml
xml += self.CMKG.xml
xml += self.dist.xml
xml += self.RENAVAM.xml
xml += self.anoMod.xml
xml += self.anoFab.xml
xml += self.tpPint.xml
xml += self.tpVeic.xml
xml += self.espVeic.xml
xml += self.VIN.xml
xml += self.condVeic.xml
xml += self.cMod.xml
xml += '</veicProd>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.tpOp.xml = arquivo
self.chassi.xml = arquivo
self.cCor.xml = arquivo
self.xCor.xml = arquivo
self.pot.xml = arquivo
self.CM3.xml = arquivo
self.pesoL.xml = arquivo
self.pesoB.xml = arquivo
self.nSerie.xml = arquivo
self.tpComb.xml = arquivo
self.nMotor.xml = arquivo
self.CMKG.xml = arquivo
self.dist.xml = arquivo
self.RENAVAM.xml = arquivo
self.anoMod.xml = arquivo
self.anoFab.xml = arquivo
self.tpPint.xml = arquivo
self.tpVeic.xml = arquivo
self.espVeic.xml = arquivo
self.VIN.xml = arquivo
self.condVeic.xml = arquivo
self.cMod.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not self.chassi.valor:
return ''
txt = 'J|'
txt += self.tpOp.txt + '|'
txt += self.chassi.txt + '|'
txt += self.cCor.txt + '|'
txt += self.xCor.txt + '|'
txt += self.pot.txt + '|'
txt += self.CM3.txt + '|'
txt += self.pesoL.txt + '|'
txt += self.pesoB.txt + '|'
txt += self.nSerie.txt + '|'
txt += self.tpComb.txt + '|'
txt += self.nMotor.txt + '|'
txt += self.CMKG.txt + '|'
txt += self.dist.txt + '|'
txt += self.RENAVAM.txt + '|'
txt += self.anoMod.txt + '|'
txt += self.anoFab.txt + '|'
txt += self.tpPint.txt + '|'
txt += self.tpVeic.txt + '|'
txt += self.espVeic.txt + '|'
txt += self.VIN.txt + '|'
txt += self.condVeic.txt + '|'
txt += self.cMod.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Adi(XMLNFe):
def __init__(self):
super(Adi, self).__init__()
self.nAdicao = TagInteiro(nome='nAdicao' , codigo='I26', tamanho=[1, 3], raiz='//adi')
self.nSeqAdic = TagInteiro(nome='nSeqAdic' , codigo='I27', tamanho=[1, 3], raiz='//adi')
self.cFabricante = TagCaracter(nome='cFabricante', codigo='I28', tamanho=[1, 60], raiz='//adi')
self.vDescDI = TagDecimal(nome='vDescDI' , codigo='I29', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//adi', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<adi>'
xml += self.nAdicao.xml
xml += self.nSeqAdic.xml
xml += self.cFabricante.xml
xml += self.vDescDI.xml
xml += '</adi>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nAdicao.xml = arquivo
self.nSeqAdic.xml = arquivo
self.cFabricante.xml = arquivo
self.vDescDI.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'I25|'
txt += self.nAdicao.txt + '|'
txt += self.nSeqAdic.txt + '|'
txt += self.cFabricante.txt + '|'
txt += self.vDescDI.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
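# Import declaration group <DI> (record I18): customs clearance data, holding
# one or more <adi> additions.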
class DI(XMLNFe):
def __init__(self):
super(DI, self).__init__()
self.nDI = TagCaracter(nome='nDI' , codigo='I19', tamanho=[1, 10], raiz='//DI')
self.dDI = TagData(nome='dDI' , codigo='I20', raiz='//DI')
self.xLocDesemb = TagCaracter(nome='xLocDesemb' , codigo='I21', tamanho=[1, 60], raiz='//DI')
self.UFDesemb = TagCaracter(nome='UFDesemb' , codigo='I22', tamanho=[2, 2], raiz='//DI')
self.dDesemb = TagData(nome='dDesemb' , codigo='I23', raiz='//DI')
self.cExportador = TagCaracter(nome='cExportador', codigo='I24', tamanho=[1, 60], raiz='//DI')
self.adi = [Adi()]
def get_xml(self):
        if not self.nDI.valor:
return ''
xml = XMLNFe.get_xml(self)
xml += '<DI>'
xml += self.nDI.xml
xml += self.dDI.xml
xml += self.xLocDesemb.xml
xml += self.UFDesemb.xml
xml += self.dDesemb.xml
xml += self.cExportador.xml
for a in self.adi:
xml += a.xml
xml += '</DI>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nDI.xml = arquivo
self.dDI.xml = arquivo
self.xLocDesemb.xml = arquivo
self.UFDesemb.xml = arquivo
self.dDesemb.xml = arquivo
self.cExportador.xml = arquivo
            #
            # Technique for reading repeated tags: these tag classes, and
            # their children, must be re-rooted (``raiz`` property) so they
            # can be read correctly
            #
adis = self._le_nohs('//DI/adi')
self.adi = []
if adis is not None:
self.adi = [Adi() for a in adis]
for i in range(len(adis)):
self.adi[i].xml = adis[i]
xml = property(get_xml, set_xml)
def get_txt(self):
        if not self.nDI.valor:
return ''
txt = 'I18|'
txt += self.nDI.txt + '|'
txt += self.dDI.txt + '|'
txt += self.xLocDesemb.txt + '|'
txt += self.UFDesemb.txt + '|'
txt += self.dDesemb.txt + '|'
txt += self.cExportador.txt + '|'
txt += '\n'
for a in self.adi:
txt += a.txt
return txt
txt = property(get_txt)
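# Product group <prod> (record I): product/service data of an item, plus the
# optional DI, veicProd, med, arma and comb sub-groups.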
class Prod(XMLNFe):
def __init__(self):
super(Prod, self).__init__()
self.cProd = TagCaracter(nome='cProd' , codigo='I02' , tamanho=[1, 60] , raiz='//det/prod')
self.cEAN = TagCaracter(nome='cEAN' , codigo='I03' , tamanho=[0, 14] , raiz='//det/prod')
self.xProd = TagCaracter(nome='xProd' , codigo='I04' , tamanho=[1, 120] , raiz='//det/prod')
self.NCM = TagCaracter(nome='NCM' , codigo='I05' , tamanho=[2, 8] , raiz='//det/prod', obrigatorio=False)
self.EXTIPI = TagCaracter(nome='EXTIPI' , codigo='I06' , tamanho=[2, 3] , raiz='//det/prod', obrigatorio=False)
self.genero = TagCaracter(nome='genero' , codigo='I07' , tamanho=[2, 2, 2] , raiz='//det/prod', obrigatorio=False)
self.CFOP = TagInteiro(nome='CFOP' , codigo='I08' , tamanho=[4, 4, 4] , raiz='//det/prod')
self.uCom = TagCaracter(nome='uCom' , codigo='I09' , tamanho=[1, 6] , raiz='//det/prod')
self.qCom = TagDecimal(nome='qCom' , codigo='I10' , tamanho=[1, 12, 1], decimais=[0, 4, 4], raiz='//det/prod')
self.vUnCom = TagDecimal(nome='vUnCom' , codigo='I10a', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/prod')
self.vProd = TagDecimal(nome='vProd' , codigo='I11' , tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod')
self.cEANTrib = TagCaracter(nome='cEANTrib', codigo='I12' , tamanho=[0, 14] , raiz='//det/prod')
self.uTrib = TagCaracter(nome='uTrib' , codigo='I13' , tamanho=[1, 6] , raiz='//det/prod')
self.qTrib = TagDecimal(nome='qTrib' , codigo='I14' , tamanho=[1, 12, 1], decimais=[0, 4, 4], raiz='//det/prod')
self.vUnTrib = TagDecimal(nome='vUnTrib' , codigo='I14a', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz='//det/prod')
self.vTrib = TagDecimal(nome='vTrib' , codigo='' , tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod', obrigatorio=False)
self.vFrete = TagDecimal(nome='vFrete' , codigo='I15' , tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod', obrigatorio=False)
self.vSeg = TagDecimal(nome='vSeg' , codigo='I16' , tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod', obrigatorio=False)
self.vDesc = TagDecimal(nome='vDesc' , codigo='I17' , tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod', obrigatorio=False)
self.DI = []
self.veicProd = VeicProd()
self.med = []
self.arma = []
self.comb = Comb()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<prod>'
xml += self.cProd.xml
xml += self.cEAN.xml
xml += self.xProd.xml
xml += self.NCM.xml
xml += self.EXTIPI.xml
xml += self.genero.xml
xml += self.CFOP.xml
xml += self.uCom.xml
xml += self.qCom.xml
xml += self.vUnCom.xml
xml += self.vProd.xml
xml += self.cEANTrib.xml
xml += self.uTrib.xml
xml += self.qTrib.xml
xml += self.vUnTrib.xml
xml += self.vFrete.xml
xml += self.vSeg.xml
xml += self.vDesc.xml
for d in self.DI:
xml += d.xml
xml += self.veicProd.xml
for m in self.med:
xml += m.xml
for a in self.arma:
xml += a.xml
xml += self.comb.xml
xml += '</prod>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.cProd.xml = arquivo
self.cEAN.xml = arquivo
self.xProd.xml = arquivo
self.NCM.xml = arquivo
self.EXTIPI.xml = arquivo
self.genero.xml = arquivo
self.CFOP.xml = arquivo
self.uCom.xml = arquivo
self.qCom.xml = arquivo
self.vUnCom.xml = arquivo
self.vProd.xml = arquivo
self.cEANTrib.xml = arquivo
self.uTrib.xml = arquivo
self.qTrib.xml = arquivo
self.vUnTrib.xml = arquivo
self.vFrete.xml = arquivo
self.vSeg.xml = arquivo
self.vDesc.xml = arquivo
            #
            # Technique for reading repeated tags: these tag classes, and
            # their children, must be re-rooted (``raiz`` property) so they
            # can be read correctly
            #
self.DI = self.le_grupo('//det/prod/DI', DI)
self.veicProd.xml = arquivo
            #
            # Same re-rooting technique for the remaining repeated groups
            #
self.med = self.le_grupo('//det/prod/med', Med)
self.arma = self.le_grupo('//det/prod/arma', Arma)
self.comb.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'I|'
txt += self.cProd.txt + '|'
txt += self.cEAN.txt + '|'
txt += self.xProd.txt + '|'
txt += self.NCM.txt + '|'
txt += self.EXTIPI.txt + '|'
txt += self.genero.txt + '|'
txt += self.CFOP.txt + '|'
txt += self.uCom.txt + '|'
txt += self.qCom.txt + '|'
txt += self.vUnCom.txt + '|'
txt += self.vProd.txt + '|'
txt += self.cEANTrib.txt + '|'
txt += self.uTrib.txt + '|'
txt += self.qTrib.txt + '|'
txt += self.vUnTrib.txt + '|'
txt += self.vFrete.txt + '|'
txt += self.vSeg.txt + '|'
txt += self.vDesc.txt + '|'
txt += '\n'
for d in self.DI:
txt += d.txt
txt += self.veicProd.txt
for m in self.med:
txt += m.txt
for a in self.arma:
txt += a.txt
txt += self.comb.txt
return txt
txt = property(get_txt)
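# Item group <det> (record H): one invoice item, combining <prod>, <imposto>
# and the optional infAdProd text.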
class Det(XMLNFe):
def __init__(self):
super(Det, self).__init__()
self.nItem = TagInteiro(nome='det' , codigo='H01', tamanho=[1, 3], propriedade='nItem', raiz='/') #, namespace=NAMESPACE_NFE)
self.prod = Prod()
self.imposto = Imposto()
self.infAdProd = TagCaracter(nome='infAdProd', codigo='V01', tamanho=[1, 500], raiz='//det', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += self.nItem.xml
xml += self.prod.xml
xml += self.imposto.xml
xml += self.infAdProd.xml
xml += '</det>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nItem.xml = arquivo
self.prod.xml = arquivo
self.imposto.xml = arquivo
self.infAdProd.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'H|'
txt += self.nItem.txt + '|'
txt += self.infAdProd.txt + '|'
txt += '\n'
txt += self.prod.txt
txt += self.imposto.txt
return txt
txt = property(get_txt)
def descricao_produto_formatada(self):
formatado = self.prod.xProd.valor.replace('|', '<br />')
if len(self.infAdProd.valor):
formatado += '<br />'
formatado += self.infAdProd.valor.replace('|', '<br />')
return formatado
def cst_formatado(self):
formatado = unicode(self.imposto.ICMS.orig.valor).zfill(1)
formatado += unicode(self.imposto.ICMS.CST.valor).zfill(2)
return formatado
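# Minimal usage sketch (illustrative only; it simply follows the ``valor``,
# ``xml`` and ``txt`` conventions used throughout this module):
#
#     det = Det()
#     det.nItem.valor = 1
#     det.prod.cProd.valor = '001'
#     det.prod.xProd.valor = 'Produto de teste'
#     det.imposto.ICMS.CST.valor = '40'
#     fragmento_xml = det.xml   # serialized <det> element
#     registro_txt = det.txt    # flat-file (TXT) records for the item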
class Compra(XMLNFe):
def __init__(self):
super(Compra, self).__init__()
self.xNEmp = TagCaracter(nome='xNEmp', codigo='ZB02', tamanho=[1, 17], raiz='//NFe/infNFe/compra', obrigatorio=False)
self.xPed = TagCaracter(nome='xPed' , codigo='ZB03', tamanho=[1, 60], raiz='//NFe/infNFe/compra', obrigatorio=False)
self.xCont = TagCaracter(nome='xCont', codigo='ZB04', tamanho=[1, 60], raiz='//NFe/infNFe/compra', obrigatorio=False)
def get_xml(self):
if not (self.xNEmp.valor or self.xPed.valor or self.xCont.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<compra>'
xml += self.xNEmp.xml
xml += self.xPed.xml
xml += self.xCont.xml
xml += '</compra>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xNEmp.xml = arquivo
self.xPed.xml = arquivo
self.xCont.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.xNEmp.valor or self.xPed.valor or self.xCont.valor):
return ''
txt = 'ZB|'
txt += self.xNEmp.txt + '|'
txt += self.xPed.txt + '|'
txt += self.xCont.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Exporta(XMLNFe):
def __init__(self):
super(Exporta, self).__init__()
self.UFEmbarq = TagCaracter(nome='UFEmbarq' , codigo='ZA02', tamanho=[2, 2], raiz='//NFe/infNFe/exporta', obrigatorio=False)
self.xLocEmbarq = TagCaracter(nome='xLocEmbarq', codigo='ZA03', tamanho=[1, 60], raiz='//NFe/infNFe/exporta', obrigatorio=False)
def get_xml(self):
if not (self.UFEmbarq.valor or self.xLocEmbarq.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<exporta>'
xml += self.UFEmbarq.xml
xml += self.xLocEmbarq.xml
xml += '</exporta>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.UFEmbarq.xml = arquivo
self.xLocEmbarq.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.UFEmbarq.valor or self.xLocEmbarq.valor):
return ''
txt = 'ZA|'
txt += self.UFEmbarq.txt + '|'
txt += self.xLocEmbarq.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ProcRef(XMLNFe):
def __init__(self):
super(ProcRef, self).__init__()
self.nProc = TagCaracter(nome='nProc' , codigo='Z11', tamanho=[1, 60], raiz='//procRef')
self.indProc = TagInteiro(nome='indProc', codigo='Z12', tamanho=[1, 1], raiz='//procRef')
def get_xml(self):
if not (self.nProc.valor or self.indProc.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<procRef>'
xml += self.nProc.xml
xml += self.indProc.xml
xml += '</procRef>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nProc.xml = arquivo
self.indProc.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.nProc.valor or self.indProc.valor):
return ''
txt = 'Z10|'
txt += self.nProc.txt + '|'
txt += self.indProc.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ObsFisco(XMLNFe):
def __init__(self):
super(ObsFisco, self).__init__()
self.xCampo = TagCaracter(nome='obsFisco', codigo='Z08', propriedade='xCampo', tamanho=[1, 20], raiz='/')
self.xTexto = TagCaracter(nome='xTexto', codigo='Z09', tamanho=[1, 60], raiz='//obsFisco')
def get_xml(self):
if not (self.xCampo.valor or self.xTexto.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += self.xCampo.xml
xml += self.xTexto.xml
xml += '</obsFisco>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xCampo.xml = arquivo
self.xTexto.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.xCampo.valor or self.xTexto.valor):
return ''
txt = 'Z07|'
txt += self.xCampo.txt + '|'
txt += self.xTexto.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ObsCont(XMLNFe):
def __init__(self):
super(ObsCont, self).__init__()
self.xCampo = TagCaracter(nome='obsCont', codigo='Z05', propriedade='xCampo', tamanho=[1, 20], raiz='/')
self.xTexto = TagCaracter(nome='xTexto', codigo='Z06', tamanho=[1, 60], raiz='//obsCont')
def get_xml(self):
if not (self.xCampo.valor or self.xTexto.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += self.xCampo.xml
xml += self.xTexto.xml
xml += '</obsCont>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xCampo.xml = arquivo
self.xTexto.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.xCampo.valor or self.xTexto.valor):
return ''
txt = 'Z04|'
txt += self.xCampo.txt + '|'
txt += self.xTexto.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class InfAdic(XMLNFe):
def __init__(self):
super(InfAdic, self).__init__()
self.infAdFisco = TagCaracter(nome='infAdFisco', codigo='Z02', tamanho=[1, 256], raiz='//NFe/infNFe/infAdic', obrigatorio=False)
self.infCpl = TagCaracter(nome='infCpl' , codigo='Z03', tamanho=[1, 5000], raiz='//NFe/infNFe/infAdic', obrigatorio=False)
self.obsCont = []
self.obsFisco = []
self.procRef = []
def get_xml(self):
if not (self.infAdFisco.valor or self.infCpl.valor or len(self.obsCont) or len(self.obsFisco) or len(self.procRef)):
return ''
xml = XMLNFe.get_xml(self)
xml += '<infAdic>'
xml += self.infAdFisco.xml
xml += self.infCpl.xml
for o in self.obsCont:
xml += o.xml
for o in self.obsFisco:
xml += o.xml
for p in self.procRef:
xml += p.xml
xml += '</infAdic>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.infAdFisco.xml = arquivo
self.infCpl.xml = arquivo
            #
            # Technique for reading repeated tags: these tag classes, and
            # their children, must be re-rooted (``raiz`` property) so they
            # can be read correctly
            #
self.obsCont = self.le_grupo('//NFe/infNFe/infAdic/obsCont', ObsCont)
self.obsFisco = self.le_grupo('//NFe/infNFe/infAdic/obsFisco', ObsFisco)
self.procRef = self.le_grupo('//NFe/infNFe/infAdic/procRef', ProcRef)
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.infAdFisco.valor or self.infCpl.valor or len(self.obsCont) or len(self.obsFisco) or len(self.procRef)):
return ''
txt = 'Z|'
txt += self.infAdFisco.txt + '|'
txt += self.infCpl.txt + '|'
txt += '\n'
for o in self.obsCont:
txt += o.txt
for o in self.obsFisco:
txt += o.txt
for p in self.procRef:
txt += p.txt
return txt
txt = property(get_txt)
class Dup(XMLNFe):
def __init__(self):
super(Dup, self).__init__()
self.nDup = TagCaracter(nome='nDup', codigo='Y08', tamanho=[1, 60], raiz='//dup', obrigatorio=False)
self.dVenc = TagData(nome='dVenc' , codigo='Y09', raiz='//dup', obrigatorio=False)
self.vDup = TagDecimal(nome='vDup' , codigo='Y10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//dup', obrigatorio=False)
def get_xml(self):
if not (self.nDup.valor or self.dVenc.valor or self.vDup.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<dup>'
xml += self.nDup.xml
xml += self.dVenc.xml
xml += self.vDup.xml
xml += '</dup>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nDup.xml = arquivo
self.dVenc.xml = arquivo
self.vDup.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.nDup.valor or self.dVenc.valor or self.vDup.valor):
return ''
txt = 'Y07|'
txt += self.nDup.txt + '|'
txt += self.dVenc.txt + '|'
txt += self.vDup.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Fat(XMLNFe):
def __init__(self):
super(Fat, self).__init__()
self.nFat = TagCaracter(nome='nFat', codigo='Y03', tamanho=[1, 60], raiz='//NFe/infNFe/cobr/fat', obrigatorio=False)
self.vOrig = TagDecimal(nome='vOrig', codigo='Y04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/cobr/fat', obrigatorio=False)
self.vDesc = TagDecimal(nome='vDesc', codigo='Y05', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/cobr/fat', obrigatorio=False)
self.vLiq = TagDecimal(nome='vLiq' , codigo='Y06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/cobr/fat', obrigatorio=False)
def get_xml(self):
if not (self.nFat.valor or self.vOrig.valor or self.vDesc.valor or self.vLiq.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<fat>'
xml += self.nFat.xml
xml += self.vOrig.xml
xml += self.vDesc.xml
xml += self.vLiq.xml
xml += '</fat>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nFat.xml = arquivo
self.vOrig.xml = arquivo
self.vDesc.xml = arquivo
self.vLiq.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.nFat.valor or self.vOrig.valor or self.vDesc.valor or self.vLiq.valor):
return ''
txt = 'Y02|'
txt += self.nFat.txt + '|'
txt += self.vOrig.txt + '|'
txt += self.vDesc.txt + '|'
txt += self.vLiq.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
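# Billing group <cobr> (record Y): one <fat> invoice summary plus optional
# <dup> installments.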
class Cobr(XMLNFe):
def __init__(self):
super(Cobr, self).__init__()
self.fat = Fat()
self.dup = []
def get_xml(self):
if not (self.fat.xml or len(self.dup)):
return ''
xml = XMLNFe.get_xml(self)
xml += '<cobr>'
xml += self.fat.xml
for d in self.dup:
xml += d.xml
xml += '</cobr>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.fat.xml = arquivo
            #
            # Technique for reading repeated tags: these tag classes, and
            # their children, must be re-rooted (``raiz`` property) so they
            # can be read correctly
            #
self.dup = self.le_grupo('//NFe/infNFe/cobr/dup', Dup)
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.fat.xml or len(self.dup)):
return ''
txt = 'Y|\n'
txt += self.fat.txt
for d in self.dup:
txt += d.txt
return txt
txt = property(get_txt)
class Lacres(XMLNFe):
def __init__(self):
super(Lacres, self).__init__()
self.nLacre = TagCaracter(nome='nLacre', codigo='X34', tamanho=[1, 60], raiz='//lacres')
def get_xml(self):
if not self.nLacre.valor:
return ''
xml = XMLNFe.get_xml(self)
xml += '<lacres>'
xml += self.nLacre.xml
xml += '</lacres>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.nLacre.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not self.nLacre.valor:
return ''
txt = 'X33|'
txt += self.nLacre.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Vol(XMLNFe):
    #
    # For volumes, when the quantity, gross weight or net weight is zero or
    # missing, the value is not printed on the DANFE
    #
class TagInteiroVolume(TagInteiro):
def formato_danfe(self):
if not self._valor_inteiro:
return ''
else:
return super(Vol.TagInteiroVolume, self).formato_danfe()
class TagDecimalVolume(TagDecimal):
def formato_danfe(self):
if not self._valor_decimal:
return ''
else:
return super(Vol.TagDecimalVolume, self).formato_danfe()
def __init__(self, xml=None):
super(Vol, self).__init__()
self.qVol = TagInteiro(nome='qVol' , codigo='X27', tamanho=[1, 15], raiz='//vol', obrigatorio=False)
#self.qVol = self.TagInteiroVolume(nome='qVol' , codigo='X27', tamanho=[1, 15], raiz='//vol', obrigatorio=False)
self.esp = TagCaracter(nome='esp' , codigo='X28', tamanho=[1, 60], raiz='//vol', obrigatorio=False)
self.marca = TagCaracter(nome='marca', codigo='X29', tamanho=[1, 60], raiz='//vol', obrigatorio=False)
self.nVol = TagCaracter(nome='nVol' , codigo='X30', tamanho=[1, 60], raiz='//vol', obrigatorio=False)
        self.pesoL = TagDecimal(nome='pesoL' , codigo='X31', tamanho=[1, 15, 1], decimais=[0, 3, 3], raiz='//vol', obrigatorio=False)
        self.pesoB = TagDecimal(nome='pesoB' , codigo='X32', tamanho=[1, 15, 1], decimais=[0, 3, 3], raiz='//vol', obrigatorio=False)
        #self.pesoL = self.TagDecimalVolume(nome='pesoL' , codigo='X31', tamanho=[1, 15, 1], decimais=[0, 3, 3], raiz='//vol', obrigatorio=False)
        #self.pesoB = self.TagDecimalVolume(nome='pesoB' , codigo='X32', tamanho=[1, 15, 1], decimais=[0, 3, 3], raiz='//vol', obrigatorio=False)
self.lacres = []
def get_xml(self):
if not (self.qVol.valor or self.esp.valor or self.marca.valor or self.nVol.valor or self.pesoL.valor or self.pesoB.valor or len(self.lacres)):
return ''
xml = XMLNFe.get_xml(self)
xml += '<vol>'
xml += self.qVol.xml
xml += self.esp.xml
xml += self.marca.xml
xml += self.nVol.xml
xml += self.pesoL.xml
xml += self.pesoB.xml
for l in self.lacres:
xml += l.xml
xml += '</vol>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.qVol.xml = arquivo
self.esp.xml = arquivo
self.marca.xml = arquivo
self.nVol.xml = arquivo
self.pesoL.xml = arquivo
self.pesoB.xml = arquivo
            #
            # Technique for reading repeated tags: these tag classes, and
            # their children, must be re-rooted (``raiz`` property) so they
            # can be read correctly
            #
self.lacres = self.le_grupo('//vol/lacres', Lacres)
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.qVol.valor or self.esp.valor or self.marca.valor or self.nVol.valor or self.pesoL.valor or self.pesoB.valor or len(self.lacres)):
return ''
txt = 'X26|'
txt += self.qVol.txt + '|'
txt += self.esp.txt + '|'
txt += self.marca.txt + '|'
txt += self.nVol.txt + '|'
txt += self.pesoL.txt + '|'
txt += self.pesoB.txt + '|'
txt += '\n'
for l in self.lacres:
txt += l.txt
return txt
txt = property(get_txt)
class Reboque(XMLNFe):
def __init__(self):
super(Reboque, self).__init__()
self.placa = TagCaracter(nome='placa', codigo='X23', tamanho=[1, 8], raiz='//reboque')
self.UF = TagCaracter(nome='UF' , codigo='X24', tamanho=[2, 2], raiz='//reboque')
self.RNTC = TagCaracter(nome='RNTC' , codigo='X25', tamanho=[1, 20], raiz='//reboque', obrigatorio=False)
def get_xml(self):
if not (self.placa.valor or self.UF.valor or self.RNTC.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<reboque>'
xml += self.placa.xml
xml += self.UF.xml
xml += self.RNTC.xml
xml += '</reboque>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.placa.xml = arquivo
self.UF.xml = arquivo
self.RNTC.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.placa.valor or self.UF.valor or self.RNTC.valor):
return ''
txt = 'X22|'
txt += self.placa.txt + '|'
txt += self.UF.txt + '|'
txt += self.RNTC.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class VeicTransp(XMLNFe):
def __init__(self):
super(VeicTransp, self).__init__()
self.placa = TagCaracter(nome='placa', codigo='X19', tamanho=[1, 8], raiz='//NFe/infNFe/transp/veicTransp')
self.UF = TagCaracter(nome='UF' , codigo='X20', tamanho=[2, 2], raiz='//NFe/infNFe/transp/veicTransp')
self.RNTC = TagCaracter(nome='RNTC' , codigo='X21', tamanho=[1, 20], raiz='//NFe/infNFe/transp/veicTransp', obrigatorio=False)
def get_xml(self):
if not (self.placa.valor or self.UF.valor or self.RNTC.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<veicTransp>'
xml += self.placa.xml
xml += self.UF.xml
xml += self.RNTC.xml
xml += '</veicTransp>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.placa.xml = arquivo
self.UF.xml = arquivo
self.RNTC.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.placa.valor or self.UF.valor or self.RNTC.valor):
return ''
txt = 'X18|'
txt += self.placa.txt + '|'
txt += self.UF.txt + '|'
txt += self.RNTC.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class RetTransp(XMLNFe):
def __init__(self):
super(RetTransp, self).__init__()
self.vServ = TagDecimal(nome='vServ' , codigo='X12', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/transp/retTransp')
self.vBCRet = TagDecimal(nome='vBCRet' , codigo='X13', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/transp/retTransp')
        self.pICMSRet = TagDecimal(nome='pICMSRet', codigo='X14', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/transp/retTransp')
self.vICMSRet = TagDecimal(nome='vICMSRet', codigo='X15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/transp/retTransp')
self.CFOP = TagInteiro(nome='CFOP' , codigo='X16', tamanho=[4, 4, 4], raiz='//NFe/infNFe/transp/retTransp')
self.cMunFG = TagInteiro(nome='cMunFG' , codigo='X17', tamanho=[7, 7, 7], raiz='//NFe/infNFe/transp/retTransp')
def get_xml(self):
if not (self.vServ.valor or self.vBCRet.valor or self.pICMSRet.valor or self.vICMSRet.valor or self.CFOP.valor or self.cMunFG.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<retTransp>'
xml += self.vServ.xml
xml += self.vBCRet.xml
xml += self.pICMSRet.xml
xml += self.vICMSRet.xml
xml += self.CFOP.xml
xml += self.cMunFG.xml
xml += '</retTransp>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vServ.xml = arquivo
self.vBCRet.xml = arquivo
self.pICMSRet.xml = arquivo
self.vICMSRet.xml = arquivo
self.CFOP.xml = arquivo
self.cMunFG.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vServ.valor or self.vBCRet.valor or self.pICMSRet.valor or self.vICMSRet.valor or self.CFOP.valor or self.cMunFG.valor):
return ''
txt = 'X11|'
txt += self.vServ.txt + '|'
txt += self.vBCRet.txt + '|'
txt += self.pICMSRet.txt + '|'
txt += self.vICMSRet.txt + '|'
txt += self.CFOP.txt + '|'
txt += self.cMunFG.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Transporta(XMLNFe):
def __init__(self):
super(Transporta, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='X04', tamanho=[14, 14], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
self.CPF = TagCaracter(nome='CPF' , codigo='X05', tamanho=[11, 11], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
self.xNome = TagCaracter(nome='xNome' , codigo='X06', tamanho=[ 1, 60], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
self.IE = TagCaracter(nome='IE' , codigo='X07', tamanho=[ 2, 14], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
self.xEnder = TagCaracter(nome='xEnder', codigo='X08', tamanho=[ 1, 60], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
self.xMun = TagCaracter(nome='xMun' , codigo='X09', tamanho=[ 1, 60], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
self.UF = TagCaracter(nome='UF' , codigo='X10', tamanho=[ 2, 2], raiz='//NFe/infNFe/transp/transporta', obrigatorio=False)
def get_xml(self):
if not (self.CNPJ.valor or self.CPF.valor or self.xNome.valor or self.IE.valor or self.xEnder.valor or self.xMun.valor or self.UF.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<transporta>'
xml += self.CNPJ.xml
xml += self.CPF.xml
xml += self.xNome.xml
xml += self.IE.xml
xml += self.xEnder.xml
xml += self.xMun.xml
xml += self.UF.xml
xml += '</transporta>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.CPF.xml = arquivo
self.xNome.xml = arquivo
self.IE.xml = arquivo
self.xEnder.xml = arquivo
self.xMun.xml = arquivo
self.UF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.CNPJ.valor or self.CPF.valor or self.xNome.valor or self.IE.valor or self.xEnder.valor or self.xMun.valor or self.UF.valor):
return ''
txt = 'X03|'
txt += self.xNome.txt + '|'
txt += self.IE.txt + '|'
txt += self.xEnder.txt + '|'
txt += self.xMun.txt + '|'
txt += self.UF.txt + '|'
txt += '\n'
if self.CPF.valor:
txt += 'X05|' + self.CPF.txt + '|\n'
else:
txt += 'X04|' + self.CNPJ.txt + '|\n'
return txt
txt = property(get_txt)
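# Transport group <transp> (record X): freight mode, carrier, withheld ICMS,
# vehicle, trailers and volumes.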
class Transp(XMLNFe):
def __init__(self):
super(Transp, self).__init__()
self.modFrete = TagInteiro(nome='modFrete', codigo='X02', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/transp')
self.transporta = Transporta()
self.retTransp = RetTransp()
self.veicTransp = VeicTransp()
self.reboque = []
self.vol = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<transp>'
xml += self.modFrete.xml
xml += self.transporta.xml
xml += self.retTransp.xml
xml += self.veicTransp.xml
for r in self.reboque:
xml += r.xml
for v in self.vol:
xml += v.xml
xml += '</transp>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.modFrete.xml = arquivo
self.transporta.xml = arquivo
self.retTransp.xml = arquivo
self.veicTransp.xml = arquivo
            #
            # Technique for reading repeated tags: these tag classes, and
            # their children, must be re-rooted (``raiz`` property) so they
            # can be read correctly
            #
self.reboque = self.le_grupo('//NFe/infNFe/transp/reboque', Reboque)
self.vol = self.le_grupo('//NFe/infNFe/transp/vol', Vol)
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'X|'
txt += self.modFrete.txt + '|\n'
txt += self.transporta.txt
txt += self.retTransp.txt
txt += self.veicTransp.txt
for r in self.reboque:
txt += r.txt
for v in self.vol:
txt += v.txt
return txt
txt = property(get_txt)
class RetTrib(XMLNFe):
def __init__(self):
super(RetTrib, self).__init__()
self.vRetPIS = TagDecimal(nome='vRetPIS' , codigo='W24', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
self.vRetCOFINS = TagDecimal(nome='vRetCOFINS', codigo='W25', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
self.vRetCSLL = TagDecimal(nome='vRetCSLL' , codigo='W26', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
self.vBCIRRF = TagDecimal(nome='vBCIRRF' , codigo='W27', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
self.vIRRF = TagDecimal(nome='vIRRF' , codigo='W28', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
self.vBCRetPrev = TagDecimal(nome='vBCRetPrev', codigo='W29', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
self.vRetPrev = TagDecimal(nome='vRetPrev' , codigo='W30', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/retTrib', obrigatorio=False)
def get_xml(self):
if not (self.vRetPIS.valor or self.vRetCOFINS.valor or self.vRetCSLL.valor or self.vBCIRRF.valor or self.vIRRF.valor or self.vBCRetPrev.valor or self.vRetPrev.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<retTrib>'
xml += self.vRetPIS.xml
xml += self.vRetCOFINS.xml
xml += self.vRetCSLL.xml
xml += self.vBCIRRF.xml
xml += self.vIRRF.xml
xml += self.vBCRetPrev.xml
xml += self.vRetPrev.xml
xml += '</retTrib>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vRetPIS.xml = arquivo
self.vRetCOFINS.xml = arquivo
self.vRetCSLL.xml = arquivo
self.vBCIRRF.xml = arquivo
self.vIRRF.xml = arquivo
self.vBCRetPrev.xml = arquivo
self.vRetPrev.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vRetPIS.valor or self.vRetCOFINS.valor or self.vRetCSLL.valor or self.vBCIRRF.valor or self.vIRRF.valor or self.vBCRetPrev.valor or self.vRetPrev.valor):
return ''
txt = 'W23|'
txt += self.vRetPIS.txt + '|'
txt += self.vRetCOFINS.txt + '|'
txt += self.vRetCSLL.txt + '|'
txt += self.vBCIRRF.txt + '|'
txt += self.vIRRF.txt + '|'
txt += self.vBCRetPrev.txt + '|'
txt += self.vRetPrev.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ISSQNTot(XMLNFe):
def __init__(self):
super(ISSQNTot, self).__init__()
self.vServ = TagDecimal(nome='vServ' , codigo='W18', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
self.vBC = TagDecimal(nome='vBC' , codigo='W19', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
self.vISS = TagDecimal(nome='vISS' , codigo='W20', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
self.vPIS = TagDecimal(nome='vPIS' , codigo='W21', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
self.vCOFINS = TagDecimal(nome='vCOFINS', codigo='W22', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
def get_xml(self):
if not (self.vServ.valor or self.vBC.valor or self.vISS.valor or self.vPIS.valor or self.vCOFINS.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<ISSQNtot>'
xml += self.vServ.xml
xml += self.vBC.xml
xml += self.vISS.xml
xml += self.vPIS.xml
xml += self.vCOFINS.xml
xml += '</ISSQNtot>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vServ.xml = arquivo
self.vBC.xml = arquivo
self.vISS.xml = arquivo
self.vPIS.xml = arquivo
self.vCOFINS.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.vServ.valor or self.vBC.valor or self.vISS.valor or self.vPIS.valor or self.vCOFINS.valor):
return ''
txt = 'W17|'
txt += self.vServ.txt + '|'
txt += self.vBC.txt + '|'
txt += self.vISS.txt + '|'
txt += self.vPIS.txt + '|'
txt += self.vCOFINS.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class ICMSTot(XMLNFe):
def __init__(self):
super(ICMSTot, self).__init__()
self.vBC = TagDecimal(nome='vBC' , codigo='W03', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vICMS = TagDecimal(nome='vICMS' , codigo='W04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vBCST = TagDecimal(nome='vBCST' , codigo='W05', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vST = TagDecimal(nome='vST' , codigo='W06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vProd = TagDecimal(nome='vProd' , codigo='W07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vFrete = TagDecimal(nome='vFrete' , codigo='W08', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vSeg = TagDecimal(nome='vSeg' , codigo='W09', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vDesc = TagDecimal(nome='vDesc' , codigo='W10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vII = TagDecimal(nome='vII' , codigo='W11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vIPI = TagDecimal(nome='vIPI' , codigo='W12', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vPIS = TagDecimal(nome='vPIS' , codigo='W13', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vCOFINS = TagDecimal(nome='vCOFINS', codigo='W14', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vOutro = TagDecimal(nome='vOutro' , codigo='W15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
self.vNF = TagDecimal(nome='vNF' , codigo='W16', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<ICMSTot>'
xml += self.vBC.xml
xml += self.vICMS.xml
xml += self.vBCST.xml
xml += self.vST.xml
xml += self.vProd.xml
xml += self.vFrete.xml
xml += self.vSeg.xml
xml += self.vDesc.xml
xml += self.vII.xml
xml += self.vIPI.xml
xml += self.vPIS.xml
xml += self.vCOFINS.xml
xml += self.vOutro.xml
xml += self.vNF.xml
xml += '</ICMSTot>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBC.xml = arquivo
self.vICMS.xml = arquivo
self.vBCST.xml = arquivo
self.vST.xml = arquivo
self.vProd.xml = arquivo
self.vFrete.xml = arquivo
self.vSeg.xml = arquivo
self.vDesc.xml = arquivo
self.vII.xml = arquivo
self.vIPI.xml = arquivo
self.vPIS.xml = arquivo
self.vCOFINS.xml = arquivo
self.vOutro.xml = arquivo
self.vNF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'W02|'
txt += self.vBC.txt + '|'
txt += self.vICMS.txt + '|'
txt += self.vBCST.txt + '|'
txt += self.vST.txt + '|'
txt += self.vProd.txt + '|'
txt += self.vFrete.txt + '|'
txt += self.vSeg.txt + '|'
txt += self.vDesc.txt + '|'
txt += self.vII.txt + '|'
txt += self.vIPI.txt + '|'
txt += self.vPIS.txt + '|'
txt += self.vCOFINS.txt + '|'
txt += self.vOutro.txt + '|'
txt += self.vNF.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
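# Totals group <total> (record W): ICMS, ISSQN and withheld-tax totals of the invoice.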
class Total(XMLNFe):
def __init__(self):
super(Total, self).__init__()
self.ICMSTot = ICMSTot()
self.ISSQNTot = ISSQNTot()
self.retTrib = RetTrib()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<total>'
xml += self.ICMSTot.xml
xml += self.ISSQNTot.xml
xml += self.retTrib.xml
xml += '</total>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.ICMSTot.xml = arquivo
self.ISSQNTot.xml = arquivo
self.retTrib.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'W|\n'
txt += self.ICMSTot.txt
txt += self.ISSQNTot.txt
txt += self.retTrib.txt
return txt
txt = property(get_txt)
class Entrega(XMLNFe):
def __init__(self):
super(Entrega, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='G01', tamanho=[14, 14] , raiz='//NFe/infNFe/entrega')
self.xLgr = TagCaracter(nome='xLgr' , codigo='G02', tamanho=[ 2, 60] , raiz='//NFe/infNFe/entrega')
self.nro = TagCaracter(nome='nro' , codigo='G03', tamanho=[ 1, 60] , raiz='//NFe/infNFe/entrega')
self.xCpl = TagCaracter(nome='xCpl' , codigo='G04', tamanho=[ 1, 60] , raiz='//NFe/infNFe/entrega', obrigatorio=False)
self.xBairro = TagCaracter(nome='xBairro', codigo='G05', tamanho=[ 2, 60] , raiz='//NFe/infNFe/entrega')
self.cMun = TagInteiro(nome='cMun' , codigo='G06', tamanho=[ 7, 7, 7], raiz='//NFe/infNFe/entrega')
self.xMun = TagCaracter(nome='xMun' , codigo='G07', tamanho=[ 2, 60] , raiz='//NFe/infNFe/entrega')
self.UF = TagCaracter(nome='UF' , codigo='G08', tamanho=[ 2, 2] , raiz='//NFe/infNFe/entrega')
def get_xml(self):
if not len(self.CNPJ.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<entrega>'
xml += self.CNPJ.xml
xml += self.xLgr.xml
xml += self.nro.xml
xml += self.xCpl.xml
xml += self.xBairro.xml
xml += self.cMun.xml
xml += self.xMun.xml
xml += self.UF.xml
xml += '</entrega>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.xLgr.xml = arquivo
self.nro.xml = arquivo
self.xCpl.xml = arquivo
self.xBairro.xml = arquivo
self.cMun.xml = arquivo
self.xMun.xml = arquivo
self.UF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not len(self.CNPJ.valor):
return ''
txt = 'G|'
txt += self.CNPJ.txt + '|'
txt += self.xLgr.txt + '|'
txt += self.nro.txt + '|'
txt += self.xCpl.txt + '|'
txt += self.xBairro.txt + '|'
txt += self.cMun.txt + '|'
txt += self.xMun.txt + '|'
txt += self.UF.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Retirada(XMLNFe):
def __init__(self):
super(Retirada, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='F01', tamanho=[14, 14] , raiz='//NFe/infNFe/retirada')
self.xLgr = TagCaracter(nome='xLgr' , codigo='F02', tamanho=[ 2, 60] , raiz='//NFe/infNFe/retirada')
self.nro = TagCaracter(nome='nro' , codigo='F03', tamanho=[ 1, 60] , raiz='//NFe/infNFe/retirada')
self.xCpl = TagCaracter(nome='xCpl' , codigo='F04', tamanho=[ 1, 60] , raiz='//NFe/infNFe/retirada', obrigatorio=False)
self.xBairro = TagCaracter(nome='xBairro', codigo='F05', tamanho=[ 2, 60] , raiz='//NFe/infNFe/retirada')
self.cMun = TagInteiro(nome='cMun' , codigo='F06', tamanho=[ 7, 7, 7], raiz='//NFe/infNFe/retirada')
self.xMun = TagCaracter(nome='xMun' , codigo='F07', tamanho=[ 2, 60] , raiz='//NFe/infNFe/retirada')
self.UF = TagCaracter(nome='UF' , codigo='F08', tamanho=[ 2, 2] , raiz='//NFe/infNFe/retirada')
def get_xml(self):
if not len(self.CNPJ.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<retirada>'
xml += self.CNPJ.xml
xml += self.xLgr.xml
xml += self.nro.xml
xml += self.xCpl.xml
xml += self.xBairro.xml
xml += self.cMun.xml
xml += self.xMun.xml
xml += self.UF.xml
xml += '</retirada>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.xLgr.xml = arquivo
self.nro.xml = arquivo
self.xCpl.xml = arquivo
self.xBairro.xml = arquivo
self.cMun.xml = arquivo
self.xMun.xml = arquivo
self.UF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not len(self.CNPJ.valor):
return ''
txt = 'F|'
txt += self.CNPJ.txt + '|'
txt += self.xLgr.txt + '|'
txt += self.nro.txt + '|'
txt += self.xCpl.txt + '|'
txt += self.xBairro.txt + '|'
txt += self.cMun.txt + '|'
txt += self.xMun.txt + '|'
txt += self.UF.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class EnderDest(XMLNFe):
def __init__(self):
super(EnderDest, self).__init__()
self.xLgr = TagCaracter(nome='xLgr' , codigo='E06', tamanho=[ 2, 60] , raiz='//NFe/infNFe/dest/enderDest')
self.nro = TagCaracter(nome='nro' , codigo='E07', tamanho=[ 1, 60] , raiz='//NFe/infNFe/dest/enderDest')
self.xCpl = TagCaracter(nome='xCpl' , codigo='E08', tamanho=[ 1, 60] , raiz='//NFe/infNFe/dest/enderDest', obrigatorio=False)
self.xBairro = TagCaracter(nome='xBairro', codigo='E09', tamanho=[ 2, 60] , raiz='//NFe/infNFe/dest/enderDest')
self.cMun = TagInteiro(nome='cMun' , codigo='E10', tamanho=[ 7, 7, 7], raiz='//NFe/infNFe/dest/enderDest')
self.xMun = TagCaracter(nome='xMun' , codigo='E11', tamanho=[ 2, 60] , raiz='//NFe/infNFe/dest/enderDest')
self.UF = TagCaracter(nome='UF' , codigo='E12', tamanho=[ 2, 2] , raiz='//NFe/infNFe/dest/enderDest')
self.CEP = TagCaracter(nome='CEP' , codigo='E13', tamanho=[ 8, 8, 8], raiz='//NFe/infNFe/dest/enderDest', obrigatorio=False)
self.cPais = TagCaracter(nome='cPais' , codigo='E14', tamanho=[ 4, 4, 4], raiz='//NFe/infNFe/dest/enderDest', obrigatorio=False)
self.xPais = TagCaracter(nome='xPais' , codigo='E15', tamanho=[ 1, 60] , raiz='//NFe/infNFe/dest/enderDest', obrigatorio=False)
self.fone = TagInteiro(nome='fone' , codigo='E16', tamanho=[ 1, 10] , raiz='//NFe/infNFe/dest/enderDest', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<enderDest>'
xml += self.xLgr.xml
xml += self.nro.xml
xml += self.xCpl.xml
xml += self.xBairro.xml
xml += self.cMun.xml
xml += self.xMun.xml
xml += self.UF.xml
xml += self.CEP.xml
xml += self.cPais.xml
xml += self.xPais.xml
xml += self.fone.xml
xml += '</enderDest>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xLgr.xml = arquivo
self.nro.xml = arquivo
self.xCpl.xml = arquivo
self.xBairro.xml = arquivo
self.cMun.xml = arquivo
self.xMun.xml = arquivo
self.UF.xml = arquivo
self.CEP.xml = arquivo
self.cPais.xml = arquivo
self.xPais.xml = arquivo
self.fone.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'E05|'
txt += self.xLgr.txt + '|'
txt += self.nro.txt + '|'
txt += self.xCpl.txt + '|'
txt += self.xBairro.txt + '|'
txt += self.cMun.txt + '|'
txt += self.xMun.txt + '|'
txt += self.UF.txt + '|'
txt += self.CEP.txt + '|'
txt += self.cPais.txt + '|'
txt += self.xPais.txt + '|'
txt += self.fone.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Dest(XMLNFe):
def __init__(self):
super(Dest, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='E02', tamanho=[0 , 14] , raiz='//NFe/infNFe/dest', obrigatorio=False)
self.CPF = TagCaracter(nome='CPF' , codigo='E03', tamanho=[11, 11] , raiz='//NFe/infNFe/dest', obrigatorio=False)
self.xNome = TagCaracter(nome='xNome', codigo='E04', tamanho=[ 2, 60] , raiz='//NFe/infNFe/dest')
self.enderDest = EnderDest()
self.IE = TagCaracter(nome='IE' , codigo='E17', tamanho=[ 2, 14] , raiz='//NFe/infNFe/dest')
self.ISUF = TagCaracter(nome='ISUF' , codigo='E18', tamanho=[ 9, 9] , raiz='//NFe/infNFe/dest', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<dest>'
        #
        # Force the use of the CNPJ tag when the invoice is issued in the
        # homologation (test) environment
        #
if self.CNPJ.valor == '99999999000191':
xml += self.CNPJ.xml
elif self.CPF.valor:
xml += self.CPF.xml
else:
xml += self.CNPJ.xml
xml += self.xNome.xml
xml += self.enderDest.xml
xml += self.IE.xml
xml += self.ISUF.xml
xml += '</dest>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.CPF.xml = arquivo
self.xNome.xml = arquivo
self.enderDest.xml = arquivo
self.IE.xml = arquivo
self.ISUF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'E|'
txt += self.xNome.txt + '|'
txt += self.IE.txt + '|'
txt += self.ISUF.txt + '|'
txt += '\n'
if self.CPF.valor:
txt += 'E03|' + self.CPF.txt + '|\n'
else:
txt += 'E02|' + self.CNPJ.txt + '|\n'
txt += self.enderDest.txt
return txt
txt = property(get_txt)
class Avulsa(XMLNFe):
def __init__(self):
super(Avulsa, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='D02', tamanho=[14, 14], raiz='//NFe/infNFe/avulsa')
self.xOrgao = TagCaracter(nome='xOrgao' , codigo='D03', tamanho=[ 1, 60], raiz='//NFe/infNFe/avulsa')
self.matr = TagCaracter(nome='matr' , codigo='D04', tamanho=[ 1, 60], raiz='//NFe/infNFe/avulsa')
self.xAgente = TagCaracter(nome='xAgente', codigo='D05', tamanho=[ 1, 60], raiz='//NFe/infNFe/avulsa')
self.fone = TagInteiro(nome='fone' , codigo='D06', tamanho=[ 1, 10], raiz='//NFe/infNFe/avulsa')
self.UF = TagCaracter(nome='UF' , codigo='D07', tamanho=[ 2, 2], raiz='//NFe/infNFe/avulsa')
self.nDAR = TagCaracter(nome='nDAR' , codigo='D08', tamanho=[ 1, 60], raiz='//NFe/infNFe/avulsa')
self.dEmi = TagData(nome='dEmi' , codigo='D09', raiz='//NFe/infNFe/avulsa')
self.vDAR = TagDecimal(nome='vDAR' , codigo='D10', tamanho=[ 1, 15], decimais=[0, 2, 2], raiz='//NFe/infNFe/avulsa')
self.repEmi = TagCaracter(nome='repEmi' , codigo='D11', tamanho=[ 1, 60], raiz='//NFe/infNFe/avulsa')
self.dPag = TagData(nome='dPag' , codigo='D12', raiz='//NFe/infNFe/avulsa', obrigatorio=False)
def get_xml(self):
if not len(self.CNPJ.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<avulsa>'
xml += self.CNPJ.xml
xml += self.xOrgao.xml
xml += self.matr.xml
xml += self.xAgente.xml
xml += self.fone.xml
xml += self.UF.xml
xml += self.nDAR.xml
xml += self.dEmi.xml
xml += self.vDAR.xml
xml += self.repEmi.xml
xml += self.dPag.xml
xml += '</avulsa>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.xOrgao.xml = arquivo
self.matr.xml = arquivo
self.xAgente.xml = arquivo
self.fone.xml = arquivo
self.UF.xml = arquivo
self.nDAR.xml = arquivo
self.dEmi.xml = arquivo
self.vDAR.xml = arquivo
self.repEmi.xml = arquivo
self.dPag.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not len(self.CNPJ.valor):
return ''
txt = 'D|'
txt += self.CNPJ.txt + '|'
txt += self.xOrgao.txt + '|'
txt += self.matr.txt + '|'
txt += self.xAgente.txt + '|'
txt += self.fone.txt + '|'
txt += self.UF.txt + '|'
txt += self.nDAR.txt + '|'
txt += self.dEmi.txt + '|'
txt += self.vDAR.txt + '|'
txt += self.repEmi.txt + '|'
txt += self.dPag.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class EnderEmit(XMLNFe):
def __init__(self):
super(EnderEmit, self).__init__()
self.xLgr = TagCaracter(nome='xLgr' , codigo='C06', tamanho=[ 2, 60] , raiz='//NFe/infNFe/emit/enderEmit')
self.nro = TagCaracter(nome='nro' , codigo='C07', tamanho=[ 1, 60] , raiz='//NFe/infNFe/emit/enderEmit')
self.xCpl = TagCaracter(nome='xCpl' , codigo='C08', tamanho=[ 1, 60] , raiz='//NFe/infNFe/emit/enderEmit', obrigatorio=False)
self.xBairro = TagCaracter(nome='xBairro', codigo='C09', tamanho=[ 2, 60] , raiz='//NFe/infNFe/emit/enderEmit')
self.cMun = TagInteiro(nome='cMun' , codigo='C10', tamanho=[ 7, 7, 7], raiz='//NFe/infNFe/emit/enderEmit')
self.xMun = TagCaracter(nome='xMun' , codigo='C11', tamanho=[ 2, 60] , raiz='//NFe/infNFe/emit/enderEmit')
self.UF = TagCaracter(nome='UF' , codigo='C12', tamanho=[ 2, 2] , raiz='//NFe/infNFe/emit/enderEmit')
self.CEP = TagCaracter(nome='CEP' , codigo='C13', tamanho=[ 8, 8, 8], raiz='//NFe/infNFe/emit/enderEmit', obrigatorio=False)
self.cPais = TagCaracter(nome='cPais' , codigo='C14', tamanho=[ 4, 4, 4], raiz='//NFe/infNFe/emit/enderEmit', obrigatorio=False)
self.xPais = TagCaracter(nome='xPais' , codigo='C15', tamanho=[ 1, 60] , raiz='//NFe/infNFe/emit/enderEmit', obrigatorio=False)
self.fone = TagInteiro(nome='fone' , codigo='C16', tamanho=[ 1, 10] , raiz='//NFe/infNFe/emit/enderEmit', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<enderEmit>'
xml += self.xLgr.xml
xml += self.nro.xml
xml += self.xCpl.xml
xml += self.xBairro.xml
xml += self.cMun.xml
xml += self.xMun.xml
xml += self.UF.xml
xml += self.CEP.xml
xml += self.cPais.xml
xml += self.xPais.xml
xml += self.fone.xml
xml += '</enderEmit>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xLgr.xml = arquivo
self.nro.xml = arquivo
self.xCpl.xml = arquivo
self.xBairro.xml = arquivo
self.cMun.xml = arquivo
self.xMun.xml = arquivo
self.UF.xml = arquivo
self.CEP.xml = arquivo
self.cPais.xml = arquivo
self.xPais.xml = arquivo
self.fone.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'C05|'
txt += self.xLgr.txt + '|'
txt += self.nro.txt + '|'
txt += self.xCpl.txt + '|'
txt += self.xBairro.txt + '|'
txt += self.cMun.txt + '|'
txt += self.xMun.txt + '|'
txt += self.UF.txt + '|'
txt += self.CEP.txt + '|'
txt += self.cPais.txt + '|'
txt += self.xPais.txt + '|'
txt += self.fone.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class Emit(XMLNFe):
def __init__(self):
super(Emit, self).__init__()
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='C02' , tamanho=[14, 14], raiz='//NFe/infNFe/emit', obrigatorio=False)
self.CPF = TagCaracter(nome='CPF' , codigo='C02a', tamanho=[11, 11], raiz='//NFe/infNFe/emit', obrigatorio=False)
self.xNome = TagCaracter(nome='xNome', codigo='C03' , tamanho=[ 2, 60], raiz='//NFe/infNFe/emit')
self.xFant = TagCaracter(nome='xFant', codigo='C04' , tamanho=[ 1, 60], raiz='//NFe/infNFe/emit', obrigatorio=False)
self.enderEmit = EnderEmit()
self.IE = TagCaracter(nome='IE' , codigo='C17' , tamanho=[ 2, 14], raiz='//NFe/infNFe/emit', obrigatorio=False)
self.IEST = TagCaracter(nome='IEST' , codigo='C18' , tamanho=[ 2, 14], raiz='//NFe/infNFe/emit', obrigatorio=False)
self.IM = TagCaracter(nome='IM' , codigo='C19' , tamanho=[ 1, 15], raiz='//NFe/infNFe/emit', obrigatorio=False)
self.CNAE = TagCaracter(nome='CNAE' , codigo='C20' , tamanho=[ 7, 7], raiz='//NFe/infNFe/emit', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<emit>'
xml += self.CNPJ.xml
xml += self.CPF.xml
xml += self.xNome.xml
xml += self.xFant.xml
xml += self.enderEmit.xml
xml += self.IE.xml
xml += self.IEST.xml
xml += self.IM.xml
xml += self.CNAE.xml
xml += '</emit>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CNPJ.xml = arquivo
self.CPF.xml = arquivo
self.xNome.xml = arquivo
self.xFant.xml = arquivo
self.enderEmit.xml = arquivo
self.IE.xml = arquivo
self.IEST.xml = arquivo
self.IM.xml = arquivo
self.CNAE.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'C|'
txt += self.xNome.txt + '|'
txt += self.xFant.txt + '|'
txt += self.IE.txt + '|'
txt += self.IEST.txt + '|'
txt += self.IM.txt + '|'
txt += self.CNAE.txt + '|'
txt += '\n'
if self.CNPJ.valor:
txt += 'C02|' + self.CNPJ.txt + '|\n'
else:
txt += 'C02a|' + self.CPF.txt + '|\n'
txt += self.enderEmit.txt
return txt
txt = property(get_txt)
class RefNF(XMLNFe):
def __init__(self):
super(RefNF, self).__init__()
self.cUF = TagInteiro(nome='cUF' , codigo='B15', tamanho=[ 2, 2, 2], raiz='//NFref/refNF')
self.AAMM = TagCaracter(nome='AAMM', codigo='B16', tamanho=[ 4, 4, 4], raiz='//NFref/refNF')
self.CNPJ = TagCaracter(nome='CNPJ', codigo='B17', tamanho=[14, 14] , raiz='//NFref/refNF')
self.mod = TagCaracter(nome='mod' , codigo='B18', tamanho=[ 2, 2, 2], raiz='//NFref/refNF')
self.serie = TagInteiro(nome='serie', codigo='B19', tamanho=[ 1, 3, 1], raiz='//NFref/refNF')
self.nNF = TagInteiro(nome='nNF' , codigo='B20', tamanho=[ 1, 9, 1], raiz='//NFref/refNF')
def get_xml(self):
if not (self.cUF.valor or self.AAMM.valor or self.CNPJ.valor or self.mod.valor or self.serie.valor or self.nNF.valor):
return ''
xml = XMLNFe.get_xml(self)
xml += '<refNF>'
xml += self.cUF.xml
xml += self.AAMM.xml
xml += self.CNPJ.xml
xml += self.mod.xml
xml += self.serie.xml
xml += self.nNF.xml
xml += '</refNF>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.cUF.xml = arquivo
self.AAMM.xml = arquivo
self.CNPJ.xml = arquivo
self.mod.xml = arquivo
self.serie.xml = arquivo
self.nNF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.cUF.valor or self.AAMM.valor or self.CNPJ.valor or self.mod.valor or self.serie.valor or self.nNF.valor):
return ''
txt = 'B14|'
txt += self.cUF.txt + '|'
txt += self.AAMM.txt + '|'
txt += self.CNPJ.txt + '|'
txt += self.mod.txt + '|'
txt += self.serie.txt + '|'
txt += self.nNF.txt + '|'
txt += '\n'
return txt
txt = property(get_txt)
class NFRef(XMLNFe):
def __init__(self):
super(NFRef, self).__init__()
self.refNFe = TagCaracter(nome='refNFe', codigo='B13', tamanho=[44, 44], raiz='//NFRef', obrigatorio=False)
self.refNF = RefNF()
def get_xml(self):
if not (self.refNFe.valor or self.refNF.xml):
return ''
xml = XMLNFe.get_xml(self)
xml += '<NFref>'
xml += self.refNFe.xml
xml += self.refNF.xml
xml += '</NFref>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.refNFe.xml = arquivo
self.refNF.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
if not (self.refNFe.valor or self.refNF.xml):
return ''
if self.refNFe.valor:
txt = 'B13|' + self.refNFe.txt + '|\n'
else:
txt = self.refNF.txt
return txt
txt = property(get_txt)
class Ide(XMLNFe):
def __init__(self):
super(Ide, self).__init__()
self.cUF = TagInteiro(nome='cUF' , codigo='B02', tamanho=[ 2, 2, 2], raiz='//NFe/infNFe/ide')
self.cNF = TagCaracter(nome='cNF' , codigo='B03', tamanho=[ 9, 9, 9], raiz='//NFe/infNFe/ide')
self.natOp = TagCaracter(nome='natOp' , codigo='B04', tamanho=[ 1, 60] , raiz='//NFe/infNFe/ide')
self.indPag = TagInteiro(nome='indPag' , codigo='B05', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide')
self.mod = TagInteiro(nome='mod' , codigo='B06', tamanho=[ 2, 2, 2], raiz='//NFe/infNFe/ide', valor=55)
self.serie = TagInteiro(nome='serie' , codigo='B07', tamanho=[ 1, 3, 1], raiz='//NFe/infNFe/ide')
self.nNF = TagInteiro(nome='nNF' , codigo='B08', tamanho=[ 1, 9, 1], raiz='//NFe/infNFe/ide')
self.dEmi = TagData(nome='dEmi' , codigo='B09', raiz='//NFe/infNFe/ide')
self.dSaiEnt = TagData(nome='dSaiEnt' , codigo='B10', raiz='//NFe/infNFe/ide', obrigatorio=False)
self.tpNF = TagInteiro(nome='tpNF' , codigo='B11', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor=1)
self.cMunFG = TagInteiro(nome='cMunFG' , codigo='B12', tamanho=[ 7, 7, 7], raiz='//NFe/infNFe/ide')
self.NFref = []
self.tpImp = TagInteiro(nome='tpImp' , codigo='B21', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor=1)
self.tpEmis = TagInteiro(nome='tpEmis' , codigo='B22', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor=1)
self.cDV = TagInteiro(nome='cDV' , codigo='B23', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='B24', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor=2)
self.finNFe = TagInteiro(nome='finNFe' , codigo='B25', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor=1)
self.procEmi = TagInteiro(nome='procEmi' , codigo='B26', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide')
self.verProc = TagCaracter(nome='verProc', codigo='B27', tamanho=[ 1, 20] , raiz='//NFe/infNFe/ide')
self.hSaiEnt = TagHora(nome='hSaiEnt' , codigo='' , raiz='//NFe/infNFe/ide', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<ide>'
xml += self.cUF.xml
xml += self.cNF.xml
xml += self.natOp.xml
xml += self.indPag.xml
xml += self.mod.xml
xml += self.serie.xml
xml += self.nNF.xml
xml += self.dEmi.xml
xml += self.dSaiEnt.xml
xml += self.tpNF.xml
xml += self.cMunFG.xml
for nr in self.NFref:
xml += nr.xml
xml += self.tpImp.xml
xml += self.tpEmis.xml
xml += self.cDV.xml
xml += self.tpAmb.xml
xml += self.finNFe.xml
xml += self.procEmi.xml
xml += self.verProc.xml
xml += '</ide>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.cUF.xml = arquivo
self.cNF.xml = arquivo
self.natOp.xml = arquivo
self.indPag.xml = arquivo
self.mod.xml = arquivo
self.serie.xml = arquivo
self.nNF.xml = arquivo
self.dEmi.xml = arquivo
self.dSaiEnt.xml = arquivo
self.tpNF.xml = arquivo
self.cMunFG.xml = arquivo
            #
            # Technique for reading repeated (multiple) tags.
            # The classes of these tags, and their children, must be
            # "re-rooted" (raiz property) so that they can be
            # read correctly.
            #
            self.NFref = self.le_grupo('//NFe/infNFe/ide/NFref', NFRef)
self.tpImp.xml = arquivo
self.tpEmis.xml = arquivo
self.cDV.xml = arquivo
self.tpAmb.xml = arquivo
self.finNFe.xml = arquivo
self.procEmi.xml = arquivo
self.verProc.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'B|'
txt += self.cUF.txt + '|'
txt += self.cNF.txt + '|'
txt += self.natOp.txt + '|'
txt += self.indPag.txt + '|'
txt += self.mod.txt + '|'
txt += self.serie.txt + '|'
txt += self.nNF.txt + '|'
txt += self.dEmi.txt + '|'
txt += self.dSaiEnt.txt + '|'
txt += self.tpNF.txt + '|'
txt += self.cMunFG.txt + '|'
txt += self.tpImp.txt + '|'
txt += self.tpEmis.txt + '|'
txt += self.cDV.txt + '|'
txt += self.tpAmb.txt + '|'
txt += self.finNFe.txt + '|'
txt += self.procEmi.txt + '|'
txt += self.verProc.txt + '|'
txt += '\n'
for nr in self.NFref:
txt += nr.txt
return txt
txt = property(get_txt)
class InfNFe(XMLNFe):
def __init__(self):
super(InfNFe, self).__init__()
self.versao = TagDecimal(nome='infNFe' , codigo='A01', propriedade='versao', raiz='//NFe', namespace=NAMESPACE_NFE, valor='1.10')
self.Id = TagCaracter(nome='infNFe', codigo='A03', propriedade='Id' , raiz='//NFe', namespace=NAMESPACE_NFE)
self.ide = Ide()
self.emit = Emit()
self.avulsa = Avulsa()
self.dest = Dest()
self.retirada = Retirada()
self.entrega = Entrega()
self.det = []
self.total = Total()
self.transp = Transp()
self.cobr = Cobr()
self.infAdic = InfAdic()
self.exporta = Exporta()
self.compra = Compra()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<infNFe versao="' + unicode(self.versao.valor) + '" Id="' + self.Id.valor + '">'
xml += self.ide.xml
xml += self.emit.xml
xml += self.avulsa.xml
xml += self.dest.xml
xml += self.retirada.xml
xml += self.entrega.xml
for d in self.det:
xml += d.xml
xml += self.total.xml
xml += self.transp.xml
xml += self.cobr.xml
xml += self.infAdic.xml
xml += self.exporta.xml
xml += self.compra.xml
xml += '</infNFe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.Id.xml = arquivo
self.ide.xml = arquivo
self.emit.xml = arquivo
self.avulsa.xml = arquivo
self.dest.xml = arquivo
self.retirada.xml = arquivo
self.entrega.xml = arquivo
            #
            # Technique for reading repeated (multiple) tags.
            # The classes of these tags, and their children, must be
            # "re-rooted" (raiz property) so that they can be
            # read correctly.
            #
self.det = self.le_grupo('//NFe/infNFe/det', Det)
self.total.xml = arquivo
self.transp.xml = arquivo
self.cobr.xml = arquivo
self.infAdic.xml = arquivo
self.exporta.xml = arquivo
self.compra.xml = arquivo
xml = property(get_xml, set_xml)
def get_txt(self):
txt = 'A|'
txt += self.versao.txt + '|'
txt += self.Id.txt + '|'
txt += '\n'
txt += self.ide.txt
txt += self.emit.txt
txt += self.avulsa.txt
txt += self.dest.txt
txt += self.retirada.txt
txt += self.entrega.txt
for d in self.det:
txt += d.txt
txt += self.total.txt
txt += self.transp.txt
txt += self.cobr.txt
txt += self.infAdic.txt
txt += self.exporta.txt
txt += self.compra.txt
return txt
txt = property(get_txt)
class NFe(XMLNFe):
def __init__(self):
super(NFe, self).__init__()
self.infNFe = InfNFe()
self.Signature = Signature()
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'nfe_v1.10.xsd'
self.chave = ''
self.dados_contingencia_fsda = ''
self.site = ''
self.email = ''
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += '<NFe xmlns="http://www.portalfiscal.inf.br/nfe">'
xml += self.infNFe.xml
        #
        # Set the URI to be signed
        #
self.Signature.URI = '#' + self.infNFe.Id.valor
xml += self.Signature.xml
xml += '</NFe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.infNFe.xml = arquivo
self.Signature.xml = self._le_noh('//NFe/sig:Signature')
xml = property(get_xml, set_xml)
def get_txt(self):
txt = self.infNFe.txt
return txt
txt = property(get_txt)
def _calcula_dv(self, valor):
soma = 0
m = 2
for i in range(len(valor)-1, -1, -1):
c = valor[i]
soma += int(c) * m
m += 1
if m > 9:
m = 2
digito = 11 - (soma % 11)
if digito > 9:
digito = 0
return digito
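    #
    # _calcula_dv above is the standard modulo-11 check digit used by the
    # NF-e access key: digits are weighted 2..9, cycling from right to left,
    # the weighted sum is taken modulo 11, and results of 10 or 11 collapse
    # to 0. For example, for the string '43': 3*2 + 4*3 = 18, 18 % 11 = 7,
    # so the check digit is 11 - 7 = 4.
    #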
def gera_nova_chave(self):
chave = unicode(self.infNFe.ide.cUF.valor).zfill(2)
chave += unicode(self.infNFe.ide.dEmi.valor.strftime('%y%m')).zfill(4)
chave += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
chave += unicode(self.infNFe.ide.mod.valor).zfill(2)
chave += unicode(self.infNFe.ide.serie.valor).zfill(3)
chave += unicode(self.infNFe.ide.nNF.valor).zfill(9)
        #
        # Including the emission type (tpEmis) in the key also makes the key
        # valid for version 2.00 of the NF-e
        #
chave += unicode(self.infNFe.ide.tpEmis.valor).zfill(1)
        #
        # The numeric code (cNF) is a random number
        #
        #chave += unicode(random.randint(0, 99999999)).strip().rjust(8, '0')
        #
        # For safety, however, it is preferable that this number not be
        # entirely random
        #
soma = 0
for c in chave:
soma += int(c) ** 3 ** 2
codigo = unicode(soma)
if len(codigo) > 8:
codigo = codigo[-8:]
else:
codigo = codigo.rjust(8, '0')
chave += codigo
        #
        # Set the cNF field in the XML structure
        #
self.infNFe.ide.cNF.valor = unicode(self.infNFe.ide.tpEmis.valor).zfill(1) + codigo
        #
        # Generate the check digit
        #
digito = self._calcula_dv(chave)
        #
        # Set the cDV field in the XML structure
        #
self.infNFe.ide.cDV.valor = digito
chave += unicode(digito)
self.chave = chave
        #
        # Set the Id
        #
self.infNFe.Id.valor = 'NFe' + chave
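    #
    # The 44-digit access key assembled by gera_nova_chave is laid out as:
    #   cUF (2) + AAMM of dEmi (4) + CNPJ (14) + mod (2) + serie (3) +
    #   nNF (9) + tpEmis (1) + numeric code (8) + check digit cDV (1)
    # monta_chave below rebuilds the same key from the stored fields, with
    # tpEmis already embedded as the first digit of cNF.
    #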
def monta_chave(self):
chave = unicode(self.infNFe.ide.cUF.valor).zfill(2)
chave += unicode(self.infNFe.ide.dEmi.valor.strftime('%y%m')).zfill(4)
chave += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
chave += unicode(self.infNFe.ide.mod.valor).zfill(2)
chave += unicode(self.infNFe.ide.serie.valor).zfill(3)
chave += unicode(self.infNFe.ide.nNF.valor).zfill(9)
chave += unicode(self.infNFe.ide.cNF.valor).zfill(9)
chave += unicode(self.infNFe.ide.cDV.valor).zfill(1)
self.chave = chave
def chave_para_codigo_barras(self):
        #
        # The reportlab barcode-generation functions do not accept
        # unicode strings
        #
return self.chave.encode('utf-8')
def monta_dados_contingencia_fsda(self):
dados = unicode(self.infNFe.ide.cUF.valor).zfill(2)
dados += unicode(self.infNFe.ide.tpEmis.valor).zfill(1)
dados += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
dados += unicode(int(self.infNFe.total.ICMSTot.vNF.valor * 100)).zfill(14)
        #
        # Is there ICMS on the issuer's own operations?
        #
if self.infNFe.total.ICMSTot.vICMS.valor:
dados += '1'
else:
dados += '2'
        #
        # Is there ICMS ST (tax substitution)?
        #
if self.infNFe.total.ICMSTot.vST.valor:
dados += '1'
else:
dados += '2'
dados += self.infNFe.ide.dEmi.valor.strftime('%d').zfill(2)
digito = self._calcula_dv(dados)
dados += unicode(digito)
self.dados_contingencia_fsda = dados
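    #
    # The FS-DA contingency data assembled above is 36 digits long:
    #   cUF (2) + tpEmis (1) + CNPJ (14) + invoice total vNF in cents (14) +
    #   "has own ICMS" flag (1) + "has ICMS ST" flag (1) +
    #   day of dEmi (2) + check digit (1)
    #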
def dados_contingencia_fsda_para_codigo_barras(self):
        #
        # The reportlab barcode-generation functions do not accept
        # unicode strings
        #
self.monta_dados_contingencia_fsda()
return self.dados_contingencia_fsda.encode('utf-8')
    #
    # Functions to format fields for the DANFE (the printed NF-e layout)
    #
def chave_formatada(self):
chave = self.chave
chave_formatada = ' '.join((chave[0:4], chave[4:8], chave[8:12], chave[12:16], chave[16:20], chave[20:24], chave[24:28], chave[28:32], chave[32:36], chave[36:40], chave[40:44]))
return chave_formatada
def dados_contingencia_fsda_formatados(self):
self.monta_dados_contingencia_fsda()
dados = self.dados_contingencia_fsda
dados_formatados = ' '.join((dados[0:4], dados[4:8], dados[8:12], dados[12:16], dados[16:20], dados[20:24], dados[24:28], dados[28:32], dados[32:36]))
return dados_formatados
def numero_formatado(self):
num = unicode(self.infNFe.ide.nNF.valor).zfill(9)
num_formatado = '.'.join((num[0:3], num[3:6], num[6:9]))
return 'Nº ' + num_formatado
def serie_formatada(self):
return 'SÉRIE ' + unicode(self.infNFe.ide.serie.valor).zfill(3)
def _formata_cpf(self, cpf):
if not len(cpf.strip()):
return ''
formatado = cpf[0:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:11]
return formatado
def _formata_cnpj(self, cnpj):
if not len(cnpj.strip()):
return ''
formatado = cnpj[0:2] + '.' + cnpj[2:5] + '.' + cnpj[5:8] + '/' + cnpj[8:12] + '-' + cnpj[12:14]
return formatado
def cnpj_emitente_formatado(self):
if len(self.infNFe.emit.CPF.valor):
return self._formata_cpf(unicode(self.infNFe.emit.CPF.valor))
else:
return self._formata_cnpj(unicode(self.infNFe.emit.CNPJ.valor))
def endereco_emitente_formatado(self):
formatado = self.infNFe.emit.enderEmit.xLgr.valor
formatado += ', ' + self.infNFe.emit.enderEmit.nro.valor
if len(self.infNFe.emit.enderEmit.xCpl.valor.strip()):
formatado += ' - ' + self.infNFe.emit.enderEmit.xCpl.valor
return formatado
def _formata_cep(self, cep):
if not len(cep.strip()):
return ''
return cep[0:5] + '-' + cep[5:8]
def cep_emitente_formatado(self):
return self._formata_cep(self.infNFe.emit.enderEmit.CEP.valor)
def endereco_emitente_formatado_linha_1(self):
formatado = self.endereco_emitente_formatado()
formatado += ' - ' + self.infNFe.emit.enderEmit.xBairro.valor
return formatado
def endereco_emitente_formatado_linha_2(self):
formatado = self.infNFe.emit.enderEmit.xMun.valor
formatado += ' - ' + self.infNFe.emit.enderEmit.UF.valor
formatado += ' - ' + self.cep_emitente_formatado()
return formatado
def endereco_emitente_formatado_linha_3(self):
if self.fone_emitente_formatado().strip() != '':
formatado = 'Fone: ' + self.fone_emitente_formatado()
else:
formatado = ''
return formatado
def endereco_emitente_formatado_linha_4(self):
return self.site
def _formata_fone(self, fone):
if not len(fone.strip()):
return ''
if fone.strip() == '0':
return ''
if len(fone) <= 8:
formatado = fone[:-4] + '-' + fone[-4:]
elif len(fone) <= 10:
ddd = fone[0:2]
fone = fone[2:]
formatado = '(' + ddd + ') ' + fone[:-4] + '-' + fone[-4:]
        #
        # Mobile numbers in São Paulo now have 9 digits...
        #
elif len(fone) <= 11:
ddd = fone[0:3]
fone = fone[3:]
formatado = '(' + ddd + ') ' + fone[:-4] + '-' + fone[-4:]
        #
        # Assume 8 digits for the number, 2 for the area code (DDD),
        # and the rest is the country code (DDI)
        #
else:
numero = fone[len(fone)-8:]
ddd = fone[len(fone)-10:len(fone)-8]
ddi = fone[:len(fone)-10]
formatado = '+' + ddi + ' (' + ddd + ') ' + numero[:-4] + '-' + numero[-4:]
return formatado
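    #
    # Examples of what _formata_fone produces (the numbers are made up):
    #   '33334444'     -> '3333-4444'
    #   '1133334444'   -> '(11) 3333-4444'
    #   '551133334444' -> '+55 (11) 3333-4444'
    #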
def fone_emitente_formatado(self):
return self._formata_fone(unicode(self.infNFe.emit.enderEmit.fone.valor))
def cnpj_destinatario_formatado(self):
if self.infNFe.dest.CPF.valor and len(self.infNFe.dest.CPF.valor):
return self._formata_cpf(unicode(self.infNFe.dest.CPF.valor))
elif self.infNFe.dest.CNPJ.valor and len(self.infNFe.dest.CNPJ.valor):
return self._formata_cnpj(unicode(self.infNFe.dest.CNPJ.valor))
else:
return ''
def endereco_destinatario_formatado(self):
formatado = self.infNFe.dest.enderDest.xLgr.valor
formatado += ', ' + self.infNFe.dest.enderDest.nro.valor
if len(self.infNFe.dest.enderDest.xCpl.valor.strip()):
formatado += ' - ' + self.infNFe.dest.enderDest.xCpl.valor
return formatado
def cep_destinatario_formatado(self):
return self._formata_cep(self.infNFe.dest.enderDest.CEP.valor)
def fone_destinatario_formatado(self):
return self._formata_fone(unicode(self.infNFe.dest.enderDest.fone.valor))
def cnpj_retirada_formatado(self):
return self._formata_cnpj(self.infNFe.retirada.CNPJ.valor)
def endereco_retirada_formatado(self):
formatado = self.infNFe.retirada.xLgr.valor
formatado += ', ' + self.infNFe.retirada.nro.valor
if len(self.infNFe.retirada.xCpl.valor.strip()):
formatado += ' - ' + self.infNFe.retirada.xCpl.valor
formatado += ' - ' + self.infNFe.retirada.xBairro.valor
formatado += ' - ' + self.infNFe.retirada.xMun.valor
formatado += '-' + self.infNFe.retirada.UF.valor
return formatado
def cnpj_entrega_formatado(self):
return self._formata_cnpj(self.infNFe.entrega.CNPJ.valor)
def endereco_entrega_formatado(self):
formatado = self.infNFe.entrega.xLgr.valor
formatado += ', ' + self.infNFe.entrega.nro.valor
if len(self.infNFe.entrega.xCpl.valor.strip()):
formatado += ' - ' + self.infNFe.entrega.xCpl.valor
formatado += ' - ' + self.infNFe.entrega.xBairro.valor
formatado += ' - ' + self.infNFe.entrega.xMun.valor
formatado += '-' + self.infNFe.entrega.UF.valor
return formatado
def cnpj_transportadora_formatado(self):
if self.infNFe.transp.transporta.CPF.valor:
return self._formata_cpf(self.infNFe.transp.transporta.CPF.valor)
else:
return self._formata_cnpj(self.infNFe.transp.transporta.CNPJ.valor)
def placa_veiculo_formatada(self):
if not self.infNFe.transp.veicTransp.placa.valor:
return ''
placa = self.infNFe.transp.veicTransp.placa.valor
placa = placa[:-4] + '-' + placa[-4:]
return placa
def dados_adicionais(self):
da = ''
if self.infNFe.infAdic.infAdFisco.valor:
da = self.infNFe.infAdic.infAdFisco.valor.replace('|', '<br />')
if self.infNFe.infAdic.infCpl.valor:
if len(da) > 0:
da += '<br />'
da += self.infNFe.infAdic.infCpl.valor.replace('|', '<br />')
return da
def canhoto_formatado(self):
formatado = 'RECEBEMOS DE <b>'
formatado += self.infNFe.emit.xNome.valor.upper()
formatado += '</b> OS PRODUTOS E/OU SERVIÇOS CONSTANTES DA <b>NOTA FISCAL ELETRÔNICA</b> INDICADA AO LADO'
return formatado
def frete_formatado(self):
if self.infNFe.transp.modFrete.valor == 0:
formatado = '0-EMITENTE'
elif self.infNFe.transp.modFrete.valor == 1:
if self.infNFe.ide.tpNF.valor == 0:
formatado = '1-REMETENTE'
else:
formatado = '1-DESTINATÁRIO'
elif self.infNFe.transp.modFrete.valor == 2:
formatado = '2-DE TERCEIROS'
elif self.infNFe.transp.modFrete.valor == 9:
formatado = '9-SEM FRETE'
else:
formatado = ''
return formatado
def cst_descricao(self):
return 'CST'
def crt_descricao(self):
return ''
| henriquechehad/PySPED | pysped/nfe/leiaute/nfe_110.py | Python | lgpl-2.1 | 156,153 |
#!/usr/bin/env python
#
# Written by Dougal Scott <dougal.scott@gmail.com>
#
# Copyright (C) 2017 Dougal Scott
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import argparse
from hostinfo_client import hostinfo_delete
###############################################################################
def parse_args():
description = 'Remove an allowable value from a restricted key'
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'keyvalue',
help='Name of the key/value pair to disallow (key=value)')
args = parser.parse_args()
return args
###############################################################################
def main():
args = parse_args()
    m = re.match(r"(?P<key>\w+)=(?P<value>.+)", args.keyvalue)
if not m:
sys.stderr.write("Must be specified in key=value format\n")
        return 1
key = m.group('key').lower()
value = m.group('value').lower()
hostinfo_delete('rval/{}/{}'.format(key, value))
    return 0
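# Example invocation (the key and value below are made up):
#   hostinfo_deleterestrictedvalue os=centos7
# This removes 'centos7' from the allowed values of the restricted key 'os'
# by issuing a delete against the 'rval/os/centos7' endpoint.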
###############################################################################
if __name__ == "__main__":
sys.exit(main())
# EOF
| dwagon/hostinfo_client | bin/hostinfo_deleterestrictedvalue.py | Python | gpl-3.0 | 1,805 |
"""
Platform for the opengarage.io cover component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.opengarage/
"""
import logging
import voluptuous as vol
import requests
from homeassistant.components.cover import (
CoverDevice, PLATFORM_SCHEMA, SUPPORT_OPEN, SUPPORT_CLOSE)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, STATE_UNKNOWN, STATE_CLOSED, STATE_OPEN,
CONF_COVERS, CONF_HOST, CONF_PORT)
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = 'OpenGarage'
DEFAULT_PORT = 80
CONF_DEVICEKEY = "device_key"
ATTR_SIGNAL_STRENGTH = "wifi_signal"
ATTR_DISTANCE_SENSOR = "distance_sensor"
ATTR_DOOR_STATE = "door_state"
STATE_OPENING = "opening"
STATE_CLOSING = "closing"
STATE_STOPPED = "stopped"
STATE_OFFLINE = "offline"
STATES_MAP = {
0: STATE_CLOSED,
1: STATE_OPEN
}
# Validation of the user's configuration
COVER_SCHEMA = vol.Schema({
vol.Required(CONF_DEVICEKEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): vol.Schema({cv.slug: COVER_SCHEMA}),
})
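# An illustrative configuration.yaml entry accepted by the schema above
# (host, device_key and name values are made up):
#
# cover:
#   - platform: opengarage
#     covers:
#       garage:
#         host: 192.168.1.20
#         port: 80
#         device_key: opendoor
#         name: Garage Door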
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup OpenGarage covers."""
covers = []
devices = config.get(CONF_COVERS)
for device_id, device_config in devices.items():
args = {
CONF_NAME: device_config.get(CONF_NAME),
CONF_HOST: device_config.get(CONF_HOST),
CONF_PORT: device_config.get(CONF_PORT),
"device_id": device_config.get(CONF_DEVICE, device_id),
CONF_DEVICEKEY: device_config.get(CONF_DEVICEKEY)
}
covers.append(OpenGarageCover(hass, args))
add_devices(covers, True)
class OpenGarageCover(CoverDevice):
"""Representation of a OpenGarage cover."""
# pylint: disable=no-self-use
def __init__(self, hass, args):
"""Initialize the cover."""
self.opengarage_url = 'http://{}:{}'.format(
args[CONF_HOST],
args[CONF_PORT])
self.hass = hass
self._name = args[CONF_NAME]
self.device_id = args['device_id']
self._devicekey = args[CONF_DEVICEKEY]
self._state = STATE_UNKNOWN
self._state_before_move = None
self.dist = None
self.signal = None
self._available = True
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
if self.signal is not None:
data[ATTR_SIGNAL_STRENGTH] = self.signal
if self.dist is not None:
data[ATTR_DISTANCE_SENSOR] = self.dist
if self._state is not None:
data[ATTR_DOOR_STATE] = self._state
return data
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state == STATE_UNKNOWN:
return None
else:
return self._state in [STATE_CLOSED, STATE_OPENING]
def close_cover(self):
"""Close the cover."""
if self._state not in [STATE_CLOSED, STATE_CLOSING]:
self._state_before_move = self._state
self._state = STATE_CLOSING
self._push_button()
def open_cover(self):
"""Open the cover."""
if self._state not in [STATE_OPEN, STATE_OPENING]:
self._state_before_move = self._state
self._state = STATE_OPENING
self._push_button()
def update(self):
"""Get updated status from API."""
try:
status = self._get_status()
if self._name is None:
if status["name"] is not None:
self._name = status["name"]
state = STATES_MAP.get(status.get('door'), STATE_UNKNOWN)
if self._state_before_move is not None:
if self._state_before_move != state:
self._state = state
self._state_before_move = None
else:
self._state = state
_LOGGER.debug("%s status: %s", self._name, self._state)
self.signal = status.get('rssi')
self.dist = status.get('dist')
self._available = True
except (requests.exceptions.RequestException) as ex:
_LOGGER.error('Unable to connect to OpenGarage device: %(reason)s',
dict(reason=ex))
self._state = STATE_OFFLINE
def _get_status(self):
"""Get latest status."""
url = '{}/jc'.format(self.opengarage_url)
ret = requests.get(url, timeout=10)
return ret.json()
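    # The /jc endpoint returns a JSON status document; update() above reads
    # the 'door', 'rssi', 'dist' and 'name' keys from it.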
def _push_button(self):
"""Send commands to API."""
url = '{}/cc?dkey={}&click=1'.format(
self.opengarage_url, self._devicekey)
try:
response = requests.get(url, timeout=10).json()
if response["result"] == 2:
_LOGGER.error("Unable to control %s: device_key is incorrect.",
self._name)
self._state = self._state_before_move
self._state_before_move = None
except (requests.exceptions.RequestException) as ex:
_LOGGER.error('Unable to connect to OpenGarage device: %(reason)s',
dict(reason=ex))
self._state = self._state_before_move
self._state_before_move = None
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'garage'
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
| JshWright/home-assistant | homeassistant/components/cover/opengarage.py | Python | apache-2.0 | 6,089 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import uuid
import edgedb
from edb.testbase import http as tb
from edb.tools import test
class TestGraphQLFunctional(tb.GraphQLTestCase):
SCHEMA_DEFAULT = os.path.join(os.path.dirname(__file__), 'schemas',
'graphql.esdl')
SCHEMA_OTHER = os.path.join(os.path.dirname(__file__), 'schemas',
'graphql_other.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'graphql_setup.edgeql')
# GraphQL queries cannot run in a transaction
TRANSACTION_ISOLATION = False
def test_graphql_http_keepalive_01(self):
with self.http_con() as con:
for _ in range(3):
req1_data = {
'query': '''
{
Setting(order: {value: {dir: ASC}}) {
value
}
}
'''
}
data, headers, status = self.http_con_request(con, req1_data)
self.assertEqual(status, 200)
self.assertNotIn('connection', headers)
self.assertEqual(
headers.get('content-type'),
'application/json')
self.assertEqual(
json.loads(data)['data'],
{'Setting': [{'value': 'blue'}, {'value': 'full'},
{'value': 'none'}]})
req2_data = {
'query': '''
{
NON_EXISTING_TYPE {
name
}
}
'''
}
data, headers, status = self.http_con_request(con, req2_data)
self.assertEqual(status, 200)
self.assertNotIn('connection', headers)
self.assertEqual(
headers.get('content-type'),
'application/json')
self.assertIn(
'QueryError:',
json.loads(data)['errors'][0]['message'])
def test_graphql_http_errors_01(self):
with self.http_con() as con:
data, headers, status = self.http_con_request(
                con, {}, path='non-existent')
self.assertEqual(status, 404)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'Unknown path', data)
with self.assertRaises(OSError):
                self.http_con_request(con, {}, path='non-existent2')
def test_graphql_http_errors_02(self):
with self.http_con() as con:
data, headers, status = self.http_con_request(con, {})
self.assertEqual(status, 400)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'query is missing', data)
with self.assertRaises(OSError):
                self.http_con_request(con, {}, path='non-existent')
def test_graphql_http_errors_03(self):
with self.http_con() as con:
data, headers, status = self.http_con_request(
con, {'query': 'blah', 'variables': 'bazz'})
self.assertEqual(status, 400)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'must be a JSON object', data)
with self.assertRaises(OSError):
                self.http_con_request(con, {}, path='non-existent')
def test_graphql_http_errors_04(self):
with self.http_con() as con:
con.send(b'blah\r\n\r\n\r\n\r\n')
data, headers, status = self.http_con_request(
con, {'query': 'blah', 'variables': 'bazz'})
self.assertEqual(status, 400)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'HttpParserInvalidMethodError', data)
with self.assertRaises(OSError):
                self.http_con_request(con, {}, path='non-existent')
def test_graphql_functional_query_01(self):
for _ in range(10): # repeat to test prepared pgcon statements
self.assert_graphql_query_result(r"""
query {
Setting {
name
value
}
}
""", {
'Setting': [{
'name': 'template',
'value': 'blue',
}, {
'name': 'perks',
'value': 'full',
}, {
'name': 'template',
'value': 'none',
}],
}, sort=lambda x: x['value'])
def test_graphql_functional_query_02(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'Alice',
'age': 27,
'groups': []
}, {
'name': 'Bob',
'age': 21,
'groups': []
}, {
'name': 'Jane',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}, {
'name': 'John',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'basic',
}]
}],
})
def test_graphql_functional_query_03(self):
self.assert_graphql_query_result(r"""
query mixed {
User {
name
}
Setting {
name
}
}
""", {
'User': [{
'name': 'Alice',
}, {
'name': 'Bob',
}, {
'name': 'Jane',
}, {
'name': 'John',
}],
'Setting': [{
'name': 'perks',
}, {
'name': 'template',
}, {
'name': 'template',
}],
}, sort=lambda x: x['name'])
def test_graphql_functional_query_04(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "John"}}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'John',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'basic',
}]
}],
})
def test_graphql_functional_query_05(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'Bogus' on type 'Query'",
_line=3, _col=21):
self.graphql_query(r"""
query {
Bogus {
name,
groups {
id
name
}
}
}
""")
def test_graphql_functional_query_06(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'bogus' on type 'User'",
_line=5, _col=25):
self.graphql_query(r"""
query {
User {
name,
bogus,
groups {
id
name
}
}
}
""")
def test_graphql_functional_query_07(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'age' on type 'NamedObject'",
_line=5, _col=25):
self.graphql_query(r"""
query {
NamedObject {
name,
age,
groups {
id
name
}
}
}
""")
def test_graphql_functional_query_08(self):
self.assert_graphql_query_result(
r"""
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
""",
{
'Setting': [{
'name': 'perks',
}, {
'name': 'template',
}, {
'name': 'template',
}],
},
sort=lambda x: x['name'],
operation_name='names'
)
self.assert_graphql_query_result(
r"""
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
""",
{
'Setting': [{
'value': 'blue',
}, {
'value': 'full',
}, {
'value': 'none',
}],
},
sort=lambda x: x['value'],
operation_name='values',
use_http_post=False
)
def test_graphql_functional_query_09(self):
with self.assertRaisesRegex(edgedb.QueryError,
r'provide operation name'):
self.graphql_query('''
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
''')
def test_graphql_functional_query_10(self):
with self.assertRaisesRegex(edgedb.QueryError,
r'unknown operation named "foo"'):
self.graphql_query('''
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
''', operation_name='foo')
def test_graphql_functional_query_11(self):
        # Test that parse errors are marshalled from the compiler correctly.
with self.assertRaisesRegex(edgedb.QueryError,
r"Expected Name, found '}'",
_line=4, _col=21):
self.graphql_query(r"""
query {
Setting {
}
}
""")
def test_graphql_functional_query_12(self):
        # Regression test: variable names were shadowing query names.
self.assert_graphql_query_result(
r"""
query users($name: String, $age: Int64) {
User(filter: {or: [{name: {eq: $name}},
{age: {gt: $age}}]},
order: {name: {dir: ASC}})
{
name
age
}
}
query settings {
Setting {
name
}
}
""",
{
'User': [{
'name': 'Alice',
'age': 27
}],
},
variables={'age': 25, 'name': 'Alice'},
operation_name='users'
)
def test_graphql_functional_query_13(self):
# Test special case errors.
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'gibberish' on type 'Query'\. "
r"There's no corresponding type or alias \"gibberish\" "
r"exposed in EdgeDB\. Please check the configuration settings "
r"for this port to make sure that you're connecting to the "
r"right database\.",
_line=3, _col=21):
self.graphql_query(r"""
query {
gibberish
}
""")
def test_graphql_functional_query_14(self):
# Test special case errors.
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'more__gibberish' on type 'Query'\. "
r"There's no corresponding type or alias \"more::gibberish\" "
r"exposed in EdgeDB\. Please check the configuration settings "
r"for this port to make sure that you're connecting to the "
r"right database\.",
_line=3, _col=21):
self.graphql_query(r"""
query {
more__gibberish
}
""")
def test_graphql_functional_query_15(self):
# Test special case errors.
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'Uxer' on type 'Query'\. "
r"Did you mean 'User'\?",
_line=3, _col=21):
self.graphql_query(r"""
query {
Uxer
}
""")
def test_graphql_functional_query_16(self):
# test filtering by nested object
self.assert_graphql_query_result(r"""
query {
User(filter: {groups: {name: {eq: "basic"}}}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'John',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'basic',
}]
}],
})
def test_graphql_functional_query_17(self):
# Test unused & null variables
self.assert_graphql_query_result(
r"""
query Person {
Person
{
name
}
}
""",
{
'Person': [{
'name': 'Bob',
}],
},
variables={'name': None},
)
self.assert_graphql_query_result(
r"""
query Person($name: String) {
Person(filter: {name: {eq: $name}})
{
name
}
}
""",
{
'Person': [],
},
variables={'name': None},
)
def test_graphql_functional_query_18(self):
# test filtering by nested object
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "Alice"}}) {
name
favorites(order: {name: {dir: ASC}}) {
name
}
}
}
""", {
'User': [{
'name': 'Alice',
'favorites': [
{'name': 'basic'},
{'name': 'perks'},
{'name': 'template'},
{'name': 'template'},
{'name': 'unused'},
{'name': 'upgraded'},
]
}],
})
def test_graphql_functional_query_19(self):
# Test built-in object types, by making sure we can query them
# and get some results.
res = self.graphql_query(r"""
{
Object {id}
}
""")
self.assertTrue(len(res) > 0,
'querying "Object" returned no results')
def test_graphql_functional_query_20(self):
# Test built-in object types, by making sure we can query them
# and get some results.
res = self.graphql_query(r"""
{
BaseObject {id}
}
""")
self.assertTrue(len(res) > 0,
'querying "BaseObject" returned no results')
def test_graphql_functional_alias_01(self):
self.assert_graphql_query_result(
r"""
{
SettingAlias {
__typename
name
value
}
Setting {
__typename
name
value
}
}
""",
{
"SettingAlias": [
{
"__typename": "SettingAlias",
"name": "template",
"value": "blue",
},
{
"__typename": "SettingAlias",
"name": "perks",
"value": "full",
},
{
"__typename": "SettingAlias",
"name": "template",
"value": "none",
},
],
"Setting": [
{
"__typename": "Setting_Type",
"name": "template",
"value": "blue",
},
{
"__typename": "Setting_Type",
"name": "perks",
"value": "full",
},
{
"__typename": "Setting_Type",
"name": "template",
"value": "none",
},
],
},
sort=lambda x: x['value']
)
def test_graphql_functional_alias_02(self):
self.assert_graphql_query_result(
r"""
{
SettingAlias {
__typename
name
value
of_group {
__typename
name
}
}
}
""",
{
"SettingAlias": [
{
"__typename": "SettingAlias",
"name": "template",
"value": "blue",
"of_group": {
"__typename": "UserGroup_Type",
"name": "upgraded",
}
},
{
"__typename": "SettingAlias",
"name": "perks",
"value": "full",
"of_group": {
"__typename": "UserGroup_Type",
"name": "upgraded",
}
},
{
"__typename": "SettingAlias",
"name": "template",
"value": "none",
"of_group": {
"__typename": "UserGroup_Type",
"name": "unused",
}
},
],
},
sort=lambda x: x['value']
)
def test_graphql_functional_alias_03(self):
self.assert_graphql_query_result(
r"""
{
SettingAliasAugmented {
__typename
name
value
of_group {
__typename
name
name_upper
}
}
}
""",
{
"SettingAliasAugmented": [
{
"__typename": "SettingAliasAugmented",
"name": "template",
"value": "blue",
"of_group": {
"__typename":
"__SettingAliasAugmented__of_group",
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
{
"__typename": "SettingAliasAugmented",
"name": "perks",
"value": "full",
"of_group": {
"__typename":
"__SettingAliasAugmented__of_group",
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
{
"__typename": "SettingAliasAugmented",
"name": "template",
"value": "none",
"of_group": {
"__typename":
"__SettingAliasAugmented__of_group",
"name": "unused",
"name_upper": "UNUSED",
}
},
],
},
sort=lambda x: x['value']
)
def test_graphql_functional_alias_04(self):
self.assert_graphql_query_result(
r"""
{
ProfileAlias {
__typename
name
value
owner {
__typename
id
}
}
}
""",
{
"ProfileAlias": [
{
"__typename": "ProfileAlias",
"name": "Alice profile",
"value": "special",
"owner": [
{
"__typename": "User_Type",
"id": uuid.UUID,
}
]
},
{
"__typename": "ProfileAlias",
"name": "Bob profile",
"value": "special",
"owner": [
{
"__typename": "Person_Type",
"id": uuid.UUID,
}
]
}
]
},
)
result = self.graphql_query(r"""
query {
ProfileAlias {
owner {
id
}
}
}
""")
user_id = result['ProfileAlias'][0]['owner'][0]['id']
self.assert_graphql_query_result(f"""
query {{
User(filter: {{id: {{eq: "{user_id}"}}}}) {{
name
}}
}}
""", {
'User': [{'name': 'Alice'}]
})
def test_graphql_functional_alias_05(self):
self.assert_graphql_query_result(
r"""
{
SettingAliasAugmented(
filter: {of_group: {name_upper: {eq: "UPGRADED"}}}
) {
name
of_group {
name
name_upper
}
}
}
""",
{
"SettingAliasAugmented": [
{
"name": "perks",
"of_group": {
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
{
"name": "template",
"of_group": {
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
],
},
sort=lambda x: x['name']
)
def test_graphql_functional_alias_06(self):
self.assert_graphql_query_result(
r"""
{
SettingAliasAugmented(
filter: {name: {eq: "perks"}}
) {
name
of_group(
filter: {name_upper: {gt: "U"}}
) {
name
name_upper
}
}
}
""",
{
"SettingAliasAugmented": [
{
"name": "perks",
"of_group": {
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
],
},
)
def test_graphql_functional_arguments_01(self):
result = self.graphql_query(r"""
query {
User {
id
name
age
}
}
""")
alice = [res for res in result['User']
if res['name'] == 'Alice'][0]
self.assert_graphql_query_result(f"""
query {{
User(filter: {{id: {{eq: "{alice['id']}"}}}}) {{
id
name
age
}}
}}
""", {
'User': [alice]
})
def test_graphql_functional_arguments_02(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {eq: "Bob"},
active: {eq: true},
age: {eq: 21}
}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'Bob',
'age': 21,
'groups': [],
}],
})
def test_graphql_functional_arguments_03(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
and: [{name: {eq: "Bob"}}, {active: {eq: true}}],
age: {eq: 21}
}) {
name
score
}
}
""", {
'User': [{
'name': 'Bob',
'score': 4.2,
}],
})
def test_graphql_functional_arguments_04(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
not: {name: {eq: "Bob"}},
age: {eq: 21}
}) {
name
score
}
}
""", {
'User': [],
})
def test_graphql_functional_arguments_05(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {
or: [
{not: {name: {eq: "Bob"}}},
{age: {eq: 20}}
]
},
order: {name: {dir: ASC}}
) {
name
score
}
}
""", {
'User': [
{'name': 'Alice', 'score': 5},
{'name': 'Jane', 'score': 1.23},
{'name': 'John', 'score': 3.14},
],
})
def test_graphql_functional_arguments_06(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {
or: [
{name: {neq: "Bob"}},
{age: {eq: 20}}
]
},
order: {name: {dir: ASC}}
) {
name
score
}
}
""", {
'User': [
{'name': 'Alice', 'score': 5},
{'name': 'Jane', 'score': 1.23},
{'name': 'John', 'score': 3.14},
],
})
def test_graphql_functional_arguments_07(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {ilike: "%o%"},
age: {gt: 22}
}) {
name
age
}
}
""", {
'User': [
{'name': 'John', 'age': 25},
],
}, sort=lambda x: x['name'])
def test_graphql_functional_arguments_08(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {like: "J%"},
score: {
gte: 3
lt: 4.5
}
}) {
name
score
}
}
""", {
'User': [
{'name': 'John', 'score': 3.14},
],
}, sort=lambda x: x['name'])
def test_graphql_functional_arguments_09(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {ilike: "%e"},
age: {lte: 25}
}) {
name
age
}
}
""", {
'User': [
{'name': 'Jane', 'age': 25},
],
}, sort=lambda x: x['name'])
def test_graphql_functional_arguments_10(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {
age: {dir: DESC}
name: {dir: ASC}
}
) {
name
age
}
}
""", {
'User': [
{'age': 27, 'name': 'Alice'},
{'age': 25, 'name': 'Jane'},
{'age': 25, 'name': 'John'},
{'age': 21, 'name': 'Bob'},
],
})
def test_graphql_functional_arguments_11(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {
name: {dir: ASC}
age: {dir: DESC}
}
) {
name
age
}
}
""", {
'User': [
{'age': 27, 'name': 'Alice'},
{'age': 21, 'name': 'Bob'},
{'age': 25, 'name': 'Jane'},
{'age': 25, 'name': 'John'},
],
})
def test_graphql_functional_arguments_12(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {
select: {dir: ASC, nulls: BIGGEST}
}
) {
after
select
}
}
""", {
'other__Foo': [
{'after': None, 'select': 'a'},
{'after': 'w', 'select': 'b'},
{'after': 'q', 'select': None},
],
})
def test_graphql_functional_arguments_13(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {
select: {dir: DESC, nulls: SMALLEST}
}
) {
after
select
}
}
""", {
'other__Foo': [
{'after': 'w', 'select': 'b'},
{'after': None, 'select': 'a'},
{'after': 'q', 'select': None},
],
})
def test_graphql_functional_arguments_14(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {name: {dir: ASC}},
first: 2
) {
name
age
}
}
""", {
'User': [
{'age': 27, 'name': 'Alice'},
{'age': 21, 'name': 'Bob'},
],
})
def test_graphql_functional_arguments_15(self):
self.assert_graphql_query_result(r"""
query {
u0: User(
order: {name: {dir: ASC}},
after: "0",
first: 2
) {
name
}
u1: User(
order: {name: {dir: ASC}},
first: 2
) {
name
}
u2: User(
order: {name: {dir: ASC}},
after: "0",
before: "2"
) {
name
}
u3: User(
order: {name: {dir: ASC}},
before: "2",
last: 1
) {
name
}
}
""", {
'u0': [
{'name': 'Bob'},
{'name': 'Jane'},
],
'u1': [
{'name': 'Alice'},
{'name': 'Bob'},
],
'u2': [
{'name': 'Bob'},
],
'u3': [
{'name': 'Bob'},
],
})
@test.xfail('''
'last' is not fully implemented in all cases and ideally
requires negative OFFSET to be implemented
''')
def test_graphql_functional_arguments_16(self):
self.assert_graphql_query_result(r"""
query {
u4: User(
order: {name: {dir: ASC}},
after: "2",
last: 2
) {
name
}
u5: User(
order: {name: {dir: ASC}},
after: "0",
last: 2
) {
name
}
u6: User(
order: {name: {dir: ASC}},
after: "0",
before: "3",
first: 2,
last: 1
) {
name
}
}
""", {
'u4': [
{'name': 'John'},
],
'u5': [
{'name': 'Jane'},
{'name': 'John'},
],
'u6': [
{'name': 'Jane'},
],
})
def test_graphql_functional_arguments_17(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "Jane"}}) {
name
groups {
name
settings(
order: {name: {dir: ASC}},
first: 1
) {
name
}
}
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'name': 'upgraded',
'settings': [{
'name': 'perks'
}]
}]
}]
})
def test_graphql_functional_arguments_18(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type String, found 42',
_line=3, _col=46):
self.graphql_query(r"""
query {
User(filter: {name: {eq: 42}}) {
id,
}
}
""")
def test_graphql_functional_arguments_19(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type String, found 20\.5',
_line=3, _col=46):
self.graphql_query(r"""
query {
User(filter: {name: {eq: 20.5}}) {
id,
}
}
""")
def test_graphql_functional_arguments_20(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type Float, found "3\.5"',
_line=3, _col=47):
self.graphql_query(r"""
query {
User(filter: {score: {eq: "3.5"}}) {
id,
}
}
""")
def test_graphql_functional_arguments_21(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type Boolean, found 0',
_line=3, _col=48):
self.graphql_query(r"""
query {
User(filter: {active: {eq: 0}}) {
id,
}
}
""")
def test_graphql_functional_arguments_22(self):
with self.assertRaisesRegex(
edgedb.QueryError,
# this error message is subpar, but this is what we get
# from postgres, because we transfer bigint values to postgres
# as strings
r'invalid input syntax for type bigint: "aaaaa"',
# _line=5, _col=32,
):
self.graphql_query(r"""
query {
u0: User(
order: {name: {dir: ASC}},
after: "aaaaa",
first: 2
) {
name
}
}
""")
def test_graphql_functional_arguments_23(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {name: {dir: ASC}},
first: 1
) {
name
}
}
""", {
'User': [{
'name': 'Alice',
}]
})
def test_graphql_functional_arguments_24(self):
# Test boolean AND handling {} like Postgres
self.assert_graphql_query_result(r"""
query {
other__Foo(
filter: {
not: {
color: {eq: GREEN},
after: {neq: "b"},
},
},
order: {color: {dir: ASC}}
) {
select
after
color
}
}
""", {
"other__Foo": [{
"select": "a",
"after": None,
"color": "RED",
}, {
"select": None,
"after": "q",
"color": "BLUE",
}]
})
def test_graphql_functional_arguments_25(self):
# Test boolean AND handling {} like Postgres
self.assert_graphql_query_result(r"""
query {
other__Foo(
filter: {
not: {
and: [
{color: {eq: GREEN}},
{after: {neq: "b"}},
]
},
},
order: {color: {dir: ASC}}
) {
select
after
color
}
}
""", {
"other__Foo": [{
"select": "a",
"after": None,
"color": "RED",
}, {
"select": None,
"after": "q",
"color": "BLUE",
}]
})
def test_graphql_functional_arguments_26(self):
# Test boolean OR handling {} like Postgres
self.assert_graphql_query_result(r"""
query {
other__Foo(
filter: {
or: [
{color: {neq: GREEN}},
{after: {eq: "b"}},
]
},
order: {color: {dir: ASC}}
) {
select
after
color
}
}
""", {
"other__Foo": [{
"select": "a",
"after": None,
"color": "RED",
}, {
"select": None,
"after": "q",
"color": "BLUE",
}]
})
def test_graphql_functional_enums_01(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {color: {dir: DESC}},
first: 1
) {
select
color
}
}
""", {
'other__Foo': [{
'select': None,
'color': "BLUE",
}]
})
def test_graphql_functional_enums_02(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {color: {dir: ASC}},
after: "0"
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "b",
"color": "GREEN",
}, {
"select": None,
"color": "BLUE",
}]
})
def test_graphql_functional_enums_03(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
filter: {color: {eq: RED}},
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "a",
"color": "RED",
}]
})
def test_graphql_functional_enums_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: admin',
_line=4, _col=51):
self.graphql_query(r"""
query {
# enum supplied instead of a string
UserGroup(filter: {name: {eq: admin}}) {
id,
name,
}
}
""")
def test_graphql_functional_fragment_01(self):
self.assert_graphql_query_result(r"""
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
name,
groups {
... groupFrag
}
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_02(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... userFrag2
}
fragment userFrag2 on User {
groups {
... groupFrag
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
... userFrag1
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_03(self):
self.assert_graphql_query_result(r"""
fragment userFrag2 on User {
groups {
... groupFrag
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
... on User {
name
... userFrag2
}
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_04(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... {
groups {
... groupFrag
}
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
... userFrag1
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_05(self):
# ISSUE #3514
#
# Fragment on the actual type should also work.
self.assert_graphql_query_result(r"""
fragment userFrag on User_Type {
active
profile {
value
}
}
query {
User(filter: {name: {eq: "Alice"}}) {
name
... userFrag
}
}
""", {
'User': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_fragment_type_01(self):
self.assert_graphql_query_result(r"""
fragment userFrag on User {
id,
name,
}
query {
User(filter: {name: {eq: "Alice"}}) {
... userFrag
}
}
""", {
'User': [{
'id': uuid.UUID,
'name': 'Alice',
}],
})
def test_graphql_functional_fragment_type_02(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
}
query {
User(filter: {name: {eq: "Alice"}}) {
... namedFrag
}
}
""", {
'User': [{
'id': uuid.UUID,
'name': 'Alice',
}],
})
def test_graphql_functional_fragment_type_03(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
}
fragment userFrag on User {
... namedFrag
age
}
query {
User(filter: {name: {eq: "Alice"}}) {
... userFrag
}
}
""", {
'User': [{
'id': uuid.UUID,
'name': 'Alice',
'age': 27,
}],
})
def test_graphql_functional_fragment_type_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Fragment 'userFrag' cannot be spread here "
r"as objects of type 'UserGroup' can never be of type 'User'.",
_line=9, _col=25):
self.graphql_query(r"""
fragment userFrag on User {
id,
name,
}
query {
UserGroup {
... userFrag
}
}
""")
def test_graphql_functional_fragment_type_05(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Fragment 'userFrag' cannot be spread here "
r"as objects of type 'UserGroup' can never be of type 'User'.",
_line=8, _col=21):
self.graphql_query(r"""
fragment userFrag on User {
id,
name,
}
fragment groupFrag on UserGroup {
... userFrag
}
query {
User {
... userFrag
groups {
... groupFrag
}
}
}
""")
def test_graphql_functional_fragment_type_06(self):
self.assert_graphql_query_result(r"""
fragment userFrag on User {
age
score
}
query {
NamedObject {
name
... userFrag
}
}
""", {
"NamedObject": [
{"age": None, "name": "1st", "score": None},
{"age": None, "name": "2nd", "score": None},
{"age": None, "name": "3rd", "score": None},
{"age": None, "name": "4th", "score": None},
{"age": 27, "name": "Alice", "score": 5},
{"age": None, "name": "Alice profile", "score": None},
{"age": 21, "name": "Bob", "score": 4.2},
{"age": None, "name": "Bob profile", "score": None},
{"age": 25, "name": "Jane", "score": 1.23},
{"age": 25, "name": "John", "score": 3.14},
{"age": None, "name": "basic", "score": None},
{"age": None, "name": "perks", "score": None},
{"age": None, "name": "template", "score": None},
{"age": None, "name": "template", "score": None},
{"age": None, "name": "unused", "score": None},
{"age": None, "name": "upgraded", "score": None},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_07(self):
self.assert_graphql_query_result(r"""
fragment frag on NamedObject {
id,
name,
}
query {
NamedObject {
... frag
}
}
""", {
"NamedObject": [
{"id": uuid.UUID, "name": "1st"},
{"id": uuid.UUID, "name": "2nd"},
{"id": uuid.UUID, "name": "3rd"},
{"id": uuid.UUID, "name": "4th"},
{"id": uuid.UUID, "name": "Alice"},
{"id": uuid.UUID, "name": "Alice profile"},
{"id": uuid.UUID, "name": "Bob"},
{"id": uuid.UUID, "name": "Bob profile"},
{"id": uuid.UUID, "name": "Jane"},
{"id": uuid.UUID, "name": "John"},
{"id": uuid.UUID, "name": "basic"},
{"id": uuid.UUID, "name": "perks"},
{"id": uuid.UUID, "name": "template"},
{"id": uuid.UUID, "name": "template"},
{"id": uuid.UUID, "name": "unused"},
{"id": uuid.UUID, "name": "upgraded"},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_08(self):
with self.assertRaisesRegex(
edgedb.QueryError,
"Cannot query field 'age' on type 'NamedObject'",
_line=5, _col=21):
self.graphql_query(r"""
fragment frag on NamedObject {
id,
name,
age,
}
query {
User {
... frag
}
}
""")
def test_graphql_functional_fragment_type_09(self):
with self.assertRaisesRegex(
edgedb.QueryError,
"Cannot query field 'age' on type 'NamedObject'",
_line=7, _col=29):
self.graphql_query(r"""
query {
User {
... on NamedObject {
id,
name,
age,
}
}
}
""")
def test_graphql_functional_fragment_type_10(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
... userFrag
}
fragment userFrag on User {
age
}
query {
NamedObject {
... namedFrag
}
}
""", {
"NamedObject": [
{"id": uuid.UUID, "name": "1st", "age": None},
{"id": uuid.UUID, "name": "2nd", "age": None},
{"id": uuid.UUID, "name": "3rd", "age": None},
{"id": uuid.UUID, "name": "4th", "age": None},
{"id": uuid.UUID, "name": "Alice", "age": 27},
{"id": uuid.UUID, "name": "Alice profile", "age": None},
{"id": uuid.UUID, "name": "Bob", "age": 21},
{"id": uuid.UUID, "name": "Bob profile", "age": None},
{"id": uuid.UUID, "name": "Jane", "age": 25},
{"id": uuid.UUID, "name": "John", "age": 25},
{"id": uuid.UUID, "name": "basic", "age": None},
{"id": uuid.UUID, "name": "perks", "age": None},
{"id": uuid.UUID, "name": "template", "age": None},
{"id": uuid.UUID, "name": "template", "age": None},
{"id": uuid.UUID, "name": "unused", "age": None},
{"id": uuid.UUID, "name": "upgraded", "age": None},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_11(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
... userFrag
}
fragment userFrag on User {
age
}
query {
User {
... namedFrag
}
}
""", {
"User": [
{"id": uuid.UUID, "name": "Alice", "age": 27},
{"id": uuid.UUID, "name": "Bob", "age": 21},
{"id": uuid.UUID, "name": "Jane", "age": 25},
{"id": uuid.UUID, "name": "John", "age": 25},
]
}, sort=lambda x: x['name'])
@test.xfail(
"Known collation issue on Heroku Postgres",
unless=os.getenv("EDGEDB_TEST_BACKEND_VENDOR") != "heroku-postgres"
)
def test_graphql_functional_fragment_type_12(self):
self.assert_graphql_query_result(r"""
query {
NamedObject(order: {name: {dir: ASC}}) {
... on User {
age
}
}
}
""", {
"NamedObject": [
{"age": None},
{"age": None},
{"age": None},
{"age": None},
{"age": 27},
{"age": None},
{"age": 21},
{"age": None},
{"age": 25},
{"age": 25},
{"age": None},
{"age": None},
{"age": None},
{"age": None},
{"age": None},
{"age": None},
]
})
def test_graphql_functional_fragment_type_13(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
active
profile {
value
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_fragment_type_14(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
fragment userFrag on User {
active
profile {
value
}
}
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... userFrag
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_fragment_type_15(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
active
profile(filter: {name: {eq: "Alice profile"}}) {
value
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
active
profile(filter: {name: {eq: "no such profile"}}) {
value
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': None,
}],
})
def test_graphql_functional_fragment_type_16(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
fragment userFrag on User {
active
profile(filter: {name: {eq: "Alice profile"}}) {
value
}
}
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... userFrag
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
self.assert_graphql_query_result(r"""
fragment userFrag on User {
active
profile(filter: {name: {eq: "no such profile"}}) {
value
}
}
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... userFrag
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': None,
}],
})
def test_graphql_functional_fragment_type_17(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
... {
active
profile {
value
}
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_fragment_type_18(self):
# ISSUE #3514
#
# Fragment on the actual type should also work.
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "Alice"}}) {
... on User_Type {
active
profile {
value
}
}
name
}
}
""", {
'User': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_directives_01(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @include(if: true),
groups @include(if: false) {
id
name
}
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_directives_02(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @skip(if: true),
groups @skip(if: false) {
id @skip(if: true)
name @skip(if: false)
}
}
}
""", {
"User": [
{"groups": []},
{"groups": []},
{"groups": [{"name": "upgraded"}]},
{"groups": [{"name": "basic"}]},
]
})
def test_graphql_functional_directives_03(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @skip(if: true), @include(if: true),
groups @skip(if: false), @include(if: true) {
id @skip(if: true), @include(if: false)
name @skip(if: false), @include(if: true)
}
}
}
""", {
"User": [
{"groups": []},
{"groups": []},
{"groups": [{"name": "upgraded"}]},
{"groups": [{"name": "basic"}]},
]
})
def test_graphql_functional_directives_04(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... {
groups @include(if: false) {
... groupFrag
}
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(order: {name: {dir: ASC}}) {
... userFrag1
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_directives_05(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... @skip(if: true) {
groups {
... groupFrag
}
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(order: {name: {dir: ASC}}) {
... userFrag1
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_directives_06(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... {
groups {
... groupFrag @skip(if: true)
name
}
}
}
fragment groupFrag on UserGroup {
id
}
query {
User(order: {name: {dir: ASC}}) {
... userFrag1
}
}
""", {
"User": [
{"name": "Alice", "groups": []},
{"name": "Bob", "groups": []},
{"name": "Jane", "groups": [{"name": "upgraded"}]},
{"name": "John", "groups": [{"name": "basic"}]},
]
})
def test_graphql_functional_directives_07(self):
with self.assertRaisesRegex(
edgedb.QueryError,
'Expected type Boolean!, found "true".',
_line=4, _col=43):
self.graphql_query(r"""
query {
User {
name @include(if: "true"),
id
}
}
""")
def test_graphql_functional_typename_01(self):
self.assert_graphql_query_result(r"""
query {
User {
name
__typename
groups {
id
name
__typename
}
}
}
""", {
'User': [{
'name': 'Alice',
'__typename': 'User_Type',
'groups': []
}, {
'name': 'Bob',
'__typename': 'Person_Type',
'groups': []
}, {
'name': 'Jane',
'__typename': 'User_Type',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
'__typename': 'UserGroup_Type',
}]
}, {
'name': 'John',
'__typename': 'User_Type',
'groups': [{
'id': uuid.UUID,
'name': 'basic',
'__typename': 'UserGroup_Type',
}]
}],
}, sort=lambda x: x['name'])
def test_graphql_functional_typename_02(self):
self.assert_graphql_query_result(r"""
query {
__typename
__schema {
__typename
}
}
""", {
'__typename': 'Query',
'__schema': {
'__typename': '__Schema',
},
})
def test_graphql_functional_typename_03(self):
self.assert_graphql_query_result(r"""
query {
foo: __typename
User(order: {name: {dir: ASC}}) {
name
bar: __typename
}
}
""", {
"foo": "Query",
"User": [
{"bar": "User_Type", "name": "Alice"},
{"bar": "Person_Type", "name": "Bob"},
{"bar": "User_Type", "name": "Jane"},
{"bar": "User_Type", "name": "John"},
]
})
def test_graphql_functional_scalars_01(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_bool
p_str
p_datetime
p_local_datetime
p_local_date
p_local_time
p_duration
p_int16
p_int32
p_int64
p_bigint
p_float32
p_float64
p_decimal
}
}
""", {
"ScalarTest": [{
'p_bool': True,
'p_str': 'Hello',
'p_datetime': '2018-05-07T20:01:22.306916+00:00',
'p_local_datetime': '2018-05-07T20:01:22.306916',
'p_local_date': '2018-05-07',
'p_local_time': '20:01:22.306916',
'p_duration': 'PT20H',
'p_int16': 12345,
'p_int32': 1234567890,
'p_int64': 1234567890123,
'p_bigint': 123456789123456789123456789,
'p_float32': 2.5,
'p_float64': 2.5,
'p_decimal':
123456789123456789123456789.123456789123456789123456789,
}]
})
def test_graphql_functional_scalars_02(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_json
}
}
""", {
"ScalarTest": [{
'p_json': {"foo": [1, None, "bar"]},
}]
})
def test_graphql_functional_scalars_03(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'p_bytes' on type 'ScalarTest'",
_line=4, _col=25):
self.graphql_query(r"""
query {
ScalarTest {
p_bytes
}
}
""")
def test_graphql_functional_scalars_04(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_array_json
}
}
""", {
"ScalarTest": [{
'p_array_json': ["hello", "world"],
}]
})
def test_graphql_functional_scalars_05(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'p_array_bytes' on type 'ScalarTest'",
_line=4, _col=25):
self.graphql_query(r"""
query {
ScalarTest {
p_array_bytes
}
}
""")
def test_graphql_functional_scalars_06(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_posint
}
}
""", {
"ScalarTest": [{
'p_posint': 42,
}]
})
def test_graphql_functional_scalars_07(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_array_str
}
}
""", {
"ScalarTest": [{
'p_array_str': ['hello', 'world'],
}]
})
def test_graphql_functional_scalars_08(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_tuple
}
}
""", {
"ScalarTest": [{
'p_tuple': [123, "test"],
}]
})
def test_graphql_functional_scalars_09(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_array_tuple
}
}
""", {
"ScalarTest": [{
'p_array_tuple': [["hello", True], ["world", False]],
}]
})
def test_graphql_functional_duplicates_01(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name
name
name
age
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_02(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @include(if: true)
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_03(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
... on User @skip(if: false) {
name @include(if: true)
}
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_04(self):
self.assert_graphql_query_result(r"""
fragment f1 on User {
name @include(if: true)
}
fragment f2 on User {
age
name @include(if: true)
... f1
}
query {
User(order: {name: {dir: ASC}}) {
... f2
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_05(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
age
name
name @include(if: true)
name @skip(if: false)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_06(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
... @skip(if: false) {
name @include(if: true)
}
age
name
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_07(self):
self.assert_graphql_query_result(r"""
fragment f1 on User {
name @skip(if: false)
}
fragment f2 on User {
age
name @include(if: true)
... f1
}
query {
User(order: {name: {dir: ASC}}) {
... f2
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_variables_01(self):
query = r"""
query($name: String) {
User(filter: {name: {eq: $name}}) {
name,
groups {
name
}
}
}
"""
expected_result = {
'User': [{
'name': 'John',
'groups': [{
'name': 'basic',
}]
}],
}
self.assert_graphql_query_result(
query,
expected_result,
variables={'name': 'John'},
use_http_post=True
)
self.assert_graphql_query_result(
query,
expected_result,
variables={'name': 'John'},
use_http_post=False
)
def test_graphql_functional_variables_02(self):
self.assert_graphql_query_result(
r"""
query($name: String, $age: Int64) {
User(filter: {or: [{name: {eq: $name}},
{age: {gt: $age}}]},
order: {name: {dir: ASC}})
{
name
age
}
}
""",
{
"User": [
{
"name": "Alice",
"age": 27,
},
{
"name": "Jane",
"age": 25,
},
{
"name": "John",
"age": 25,
},
]
},
variables={
"age": 24,
"name": "Alice"
}
)
def test_graphql_functional_variables_03(self):
self.assert_graphql_query_result(r"""
query($val: Int = 3) {
User(filter: {score: {eq: $val}}) {
id,
}
}
""", {
'User': [],
})
def test_graphql_functional_variables_04(self):
self.assert_graphql_query_result(r"""
query($val: Boolean = true) {
User(order: {name: {dir: ASC}}) {
name @include(if: $val),
groups @skip(if: $val) {
name
}
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_variables_05(self):
self.assert_graphql_query_result(r"""
query($val: Boolean! = true) {
User(order: {name: {dir: ASC}}) {
name @include(if: $val),
id
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_variables_06(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"no value for the 'val' variable",
_line=4, _col=31):
self.graphql_query(r"""
query($val: Boolean!) {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_07(self):
self.assert_graphql_query_result(r"""
query($val: String = "John") {
User(filter: {name: {eq: $val}}) {
age,
}
}
""", {
"User": [
{"age": 25},
]
})
def test_graphql_functional_variables_08(self):
self.assert_graphql_query_result(r"""
query($val: Int64 = 20) {
User(filter: {age: {eq: $val}}) {
name,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_09(self):
self.assert_graphql_query_result(r"""
query($val: Float = 3.5) {
User(filter: {score: {eq: $val}}) {
name,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_10(self):
self.assert_graphql_query_result(r"""
query($val: Int = 3) {
User(filter: {score: {eq: $val}}) {
id,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_11(self):
self.assert_graphql_query_result(r"""
query($val: Float = 3) {
User(filter: {score: {eq: $val}}) {
id,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_12(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Boolean cannot represent a non boolean value: 1',
_line=2, _col=39):
self.graphql_query(r"""
query($val: Boolean = 1) {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_13(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Boolean cannot represent a non boolean value: "1"',
_line=2, _col=39):
self.graphql_query(r"""
query($val: Boolean = "1") {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_14(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Boolean cannot represent a non boolean value: 1\.3',
_line=2, _col=39):
self.graphql_query(r"""
query($val: Boolean = 1.3) {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_15(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: 1',
_line=2, _col=38):
self.graphql_query(r"""
query($val: String = 1) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_16(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: 1\.1',
_line=2, _col=38):
self.graphql_query(r"""
query($val: String = 1.1) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_17(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: true',
_line=2, _col=38):
self.graphql_query(r"""
query($val: String = true) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_18(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Int cannot represent non-integer value: 1\.1',
_line=2, _col=35):
self.graphql_query(r"""
query($val: Int = 1.1) {
User(filter: {age: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_19(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Int cannot represent non-integer value: "1"',
_line=2, _col=35):
self.graphql_query(r"""
query($val: Int = "1") {
User(filter: {age: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_20(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Int cannot represent non-integer value: true',
_line=2, _col=35):
self.graphql_query(r"""
query($val: Int = true) {
User(filter: {age: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_21(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Float cannot represent non numeric value: "1"',
_line=2, _col=37):
self.graphql_query(r"""
query($val: Float = "1") {
User(filter: {score: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_22(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Float cannot represent non numeric value: true',
_line=2, _col=37):
self.graphql_query(r"""
query($val: Float = true) {
User(filter: {score: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_23(self):
self.assert_graphql_query_result(r"""
query($val: ID = "00000000-3576-11e9-8723-cf18c8790091") {
User(filter: {id: {eq: $val}}) {
name
}
}
""", {
"User": []
})
def test_graphql_functional_variables_25(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'ID cannot represent a non-string and non-integer.+: 1\.1',
_line=2, _col=34):
self.graphql_query(r"""
query($val: ID = 1.1) {
User(filter: {id: {eq: $val}}) {
name
}
}
""")
def test_graphql_functional_variables_26(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'ID cannot represent a non-string and non-integer.+: true',
_line=2, _col=34):
self.graphql_query(r"""
query($val: ID = true) {
User(filter: {id: {eq: $val}}) {
name
}
}
""")
def test_graphql_functional_variables_27(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]' used in position "
r"expecting type 'String'\."):
self.graphql_query(r"""
query($val: [String] = "Foo") {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_28(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]' used in position "
r"expecting type 'String'\."):
self.graphql_query(r"""
query($val: [String]) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_29(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]!' used in position "
r"expecting type 'String'."):
self.graphql_query(r"""
query($val: [String]!) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_30(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"no value for the 'val' variable"):
self.graphql_query(r"""
query($val: String!) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_31(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"String cannot represent a non string value: 123",
_line=2, _col=48):
self.graphql_query(r"""
query($val: [String] = ["Foo", 123]) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_32(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]' used in position "
r"expecting type 'String'\."):
self.graphql_query(r"""
query($val: [String]) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_33(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'expected JSON string'):
self.graphql_query(
r"""
query($name: String) {
User(filter: {name: {eq: $name}}) {
name,
groups {
name
}
}
}
""",
variables={'name': 11})
def test_graphql_functional_variables_34(self):
# Test multiple requests to make sure that caching works correctly
for _ in range(2):
for _ in range(2):
self.assert_graphql_query_result(
r"""
query($val: Boolean!, $min_age: Int64!) {
User(filter: {age: {gt: $min_age}}) {
name @include(if: $val),
age
}
}
""",
{'User': [{'age': 27, 'name': 'Alice'}]},
variables={'val': True, 'min_age': 26}
)
self.assert_graphql_query_result(
r"""
query($val: Boolean!, $min_age: Int64!) {
User(filter: {age: {gt: $min_age}}) {
name @include(if: $val),
age
}
}
""",
{'User': [{'age': 27}]},
variables={'val': False, 'min_age': 26}
)
def test_graphql_functional_variables_35(self):
self.assert_graphql_query_result(
r"""
query($limit: Int!) {
User(
order: {name: {dir: ASC}},
first: $limit
) {
name
}
}
""",
{
'User': [{
'name': 'Alice',
}]
},
variables={'limit': 1},
)
def test_graphql_functional_variables_36(self):
self.assert_graphql_query_result(
r"""
query($idx: String!) {
User(
order: {name: {dir: ASC}},
# this is actually equivalent to OFFSET 2,
# since 'after' doesn't include the value
# referenced by the index
after: $idx
) {
name
}
}
""",
{
'User': [{
'name': 'Jane',
}, {
'name': 'John',
}]
},
variables={'idx': '1'},
)
def test_graphql_functional_variables_37(self):
self.assert_graphql_query_result(
r"""
query($idx: String!, $num: Int!) {
User(
order: {name: {dir: ASC}},
# this is actually equivalent to OFFSET 2,
# since 'after' doesn't include the value
# referenced by the index
after: $idx,
first: $num
) {
name
}
}
""",
{
'User': [{
'name': 'Jane',
}]
},
variables={'idx': '1', 'num': 1},
)
def test_graphql_functional_variables_38(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$limit' of type 'String!' used in position "
r"expecting type 'Int'."):
self.graphql_query(
r"""
query($limit: String!) {
User(
order: {name: {dir: ASC}},
first: $limit
) {
name
}
}
""",
variables={'limit': '1'},
)
# FIXME: the error here comes all the way from Postgres and as
# such refers to Postgres types, ideally we'd like to have an
# error message expressed in terms of GraphQL types.
def test_graphql_functional_variables_39(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'expected JSON number.+got JSON string'):
self.graphql_query(
r"""
query($limit: Int!) {
User(
order: {name: {dir: ASC}},
first: $limit
) {
name
}
}
""",
variables={'limit': '1'},
)
def test_graphql_functional_variables_40(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Only scalar defaults are allowed\. "
r"Variable 'val' has non-scalar default value\."):
self.graphql_query(r"""
query($val: FilterFloat = {eq: 3.0}) {
User(filter: {score: $val}) {
id,
}
}
""")
def test_graphql_functional_variables_41(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variables starting with '_edb_arg__' are prohibited"):
self.graphql_query(r"""
query($_edb_arg__1: Int!) {
User(limit: $_edb_arg__1) {
id,
}
}
""", variables={'_edb_arg__1': 1})
def test_graphql_functional_variables_42(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variables starting with '_edb_arg__' are prohibited"):
self.graphql_query(r"""
query($_edb_arg__1: Int = 1) {
User(limit: $_edb_arg__1) {
id,
}
}
""")
def test_graphql_functional_variables_43(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Only scalar input variables are allowed\. "
r"Variable 'f' has non-scalar value\."):
self.graphql_query(r"""
query user($f: FilterUser!) {
User(filter: $f) {
name
}
}
""", variables={"f": {"name": {"eq": "Alice"}}})
def test_graphql_functional_variables_44(self):
self.assert_graphql_query_result(
r"""
query foo($color: other__ColorEnum!) {
other__Foo(
filter: {color: {eq: $color}},
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "a",
"color": "RED",
}]
},
variables={"color": "RED"},
)
def test_graphql_functional_variables_45(self):
self.assert_graphql_query_result(
r"""
query foo($color: other__ColorEnum! = GREEN) {
other__Foo(
filter: {color: {eq: $color}},
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "b",
"color": "GREEN",
}]
},
)
def test_graphql_functional_variables_46(self):
self.assert_graphql_query_result(
r"""
query($val: JSON) {
ScalarTest(filter: {p_json: {eq: $val}}) {
p_json
}
}
""", {
"ScalarTest": [{
'p_json': {"foo": [1, None, "bar"]},
}]
},
# JSON can only be passed as a variable.
variables={"val": {"foo": [1, None, "bar"]}},
)
def test_graphql_functional_variables_47(self):
# Test boolean AND handling {} like Postgres
self.assert_graphql_query_result(
r"""
query($color: other__ColorEnum!, $after: String!) {
other__Foo(
filter: {
not: {
color: {eq: $color},
after: {neq: $after},
},
},
order: {color: {dir: ASC}}
) {
select
after
color
}
}
""", {
"other__Foo": [{
"select": "a",
"after": None,
"color": "RED",
}, {
"select": None,
"after": "q",
"color": "BLUE",
}]
},
variables={'color': 'GREEN', 'after': 'b'},
)
def test_graphql_functional_variables_48(self):
# Test boolean AND handling {} like Postgres
self.assert_graphql_query_result(
r"""
query($color: other__ColorEnum!, $after: String!) {
other__Foo(
filter: {
not: {
and: [
{color: {eq: $color}},
{after: {neq: $after}},
]
},
},
order: {color: {dir: ASC}}
) {
select
after
color
}
}
""", {
"other__Foo": [{
"select": "a",
"after": None,
"color": "RED",
}, {
"select": None,
"after": "q",
"color": "BLUE",
}]
},
variables={'color': 'GREEN', 'after': 'b'},
)
def test_graphql_functional_variables_49(self):
# Test boolean OR handling {} like Postgres
self.assert_graphql_query_result(
r"""
query($color: other__ColorEnum!, $after: String!) {
other__Foo(
filter: {
or: [
{color: {neq: $color}},
{after: {eq: $after}},
]
},
order: {color: {dir: ASC}}
) {
select
after
color
}
}
""", {
"other__Foo": [{
"select": "a",
"after": None,
"color": "RED",
}, {
"select": None,
"after": "q",
"color": "BLUE",
}]
},
variables={'color': 'GREEN', 'after': 'b'},
)
def test_graphql_functional_inheritance_01(self):
# ISSUE: #709
#
# Testing type and sub-type.
self.assert_graphql_query_result(r"""
query {
Bar {
__typename
q
}
}
""", {
'Bar': [{
'__typename': 'Bar_Type',
'q': 'bar',
}, {
'__typename': 'Bar2_Type',
'q': 'bar2',
}],
}, sort=lambda x: x['q'])
def test_graphql_functional_inheritance_02(self):
# ISSUE: #709
#
        # Testing type and sub-type, with a covariant link target.
self.assert_graphql_query_result(r"""
query {
Rab {
__typename
blah {
__typename
q
}
}
}
""", {
'Rab': [{
'__typename': 'Rab_Type',
'blah': {
'__typename': 'Bar_Type',
'q': 'bar',
}
}, {
'__typename': 'Rab2_Type',
'blah': {
'__typename': 'Bar2_Type',
'q': 'bar2',
}
}],
}, sort=lambda x: x['blah']['q'])
def test_graphql_functional_inheritance_03(self):
# ISSUE: #709
#
        # Testing type and sub-type, with a covariant link target.
#
# Rab2 must keep the target type of the link same as the base
# type, due to limitations of GraphQL inheritance. But as long
# as the actual target type is known, it can be explicitly
# referenced.
self.assert_graphql_query_result(r"""
query {
Rab2 {
blah {
__typename
... on Bar2 {
q
w
}
}
}
}
""", {
'Rab2': [{
'blah': {
'__typename': 'Bar2_Type',
'q': 'bar2',
'w': 'special'
}
}],
})
def test_graphql_functional_order_01(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
Rab(order: {blah: {q: {dir: DESC}}}) {
blah {
q
}
}
}
""", {
"Rab": [
{
"blah": {
"q": "bar2"
}
},
{
"blah": {
"q": "bar"
},
}
]
})
def test_graphql_functional_order_02(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
SettingAliasAugmented(
order: {
of_group: {name_upper: {dir: ASC}},
name: {dir: DESC}
}
) {
name
of_group {
name_upper
}
}
}
""", {
"SettingAliasAugmented": [
{
"name": "template",
"of_group": {
"name_upper": "UNUSED"
},
},
{
"name": "template",
"of_group": {
"name_upper": "UPGRADED"
},
},
{
"name": "perks",
"of_group": {
"name_upper": "UPGRADED"
},
},
]
})
def test_graphql_functional_order_03(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
LinkedList(order: {
next: {next: {name: {dir: DESC, nulls: SMALLEST}}},
name: {dir: ASC}
}) {
name
}
}
""", {
"LinkedList": [
{
"name": "2nd"
},
{
"name": "1st"
},
{
"name": "3rd"
},
{
"name": "4th"
}
]
})
def test_graphql_functional_order_04(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
User(order: {
profile: {
value: {dir: ASC},
name: {dir: DESC}
}
}) {
name
profile {
name
value
}
}
}
""", {
"User": [
{
"name": "John",
"profile": None,
},
{
"name": "Jane",
"profile": None,
},
{
"name": "Bob",
"profile": {
"name": "Bob profile",
"value": "special",
},
},
{
"name": "Alice",
"profile": {
"name": "Alice profile",
"value": "special",
},
}
]
})
def test_graphql_functional_exists_01(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {profile: {exists: true}},
order: {name: {dir: ASC}}
) {
name
profile {
name
}
}
}
""", {
"User": [
{
"name": "Alice",
"profile": {
"name": "Alice profile",
},
},
{
"name": "Bob",
"profile": {
"name": "Bob profile",
},
},
]
})
def test_graphql_functional_exists_02(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {profile: {exists: false}},
order: {name: {dir: ASC}}
) {
name
profile {
name
}
}
}
""", {
"User": [
{
"name": "Jane",
"profile": None,
},
{
"name": "John",
"profile": None,
},
]
})
def test_graphql_functional_exists_03(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {exists: false}}},
order: {name: {dir: ASC}}
) {
name
groups {
name
settings {
name
}
}
}
}
""", {
"User": [
{
"name": "Alice",
"groups": [],
},
{
"name": "Bob",
"groups": [],
},
{
"name": "John",
"groups": [
{
"name": "basic",
"settings": [],
}
],
},
]
})
def test_graphql_functional_exists_04(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {exists: true}}}
) {
name
groups {
name
settings(order: {name: {dir: ASC}}) {
name
}
}
}
}
""", {
"User": [
{
"name": "Jane",
"groups": [
{
"name": "upgraded",
"settings": [
{
"name": "perks",
},
{
"name": "template",
},
]
}
]
}
]
})
def test_graphql_functional_exists_05(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {id: {exists: false}}}},
order: {name: {dir: ASC}}
) {
name
groups {
name
settings {
name
}
}
}
}
""", {
"User": [
{
"name": "Alice",
"groups": [],
},
{
"name": "Bob",
"groups": [],
},
{
"name": "John",
"groups": [
{
"name": "basic",
"settings": [],
}
],
},
]
})
def test_graphql_functional_exists_06(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {id: {exists: true}}}}
) {
name
groups {
name
settings(order: {name: {dir: ASC}}) {
name
}
}
}
}
""", {
"User": [
{
"name": "Jane",
"groups": [
{
"name": "upgraded",
"settings": [
{
"name": "perks",
},
{
"name": "template",
},
]
}
]
}
]
})
def test_graphql_functional_in_01(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {name: {in: ["Alice", "Bob"]}},
order: {name: {dir: ASC}}
) {
name
}
}
""", {
"User": [
{
"name": "Alice",
},
{
"name": "Bob",
},
]
})
def test_graphql_functional_in_02(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {name: {in: ["Zoe", "Alice"]}},
order: {name: {dir: ASC}}
) {
name
}
}
""", {
"User": [
{
"name": "Alice",
},
]
})
class TestGraphQLInit(tb.GraphQLTestCase):
"""Test GraphQL initialization on an empty database."""
# GraphQL queries cannot run in a transaction
TRANSACTION_ISOLATION = False
def test_graphql_init_type_01(self):
# An empty database should still have an "Object" interface.
self.assert_graphql_query_result(r"""
query {
__type(name: "Object") {
__typename
name
kind
}
}
""", {
"__type": {
"kind": "INTERFACE",
"name": "Object",
"__typename": "__Type"
}
})
| edgedb/edgedb | tests/test_http_graphql_query.py | Python | apache-2.0 | 121,035 |
import numpy as np
import pylab as pl
pl.figure(figsize=[12,6])
cm = np.genfromtxt('/media/luna1/vinu/software/cosmosis/mft_output/mass_function/m_h.txt')
cmf = np.genfromtxt('/media/luna1/vinu/software/cosmosis/mft_output/mass_function/dndlnmh.txt')
h = 0.71
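# The two panels below differ only in which HMF column is compared against the
# COSMOSIS mass function: column 7 (dn/dlog10m) on the left and column 6
# (dn/dlnm) on the right, following the labels used in the plot calls.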
pl.subplot(121)
pl.loglog(cm, cmf[0], label='COSMOSIS z=0')
hf = np.genfromtxt('../hmf/mVector_PLANCK-SMT z: 0.0.txt')
pl.loglog(hf[:,0], hf[:,7], label='HMF z=0 dn/dlog10m')
pl.xlabel(r'$M_\odot/h$')
pl.ylabel(r'dn/dlnm $h^3 Mpc^{-3}$')
pl.xlim([1e10,1e16])
pl.ylim([1e-14,1e1])
pl.legend(loc=0)
pl.subplot(122)
pl.loglog(cm, cmf[0], label='COSMOSIS z=0')
hf = np.genfromtxt('../hmf/mVector_PLANCK-SMT z: 0.0.txt')
pl.loglog(hf[:,0], hf[:,6], label='HMF z=0 dn/dlnm')
pl.xlabel(r'$M_\odot/h$')
pl.ylabel(r'dn/dlnm $h^3 Mpc^{-3}$')
pl.xlim([1e10,1e16])
pl.ylim([1e-14,1e1])
pl.legend(loc=0)
pl.savefig('../figs/compare_hmf_cosmosis_tinker.png', bbox_inches='tight')
pl.show()
| vvinuv/HaloModel | plotting_scripts/compare_massfunction.py | Python | gpl-3.0 | 941 |
'''
Given an unsorted integer array, find the first missing positive integer.
For example,
Given [1,2,0] return 3,
and [3,4,-1,1] return 2.
Your algorithm should run in O(n) time and use constant space.
'''
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n == 0:
return 1
for i, elem in enumerate(nums):
if nums[i] > i + 1:
j = nums[i] - 1
nums[i] = -1
while j < n and j > -1 and nums[j] != j + 1:
k = nums[j] - 1
nums[j] = j + 1
j = k
elif nums[i] > 0:
nums[nums[i] - 1] = nums[i]
for i, elem in enumerate(nums, 1):
if i != elem:
return i
return i + 1
if __name__ == '__main__':
assert Solution().firstMissingPositive([3, 4, -1, 1]) == 2
| wufangjie/leetcode | 041. First Missing Positive.py | Python | gpl-3.0 | 988 |
import os
import shutil
import yaml
from twisted.internet.defer import inlineCallbacks
from juju.charm.directory import CharmDirectory
from juju.charm.tests import local_charm_id
from juju.charm.tests.test_directory import sample_directory
from juju.charm.tests.test_repository import unbundled_repository
from juju.state.charm import CharmStateManager
from juju.state.errors import CharmStateNotFound
from juju.state.tests.common import StateTestBase
class CharmStateManagerTest(StateTestBase):
@inlineCallbacks
def setUp(self):
yield super(CharmStateManagerTest, self).setUp()
self.charm_state_manager = CharmStateManager(self.client)
self.charm_id = local_charm_id(self.charm)
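        # shutil.copytree() requires a non-existent destination directory, so
        # reserve a temp dir name, remove it, then copy the sample repository
        # into that path.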
self.unbundled_repo_path = self.makeDir()
os.rmdir(self.unbundled_repo_path)
shutil.copytree(unbundled_repository, self.unbundled_repo_path)
@inlineCallbacks
def test_add_charm(self):
"""
Adding a Charm into a CharmStateManager should register
the charm within the Zookeeper state, according to the
specification.
"""
charm_state = yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "http://example.com/abc")
self.assertEquals(charm_state.id, "local:series/dummy-1")
children = yield self.client.get_children("/charms")
self.assertEquals(children, ["local_3a_series_2f_dummy-1"])
content, stat = yield self.client.get(
"/charms/local_3a_series_2f_dummy-1")
charm_data = yaml.load(content)
self.assertEquals(charm_data, {
"metadata": self.charm.metadata.get_serialization_data(),
"config": self.charm.config.get_serialization_data(),
"sha256": self.charm.get_sha256(),
"url": "http://example.com/abc"
})
@inlineCallbacks
def test_get_charm(self):
"""
A CharmState should be available if one get()s a charm
that was previously added into the manager.
"""
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
self.assertEquals(charm_state.id, "local:series/dummy-1")
@inlineCallbacks
def test_charm_state_attributes(self):
"""
Verify that the basic (invariant) attributes of the
CharmState are correctly in place.
"""
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "http://example.com/abc")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
self.assertEquals(charm_state.name, "dummy")
self.assertEquals(charm_state.revision, 1)
self.assertEquals(charm_state.id, "local:series/dummy-1")
self.assertEquals(charm_state.bundle_url, "http://example.com/abc")
@inlineCallbacks
def test_is_subordinate(self):
"""
Verify is_subordinate for traditional and subordinate charms
"""
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
self.assertEquals(charm_state.is_subordinate(), False)
sub_charm = CharmDirectory(
os.path.join(self.unbundled_repo_path, "series", "logging"))
self.charm_state_manager.add_charm_state("local:series/logging-1",
sub_charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/logging-1")
self.assertEquals(charm_state.is_subordinate(), True)
@inlineCallbacks
def test_charm_state_metadata(self):
"""
Check that the charm metadata was correctly saved and loaded.
"""
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
metadata = yield charm_state.get_metadata()
self.assertEquals(metadata.name, "dummy")
self.assertFalse(metadata.is_subordinate)
self.assertFalse(charm_state.is_subordinate())
@inlineCallbacks
def test_charm_state_is_subordinate(self):
log_dir = os.path.join(os.path.dirname(sample_directory), "logging")
charm = CharmDirectory(log_dir)
yield self.charm_state_manager.add_charm_state(
"local:series/logging-1", charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/logging-1")
self.assertTrue(charm_state.is_subordinate)
@inlineCallbacks
def test_charm_state_config_options(self):
"""Verify ConfigOptions present and correct."""
from juju.charm.tests.test_config import sample_yaml_data
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
config = yield charm_state.get_config()
self.assertEquals(config.get_serialization_data(),
sample_yaml_data)
@inlineCallbacks
def test_get_non_existing_charm_prior_to_initialization(self):
"""
Getting a charm before the charms node was even
initialized should raise an error about the charm not
being present.
"""
try:
yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
except CharmStateNotFound, e:
self.assertEquals(e.charm_id, "local:series/dummy-1")
else:
self.fail("Error not raised.")
@inlineCallbacks
def test_get_non_existing_charm(self):
"""
Trying to retrieve a charm from the state when it was
never added should raise an error.
"""
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "")
try:
yield self.charm_state_manager.get_charm_state(
"local:anotherseries/dummy-1")
except CharmStateNotFound, e:
self.assertEquals(e.charm_id, "local:anotherseries/dummy-1")
else:
self.fail("Error not raised.")
@inlineCallbacks
def test_get_sha256(self):
"""
We should be able to retrieve the sha256 of a stored
charm.
"""
yield self.charm_state_manager.add_charm_state(
self.charm_id, self.charm, "")
charm_state = yield self.charm_state_manager.get_charm_state(
"local:series/dummy-1")
sha256 = yield charm_state.get_sha256()
self.assertEquals(sha256, self.charm.get_sha256())
| anbangr/trusted-juju | juju/state/tests/test_charm.py | Python | agpl-3.0 | 6,967 |
# -*- coding: UTF-8 -*-
from flask import jsonify
from sqlalchemy.orm.exc import NoResultFound
from flask.ext.databrowser import ModelView, col_spec, sa
from flask.ext.databrowser.action import DeleteAction
from flask.ext.babel import lazy_gettext, gettext as _
from genuine_ap.config import config_ws
from genuine_ap.models import Config
from genuine_ap.apis import wraps
from genuine_ap.database import db
@config_ws.route('/config/<opts>')
def config_ws(opts):
opts = opts.split(',')
try:
opts = [wraps(Config.query.filter(Config.name == opt).one()) for opt
in opts]
except NoResultFound:
return "invalid config options", 403
return jsonify(dict((opt.name, opt.as_dict()) for opt in opts))
class ConfigModelView(ModelView):
@ModelView.cached
@property
def list_columns(self):
return [
col_spec.ColSpec('id', _('id')),
col_spec.ColSpec('name', _('name')),
col_spec.ColSpec('type_', _('type')),
col_spec.ColSpec('brief', _('brief')),
col_spec.ColSpec('value', _('value')),
]
@ModelView.cached
@property
def create_columns(self):
return [
col_spec.InputColSpec('name', _('name')),
col_spec.InputColSpec('type_', _('type')),
col_spec.InputColSpec('brief', _('brief')),
col_spec.InputColSpec('value', _('value'),
doc=u'若是布尔类型,应当填写1(真)或0(假)'),
]
@ModelView.cached
@property
def edit_columns(self):
return [
col_spec.InputColSpec('name', _('name')),
col_spec.InputColSpec('type_', _('type')),
col_spec.InputColSpec('brief', _('brief')),
col_spec.InputColSpec('value', _('value'),
doc=u'若是布尔类型,应当填写1(真)或0(假)'),
]
def get_actions(self, processed_objs=None):
return [DeleteAction(_("remove"))]
config_model_view = ConfigModelView(sa.SAModell(Config, db,
lazy_gettext('Config')))
| PuZheng/lejian-backend | lejian/config/views.py | Python | mit | 2,153 |
from __future__ import absolute_import
import os
import pytest
from scipy import ndimage
TESTDATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata')
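# Both fixtures are session-scoped, so each test image is read from disk only
# once and then shared by every test in the session.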
@pytest.fixture(scope='session')
def easy_plate():
return ndimage.io.imread(
os.path.join(TESTDATA, 'test_fixture_easy.tiff')
)
@pytest.fixture(scope='session')
def hard_plate():
return ndimage.io.imread(
os.path.join(TESTDATA, 'test_fixture_difficult.tiff')
)
| Scan-o-Matic/scanomatic | tests/unit/image_analysis/conftest.py | Python | gpl-3.0 | 470 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different interpolation modes,
# with the mirror-pad feature turned on
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
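# The four reslice filters below all use mirror padding outside the input
# extent; they differ only in interpolation mode (cubic, linear, nearest
# neighbor) and, for reslice4, in using the original 3.2 mm in-plane spacing.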
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.MirrorOn()
reslice1.SetInterpolationModeToCubic()
reslice1.SetResliceAxesDirectionCosines([0,1,0,-1,0,0,0,0,1])
reslice1.SetResliceAxesOrigin(0,0,40)
reslice1.SetOutputSpacing(2.0,2.0,1.5)
reslice1.SetOutputOrigin(-32,-32,0)
reslice1.SetOutputExtent(0,127,0,127,0,0)
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(reader.GetOutputPort())
reslice2.MirrorOn()
reslice2.SetResliceAxesDirectionCosines([0,1,0,-1,0,0,0,0,1])
reslice2.SetResliceAxesOrigin(0,0,40)
reslice2.SetInterpolationModeToLinear()
reslice2.SetOutputSpacing(2.0,2.0,1.5)
reslice2.SetOutputOrigin(-32,-32,0)
reslice2.SetOutputExtent(0,127,0,127,0,0)
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.MirrorOn()
reslice3.SetResliceAxesDirectionCosines([0,1,0,-1,0,0,0,0,1])
reslice3.SetResliceAxesOrigin(0,0,40)
reslice3.SetInterpolationModeToNearestNeighbor()
reslice3.SetOutputSpacing(2.0,2.0,1.5)
reslice3.SetOutputOrigin(-32,-32,0)
reslice3.SetOutputExtent(0,127,0,127,0,0)
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.MirrorOn()
reslice4.SetResliceAxesDirectionCosines([0,1,0,-1,0,0,0,0,1])
reslice4.SetResliceAxesOrigin(0,0,40)
reslice4.SetInterpolationModeToLinear()
reslice4.SetOutputSpacing(3.2,3.2,1.5)
reslice4.SetOutputOrigin(-102.4,-102.4,0)
reslice4.SetOutputExtent(0,127,0,127,0,0)
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
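# Show the four results side by side: each renderer is assigned one quadrant
# of the 256x256 window via normalized viewport coordinates.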
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(256,256)
imgWin.Render()
# --- end of script --
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/ResliceMirrorPad.py | Python | gpl-3.0 | 3,424 |
"""
This app can be hooked into a page via the CMS template options.
The template file needs to be added to CMS_TEMPLATES in settings.
Content is controlled via django admin.
"""
__author__ = 'mwalker' | marksweb/django-cms-app-examples | clinics/__init__.py | Python | mit | 199 |
__author__ = 'Michael'
import codecs
import logging
class Ccedict():
@staticmethod
def splitLine(line):
"""Returns simplified character, pinyin, and definitions in a set"""
if (line.startswith("#")):
return None
pinyin_start = line.find("[")
pinyin_end = line.find("]", pinyin_start)
def_index = line.find("/", pinyin_end)
def_last_index = line.rfind("/") # Remove any trailing characters
if (pinyin_start == -1 or
pinyin_end == -1 or
def_index == -1 or
def_last_index == -1):
return None
hanzi = line[0:pinyin_start].strip().split(" ")[-1] # Simplified is the 2nd one
pinyin = line[pinyin_start + 1:pinyin_end]
return {
'id': "{0}[{1}]".format(hanzi, pinyin),
'hanzi': hanzi,
'pinyin': pinyin,
'definition': line[def_index:def_last_index + 1][1:-1].split("/")
}
def __init__(self, file_name):
"""Constructs a Ccedict object from the given dictionary file."""
self.words = {}
with codecs.open(file_name, "r", "utf-8") as dict:
for line in list(dict):
word = Ccedict.splitLine(line)
if (not word is None):
self.words[word["hanzi"]] = word
logging.info("Loaded cedict ({0} words)".format(len(self.words.keys())))
# Cheap unit tests
if __name__ == "__main__":
entry = Ccedict.splitLine(u"\u4fdd\u62a4")
if (not entry is None):
raise "Entry should be none"
entry = Ccedict.splitLine(u"\u4fdd\u62a4 \u4fdd\u62a5 [pei2gen1] /bacon/")
if (entry is None):
raise "Entry is none"
if (entry["id"] != u"\u4fdd\u62a5[pei2gen1]"):
raise "Entry ID is wrong"
if (entry["definition"][0] != "bacon"):
raise "Definition is wrong"
entry = Ccedict.splitLine(u"\u4fdd\u62a4 [pei2gen1] /bacon/")
if (entry is None):
raise "Entry is none"
if (entry["id"] != u"\u4fdd\u62a4[pei2gen1]"):
raise "Entry ID is wrong"
if (entry["definition"][0] != "bacon"):
raise "Definition is wrong"
| The80sCalled/flash-card-builder | ccedict.py | Python | mit | 2,205 |
from tests.models import Theme, Question, Option, Course, Document, Help
from rest_framework import routers, serializers, viewsets
# Serializers define the API representation.
class OptionSerializer(serializers.ModelSerializer):
class Meta:
model = Option
fields = ('right', 'text')
class HelpSerializer(serializers.ModelSerializer):
class Meta:
model = Help
fields = ('name', 'content')
class QuestionHyperlinkSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('url', 'text')
class QuestionSerializer(serializers.ModelSerializer):
options = OptionSerializer(many=True)
    # TODO: replace many=True with many=False (it seems many=False requires every question to have help)
help = HelpSerializer(many=True)
class Meta:
model = Question
        fields = ('id', 'text', 'help', 'options', 'theme')
def create(self, validated_data):
if len(validated_data['options']) < 2:
raise serializers.ValidationError('you need to send at least 2 options for question')
"""
same_questions = Question.objects.filter(text=validated_data['text'])
if len(same_questions) > 0:
for same_question in same_questions:
raise serializers.ValidationError({
'text' : 'you can not add same question in system, please update existing',
'id' : same_question.id})
"""
print(validated_data)
if ('theme' in validated_data):
question = Question.objects.create(text=validated_data['text'], theme=validated_data['theme'])
else:
raise serializers.ValidationError({ 'error':'question should be part of theme',
'text': validated_data['text']})
for option_data in validated_data['options']:
serializer = OptionSerializer(data=option_data)
if not serializer.is_valid():
raise serializers.ValidationError(serializer.errors)
option = Option.objects.create(text=option_data['text'], right=option_data['right'], question=question)
return question
class ThemeHyperlinkSerializer(serializers.ModelSerializer):
class Meta:
model = Theme
fields = ('name', 'url')
class ThemeSerializer(serializers.ModelSerializer):
questions = QuestionSerializer(many=True)
class Meta:
model = Theme
fields = ('name', 'course', 'questions')
    def create(self, validated_data):
        theme = Theme.objects.create(name=validated_data['name'], course=validated_data['course'])
        for question_data in validated_data['questions']:
            # Attach each nested question to the theme that was just created.
            question_data['theme'] = theme.id
            serializer = QuestionSerializer(data=question_data)
            if serializer.is_valid():
                serializer.save()
            else:
                raise serializers.ValidationError(serializer.errors)
        return theme
class DocumentSerializer(serializers.ModelSerializer):
class Meta:
model = Document
fields = ('name', 'content')
class CourseHyperlinkSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('name', 'url')
class CourseSerializer(serializers.ModelSerializer):
themes = ThemeHyperlinkSerializer(many=True)
documents = DocumentSerializer(many=True)
class Meta:
model = Course
fields = ('name', 'documents', 'themes')
depth = 1
| simpleTestSystem/Server_core | tests/serializers.py | Python | mit | 3,627 |
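# Hedged illustration of the nested payload QuestionSerializer.create() above
# expects; the field values are made up for the example.
example_question_payload = {
    "text": "Which planet is known as the Red Planet?",
    "theme": 1,
    "help": [],
    "options": [
        {"text": "Mars", "right": True},
        {"text": "Venus", "right": False},
    ],
}
# serializer = QuestionSerializer(data=example_question_payload)
# serializer.is_valid(raise_exception=True)
# question = serializer.save()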
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
# Functions & classes =========================================================
class Field(object):
def __init__(self, name, docstring, is_comm_field, is_db_field,
is_saveable=True):
self.name = name
self.docstring = docstring
self.is_comm_field = is_comm_field
self.is_db_field = is_db_field
self.is_saveable = is_saveable
def __repr__(self):
return "Field(name)" % self.name
| edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/structures/templates/tools/field.py | Python | mit | 619 |
#!/usr/bin/python2
# This is a decision tree classifier based on the scikit-learn example,
# taking as input an X and Y and any tree complexity parameters, and
# returning a classifier that can then be analyzed with the classifier.
# See the example in the main method for that and error-checking.
#
# Decision tree documentation:
# http://scikit-learn.org/stable/modules/tree.html
import sys
from imputation import load_data
from util import shuffle_split
from metrics import acc
from sklearn import tree
def classify(Xtrain, Ytrain):
""" Use entirety of provided X, Y to predict
Arguments
Xtrain -- Training data
Ytrain -- Training prediction
Returns
ready_tree -- a tree fitted to Xtrain and Ytrain
"""
ready_tree = tree.DecisionTreeClassifier()
ready_tree.fit(Xtrain, Ytrain)
return ready_tree
if __name__ == "__main__":
# Let's take our training data and train a decision tree
# on a subset. Scikit-learn provides a good module for cross-
# validation.
if len(sys.argv) < 2:
print "Usage: $ python decision-tree.py /path/to/data/file/"
else:
training = sys.argv[1]
X,Y,n,f = load_data(training)
Xt, Xv, Yt, Yv = shuffle_split(X,Y)
        clf = classify(Xt, Yt)
        print "Decision Tree Accuracy:", acc(Yv, clf.predict(Xv)), "%"
| bravelittlescientist/kdd-particle-physics-ml-fall13 | src/decision_tree.py | Python | gpl-2.0 | 1,339 |
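# Hedged sketch (assumes a recent scikit-learn): the comment block above points
# at scikit-learn's cross-validation support; cross_val_score gives a quick
# k-fold accuracy estimate for the same kind of classifier.
from sklearn import tree
from sklearn.model_selection import cross_val_score

def cross_validated_accuracy(X, Y, folds=5):
    clf = tree.DecisionTreeClassifier()
    return cross_val_score(clf, X, Y, cv=folds).mean()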
from abc import ABCMeta, abstractmethod
import itertools
from FibonacciHeap import FibHeap
import heapq
import Queue
class PriorityQueue():
__metaclass__ = ABCMeta
@abstractmethod
def __len__(self): pass
@abstractmethod
def insert(self, node): pass
@abstractmethod
def minimum(self): pass
@abstractmethod
def removeminimum(self): pass
@abstractmethod
def decreasekey(self, node, new_priority): pass
class FibPQ(PriorityQueue):
def __init__(self):
self.heap = FibHeap()
def __len__(self):
return self.heap.count
def insert(self, node):
self.heap.insert(node)
def minimum(self):
return self.heap.minimum()
def removeminimum(self):
return self.heap.removeminimum()
def decreasekey(self, node, new_priority):
self.heap.decreasekey(node, new_priority)
class HeapPQ(PriorityQueue):
def __init__(self):
self.pq = []
self.removed = set()
self.count = 0
def __len__(self):
return self.count
def insert(self, node):
entry = node.key, node.value
if entry in self.removed:
self.removed.discard(entry)
heapq.heappush(self.pq, entry)
self.count += 1
def minimum(self):
priority, item = heapq.heappop(self.pq)
node = FibHeap.Node(priority, item)
self.insert(node)
return node
def removeminimum(self):
while True:
(priority, item) = heapq.heappop(self.pq)
if (priority, item) in self.removed:
self.removed.discard((priority, item))
else:
self.count -= 1
return FibHeap.Node(priority, item)
def remove(self, node):
entry = node.key, node.value
if entry not in self.removed:
self.removed.add(entry)
self.count -= 1
def decreasekey(self, node, new_priority):
self.remove(node)
node.key = new_priority
self.insert(node)
class QueuePQ(PriorityQueue):
def __init__(self):
self.pq = Queue.PriorityQueue()
self.removed = set()
self.count = 0
def __len__(self):
return self.count
def insert(self, node):
entry = node.key, node.value
if entry in self.removed:
self.removed.discard(entry)
self.pq.put(entry)
self.count += 1
def minimum(self):
(priority, item) = self.pq.get_nowait()
node = FibHeap.Node(priority, item)
self.insert(node)
return node
def removeminimum(self):
while True:
(priority, item) = self.pq.get_nowait()
if (priority, item) in self.removed:
self.removed.discard((priority, item))
else:
self.count -= 1
return FibHeap.Node(priority, item)
def remove(self, node):
entry = node.key, node.value
if entry not in self.removed:
self.removed.add(entry)
self.count -= 1
def decreasekey(self, node, new_priority):
self.remove(node)
node.key = new_priority
self.insert(node)
| mikepound/mazesolving | priority_queue.py | Python | unlicense | 3,189 |
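# Hedged usage sketch for the HeapPQ implementation above (keys and values are
# made up): nodes are FibHeap.Node instances keyed by priority.
from FibonacciHeap import FibHeap
from priority_queue import HeapPQ

pq = HeapPQ()
pq.insert(FibHeap.Node(3, "c"))
pq.insert(FibHeap.Node(1, "a"))
pq.insert(FibHeap.Node(2, "b"))
smallest = pq.removeminimum()
print("{0} {1}".format(smallest.key, smallest.value))  # expected: 1 a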
#!/usr/bin/env python
'''
Copyright (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import math, inkex, simplestyle, simplepath, bezmisc
class Motion(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("-a", "--angle",
action="store", type="float",
dest="angle", default=45.0,
help="direction of the motion vector")
self.OptionParser.add_option("-m", "--magnitude",
action="store", type="float",
dest="magnitude", default=100.0,
help="magnitude of the motion vector")
def makeface(self,last,(cmd, params)):
a = []
a.append(['M',last[:]])
a.append([cmd, params[:]])
#translate path segment along vector
np = params[:]
defs = simplepath.pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
np[i] += self.vx
elif defs[3][i] == 'y':
np[i] += self.vy
a.append(['L',[np[-2],np[-1]]])
#reverse direction of path segment
np[-2:] = last[0]+self.vx,last[1]+self.vy
if cmd == 'C':
c1 = np[:2], np[2:4] = np[2:4], np[:2]
a.append([cmd,np[:]])
a.append(['Z',[]])
face = inkex.etree.SubElement(self.facegroup,inkex.addNS('path','svg'),{'d':simplepath.formatPath(a)})
def effect(self):
self.vx = math.cos(math.radians(self.options.angle))*self.options.magnitude
self.vy = math.sin(math.radians(self.options.angle))*self.options.magnitude
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
group = inkex.etree.SubElement(node.getparent(),inkex.addNS('g','svg'))
self.facegroup = inkex.etree.SubElement(group, inkex.addNS('g','svg'))
group.append(node)
t = node.get('transform')
if t:
group.set('transform', t)
node.set('transform','')
s = node.get('style')
self.facegroup.set('style', s)
p = simplepath.parsePath(node.get('d'))
for cmd,params in p:
tees = []
if cmd == 'C':
bez = (last,params[:2],params[2:4],params[-2:])
tees = [t for t in bezmisc.beziertatslope(bez,(self.vy,self.vx)) if 0<t<1]
tees.sort()
segments = []
if len(tees) == 0 and cmd in ['L','C']:
segments.append([cmd,params[:]])
elif len(tees) == 1:
one,two = bezmisc.beziersplitatt(bez,tees[0])
segments.append([cmd,list(one[1]+one[2]+one[3])])
segments.append([cmd,list(two[1]+two[2]+two[3])])
elif len(tees) == 2:
one,two = bezmisc.beziersplitatt(bez,tees[0])
two,three = bezmisc.beziersplitatt(two,tees[1])
segments.append([cmd,list(one[1]+one[2]+one[3])])
segments.append([cmd,list(two[1]+two[2]+two[3])])
segments.append([cmd,list(three[1]+three[2]+three[3])])
for seg in segments:
self.makeface(last,seg)
last = seg[1][-2:]
if cmd == 'M':
subPathStart = params[-2:]
if cmd == 'Z':
last = subPathStart
else:
last = params[-2:]
if __name__ == '__main__':
e = Motion()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8 textwidth=99
| step21/inkscape-osx-packaging-native | packaging/macosx/Inkscape.app/Contents/Resources/extensions/motion.py | Python | lgpl-2.1 | 4,709 |
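# Hedged sketch of driving the Motion effect above outside of Inkscape; the
# option values and the file name "drawing.svg" are placeholders. With the old
# inkex API, affect() parses the arguments and writes the modified SVG to
# standard output.
from motion import Motion

e = Motion()
e.affect(['--angle=45', '--magnitude=100', 'drawing.svg'])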
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xlwt
from datetime import datetime
from openerp.osv import orm
from openerp.report import report_sxw
from openerp.addons.report_xls.report_xls import report_xls
from openerp.addons.report_xls.utils import rowcol_to_cell, _render
from openerp.tools.translate import translate, _
import logging
_logger = logging.getLogger(__name__)
_ir_translation_name = 'move.line.list.xls'
class move_line_xls_parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(move_line_xls_parser, self).__init__(
cr, uid, name, context=context)
move_obj = self.pool.get('account.move.line')
self.context = context
wanted_list = move_obj._report_xls_fields(cr, uid, context)
template_changes = move_obj._report_xls_template(cr, uid, context)
self.localcontext.update({
'datetime': datetime,
'wanted_list': wanted_list,
'template_changes': template_changes,
'_': self._,
})
def _(self, src):
lang = self.context.get('lang', 'en_US')
return translate(self.cr, _ir_translation_name, 'report', lang, src) \
or src
class move_line_xls(report_xls):
def __init__(self, name, table, rml=False, parser=False, header=True,
store=False):
super(move_line_xls, self).__init__(
name, table, rml, parser, header, store)
# Cell Styles
_xs = self.xls_styles
# header
rh_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.rh_cell_style = xlwt.easyxf(rh_cell_format)
self.rh_cell_style_center = xlwt.easyxf(rh_cell_format + _xs['center'])
self.rh_cell_style_right = xlwt.easyxf(rh_cell_format + _xs['right'])
# lines
aml_cell_format = _xs['borders_all']
self.aml_cell_style = xlwt.easyxf(aml_cell_format)
self.aml_cell_style_center = xlwt.easyxf(
aml_cell_format + _xs['center'])
self.aml_cell_style_date = xlwt.easyxf(
aml_cell_format + _xs['left'],
num_format_str=report_xls.date_format)
self.aml_cell_style_decimal = xlwt.easyxf(
aml_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# totals
rt_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.rt_cell_style = xlwt.easyxf(rt_cell_format)
self.rt_cell_style_right = xlwt.easyxf(rt_cell_format + _xs['right'])
self.rt_cell_style_decimal = xlwt.easyxf(
rt_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# XLS Template
self.col_specs_template = {
'move': {
'header': [1, 20, 'text', _render("_('Entry')")],
'lines': [1, 0, 'text', _render("line.move_id.name or ''")],
'totals': [1, 0, 'text', None]},
'name': {
'header': [1, 42, 'text', _render("_('Name')")],
'lines': [1, 0, 'text', _render("line.name or ''")],
'totals': [1, 0, 'text', None]},
'ref': {
'header': [1, 42, 'text', _render("_('Reference')")],
'lines': [1, 0, 'text', _render("line.ref or ''")],
'totals': [1, 0, 'text', None]},
'date': {
'header': [1, 13, 'text', _render("_('Effective Date')")],
'lines': [1, 0, 'date',
_render("datetime.strptime(line.date,'%Y-%m-%d')"),
None, self.aml_cell_style_date],
'totals': [1, 0, 'text', None]},
'period': {
'header': [1, 12, 'text', _render("_('Period')")],
'lines':
[1, 0, 'text',
_render("line.period_id.code or line.period_id.name")],
'totals': [1, 0, 'text', None]},
'partner': {
'header': [1, 36, 'text', _render("_('Partner')")],
'lines':
[1, 0, 'text',
_render("line.partner_id and line.partner_id.name or ''")],
'totals': [1, 0, 'text', None]},
'partner_ref': {
'header': [1, 36, 'text', _render("_('Partner Reference')")],
'lines':
[1, 0, 'text',
_render("line.partner_id and line.partner_id.ref or ''")],
'totals': [1, 0, 'text', None]},
'account': {
'header': [1, 12, 'text', _render("_('Account')")],
'lines': [1, 0, 'text', _render("line.account_id.code")],
'totals': [1, 0, 'text', None]},
'date_maturity': {
'header': [1, 13, 'text', _render("_('Maturity Date')")],
'lines':
[1, 0,
_render("line.date_maturity.val and 'date' or 'text'"),
_render(
"line.date_maturity.val \
and datetime.strptime(line.date_maturity,'%Y-%m-%d') \
or None"),
None, self.aml_cell_style_date],
'totals': [1, 0, 'text', None]},
'debit': {
'header': [1, 18, 'text', _render("_('Debit')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', _render("line.debit"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'number', None, _render("debit_formula"),
self.rt_cell_style_decimal]},
'credit': {
'header': [1, 18, 'text', _render("_('Credit')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', _render("line.credit"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'number', None, _render("credit_formula"),
self.rt_cell_style_decimal]},
'balance': {
'header': [1, 18, 'text', _render("_('Balance')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', None, _render("bal_formula"),
self.aml_cell_style_decimal],
'totals': [1, 0, 'number', None, _render("bal_formula"),
self.rt_cell_style_decimal]},
'reconcile': {
'header': [1, 12, 'text', _render("_('Rec.')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text',
_render("line.reconcile_id.name or ''"), None,
self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'reconcile_partial': {
'header': [1, 12, 'text', _render("_('Part. Rec.')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text',
_render("line.reconcile_partial_id.name or ''"),
None, self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'tax_code': {
'header': [1, 12, 'text', _render("_('Tax Code')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("line.tax_code_id.code or ''"),
None, self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'tax_amount': {
'header': [1, 18, 'text', _render("_('Tax/Base Amount')"),
None, self.rh_cell_style_right],
'lines': [1, 0, 'number', _render("line.tax_amount"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'amount_currency': {
'header': [1, 18, 'text', _render("_('Am. Currency')"), None,
self.rh_cell_style_right],
'lines':
[1, 0,
_render("line.amount_currency and 'number' or 'text'"),
_render("line.amount_currency or None"),
None, self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'currency_name': {
'header': [1, 6, 'text', _render("_('Curr.')"), None,
self.rh_cell_style_center],
'lines':
[1, 0, 'text',
_render("line.currency_id and line.currency_id.name or ''"),
None, self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'journal': {
'header': [1, 12, 'text', _render("_('Journal')")],
'lines': [1, 0, 'text', _render("line.journal_id.code or ''")],
'totals': [1, 0, 'text', None]},
'company_currency': {
'header': [1, 10, 'text', _render("_('Comp. Curr.')")],
'lines': [1, 0, 'text',
_render("line.company_id.currency_id.name or ''"),
None, self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'analytic_account': {
'header': [1, 36, 'text', _render("_('Analytic Account')")],
'lines': [1, 0, 'text',
_render("line.analytic_account_id.code or ''")],
'totals': [1, 0, 'text', None]},
'product': {
'header': [1, 36, 'text', _render("_('Product')")],
'lines': [1, 0, 'text', _render("line.product_id.name or ''")],
'totals': [1, 0, 'text', None]},
'product_ref': {
'header': [1, 36, 'text', _render("_('Product Reference')")],
'lines': [1, 0, 'text',
_render("line.product_id.default_code or ''")],
'totals': [1, 0, 'text', None]},
'product_uom': {
'header': [1, 20, 'text', _render("_('Unit of Measure')")],
'lines': [1, 0, 'text',
_render("line.product_uom_id.name or ''")],
'totals': [1, 0, 'text', None]},
'quantity': {
'header': [1, 8, 'text', _render("_('Qty')"), None,
self.rh_cell_style_right],
'lines': [1, 0,
_render("line.quantity and 'number' or 'text'"),
_render("line.quantity or None"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'statement': {
'header': [1, 20, 'text', _render("_('Statement')")],
'lines':
[1, 0, 'text',
_render("line.statement_id and line.statement_id.name or ''")
],
'totals': [1, 0, 'text', None]},
'invoice': {
'header': [1, 20, 'text', _render("_('Invoice')")],
'lines':
[1, 0, 'text',
_render("line.invoice and line.invoice.number or ''")],
'totals': [1, 0, 'text', None]},
'amount_residual': {
'header': [1, 18, 'text', _render("_('Residual Amount')"),
None, self.rh_cell_style_right],
'lines':
[1, 0,
_render("line.amount_residual and 'number' or 'text'"),
_render("line.amount_residual or None"),
None, self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'amount_residual_currency': {
'header': [1, 18, 'text', _render("_('Res. Am. in Curr.')"),
None, self.rh_cell_style_right],
'lines':
[1, 0,
_render(
"line.amount_residual_currency and 'number' or 'text'"),
_render("line.amount_residual_currency or None"),
None, self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'narration': {
'header': [1, 42, 'text', _render("_('Notes')")],
'lines': [1, 0, 'text',
_render("line.move_id.narration or ''")],
'totals': [1, 0, 'text', None]},
'blocked': {
'header': [1, 4, 'text', _('Lit.'),
None, self.rh_cell_style_right],
'lines': [1, 0, 'text', _render("line.blocked and 'x' or ''"),
None, self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
}
def generate_xls_report(self, _p, _xs, data, objects, wb):
wanted_list = _p.wanted_list
self.col_specs_template.update(_p.template_changes)
_ = _p._
debit_pos = 'debit' in wanted_list and wanted_list.index('debit')
credit_pos = 'credit' in wanted_list and wanted_list.index('credit')
if not (credit_pos and debit_pos) and 'balance' in wanted_list:
raise orm.except_orm(
_('Customisation Error!'),
_("The 'Balance' field is a calculated XLS field requiring \
the presence of the 'Debit' and 'Credit' fields !"))
# report_name = objects[0]._description or objects[0]._name
report_name = _("Journal Items")
ws = wb.add_sheet(report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
row_pos = 0
# set print header/footer
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
# Title
cell_style = xlwt.easyxf(_xs['xls_title'])
c_specs = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs, ['report_name'])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=cell_style)
row_pos += 1
# Column headers
c_specs = map(lambda x: self.render(
x, self.col_specs_template, 'header', render_space={'_': _p._}),
wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rh_cell_style,
set_column_size=True)
ws.set_horz_split_pos(row_pos)
# account move lines
for line in objects:
debit_cell = rowcol_to_cell(row_pos, debit_pos)
credit_cell = rowcol_to_cell(row_pos, credit_pos)
bal_formula = debit_cell + '-' + credit_cell
_logger.debug('dummy call - %s', bal_formula)
c_specs = map(
lambda x: self.render(x, self.col_specs_template, 'lines'),
wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.aml_cell_style)
# Totals
aml_cnt = len(objects)
debit_start = rowcol_to_cell(row_pos - aml_cnt, debit_pos)
debit_stop = rowcol_to_cell(row_pos - 1, debit_pos)
debit_formula = 'SUM(%s:%s)' % (debit_start, debit_stop)
_logger.debug('dummy call - %s', debit_formula)
credit_start = rowcol_to_cell(row_pos - aml_cnt, credit_pos)
credit_stop = rowcol_to_cell(row_pos - 1, credit_pos)
credit_formula = 'SUM(%s:%s)' % (credit_start, credit_stop)
_logger.debug('dummy call - %s', credit_formula)
debit_cell = rowcol_to_cell(row_pos, debit_pos)
credit_cell = rowcol_to_cell(row_pos, credit_pos)
bal_formula = debit_cell + '-' + credit_cell
_logger.debug('dummy call - %s', bal_formula)
c_specs = map(
lambda x: self.render(x, self.col_specs_template, 'totals'),
wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rt_cell_style_right)
move_line_xls('report.move.line.list.xls',
'account.move.line',
parser=move_line_xls_parser)
| rschnapka/account-financial-reporting | account_move_line_report_xls/report/move_line_list_xls.py | Python | agpl-3.0 | 17,500 |
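# Hedged customization sketch for the report above: the parser asks
# account.move.line for _report_xls_fields(), so another module can trim the
# rendered columns by overriding that method. The column selection below is an
# assumption; note that 'balance' requires 'debit' and 'credit' to be present.
from openerp.osv import orm

class account_move_line(orm.Model):
    _inherit = 'account.move.line'

    def _report_xls_fields(self, cr, uid, context=None):
        return ['move', 'date', 'period', 'account', 'partner',
                'debit', 'credit', 'balance']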
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************
espressopp.integrator.CapForce
******************************
This class can be used to forcecap all particles or a group of particles.
Force capping means that the force vector of a particle is rescaled
so that the length of the force vector is <= capforce
Example Usage:
>>> capForce = espressopp.integrator.CapForce(system, 1000.0)
>>> integrator.addExtension(capForce)
CapForce can also be used to forcecap only a group of particles:
>>> particle_group = [45, 67, 89, 103]
>>> capForce = espressopp.integrator.CapForce(system, 1000.0, particle_group)
>>> integrator.addExtension(capForce)
.. function:: espressopp.integrator.CapForce(system, capForce, particleGroup)
:param system:
:param capForce:
:param particleGroup: (default: None)
:type system:
:type capForce:
:type particleGroup:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_CapForce
class CapForceLocal(ExtensionLocal, integrator_CapForce):
def __init__(self, system, capForce, particleGroup = None):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if (particleGroup == None) or (particleGroup.size() == 0):
cxxinit(self, integrator_CapForce, system, capForce)
else:
cxxinit(self, integrator_CapForce, system, capForce, particleGroup)
if pmi.isController :
class CapForce(Extension, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.integrator.CapForceLocal',
pmicall = ['setCapForce', 'setAbsCapForce', 'getCapForce', 'getAbsCapForce'],
pmiproperty = [ 'particleGroup', 'adress' ]
)
| espressopp/espressopp | src/integrator/CapForce.py | Python | gpl-3.0 | 2,764 |
# Copyright 2008, 2009 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
import logging
from nox.lib.core import *
from collections import defaultdict
from itertools import chain
import nox.lib.openflow as openflow
from nox.lib.packet.packet_utils import mac_to_str
from nox.lib.netinet.netinet import datapathid,create_ipaddr,c_htonl
from nox.netapps.switchstats.pycswitchstats import pycswitchstats
from twisted.python import log
from nox.lib.directory import Directory
from nox.lib.directory import LocationInfo
# Default values for the periodicity of polling for each class of
# statistic
DEFAULT_POLL_TABLE_PERIOD = 5
DEFAULT_POLL_PORT_PERIOD = 5
DEFAULT_POLL_AGGREGATE_PERIOD = 5
lg = logging.getLogger('switchstats')
## \ingroup noxcomponents
# Collects and maintains switch and port stats for the network.
#
# Monitors switch and port stats by sending out port_stats requests
# periodically to all connected switches.
#
# The primary method of accessing the ports stats is through the
# webserver (see switchstatsws.py) however, components can also
# register port listeners which are called each time stats are
# received for a particular port.
#
class switchstats(Component):
"""Track switch statistics during runtime"""
def add_port_listener(self, dpid, port, listener):
self.port_listeners[dpid][port].append(listener)
def remove_port_listener(self, dpid, port, listener):
try:
self.port_listeners[dpid][port].remove(listener)
except Exception, e:
lg.warn('Failed to remove port %d from dpid %d' %(port, dpid))
pass
def fire_port_listeners(self, dpid, portno, port):
for listener in self.port_listeners[dpid][portno]:
if not listener(port):
self.remove_port_listener(dpid, portno, listener)
def __init__(self, ctxt):
Component.__init__(self, ctxt)
# {dpid : {port : [listeners]}}
self.port_listeners = defaultdict(lambda: defaultdict(list))
self.dp_stats = {}
self.dp_poll_period = {}
self.dp_table_stats = {}
self.dp_desc_stats = {}
self.dp_port_stats = {}
def port_timer(self, dp):
if dp in self.dp_stats:
self.ctxt.send_port_stats_request(dp)
self.post_callback(self.dp_poll_period[dp]['port'] + 1, lambda : self.port_timer(dp))
def table_timer(self, dp):
if dp in self.dp_stats:
self.ctxt.send_table_stats_request(dp)
self.post_callback(self.dp_poll_period[dp]['table'], lambda : self.table_timer(dp))
def dp_join(self, dp, stats):
dpid_obj = datapathid.from_host(dp)
stats['dpid'] = dp
self.dp_stats[dp] = stats
# convert all port hw_addrs to ASCII
# and register all port names with bindings storage
port_list = self.dp_stats[dp]['ports']
for i in range(0,len(port_list)):
new_mac = mac_to_str(port_list[i]['hw_addr']).replace(':','-')
port_list[i]['hw_addr'] = new_mac
# polling intervals for switch statistics
self.dp_poll_period[dp] = {}
self.dp_poll_period[dp]['table'] = DEFAULT_POLL_TABLE_PERIOD
self.dp_poll_period[dp]['port'] = DEFAULT_POLL_PORT_PERIOD
self.dp_poll_period[dp]['aggr'] = DEFAULT_POLL_AGGREGATE_PERIOD
# Switch descriptions do not change while connected, so just send once
self.ctxt.send_desc_stats_request(dp)
# stagger timers by one second
self.post_callback(self.dp_poll_period[dp]['table'],
lambda : self.table_timer(dp))
self.post_callback(self.dp_poll_period[dp]['port'] + 1,
lambda : self.port_timer(dp))
return CONTINUE
def dp_leave(self, dp):
dpid_obj = datapathid.from_host(dp)
if self.dp_stats.has_key(dp):
del self.dp_stats[dp]
else:
log.err('Unknown datapath leave', system='switchstats')
if self.dp_poll_period.has_key(dp):
del self.dp_poll_period[dp]
if self.dp_table_stats.has_key(dp):
del self.dp_table_stats[dp]
if self.dp_desc_stats.has_key(dp):
del self.dp_desc_stats[dp]
if self.dp_port_stats.has_key(dp):
del self.dp_port_stats[dp]
if dp in self.port_listeners:
del self.port_listeners[dp]
return CONTINUE
def map_name_to_portno(self, dpid, name):
for port in self.dp_stats[dpid]['ports']:
if port['name'] == name:
return port['port_no']
return None
def table_stats_in_handler(self, dpid, tables):
# Merge the new info in tables with the older info in dp_table_stats,
# replacing entries with the same 'name'.
if dpid not in self.dp_table_stats:
self.dp_table_stats[dpid] = []
self.dp_table_stats[dpid] = dict(chain(
((m['name'],m) for m in self.dp_table_stats[dpid]),
((m['name'],m) for m in tables))).values()
def desc_stats_in_handler(self, dpid, desc):
self.dp_desc_stats[dpid] = desc
ip = self.ctxt.get_switch_ip(dpid)
self.dp_desc_stats[dpid]["ip"] = str(create_ipaddr(c_htonl(ip)))
def port_stats_in_handler(self, dpid, ports):
if dpid not in self.dp_port_stats:
new_ports = {}
for port in ports:
port['delta_bytes'] = 0
new_ports[port['port_no']] = port
self.dp_port_stats[dpid] = new_ports
return
new_ports = {}
for port in ports:
if port['port_no'] in self.dp_port_stats[dpid]:
port['delta_bytes'] = port['tx_bytes'] - \
self.dp_port_stats[dpid][port['port_no']]['tx_bytes']
new_ports[port['port_no']] = port
else:
port['delta_bytes'] = 0
new_ports[port['port_no']] = port
# XXX Fire listeners for port stats
self.fire_port_listeners(dpid, port['port_no'], port)
self.dp_port_stats[dpid] = new_ports
def port_status_handler(self, dpid, reason, port):
intdp = int(dpid)
if intdp not in self.dp_stats:
log.err('port status from unknown datapath', system='switchstats')
return
# copy over existing port status
for i in range(0, len(self.dp_stats[intdp]['ports'])):
oldport = self.dp_stats[intdp]['ports'][i]
if oldport['name'] == port['name']:
port['hw_addr'] = mac_to_str(port['hw_addr']).replace(':','-')
self.dp_stats[intdp]['ports'][i] = port
def get_switch_conn_p_s_heavy_hitters(self):
hitters = []
for dp in self.dp_stats:
hitters.append((dp, self.cswitchstats.get_switch_conn_p_s(dp)))
return hitters
def get_switch_port_error_heavy_hitters(self):
error_list = []
for dpid in self.dp_port_stats:
ports = self.dp_port_stats[dpid].values()
for port in ports:
error_list.append((dpid, port['port_no'], port['rx_errors'] + port['tx_errors']))
return error_list
def get_switch_port_bandwidth_hitters(self):
error_list = []
for dpid in self.dp_port_stats:
ports = self.dp_port_stats[dpid].values()
for port in ports:
error_list.append((dpid, port['port_no'],
(port['delta_bytes']) / DEFAULT_POLL_PORT_PERIOD))
return error_list
def get_global_conn_p_s(self):
return self.cswitchstats.get_global_conn_p_s()
def get_switch_conn_p_s(self, dpid):
return self.cswitchstats.get_switch_conn_p_s(datapathid.from_host(dpid))
def install(self):
self.cswitchstats = self.resolve(pycswitchstats)
self.register_for_datapath_join (self.dp_join)
self.register_for_datapath_leave(self.dp_leave)
self.register_for_table_stats_in(self.table_stats_in_handler)
self.register_for_desc_stats_in(self.desc_stats_in_handler)
self.register_for_port_stats_in(self.port_stats_in_handler)
self.register_for_port_status(self.port_status_handler)
def getInterface(self):
return str(switchstats)
def getFactory():
class Factory:
def instance(self, ctxt):
return switchstats(ctxt)
return Factory()
| tlodge/homehub.nox | src/nox/netapps/switchstats/switchstats.py | Python | gpl-3.0 | 9,266 |
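# Hedged sketch of the port-listener hook described above (datapath id and port
# number are made up, and the NOX getFactory() registration boilerplate is
# omitted): a component resolves switchstats and registers a callback that is
# fired whenever stats arrive for that port; returning True keeps it registered.
import logging
from nox.lib.core import Component
from nox.netapps.switchstats.switchstats import switchstats

watch_log = logging.getLogger('portwatcher')

class portwatcher(Component):
    def __init__(self, ctxt):
        Component.__init__(self, ctxt)

    def install(self):
        stats = self.resolve(switchstats)
        def on_port_stats(port):
            watch_log.info('port %s tx_bytes %s', port['port_no'], port['tx_bytes'])
            return True
        stats.add_port_listener(1, 1, on_port_stats)

    def getInterface(self):
        return str(portwatcher)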
import copy
from nose.tools import *
from vcloudtools.vcloud import Link, Org, OrgList
MOCK_LINK = {
'type': 'application/foo+xml',
'href': 'https://test/foo',
'rel': 'test_rel',
'name': 'foo',
}
MOCK_ORG = {
'type': 'application/vnd.vmware.vcloud.org+xml',
'href': 'http://test-api-client/org/734772e4-166f-4dcc-9391-d35ddafce90e',
'name': 'Test-Org',
'id': 'urn:vcloud:org:734772e4-166f-4dcc-9391-d35ddafce90e',
'full_name': 'Test Organisation',
'description': None,
'links': { },
}
class TestLink(object):
def setup(self):
self.l = copy.deepcopy(MOCK_LINK)
def test_name_optional(self):
self.l.pop('name')
link = Link(**self.l)
assert_equal(link.name, None)
class TestOrg(object):
def test_optional(self):
def _test(key):
o = copy.deepcopy(MOCK_ORG)
o.pop(key)
org = Org(**o)
assert_equal(getattr(org, key), None)
for k in ['id', 'full_name', 'description', 'links']:
yield _test, k
class TestOrgList(object):
def test_org_by_name(self):
foo = Org(type='testtype', href='testhref', name='foo')
bar = Org(type='testtype', href='testhref', name='bar')
baz = Org(type='testtype', href='testhref', name='baz')
olist = OrgList(orgs=[foo, bar, baz])
assert_equal(olist.org_by_name('bar'), bar)
| exoscale/vcloudtools | test/unit/test_vcloud.py | Python | mit | 1,412 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventhub.v2021_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
"""Lists all of the available Event Hub REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventhub.v2021_01_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.EventHub/operations'} # type: ignore
| Azure/azure-sdk-for-python | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2021_01_01_preview/aio/operations/_operations.py | Python | mit | 4,822 |
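# Hedged usage sketch for the async Operations.list call above; the credential
# and subscription id are placeholders, and EventHubManagementClient is the
# standard azure-mgmt-eventhub entry point that attaches this operation group.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.eventhub.aio import EventHubManagementClient

async def list_eventhub_operations():
    async with DefaultAzureCredential() as credential:
        async with EventHubManagementClient(credential, "<subscription-id>") as client:
            async for op in client.operations.list():
                print(op.name)

asyncio.run(list_eventhub_operations())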
import logging
from unittest import TestCase, skipIf
from hvac import exceptions
from tests import utils
from tests.utils.hvac_integration_test_case import HvacIntegrationTestCase
class IntegrationTest(HvacIntegrationTestCase, TestCase):
def setUp(self):
super(IntegrationTest, self).setUp()
if "secret/" not in self.client.sys.list_mounted_secrets_engines()["data"]:
self.client.sys.enable_secrets_engine(
backend_type="kv",
path="secret",
options=dict(version=1),
)
def test_generic_secret_backend(self):
self.client.write("secret/foo", zap="zip")
result = self.client.read("secret/foo")
assert result["data"]["zap"] == "zip"
self.client.delete("secret/foo")
def test_list_directory(self):
self.client.write("secret/test-list/bar/foo", value="bar")
self.client.write("secret/test-list/foo", value="bar")
result = self.client.list("secret/test-list")
assert result["data"]["keys"] == ["bar/", "foo"]
self.client.delete("secret/test-list/bar/foo")
self.client.delete("secret/test-list/foo")
def test_write_with_response(self):
if "transit/" in self.client.sys.list_mounted_secrets_engines()["data"]:
self.client.sys.disable_secrets_engine("transit")
self.client.sys.enable_secrets_engine("transit")
plaintext = "test"
self.client.write("transit/keys/foo")
result = self.client.write("transit/encrypt/foo", plaintext=plaintext)
ciphertext = result["data"]["ciphertext"]
result = self.client.write("transit/decrypt/foo", ciphertext=ciphertext)
assert result["data"]["plaintext"] == plaintext
def test_read_nonexistent_key(self):
assert not self.client.read("secret/I/dont/exist")
def test_auth_token_manipulation(self):
result = self.client.create_token(lease="1h", renewable=True)
assert result["auth"]["client_token"]
lookup = self.client.lookup_token(result["auth"]["client_token"])
assert result["auth"]["client_token"] == lookup["data"]["id"]
renew = self.client.renew_token(lookup["data"]["id"])
assert result["auth"]["client_token"] == renew["auth"]["client_token"]
self.client.revoke_token(lookup["data"]["id"])
try:
lookup = self.client.lookup_token(result["auth"]["client_token"])
assert False
except exceptions.Forbidden:
assert True
except exceptions.InvalidPath:
assert True
except exceptions.InvalidRequest:
assert True
def test_self_auth_token_manipulation(self):
result = self.client.create_token(lease="1h", renewable=True)
assert result["auth"]["client_token"]
self.client.token = result["auth"]["client_token"]
lookup = self.client.lookup_token(result["auth"]["client_token"])
assert result["auth"]["client_token"] == lookup["data"]["id"]
renew = self.client.renew_self_token()
assert result["auth"]["client_token"] == renew["auth"]["client_token"]
self.client.revoke_token(lookup["data"]["id"])
try:
lookup = self.client.lookup_token(result["auth"]["client_token"])
assert False
except exceptions.Forbidden:
assert True
except exceptions.InvalidPath:
assert True
except exceptions.InvalidRequest:
assert True
def test_userpass_auth(self):
if "userpass/" in self.client.sys.list_auth_methods()["data"]:
self.client.sys.disable_auth_method("userpass")
self.client.sys.enable_auth_method("userpass")
self.client.write(
"auth/userpass/users/testuser", password="testpass", policies="not_root"
)
result = self.client.auth_userpass("testuser", "testpass")
assert self.client.token == result["auth"]["client_token"]
assert self.client.is_authenticated()
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method("userpass")
def test_create_userpass(self):
if "userpass/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("userpass")
self.client.create_userpass(
"testcreateuser", "testcreateuserpass", policies="not_root"
)
result = self.client.auth_userpass("testcreateuser", "testcreateuserpass")
assert self.client.token == result["auth"]["client_token"]
assert self.client.is_authenticated()
# Test ttl:
self.client.token = self.manager.root_token
self.client.create_userpass(
"testcreateuser", "testcreateuserpass", policies="not_root", ttl="10s"
)
self.client.token = result["auth"]["client_token"]
result = self.client.auth_userpass("testcreateuser", "testcreateuserpass")
assert result["auth"]["lease_duration"] == 10
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method("userpass")
def test_list_userpass(self):
if "userpass/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("userpass")
# add some users and confirm that they show up in the list
self.client.create_userpass(
"testuserone", "testuseronepass", policies="not_root"
)
self.client.create_userpass(
"testusertwo", "testusertwopass", policies="not_root"
)
user_list = self.client.list_userpass()
assert "testuserone" in user_list["data"]["keys"]
assert "testusertwo" in user_list["data"]["keys"]
# delete all the users and confirm that list_userpass() doesn't fail
for user in user_list["data"]["keys"]:
self.client.delete_userpass(user)
no_users_list = self.client.list_userpass()
assert no_users_list is None
def test_read_userpass(self):
if "userpass/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("userpass")
# create user to read
self.client.create_userpass("readme", "mypassword", policies="not_root")
# test that user can be read
read_user = self.client.read_userpass("readme")
assert "not_root" in read_user["data"]["policies"]
# teardown
self.client.sys.disable_auth_method("userpass")
def test_update_userpass_policies(self):
if "userpass/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("userpass")
# create user and then update its policies
self.client.create_userpass(
"updatemypolicies", "mypassword", policies="not_root"
)
self.client.update_userpass_policies(
"updatemypolicies", policies="somethingelse"
)
# test that policies have changed
updated_user = self.client.read_userpass("updatemypolicies")
assert "somethingelse" in updated_user["data"]["policies"]
# teardown
self.client.sys.disable_auth_method("userpass")
def test_update_userpass_password(self):
if "userpass/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("userpass")
# create user and then change its password
self.client.create_userpass("changeme", "mypassword", policies="not_root")
self.client.update_userpass_password("changeme", "mynewpassword")
# test that new password authenticates user
result = self.client.auth_userpass("changeme", "mynewpassword")
assert self.client.token == result["auth"]["client_token"]
assert self.client.is_authenticated()
# teardown
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method("userpass")
def test_delete_userpass(self):
if "userpass/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("userpass")
self.client.create_userpass(
"testcreateuser", "testcreateuserpass", policies="not_root"
)
result = self.client.auth_userpass("testcreateuser", "testcreateuserpass")
assert self.client.token == result["auth"]["client_token"]
assert self.client.is_authenticated()
self.client.token = self.manager.root_token
self.client.delete_userpass("testcreateuser")
self.assertRaises(
exceptions.InvalidRequest,
self.client.auth_userpass,
"testcreateuser",
"testcreateuserpass",
)
def test_app_id_auth(self):
if "app-id/" in self.client.sys.list_auth_methods()["data"]:
self.client.sys.disable_auth_method("app-id")
self.client.sys.enable_auth_method("app-id")
self.client.write("auth/app-id/map/app-id/foo", value="not_root")
self.client.write("auth/app-id/map/user-id/bar", value="foo")
result = self.client.auth_app_id("foo", "bar")
assert self.client.token == result["auth"]["client_token"]
assert self.client.is_authenticated()
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method("app-id")
def test_create_app_id(self):
if "app-id/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("app-id")
self.client.create_app_id(
"testappid", policies="not_root", display_name="displayname"
)
result = self.client.read("auth/app-id/map/app-id/testappid")
lib_result = self.client.get_app_id("testappid")
del result["request_id"]
del lib_result["request_id"]
assert result == lib_result
assert result["data"]["key"] == "testappid"
assert result["data"]["display_name"] == "displayname"
assert result["data"]["value"] == "not_root"
self.client.delete_app_id("testappid")
assert self.client.get_app_id("testappid")["data"] is None
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method("app-id")
def test_create_user_id(self):
if "app-id/" not in self.client.sys.list_auth_methods()["data"]:
self.client.sys.enable_auth_method("app-id")
self.client.create_app_id(
"testappid", policies="not_root", display_name="displayname"
)
self.client.create_user_id("testuserid", app_id="testappid")
result = self.client.read("auth/app-id/map/user-id/testuserid")
lib_result = self.client.get_user_id("testuserid")
del result["request_id"]
del lib_result["request_id"]
assert result == lib_result
assert result["data"]["key"] == "testuserid"
assert result["data"]["value"] == "testappid"
result = self.client.auth_app_id("testappid", "testuserid")
assert self.client.token == result["auth"]["client_token"]
assert self.client.is_authenticated()
self.client.token = self.manager.root_token
self.client.delete_user_id("testuserid")
assert self.client.get_user_id("testuserid")["data"] is None
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method("app-id")
def test_missing_token(self):
client = utils.create_client()
assert not client.is_authenticated()
def test_invalid_token(self):
client = utils.create_client(token="not-a-real-token")
assert not client.is_authenticated()
def test_illegal_token(self):
client = utils.create_client(token="token-with-new-line\n")
try:
client.is_authenticated()
except ValueError as e:
assert "Invalid header value" in str(e)
def test_broken_token(self):
client = utils.create_client(token="\x1b")
try:
client.is_authenticated()
except exceptions.InvalidRequest as e:
assert "invalid header value" in str(e)
def test_client_authenticated(self):
assert self.client.is_authenticated()
def test_client_logout(self):
self.client.logout()
assert not self.client.is_authenticated()
def test_client_logout_and_revoke(self):
# create a new token
result = self.client.auth.token.create(ttl="1h", renewable=True)
# set the token
self.client.token = result["auth"]["client_token"]
# logout and revoke the token
self.client.logout(revoke_token=True)
# set the original token back
self.client.token = result["auth"]["client_token"]
# confirm that it no longer is able to authenticate
assert not self.client.is_authenticated()
def test_revoke_self_token(self):
if "userpass/" in self.client.sys.list_auth_methods()["data"]:
self.client.sys.disable_auth_method("userpass")
self.client.sys.enable_auth_method("userpass")
self.client.write(
"auth/userpass/users/testuser", password="testpass", policies="not_root"
)
self.client.auth_userpass("testuser", "testpass")
self.client.revoke_self_token()
assert not self.client.is_authenticated()
def test_gh51(self):
key = "secret/http://test.com"
self.client.write(key, foo="bar")
result = self.client.read(key)
assert result["data"]["foo"] == "bar"
def test_token_accessor(self):
# Create token, check accessor is provided
result = self.client.create_token(lease="1h")
token_accessor = result["auth"].get("accessor", None)
assert token_accessor
# Look up token by accessor, make sure token is excluded from results
lookup = self.client.lookup_token(token_accessor, accessor=True)
assert lookup["data"]["accessor"] == token_accessor
assert not lookup["data"]["id"]
# Revoke token using the accessor
self.client.revoke_token(token_accessor, accessor=True)
# Look up by accessor should fail
with self.assertRaises(exceptions.InvalidRequest):
lookup = self.client.lookup_token(token_accessor, accessor=True)
# As should regular lookup
with self.assertRaises(exceptions.Forbidden):
lookup = self.client.lookup_token(result["auth"]["client_token"])
def test_create_token_explicit_max_ttl(self):
token = self.client.create_token(ttl="30m", explicit_max_ttl="5m")
assert token["auth"]["client_token"]
assert token["auth"]["lease_duration"] == 300
# Validate token
lookup = self.client.lookup_token(token["auth"]["client_token"])
assert token["auth"]["client_token"] == lookup["data"]["id"]
def test_create_token_max_ttl(self):
token = self.client.create_token(ttl="5m")
assert token["auth"]["client_token"]
assert token["auth"]["lease_duration"] == 300
# Validate token
lookup = self.client.lookup_token(token["auth"]["client_token"])
assert token["auth"]["client_token"] == lookup["data"]["id"]
def test_create_token_periodic(self):
token = self.client.create_token(period="30m")
assert token["auth"]["client_token"]
assert token["auth"]["lease_duration"] == 1800
# Validate token
lookup = self.client.lookup_token(token["auth"]["client_token"])
assert token["auth"]["client_token"] == lookup["data"]["id"]
assert lookup["data"]["period"] == 1800
def test_token_roles(self):
# No roles, list_token_roles == None
before = self.client.list_token_roles()
assert not before
# Create token role
assert self.client.create_token_role("testrole").status_code == 204
# List token roles
during = self.client.list_token_roles()["data"]["keys"]
assert len(during) == 1
assert during[0] == "testrole"
# Delete token role
self.client.delete_token_role("testrole")
# No roles, list_token_roles == None
after = self.client.list_token_roles()
assert not after
def test_create_token_w_role(self):
# Create policy
self.prep_policy("testpolicy")
# Create token role w/ policy
assert (
self.client.create_token_role(
"testrole", allowed_policies="testpolicy"
).status_code
== 204
)
# Create token against role
token = self.client.create_token(lease="1h", role="testrole")
assert token["auth"]["client_token"]
assert token["auth"]["policies"] == ["default", "testpolicy"]
# Cleanup
self.client.delete_token_role("testrole")
self.client.sys.delete_policy("testpolicy")
def test_auth_gcp_alternate_mount_point_with_no_client_token_exception(self):
test_mount_point = "gcp-custom-path"
# Turn on the gcp backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("gcp", path=test_mount_point)
# Drop the client's token to replicate a typical end user's use of any auth method.
# I.e., its reasonable to expect the method is being called to _retrieve_ a token in the first place.
self.client.token = None
# Load a mock JWT stand in for a real document from GCP.
with open(utils.get_config_file_path("example.jwt")) as fp:
jwt = fp.read()
# When attempting to auth (POST) to an auth backend mounted at a different path than the default, we expect a
# generic 'missing client token' response from Vault.
with self.assertRaises(exceptions.InvalidRequest) as assertRaisesContext:
self.client.auth.gcp.login("example-role", jwt)
expected_exception_message = "missing client token"
actual_exception_message = str(assertRaisesContext.exception)
self.assertIn(expected_exception_message, actual_exception_message)
# Reset test state.
self.client.token = self.manager.root_token
self.client.sys.disable_auth_method(path=test_mount_point)
@skipIf(
utils.if_vault_version("0.10.0"),
"KV version 2 secret engine not available before Vault version 0.10.0",
)
def test_kv2_secret_backend(self):
if "test/" in self.client.sys.list_mounted_secrets_engines()["data"]:
self.client.sys.disable_secrets_engine("test")
self.client.sys.enable_secrets_engine(
"kv", path="test", options={"version": "2"}
)
secret_backends = self.client.sys.list_mounted_secrets_engines()["data"]
assert "test/" in secret_backends
self.assertDictEqual(secret_backends["test/"]["options"], {"version": "2"})
self.client.sys.disable_secrets_engine("test")
def test_create_kubernetes_configuration(self):
expected_status_code = 204
test_mount_point = "k8s"
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
response = self.client.create_kubernetes_configuration(
kubernetes_host="127.0.0.1:80",
pem_keys=[certificate],
mount_point=test_mount_point,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code,
)
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_get_kubernetes_configuration(self):
test_host = "127.0.0.1:80"
test_mount_point = "k8s"
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
self.client.create_kubernetes_configuration(
kubernetes_host=test_host,
pem_keys=[certificate],
mount_point=test_mount_point,
)
# Test that we can retrieve the configuration
response = self.client.get_kubernetes_configuration(
mount_point=test_mount_point
)
self.assertIn(
member="data",
container=response,
)
self.assertEqual(
first=test_host, second=response["data"].get("kubernetes_host")
)
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_create_kubernetes_role(self):
test_role_name = "test_role"
test_mount_point = "k8s"
expected_status_code = 204
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
self.client.create_kubernetes_configuration(
kubernetes_host="127.0.0.1:80",
pem_keys=[certificate],
mount_point=test_mount_point,
)
# Test that we can create a role
response = self.client.create_kubernetes_role(
name=test_role_name,
bound_service_account_names="*",
bound_service_account_namespaces="vault_test",
mount_point=test_mount_point,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code,
)
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_get_kubernetes_role(self):
test_role_name = "test_role"
test_mount_point = "k8s"
test_bound_service_account_namespaces = ["vault-test"]
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
self.client.create_kubernetes_configuration(
kubernetes_host="127.0.0.1:80",
pem_keys=[certificate],
mount_point=test_mount_point,
)
# Test that we can create a role
self.client.create_kubernetes_role(
name=test_role_name,
bound_service_account_names="*",
bound_service_account_namespaces=test_bound_service_account_namespaces,
mount_point=test_mount_point,
)
response = self.client.get_kubernetes_role(
name=test_role_name,
mount_point=test_mount_point,
)
self.assertIn(
member="data",
container=response,
)
self.assertEqual(
first=test_bound_service_account_namespaces,
second=response["data"].get("bound_service_account_namespaces"),
)
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_list_kubernetes_roles(self):
test_role_name = "test_role"
test_mount_point = "k8s"
test_bound_service_account_namespaces = ["vault-test"]
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
self.client.create_kubernetes_configuration(
kubernetes_host="127.0.0.1:80",
pem_keys=[certificate],
mount_point=test_mount_point,
)
# Test that we can create a role
self.client.create_kubernetes_role(
name=test_role_name,
bound_service_account_names="*",
bound_service_account_namespaces=test_bound_service_account_namespaces,
mount_point=test_mount_point,
)
response = self.client.list_kubernetes_roles(
mount_point=test_mount_point,
)
self.assertIn(
member="data",
container=response,
)
self.assertEqual(first=[test_role_name], second=response["data"].get("keys"))
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_delete_kubernetes_role(self):
test_role_name = "test_role"
test_mount_point = "k8s"
expected_status_code = 204
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
self.client.create_kubernetes_configuration(
kubernetes_host="127.0.0.1:80",
pem_keys=[certificate],
mount_point=test_mount_point,
)
self.client.create_kubernetes_role(
name=test_role_name,
bound_service_account_names="*",
bound_service_account_namespaces="vault_test",
mount_point=test_mount_point,
)
# Test that we can delete a role
response = self.client.delete_kubernetes_role(
role=test_role_name,
mount_point=test_mount_point,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code,
)
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_auth_kubernetes(self):
test_role_name = "test_role"
test_host = "127.0.0.1:80"
test_mount_point = "k8s"
# Turn on the kubernetes backend with a custom mount_point path specified.
if (
"{0}/".format(test_mount_point)
in self.client.sys.list_auth_methods()["data"]
):
self.client.sys.disable_auth_method(test_mount_point)
self.client.sys.enable_auth_method("kubernetes", path=test_mount_point)
with open(utils.get_config_file_path("client-cert.pem")) as fp:
certificate = fp.read()
self.client.create_kubernetes_configuration(
kubernetes_host=test_host,
pem_keys=[certificate],
mount_point=test_mount_point,
)
self.client.create_kubernetes_role(
name=test_role_name,
bound_service_account_names="*",
bound_service_account_namespaces="vault_test",
mount_point=test_mount_point,
)
# Test that we can authenticate
with open(utils.get_config_file_path("example.jwt")) as fp:
test_jwt = fp.read()
with self.assertRaises(
exceptions.InternalServerError
) as assertRaisesContext:
# we don't actually have a valid JWT to provide, so this method will throw an exception
self.client.auth_kubernetes(
role=test_role_name,
jwt=test_jwt,
mount_point=test_mount_point,
)
expected_exception_message = 'claim "iss" is invalid'
actual_exception_message = str(assertRaisesContext.exception)
self.assertIn(expected_exception_message, actual_exception_message)
# Reset integration test state
self.client.sys.disable_auth_method(path=test_mount_point)
def test_seal_status(self):
seal_status_property = self.client.seal_status
logging.debug("seal_status_property: %s" % seal_status_property)
self.assertIn(
member="sealed",
container=seal_status_property,
)
| ianunruh/hvac | tests/integration_tests/v1/test_integration.py | Python | apache-2.0 | 29,744 |
# Name: params.py
# Purpose: Classes for parameter introduction
# Author: Roman Rolinsky <rolinsky@mema.ucl.ac.be>
# Created: 22.08.2001
# RCS-ID: $Id: params.py 71829 2012-06-21 19:15:52Z ROL $
'''
Visual C{Param*} classes for populating C{AttributePanel} with attribute editing
blocks.
'''
import string
import os
import wx.combo
from globals import *
WARenameDict = {'fg': 'foreground', 'bg': 'background'}
def InitParams(panel):
'''Set pixel common size based on parent window.'''
global Presenter
from presenter import Presenter
global Listener
from listener import Listener
dc = wx.ClientDC(panel)
global textH, textB
textH = -1
if wx.Platform == '__WXMAC__':
textB = 3 # bigger text border needed for mac highlighting
else:
textB = 2
dc.Destroy()
# make a custom bitmap showing "..."
bw, bh = 14, 16
bmp = wx.EmptyBitmap(bw,bh)
dc = wx.MemoryDC(bmp)
# clear to a specific background colour
bgcolor = wx.Colour(255,254,255)
dc.SetBackground(wx.Brush(bgcolor))
dc.Clear()
# draw the label onto the bitmap
label = "..."
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetWeight(wx.FONTWEIGHT_BOLD)
dc.SetFont(font)
tw,th = dc.GetTextExtent(label)
dc.DrawText(label, (bw-tw)/2, (bh-th)/2)
del dc
# now apply a mask using the bgcolor
bmp.SetMaskColour(bgcolor)
global bmpEdit
bmpEdit = bmp
# Set known encodings
for i in range(wx.FontMapper.GetSupportedEncodingsCount()):
ParamEncoding.values.append(wx.FontMapper.GetEncodingName(
wx.FontMapper.GetEncoding(i)))
ParamEncoding.values.sort()
# Class that can properly disable children
class PPanel(wx.Panel):
'''Abstract base class creating an empty C{wx.Panel}.'''
isCheck = False
def __init__(self, parent, name):
wx.Panel.__init__(self, parent, -1, name=name)
self.name = name
def Enable(self, value):
self.enabled = value
# Something strange is going on with enable so we make sure...
for w in self.GetChildren():
w.Enable(value)
#wx.Panel.Enable(self, value)
# Common method to set modified state
def OnChange(self, evt):
Presenter.setApplied(False)
evt.Skip()
def OnKillFocus(self, evt):
# Refresh test window if auto refresh policy on focus
if Listener.testWin.IsShown() and g.conf.autoRefresh and \
g.conf.autoRefreshPolicy == AUTO_REFRESH_POLICY_FOCUS:
wx.CallAfter(Presenter.refreshTestWin)
evt.Skip()
class ParamBinaryOr(PPanel):
'''Editing binary flag attributes defined by a string separated by '|'.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
self.freeze = False
sizer = wx.BoxSizer()
popup = CheckListBoxComboPopup(self.values)
self.combo = wx.combo.ComboCtrl(self, size=(220,-1))
self.combo.SetPopupControl(popup)
if wx.Platform == '__WXMAC__':
sizer.Add(self.combo, 1, wx.ALL, 0)
else:
sizer.Add(self.combo, 1, wx.ALL, 2)
self.SetSizerAndFit(sizer)
self.combo.Bind(wx.EVT_TEXT, self.OnChange)
self.combo.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.combo.GetValue()
def SetValue(self, value):
self.freeze = True
self.combo.SetValue(value)
self.freeze = False
def SetValues(self):
self.combo.InsertItems(self.values, 0)
def OnChange(self, evt):
# ComboCtrl still generates events in SetValue
if self.freeze: return
Presenter.setApplied(False)
evt.Skip()
class ParamFlag(ParamBinaryOr):
'''Sizer flag editing.'''
values = ['wxTOP', 'wxBOTTOM', 'wxLEFT', 'wxRIGHT', 'wxALL',
'wxEXPAND', 'wxGROW', 'wxSHAPED', 'wxSTRETCH_NOT',
'wxALIGN_CENTRE', 'wxALIGN_LEFT', 'wxALIGN_RIGHT',
'wxALIGN_TOP', 'wxALIGN_BOTTOM',
'wxALIGN_CENTRE_VERTICAL', 'wxALIGN_CENTRE_HORIZONTAL',
'wxADJUST_MINSIZE', 'wxFIXED_MINSIZE',
'wxRESERVE_SPACE_EVEN_IF_HIDDEN',
]
equal = {'wxALIGN_CENTER': 'wxALIGN_CENTRE',
'wxALIGN_CENTER_VERTICAL': 'wxALIGN_CENTRE_VERTICAL',
'wxALIGN_CENTER_HORIZONTAL': 'wxALIGN_CENTRE_HORIZONTAL',
'wxUP': 'wxTOP', 'wxDOWN': 'wxBOTTOM', 'wxNORTH': 'wxTOP',
'wxSOUTH': 'wxBOTTOM', 'wxWEST': 'wxLEFT', 'wxEAST': 'wxRIGHT'}
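# Illustrative note (added comment, not part of the original source): the
# 'equal' map above lets synonymous flags typed by hand, e.g.
# 'wxALIGN_CENTER|wxALL', be checked as their canonical values
# ('wxALIGN_CENTRE' and 'wxALL') when the flag popup opens -- see
# CheckListBoxComboPopup.OnPopup further below.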
class ParamColour(PPanel):
'''Color attribute editing.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=(80,textH))
sizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, textB)
self.button = wx.Panel(self, size=(20, 20))
sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 3)
self.SetSizer(sizer)
self.textModified = False
self.button.Bind(wx.EVT_PAINT, self.OnPaintButton)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.button.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value) # update text ctrl
self.UpdateColour(value)
def UpdateColour(self, value):
try:
colour = wx.Colour(int(value[1:3], 16), int(value[3:5], 16), int(value[5:7], 16))
self.button.SetBackgroundColour(colour)
except: # ignore errors
self.button.SetBackgroundColour(self.GetBackgroundColour())
self.button.Refresh()
def OnChange(self, evt):
Presenter.setApplied(False)
self.UpdateColour(evt.GetString())
evt.Skip()
def OnPaintButton(self, evt):
dc = wx.PaintDC(self.button)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
if self.IsEnabled(): dc.SetPen(wx.BLACK_PEN)
else: dc.SetPen(wx.GREY_PEN)
size = self.button.GetSize()
dc.DrawRectangle(0, 0, size.width, size.height)
def OnLeftDown(self, evt):
data = wx.ColourData()
data.SetColour(self.GetValue())
dlg = wx.ColourDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue('#%02X%02X%02X' % dlg.GetColourData().GetColour().Get())
Presenter.setApplied(False)
dlg.Destroy()
################################################################################
# Mapping from wx constants to XML strings
fontFamiliesWx2Xml = {wx.DEFAULT: 'default', wx.DECORATIVE: 'decorative',
wx.ROMAN: 'roman', wx.SCRIPT: 'script', wx.SWISS: 'swiss',
wx.MODERN: 'modern'}
fontStylesWx2Xml = {wx.NORMAL: 'normal', wx.SLANT: 'slant', wx.ITALIC: 'italic'}
fontWeightsWx2Xml = {wx.NORMAL: 'normal', wx.LIGHT: 'light', wx.BOLD: 'bold'}
def ReverseMap(m):
rm = {}
for k,v in m.items(): rm[v] = k
return rm
fontFamiliesXml2wx = ReverseMap(fontFamiliesWx2Xml)
fontStylesXml2wx = ReverseMap(fontStylesWx2Xml)
fontWeightsXml2wx = ReverseMap(fontWeightsWx2Xml)
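# Illustrative example (added comment, assuming only the mappings above):
# ReverseMap simply inverts a dict, so fontFamiliesXml2wx['roman'] == wx.ROMAN
# and fontWeightsXml2wx['bold'] == wx.BOLD.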
class ParamFont(PPanel):
'''Font attribute editing.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.button = wx.FontPickerCtrl(
self, style=wx.FNTP_FONTDESC_AS_LABEL | wx.FNTP_USE_TEXTCTRL
)
self.text = self.button.GetTextCtrl()
if wx.Platform == '__WXMAC__':
sizer.Add(self.button, 0, wx.LEFT, -2)
else:
sizer.Add(self.button, 0, wx.LEFT, textB)
self.SetSizer(sizer)
self.Bind(wx.EVT_FONTPICKER_CHANGED, self.OnPickFont)
self.text.Bind(wx.EVT_TEXT, self.OnText)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnTextKillFocus)
def OnText(self, evt):
Presenter.setApplied(False)
if not evt.GetString():
self.text.ChangeValue('')
self.value = {}
def OnTextKillFocus(self, evt):
if self.text.GetValue():
evt.Skip()
def GetValue(self):
return self.value
def dict2font(self, d):
error = False
if 'size' in d:
try: size = int(d['size'])
except ValueError: error = True; wx.LogError('Invalid size specification')
else:
size = g.sysFont().GetPointSize()
if 'family' in d:
try: family = fontFamiliesXml2wx[d['family']]
except KeyError: error = True; wx.LogError('Invalid family specification')
else:
family = wx.DEFAULT
if 'style' in d:
try: style = fontStylesXml2wx[d['style']]
except KeyError: error = True; wx.LogError('Invalid style specification')
else:
style = wx.NORMAL
if 'weight' in d:
try: weight = fontWeightsXml2wx[d['weight']]
except KeyError: error = True; wx.LogError('Invalid weight specification')
else:
weight = wx.NORMAL
try: underlined = bool(int(d.get('underlined', '0')))
except ValueError: error = True; wx.LogError('Invalid underlined flag specification')
face = d.get('face','')
enc = wx.FONTENCODING_DEFAULT
mapper = wx.FontMapper()
if 'encoding' in d and d['encoding'] != 'default':
enc = mapper.CharsetToEncoding(d['encoding'])
if error: wx.LogError('Invalid font specification')
if enc == wx.FONTENCODING_DEFAULT: enc = wx.FONTENCODING_SYSTEM
font = wx.Font(size, family, style, weight, underlined, face, enc)
return font
def SetValue(self, value):
if not value:
self.text.ChangeValue('')
else:
self.button.SetSelectedFont(self.dict2font(value))
self.value = value
def OnPickFont(self, evt):
font = evt.GetFont()
if font.GetEncoding() == wx.FONTENCODING_SYSTEM:
encName = ''
else:
encName = wx.FontMapper.GetEncodingName(font.GetEncoding()).encode()
value = {'size': str(font.GetPointSize()),
'family': fontFamiliesWx2Xml.get(font.GetFamily(), "default"),
'style': fontStylesWx2Xml.get(font.GetStyle(), "normal"),
'weight': fontWeightsWx2Xml.get(font.GetWeight(), "normal"),
'underlined': str(int(font.GetUnderlined())),
'face': font.GetFaceName().encode(),
'encoding': encName}
self.SetValue(value)
Presenter.setApplied(False)
################################################################################
# This is a replacement for SpinCtrl to make ParamUnit look similar.
# Unfortunately there is no SpinCtrl::GetStringValue...
class ParamInt(PPanel):
'''TextCtrl with SpinButton for integer parameters.'''
default = 0
range = (-2147483648, 2147483647)
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.spin = wx.SpinButton(self, style = wx.SP_VERTICAL, size=(-1,10))
textW = 60 - self.spin.GetSize()[0]
self.text = wx.TextCtrl(self, size=(textW,textH))
self.spin.SetRange(*self.range)
if wx.Platform == '__WXMAC__':
sizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.ALL, textB)
else:
sizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | \
wx.LEFT | wx.TOP | wx.BOTTOM, textB)
sizer.Add(self.spin, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
self.SetSizer(sizer)
self.spin.Bind(wx.EVT_SPIN_UP, self.OnSpinUp)
self.spin.Bind(wx.EVT_SPIN_DOWN, self.OnSpinDown)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
self.SyncSpin(value)
def SyncSpin(self, value):
try:
intValue = int(value)
self.spin.SetValue(intValue)
except:
self.spin.SetValue(self.default)
def OnChange(self, evt):
self.SyncSpin(evt.GetString())
Presenter.setApplied(False)
evt.Skip()
def SyncText(self, spinValue):
if self.range[0] <= spinValue <= self.range[1]:
self.text.ChangeValue(str(spinValue))
Presenter.setApplied(False)
def OnSpinUp(self, evt):
self.SyncText(evt.GetPosition())
evt.Skip()
def OnSpinDown(self, evt):
self.SyncText(evt.GetPosition())
evt.Skip()
def MetaParamInt(**kargs):
'''Create ParamInt class with default value.'''
return type('ParamInt', (ParamInt,), kargs)
ParamIntNN = MetaParamInt(default=0, range=(0, 2147483647)) # non-negative
ParamIntP = MetaParamInt(default=1, range=(1, 2147483647)) # positive
# Same as ParamInt but allows dialog units (XXXd)
class ParamUnit(ParamInt):
'''Similar to L{ParamInt}; 'd' can be appended to the value to specify
dialog units mode.'''
def _splitValue(self, value):
units = ''
if value[-1:].upper() == 'D':
units = value[-1]
value = value[:-1]
return value,units
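# Illustrative behaviour (added comment, not in the original source):
#   self._splitValue('5d') -> ('5', 'd')   # dialog units
#   self._splitValue('5')  -> ('5', '')    # plain pixels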
def SyncSpin(self, value):
try:
value,units = self._splitValue(value)
intValue = int(value)
self.spin.SetValue(intValue)
except:
self.spin.SetValue(self.default)
def SyncText(self, spinValue):
if self.range[0] <= spinValue <= self.range[1]:
value,units = self._splitValue(self.text.GetValue())
self.text.ChangeValue(str(spinValue)+units)
Presenter.setApplied(False)
class ParamMultilineText(PPanel):
'''Multiline text editing.'''
def __init__(self, parent, name, textWidth=-1):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=wx.Size(200,textH))
sizer.Add(self.text, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, textB)
self.button = wx.BitmapButton(self, bitmap=bmpEdit, size=(-1,textH))
sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(sizer)
self.button.Bind(wx.EVT_BUTTON, self.OnButtonEdit)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
def OnButtonEdit(self, evt):
dlg = g.res.LoadDialog(self, 'DIALOG_TEXT')
textCtrl = xrc.XRCCTRL(dlg, 'TEXT')
textCtrl.SetValue(self.text.GetValue())
if dlg.ShowModal() == wx.ID_OK:
self.text.ChangeValue(textCtrl.GetValue())
Presenter.setApplied(False)
dlg.Destroy()
class ParamText(PPanel):
'''Text attribute.'''
textWidth = -1
proportion = 0
def __init__(self, parent, name, **kargs):
PPanel.__init__(self, parent, name)
style = kargs.pop('style', 0)
textWidth = kargs.pop('textWidth', self.textWidth)
option = kargs.pop('proportion', self.proportion)
if textWidth == -1: option = 1
# We use sizer even here to have the same size of text control
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=wx.Size(textWidth,textH), style=style)
sizer.Add(self.text, option, wx.ALIGN_CENTER_VERTICAL | wx.ALL, textB)
self.SetSizer(sizer)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
def MetaParamText(textWidth, proportion=0):
'''Return a L{ParamText} class with specified width and proportion.'''
return type('ParamText__length', (ParamText,),
{'textWidth': textWidth, 'proportion': proportion})
ParamLongText = MetaParamText(200, 1)
ParamAccel = MetaParamText(100)
ParamHelp = MetaParamText(200, 1)
ParamPosSize = MetaParamText(80)
class ParamComment(ParamText):
'''Comment node editing.'''
def __init__(self, parent, name):
ParamText.__init__(self, parent, name, textWidth=330,
style=wx.TE_PROCESS_ENTER)
class ContentDialog(wx.Dialog):
'''Dialog for editing content attributes.'''
def __init__(self, parent, value):
# Load from resource
pre = wx.PreDialog()
g.res.LoadOnDialog(pre, parent, 'DIALOG_CONTENT')
self.PostCreate(pre)
self.list = xrc.XRCCTRL(self, 'LIST')
# Set list items
for v in value:
self.list.Append(v)
self.SetAutoLayout(True)
self.GetSizer().Fit(self)
# Callbacks
self.ID_BUTTON_APPEND = xrc.XRCID('BUTTON_APPEND')
self.ID_BUTTON_EDIT = xrc.XRCID('BUTTON_EDIT')
self.ID_BUTTON_REMOVE = xrc.XRCID('BUTTON_REMOVE')
self.ID_BUTTON_UP = xrc.XRCID('BUTTON_UP')
self.ID_BUTTON_DOWN = xrc.XRCID('BUTTON_DOWN')
wx.EVT_BUTTON(self, self.ID_BUTTON_UP, self.OnButtonUp)
wx.EVT_BUTTON(self, self.ID_BUTTON_DOWN, self.OnButtonDown)
wx.EVT_BUTTON(self, self.ID_BUTTON_APPEND, self.OnButtonAppend)
wx.EVT_BUTTON(self, self.ID_BUTTON_EDIT, self.OnButtonEdit)
wx.EVT_BUTTON(self, self.ID_BUTTON_REMOVE, self.OnButtonRemove)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_UP, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_DOWN, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_REMOVE, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_EDIT, self.OnUpdateUI)
def OnButtonUp(self, evt):
i = self.list.GetSelection()
str = self.list.GetString(i)
self.list.Delete(i)
self.list.InsertItems([str], i-1)
self.list.SetSelection(i-1)
def OnButtonDown(self, evt):
i = self.list.GetSelection()
str = self.list.GetString(i)
self.list.Delete(i)
self.list.InsertItems([str], i+1)
self.list.SetSelection(i+1)
def OnButtonAppend(self, evt):
str = wx.GetTextFromUser('Enter new item:', 'Append', '', self)
self.list.Append(str)
def OnButtonEdit(self, evt):
i = self.list.GetSelection()
str = wx.GetTextFromUser('Edit item:', 'Change', self.list.GetString(i), self)
self.list.SetString(i, str)
def OnButtonRemove(self, evt):
self.list.Delete(self.list.GetSelection())
def OnUpdateUI(self, evt):
if evt.GetId() == self.ID_BUTTON_REMOVE or evt.GetId() == self.ID_BUTTON_EDIT:
evt.Enable(self.list.GetSelection() != -1)
elif evt.GetId() == self.ID_BUTTON_UP:
evt.Enable(self.list.GetSelection() > 0)
elif evt.GetId() == self.ID_BUTTON_DOWN:
evt.Enable(self.list.GetSelection() != -1 and \
self.list.GetSelection() < self.list.GetCount() - 1)
class ContentCheckListDialog(ContentDialog):
'''Dialog for editing content checklist attributes.'''
def __init__(self, parent, value):
pre = wx.PreDialog()
g.res.LoadOnDialog(pre, parent, 'DIALOG_CONTENT_CHECKLIST')
self.PostCreate(pre)
self.list = xrc.XRCCTRL(self, 'CHECK_LIST')
# Set list items
i = 0
for ch,v in value:
self.list.Append(v)
self.list.Check(i, ch)
i += 1
self.SetAutoLayout(True)
self.GetSizer().Fit(self)
# Callbacks
self.ID_BUTTON_APPEND = xrc.XRCID('BUTTON_APPEND')
self.ID_BUTTON_EDIT = xrc.XRCID('BUTTON_EDIT')
self.ID_BUTTON_REMOVE = xrc.XRCID('BUTTON_REMOVE')
self.ID_BUTTON_UP = xrc.XRCID('BUTTON_UP')
self.ID_BUTTON_DOWN = xrc.XRCID('BUTTON_DOWN')
wx.EVT_BUTTON(self, self.ID_BUTTON_UP, self.OnButtonUp)
wx.EVT_BUTTON(self, self.ID_BUTTON_DOWN, self.OnButtonDown)
wx.EVT_BUTTON(self, self.ID_BUTTON_APPEND, self.OnButtonAppend)
wx.EVT_BUTTON(self, self.ID_BUTTON_EDIT, self.OnButtonEdit)
wx.EVT_BUTTON(self, self.ID_BUTTON_REMOVE, self.OnButtonRemove)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_UP, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_DOWN, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_REMOVE, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_EDIT, self.OnUpdateUI)
def OnButtonUp(self, evt):
i = self.list.GetSelection()
str, ch = self.list.GetString(i), self.list.IsChecked(i)
self.list.Delete(i)
self.list.InsertItems([str], i-1)
self.list.Check(i-1, ch)
self.list.SetSelection(i-1)
def OnButtonDown(self, evt):
i = self.list.GetSelection()
str, ch = self.list.GetString(i), self.list.IsChecked(i)
self.list.Delete(i)
self.list.InsertItems([str], i+1)
self.list.Check(i+1, ch)
self.list.SetSelection(i+1)
class ContentHelpListDialog(wx.Dialog):
'''Dialog for editing content attributes with help text.'''
def __init__(self, parent, value):
pre = wx.PreDialog()
g.res.LoadOnDialog(pre, parent, 'DIALOG_CONTENT_HELPLIST')
self.PostCreate(pre)
self.list = xrc.XRCCTRL(self, 'LIST')
self.list.InsertColumn(0, 'label')
self.list.InsertColumn(1, 'tooltip')
self.list.InsertColumn(2, 'help text')
# Set list items
i = 0
for v,t,h in value:
self.list.InsertStringItem(i, v)
self.list.SetStringItem(i, 1, t)
self.list.SetStringItem(i, 2, h)
i += 1
self.SetAutoLayout(True)
self.GetSizer().Fit(self)
# Callbacks
self.ID_BUTTON_APPEND = xrc.XRCID('BUTTON_APPEND')
self.ID_BUTTON_EDIT = xrc.XRCID('BUTTON_EDIT')
self.ID_BUTTON_REMOVE = xrc.XRCID('BUTTON_REMOVE')
self.ID_BUTTON_UP = xrc.XRCID('BUTTON_UP')
self.ID_BUTTON_DOWN = xrc.XRCID('BUTTON_DOWN')
wx.EVT_BUTTON(self, self.ID_BUTTON_UP, self.OnButtonUp)
wx.EVT_BUTTON(self, self.ID_BUTTON_DOWN, self.OnButtonDown)
wx.EVT_BUTTON(self, self.ID_BUTTON_APPEND, self.OnButtonAppend)
wx.EVT_BUTTON(self, self.ID_BUTTON_EDIT, self.OnButtonEdit)
wx.EVT_BUTTON(self, self.ID_BUTTON_REMOVE, self.OnButtonRemove)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_UP, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_DOWN, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_REMOVE, self.OnUpdateUI)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_EDIT, self.OnUpdateUI)
def OnButtonUp(self, evt):
i = self.list.GetNextItem(-1, state = wx.LIST_STATE_SELECTED)
v, t, h = self.list.GetItem(i, 0), self.list.GetItem(i, 1), self.list.GetItem(i, 2)
self.list.DeleteItem(i)
i = self.list.InsertStringItem(i-1, v.GetText())
self.list.SetStringItem(i, 1, t.GetText())
self.list.SetStringItem(i, 2, h.GetText())
self.list.SetItemState(i, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
def OnButtonDown(self, evt):
i = self.list.GetNextItem(-1, state = wx.LIST_STATE_SELECTED)
v, t, h = self.list.GetItem(i, 0), self.list.GetItem(i, 1), self.list.GetItem(i, 2)
self.list.DeleteItem(i)
i = self.list.InsertStringItem(i+1, v.GetText())
self.list.SetStringItem(i, 1, t.GetText())
self.list.SetStringItem(i, 2, h.GetText())
self.list.SetItemState(i, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
def OnButtonAppend(self, evt):
dlg = g.res.LoadDialog(self, 'DIALOG_HELPTEXT')
v = xrc.XRCCTRL(dlg, 'TEXT')
t = xrc.XRCCTRL(dlg, 'TOOLTIP')
h = xrc.XRCCTRL(dlg, 'HELPTEXT')
if dlg.ShowModal() == wx.ID_OK:
i = self.list.GetItemCount()
self.list.InsertStringItem(i, v.GetValue())
self.list.SetStringItem(i, 1, t.GetValue())
self.list.SetStringItem(i, 2, h.GetValue())
dlg.Destroy()
def OnButtonEdit(self, evt):
s = self.list.GetNextItem(-1, state = wx.LIST_STATE_SELECTED)
dlg = g.res.LoadDialog(self, 'DIALOG_HELPTEXT')
v = xrc.XRCCTRL(dlg, 'TEXT')
t = xrc.XRCCTRL(dlg, 'TOOLTIP')
h = xrc.XRCCTRL(dlg, 'HELPTEXT')
v.SetValue(self.list.GetItem(s, 0).GetText())
t.SetValue(self.list.GetItem(s, 1).GetText())
h.SetValue(self.list.GetItem(s, 2).GetText())
if dlg.ShowModal() == wx.ID_OK:
self.list.SetStringItem(s, 0, v.GetValue())
self.list.SetStringItem(s, 1, t.GetValue())
self.list.SetStringItem(s, 2, h.GetValue())
dlg.Destroy()
def OnButtonRemove(self, evt):
self.list.DeleteItem(self.list.GetNextItem(-1, state = wx.LIST_STATE_SELECTED))
def OnUpdateUI(self, evt):
s = self.list.GetNextItem(-1, state = wx.LIST_STATE_SELECTED)
if evt.GetId() == self.ID_BUTTON_REMOVE or evt.GetId() == self.ID_BUTTON_EDIT:
evt.Enable(s != -1)
elif evt.GetId() == self.ID_BUTTON_UP:
evt.Enable(s > 0)
elif evt.GetId() == self.ID_BUTTON_DOWN:
evt.Enable(s != -1 and s < self.list.GetItemCount() - 1)
class ParamContent(PPanel):
'''Editing of content attribute.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=wx.Size(200,textH))
sizer.Add(self.text, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, textB)
self.button = wx.BitmapButton(self, bitmap=bmpEdit, size=(-1,textH))
sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(sizer)
self.textModified = False
self.button.Bind(wx.EVT_BUTTON, self.OnButtonEdit)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
def OnChange(self, evt):
Presenter.setApplied(False)
self.textModified = True
evt.Skip()
def GetValue(self):
if self.textModified: # text has newer value
try:
return self.text.GetValue().split('|')
except ValueError:
return []
return self.value
def SetValue(self, value):
if not value: value = []
self.value = value
repr_ = '|'.join(map(str, value))
self.text.ChangeValue(repr_) # update text ctrl
def OnButtonEdit(self, evt):
if self.textModified: # text has newer value
self.value = self.GetValue()
dlg = ContentDialog(self, self.value)
if dlg.ShowModal() == wx.ID_OK:
value = []
for i in range(dlg.list.GetCount()):
value.append(dlg.list.GetString(i))
self.SetValue(value)
Presenter.setApplied(False)
self.textModified = False
dlg.Destroy()
# CheckList content
class ParamContentCheckList(ParamContent):
'''Editing of content check list attribute.'''
def __init__(self, parent, name):
ParamContent.__init__(self, parent, name)
def OnButtonEdit(self, evt):
if self.textModified: # text has newer value
self.value = self.GetValue()
dlg = ContentCheckListDialog(self, self.value)
if dlg.ShowModal() == wx.ID_OK:
value = []
for i in range(dlg.list.GetCount()):
value.append((int(dlg.list.IsChecked(i)), str(dlg.list.GetString(i))))
self.SetValue(value)
Presenter.setApplied(False)
self.textModified = False
dlg.Destroy()
# HelpList content
class ParamContentHelpList(ParamContent):
'''Editing of content attribute with help text.'''
def __init__(self, parent, name):
ParamContent.__init__(self, parent, name)
def OnButtonEdit(self, evt):
if self.textModified: # text has newer value
self.value = self.GetValue()
dlg = ContentHelpListDialog(self, self.value)
if dlg.ShowModal() == wx.ID_OK:
value = []
for i in range(dlg.list.GetItemCount()):
value.append((str(dlg.list.GetItem(i, 0).GetText()),
str(dlg.list.GetItem(i, 1).GetText()),
str(dlg.list.GetItem(i, 2).GetText())))
self.SetValue(value)
Presenter.setApplied(False)
self.textModified = False
dlg.Destroy()
class IntListDialog(wx.Dialog):
'''Dialog for editing integer lists.'''
def __init__(self, parent, value):
pre = wx.PreDialog()
g.res.LoadOnDialog(pre, parent, 'DIALOG_INTLIST')
self.PostCreate(pre)
self.list = xrc.XRCCTRL(self, 'LIST')
# Set list items
value.sort()
for v in value:
self.list.Append(v)
self.SetAutoLayout(True)
self.GetSizer().Fit(self)
# Callbacks
self.spinCtrl = xrc.XRCCTRL(self, 'SPIN')
wx.EVT_BUTTON(self, xrc.XRCID('BUTTON_ADD'), self.OnButtonAdd)
self.ID_BUTTON_REMOVE = xrc.XRCID('BUTTON_REMOVE')
wx.EVT_BUTTON(self, self.ID_BUTTON_REMOVE, self.OnButtonRemove)
wx.EVT_BUTTON(self, xrc.XRCID('BUTTON_CLEAR'), self.OnButtonClear)
wx.EVT_UPDATE_UI(self, self.ID_BUTTON_REMOVE, self.OnUpdateUI)
def OnButtonAdd(self, evt):
# Check that it's unique
try:
v = self.spinCtrl.GetValue()
s = str(v) # to be sure
i = self.list.FindString(s)
if i == -1: # ignore non-unique
# Find place to insert
found = False
for i in range(self.list.GetCount()):
if int(self.list.GetString(i)) > v:
found = True
break
if found: self.list.InsertItems([s], i)
else: self.list.Append(s)
except ValueError:
wx.LogError('List item is not an int!')
def OnButtonRemove(self, evt):
self.list.Delete(self.list.GetSelection())
def OnButtonClear(self, evt):
self.list.Clear()
def OnUpdateUI(self, evt):
if evt.GetId() == self.ID_BUTTON_REMOVE:
evt.Enable(self.list.GetSelection() != -1)
# For growable list
class ParamIntList(ParamContent):
'''Editing integer list attribute.'''
def __init__(self, parent, name):
ParamContent.__init__(self, parent, name)
def OnButtonEdit(self, evt):
if self.textModified: # text has newer value
try:
self.value = self.text.GetValue().split('|')
except ValueError:
self.value = []
dlg = IntListDialog(self, self.value)
if dlg.ShowModal() == wx.ID_OK:
value = []
for i in range(dlg.list.GetCount()):
value.append(dlg.list.GetString(i))
self.SetValue(value)
Presenter.setApplied()
self.textModified = False
dlg.Destroy()
# Boxless radiobox
class RadioBox(PPanel):
def __init__(self, parent, name='radiobox'):
PPanel.__init__(self, parent, name)
topSizer = wx.BoxSizer()
self.choicesInv = {}
for i,v in self.choices.items():
self.choicesInv[v] = i
button = wx.RadioButton(self, -1, i, name=i)
topSizer.Add(button, 0, wx.RIGHT, 5)
wx.EVT_RADIOBUTTON(self, button.GetId(), self.OnRadioChoice)
self.SetSizer(topSizer)
def SetStringSelection(self, value):
for i in self.choices.keys():
self.FindWindowByName(i).SetValue(i == value)
self.value = value
def OnRadioChoice(self, evt):
if evt.GetSelection():
self.value = evt.GetEventObject().GetName()
Presenter.setApplied(False)
def GetStringSelection(self):
return self.value
def GetValue(self):
return self.choices[self.GetStringSelection()]
def SetValue(self, value):
if not value: value = self.default
self.SetStringSelection(self.choicesInv[value])
# Base type for checkable parameters
class CheckBox(PPanel):
isCheck = True
def __init__(self, parent, name='checkbox'):
PPanel.__init__(self, parent, name)
topSizer = wx.BoxSizer()
self.check = wx.CheckBox(self, -1, name, size=(-1,textH))
topSizer.Add(self.check, 0, wx.TOP | wx.BOTTOM, textB)
self.check.Bind(wx.EVT_CHECKBOX, self.OnCheck)
self.SetSizer(topSizer)
def OnCheck(self, evt):
Presenter.setApplied(False)
if Presenter.panelIsDirty():
Presenter.registerUndoEdit()
if Listener.testWin.IsShown() and g.conf.autoRefresh and \
g.conf.autoRefreshPolicy == AUTO_REFRESH_POLICY_FOCUS:
Listener.testWin.isDirty = True
wx.CallAfter(Presenter.refreshTestWin)
evt.Skip()
class ParamBool(CheckBox):
'''Editing on/off attributes.'''
defaultString = '(default is OFF)'
def GetValue(self):
return ('', '1')[self.check.IsChecked()]
def SetValue(self, value):
self.check.SetValue(value == '1')
class ParamInverseBool(CheckBox):
'''Like L{ParamBool} but defined if unchecked.'''
defaultString = '(default is ON)'
def GetValue(self):
return ('0', '')[self.check.IsChecked()]
def SetValue(self, value):
self.check.SetValue(not value or value == '1')
class ParamOrient(RadioBox):
'''Orientation attribute editing for sizers.'''
choices = {'horizontal': 'wxHORIZONTAL', 'vertical': 'wxVERTICAL'}
default = 'wxHORIZONTAL'
class ParamOrientation(RadioBox):
'''Orientation attribute editing for C{wx.SplitterWindow}.'''
choices = {'horizontal': 'horizontal', 'vertical': 'vertical'}
default = 'vertical'
class ParamBitmap(PPanel):
def __init__(self, parent, name):
pre = wx.PrePanel()
g.res.LoadOnPanel(pre, parent, 'PANEL_BITMAP')
self.PostCreate(pre)
self.modified = False
self.radio_std = xrc.XRCCTRL(self, 'RADIO_STD')
self.radio_file = xrc.XRCCTRL(self, 'RADIO_FILE')
self.combo = xrc.XRCCTRL(self, 'COMBO_STD')
self.text = xrc.XRCCTRL(self, 'TEXT_FILE')
self.button = xrc.XRCCTRL(self, 'BUTTON_BROWSE')
self.textModified = False
wx.EVT_RADIOBUTTON(self, xrc.XRCID('RADIO_STD'), self.OnRadioStd)
wx.EVT_RADIOBUTTON(self, xrc.XRCID('RADIO_FILE'), self.OnRadioFile)
wx.EVT_BUTTON(self, xrc.XRCID('BUTTON_BROWSE'), self.OnButtonBrowse)
wx.EVT_COMBOBOX(self, xrc.XRCID('COMBO_STD'), self.OnCombo)
wx.EVT_TEXT(self, xrc.XRCID('COMBO_STD'), self.OnChange)
wx.EVT_TEXT(self, xrc.XRCID('TEXT_FILE'), self.OnChange)
def OnRadioStd(self, evt):
Presenter.setApplied(False)
self.SetValue(['wxART_MISSING_IMAGE',''])
def OnRadioFile(self, evt):
Presenter.setApplied(False)
self.SetValue(['',''])
def updateRadios(self):
if self.value[0]:
self.radio_std.SetValue(True)
self.radio_file.SetValue(False)
self.text.Enable(False)
self.button.Enable(False)
self.combo.Enable(True)
else:
self.radio_std.SetValue(False)
self.radio_file.SetValue(True)
self.text.Enable(True)
self.button.Enable(True)
self.combo.Enable(False)
def OnChange(self, evt):
Presenter.setApplied(False)
self.textModified = True
evt.Skip()
def OnCombo(self, evt):
Presenter.setApplied(False)
self.value[0] = self.combo.GetValue()
def GetValue(self):
return [self.combo.GetValue(), self.text.GetValue()]
def SetValue(self, value):
if not value:
self.value = ['', '']
else:
self.value = value
self.combo.SetValue(self.value[0])
self.text.ChangeValue(self.value[1]) # update text ctrl
self.updateRadios()
def OnButtonBrowse(self, evt):
if self.textModified: # text has newer value
self.value[1] = self.text.GetValue()
dlg = wx.FileDialog(self,
defaultDir = os.path.abspath(os.path.dirname(self.value[1])),
defaultFile = os.path.basename(self.value[1]))
if dlg.ShowModal() == wx.ID_OK:
# Get common part of selected path and current
if Presenter.path:
curpath = os.path.abspath(Presenter.path)
else:
curpath = os.path.join(os.getcwd(), '')
common = os.path.commonprefix([curpath, dlg.GetPath()])
self.SetValue(['', dlg.GetPath()[len(common):]])
Presenter.setApplied(False)
self.textModified = False
dlg.Destroy()
class ParamImage(PPanel):
'''Image selector.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=wx.Size(200,textH))
sizer.Add(self.text, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, textB)
self.button = wx.Button(self, -1, 'Browse...')
sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(sizer)
self.button.Bind(wx.EVT_BUTTON, self.OnButtonBrowse)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
def OnChange(self, evt):
Presenter.setApplied(False)
evt.Skip()
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
def OnButtonBrowse(self, evt):
value = self.text.GetValue()
dlg = wx.FileDialog(self,
defaultDir = os.path.abspath(os.path.dirname(value)),
defaultFile = os.path.basename(value))
if dlg.ShowModal() == wx.ID_OK:
# Get common part of selected path and current
if Presenter.path:
curpath = os.path.abspath(Presenter.path)
else:
curpath = os.path.join(os.getcwd(), '')
common = os.path.commonprefix([curpath, dlg.GetPath()])
self.SetValue(dlg.GetPath()[len(common):])
Presenter.setApplied(False)
self.textModified = False
dlg.Destroy()
class ParamCombo(PPanel):
'''Combo box.'''
values = []
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.combo = wx.ComboBox(self, size=(220,-1))
if wx.Platform == '__WXMAC__':
sizer.Add(self.combo, 0, wx.ALL, 0)
else:
sizer.Add(self.combo, 0, wx.ALL, 2)
self.SetSizerAndFit(sizer)
self.combo.Bind(wx.EVT_TEXT, self.OnChange)
self.SetValues()
def GetValue(self):
return self.combo.GetValue()
def SetValue(self, value):
self.combo.SetValue(value)
def SetValues(self):
for v in self.values:
self.combo.Append(v)
class ParamEncoding(ParamCombo):
'''Editing encoding attribute of the XML root node.'''
pass
paramDict = {
# sizer params
'flag': ParamFlag, 'orient': ParamOrient, 'option': ParamInt,
'cellpos': ParamPosSize, 'cellspan': ParamPosSize,
'border': ParamUnit, 'borders': ParamUnit,
'cols': ParamIntP, 'rows': ParamIntP,
'vgap': ParamUnit, 'hgap': ParamUnit,
# common window params
'pos': ParamPosSize, 'size': ParamPosSize,
'checkable': ParamBool, 'checked': ParamBool, 'radio': ParamBool,
'accel': ParamAccel, 'help': ParamHelp, 'centered': ParamBool,
'label': ParamMultilineText, 'title': ParamLongText, 'value': ParamLongText,
'content': ParamContent, 'selection': ParamIntNN,
'min': ParamInt, 'max': ParamInt,
# window attributes
'fg': ParamColour, 'bg': ParamColour, 'font': ParamFont,
'enabled': ParamInverseBool, 'focused': ParamBool, 'hidden': ParamBool,
'tooltip': ParamLongText,
# other
'bitmap': ParamBitmap, 'icon': ParamBitmap,
'comment': ParamComment,
'wrap': ParamInt,
}
'''Default classes for standard attributes.'''
class StylePanel(wx.Panel):
'''Style panel.'''
equivStyles = []
def __init__(self, parent, styles, genericStyles=[], tag='style', equiv={}):
wx.Panel.__init__(self, parent, -1)
self.SetFont(g.smallerFont())
self.node = None
self.controls = []
self.tag = tag
self.equivStyles = equiv
topSizer = wx.BoxSizer(wx.HORIZONTAL)
if genericStyles:
# Generic styles
sizer = wx.GridSizer(cols=1, vgap=1, hgap=5)
label = wx.StaticText(self, label='Generic')
label.SetFont(g.labelFont())
sizer.Add(label, 0, wx.LEFT, 20)
for s in genericStyles:
if s[:2] == 'wx': label = s[2:]
else: label = s
control = wx.CheckBox(self, label=label)
sizer.Add(control)
self.controls.append((s, control))
topSizer.Add(sizer)
if styles:
# Specific styles
sizer = wx.GridSizer(cols=1, vgap=1, hgap=5)
if genericStyles:
label = wx.StaticText(self, label='Specific')
label.SetFont(g.labelFont())
sizer.Add(label, 0, wx.LEFT, 20)
for s in styles:
if s[:2] == 'wx': label = s[2:]
else: label = s
control = wx.CheckBox(self, label=label)
sizer.Add(control)
self.controls.append((s, control))
topSizer.Add(sizer)
self.Bind(wx.EVT_CHECKBOX, self.OnCheck)
self.SetSizerAndFit(topSizer)
def GetValues(self):
checked = []
for s,check in self.controls:
if check.IsChecked(): checked.append(s)
return [(self.tag, '|'.join(checked))]
def SetValues(self, values):
styles = map(string.strip, values[0][1].split('|'))
for s,check in self.controls:
check.SetValue(s in styles or (self.equivStyles.has_key(s) and self.equivStyles[s] in styles))
def OnCheck(self, evt):
Presenter.setApplied(False)
if Listener.testWin.IsShown() and g.conf.autoRefresh and \
g.conf.autoRefreshPolicy == AUTO_REFRESH_POLICY_FOCUS:
Listener.testWin.isDirty = True
wx.CallAfter(Presenter.refreshTestWin)
evt.Skip()
#############################################################################
class CheckListBoxComboPopup(wx.CheckListBox, wx.combo.ComboPopup):
def __init__(self, values):
self.values = values
self.PostCreate(wx.PreCheckListBox())
wx.combo.ComboPopup.__init__(self)
def Create(self, parent):
wx.CheckListBox.Create(self, parent)
self.InsertItems(self.values, 0)
# Workaround for mac/windows - see ticket #14282
if wx.Platform in ['__WXMAC__', '__WXMSW__']:
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
return True
def GetControl(self):
return self
def OnPopup(self):
combo = self.GetCombo()
value = map(string.strip, combo.GetValue().split('|'))
if value == ['']: value = []
self.ignored = []
for i in value:
try:
self.Check(self.values.index(i))
except ValueError:
# Try to find equal
if self.equal.has_key(i):
self.Check(self.values.index(self.equal[i]))
else:
logger.warning('unknown flag: %s: ignored.', i)
self.ignored.append(i)
wx.combo.ComboPopup.OnPopup(self)
def OnDismiss(self):
combo = self.GetCombo()
value = []
for i in range(self.GetCount()):
if self.IsChecked(i):
value.append(self.values[i])
# Add ignored flags
value.extend(self.ignored)
strValue = '|'.join(value)
if combo.GetValue() != strValue:
combo.SetValue(strValue)
Presenter.setApplied(False)
wx.combo.ComboPopup.OnDismiss(self)
if wx.Platform in ['__WXMAC__', '__WXMSW__']:
def OnMotion(self, evt):
item = self.HitTest(evt.GetPosition())
if item >= 0:
self.Select(item)
self.curitem = item
def OnLeftDown(self, evt):
self.value = self.curitem
self.Check(self.value, not self.IsChecked(self.value))
| garrettcap/Bulletproof-Backup | wx/tools/XRCed/params.py | Python | gpl-2.0 | 45,266 |
#!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for cinder management.
"""
from __future__ import print_function
import os
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo import messaging
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
from oslo_utils import uuidutils
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import context
from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import utils
from cinder import version
CONF = cfg.CONF
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
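# Illustrative usage (added comment, not part of the original module): stacked
# decorators build up func.args front-to-back, so
#   @args('--currenthost', required=True)
#   @args('--newhost', required=True)
#   def update_host(self, currenthost, newhost): ...
# leaves update_host.args == [(('--currenthost',), {'required': True}),
#                             (('--newhost',), {'required': True})].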
def param2id(object_id):
"""Helper function to convert various id types to internal id.
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
"""
if uuidutils.is_uuid_like(object_id):
return object_id
elif '-' in object_id:
# FIXME(ja): mapping occurs in nova?
pass
else:
try:
return int(object_id)
except ValueError:
return object_id
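# Illustrative behaviour (added comment, not part of the original module):
#   param2id('10')            -> 10 (plain integers are converted)
#   param2id(<uuid string>)   -> the same string, unchanged
#   param2id('vol-0000000a')  -> None (falls through the FIXME branch above)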
class ShellCommands(object):
def bpython(self):
"""Runs a bpython shell.
Falls back to Ipython/python shell if unavailable
"""
self.run('bpython')
def ipython(self):
"""Runs an Ipython shell.
Falls back to Python shell if unavailable
"""
self.run('ipython')
def python(self):
"""Runs a plain Python interactive shell."""
self.run('python')
@args('--shell', dest="shell",
metavar='<bpython|ipython|python>',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
shell = 'bpython'
if shell == 'bpython':
try:
import bpython
bpython.embed()
except ImportError:
shell = 'ipython'
if shell == 'ipython':
try:
from IPython import embed
embed()
except ImportError:
try:
# Ipython < 0.11
# Explicitly pass an empty list as arguments, because
# otherwise IPython would use sys.argv from this script.
import IPython
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
# no IPython module
shell = 'python'
if shell == 'python':
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try',
# because we already know 'readline' was imported successfully.
import rlcompleter # noqa
readline.parse_and_bind("tab:complete")
code.interact()
@args('--path', required=True, help='Script path')
def script(self, path):
"""Runs the script from the specified path with flags set properly.
arguments: path
"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
print('%s' % caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'cinder-manage db sync' before running this command."))
exit(1)
class HostCommands(object):
"""List hosts."""
@args('zone', nargs='?', default=None,
help='Availability Zone (default: %(default)s)')
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
args: [zone]
"""
print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for srv in services:
if not [h for h in hosts if h['host'] == srv['host']]:
hosts.append(srv)
for h in hosts:
print(_("%(host)-25s\t%(availability_zone)-15s")
% {'host': h['host'],
'availability_zone': h['availability_zone']})
class DbCommands(object):
"""Class for managing the database."""
def __init__(self):
pass
@args('version', nargs='?', default=None,
help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return db_migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
db_migration.INIT_VERSION))
@args('age_in_days', type=int,
help='Purge deleted rows older than age in days')
def purge(self, age_in_days):
"""Purge deleted rows older than a given age from cinder tables."""
age_in_days = int(age_in_days)
if age_in_days <= 0:
print(_("Must supply a positive, non-zero value for age"))
exit(1)
ctxt = context.get_admin_context()
db.purge_deleted_rows(ctxt, age_in_days)
class VersionCommands(object):
"""Class for exposing the codebase version."""
def __init__(self):
pass
def list(self):
print(version.version_string())
def __call__(self):
self.list()
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state."""
def __init__(self):
self._client = None
def rpc_client(self):
if self._client is None:
if not rpc.initialized():
rpc.init(CONF)
target = messaging.Target(topic=CONF.volume_topic)
self._client = rpc.get_client(target)
return self._client
@args('volume_id',
help='Volume ID to be deleted')
def delete(self, volume_id):
"""Delete a volume, bypassing the check that it
must be available.
"""
ctxt = context.get_admin_context()
volume = db.volume_get(ctxt, param2id(volume_id))
host = volume['host']
if not host:
print(_("Volume not yet assigned to host."))
print(_("Deleting volume from database and skipping rpc."))
db.volume_destroy(ctxt, param2id(volume_id))
return
if volume['status'] == 'in-use':
print(_("Volume is in-use."))
print(_("Detach volume from instance and then try again."))
return
cctxt = self.rpc_client().prepare(server=host)
cctxt.cast(ctxt, "delete_volume", volume_id=volume['id'])
@args('--currenthost', required=True, help='Existing volume host name')
@args('--newhost', required=True, help='New volume host name')
def update_host(self, currenthost, newhost):
"""Modify the host name associated with a volume.
Particularly to recover from cases where one has moved
their Cinder Volume node, or modified their backend_name in a
multi-backend config.
"""
ctxt = context.get_admin_context()
volumes = db.volume_get_all_by_host(ctxt,
currenthost)
for v in volumes:
db.volume_update(ctxt, v['id'],
{'host': newhost})
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
def __init__(self):
pass
@args('param', nargs='?', default=None,
help='Configuration parameter to display (default: %(default)s)')
def list(self, param=None):
"""List parameters configured for cinder.
Lists all parameters configured for cinder unless an optional argument
is specified. If the parameter is specified we only print the
requested parameter. If the parameter is not found an appropriate
error is produced by .get*().
"""
param = param and param.strip()
if param:
print('%s = %s' % (param, CONF.get(param)))
else:
for key, value in CONF.iteritems():
print('%s = %s' % (key, value))
class GetLogCommands(object):
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
for index, line in enumerate(lines):
if line.find(" ERROR ") > 0:
error_found += 1
if print_name == 0:
print(log_file + ":-")
print_name = 1
print(_("Line %(dis)d : %(line)s") %
{'dis': len(lines) - index, 'line': line})
if error_found == 0:
print(_("No errors in logfiles!"))
@args('num_entries', nargs='?', type=int, default=10,
help='Number of entries to list (default: %(default)d)')
def syslog(self, num_entries=10):
"""Get <num_entries> of the cinder syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
if os.path.exists('/var/log/syslog'):
log_file = '/var/log/syslog'
elif os.path.exists('/var/log/messages'):
log_file = '/var/log/messages'
else:
print(_("Unable to find system log file!"))
sys.exit(1)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print(_("Last %s cinder syslog entries:-") % (entries))
for line in lines:
if line.find("cinder") > 0:
count += 1
print(_("%s") % (line))
if count == entries:
break
if count == 0:
print(_("No cinder entries in syslog!"))
class BackupCommands(object):
"""Methods for managing backups."""
def list(self):
"""List all backups (including ones in progress) and the host
on which the backup operation is running.
"""
ctxt = context.get_admin_context()
backups = db.backup_get_all(ctxt)
hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
print(hdr % (_('ID'),
_('User ID'),
_('Project ID'),
_('Host'),
_('Name'),
_('Container'),
_('Status'),
_('Size'),
_('Object Count')))
res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d"
for backup in backups:
object_count = 0
if backup['object_count'] is not None:
object_count = backup['object_count']
print(res % (backup['id'],
backup['user_id'],
backup['project_id'],
backup['host'],
backup['display_name'],
backup['container'],
backup['status'],
backup['size'],
object_count))
class ServiceCommands(object):
"""Methods for managing services."""
def list(self):
"""Show a list of all cinder services."""
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print(print_format % (_('Binary'),
_('Host'),
_('Zone'),
_('Status'),
_('State'),
_('Updated At')))
for svc in services:
alive = utils.service_is_up(svc)
art = ":-)" if alive else "XXX"
status = 'enabled'
if svc['disabled']:
status = 'disabled'
print(print_format % (svc['binary'], svc['host'].partition('.')[0],
svc['availability_zone'], status, art,
svc['updated_at']))
CATEGORIES = {
'backup': BackupCommands,
'config': ConfigCommands,
'db': DbCommands,
'host': HostCommands,
'logs': GetLogCommands,
'service': ServiceCommands,
'shell': ShellCommands,
'version': VersionCommands,
'volume': VolumeCommands,
}
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
def add_command_parsers(subparsers):
for category in CATEGORIES:
command_object = CATEGORIES[category]()
parser = subparsers.add_parser(category)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
handler=add_command_parsers)
def get_arg_string(args):
arg = None
if args[0] == '-':
# (Note) zhiteng: an arg that starts with FLAGS.oparser.prefix_chars
# is an optional arg. Notice that the cfg module takes care of the
# actual ArgParser, so prefix_chars is always '-'.
if args[1] == '-':
# This is long optional arg
arg = args[2:]
else:
arg = args[3:]
else:
arg = args
return arg
def fetch_func_args(func):
fn_args = []
for args, kwargs in getattr(func, 'args', []):
arg = get_arg_string(args[0])
fn_args.append(getattr(CONF.category, arg))
return fn_args
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opt(category_opt)
script_name = sys.argv[0]
if len(sys.argv) < 2:
print(_("\nOpenStack Cinder version: %(version)s\n") %
{'version': version.version_string()})
print(script_name + " category action [<args>]")
print(_("Available categories:"))
for category in CATEGORIES:
print(_("\t%s") % category)
sys.exit(2)
try:
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup("cinder")
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
except Exception:
print(_('sudo failed, continuing as if nothing happened'))
print(_('Please re-run cinder-manage as root.'))
sys.exit(2)
fn = CONF.category.action_fn
fn_args = fetch_func_args(fn)
fn(*fn_args)
| abusse/cinder | cinder/cmd/manage.py | Python | apache-2.0 | 19,015 |
"""
Code for managing the implementation cache.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os
from zeroinstall.support import basedir
from zeroinstall import SafeException, support
class BadDigest(SafeException):
"""Thrown if a digest is invalid (either syntactically or cryptographically)."""
detail = None
class NotStored(SafeException):
"""Throws if a requested implementation isn't in the cache."""
class NonwritableStore(SafeException):
"""Attempt to add to a non-writable store directory."""
def _copytree2(src, dst):
"""@type src: str
@type dst: str"""
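	# Similar to shutil.copytree(), but assumes dst already exists, copies
	# symlinks as symlinks, and restores each sub-directory's mtime after its
	# contents have been copied.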
import shutil
names = os.listdir(src)
assert os.path.isdir(dst)
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
os.mkdir(dstname)
mtime = int(os.lstat(srcname).st_mtime)
_copytree2(srcname, dstname)
os.utime(dstname, (mtime, mtime))
else:
shutil.copy2(srcname, dstname)
def _validate_pair(value):
"""@type value: str"""
if '/' in value or \
'\\' in value or \
value.startswith('.'):
raise BadDigest("Invalid digest '{value}'".format(value = value))
def parse_algorithm_digest_pair(src):
"""Break apart an algorithm/digest into in a tuple.
Old algorithms use '=' as the separator, while newer ones use '_'.
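	For example, 'sha1new=abc' parses to ('sha1new', 'abc') and
	'sha256new_abc' parses to ('sha256new', 'abc').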
@param src: the combined string
@type src: str
@return: the parsed values
@rtype: (str, str)
@raise BadDigest: if it can't be parsed
@since: 1.10"""
_validate_pair(src)
if src.startswith('sha1=') or src.startswith('sha1new=') or src.startswith('sha256='):
return src.split('=', 1)
result = src.split('_', 1)
if len(result) != 2:
if '=' in src:
raise BadDigest("Use '_' not '=' for new algorithms, in {src}".format(src = src))
raise BadDigest("Can't parse digest {src}".format(src = src))
return result
def format_algorithm_digest_pair(alg, digest):
"""The opposite of L{parse_algorithm_digest_pair}.
The result is suitable for use as a directory name (does not contain '/' characters).
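	For example, ('sha1new', 'abc') becomes 'sha1new=abc', while
	('sha256new', 'abc') becomes 'sha256new_abc'.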
@type alg: str
@type digest: str
@rtype: str
@raise BadDigest: if the result is invalid
@since: 1.10"""
if alg in ('sha1', 'sha1new', 'sha256'):
result = alg + '=' + digest
else:
result = alg + '_' + digest
_validate_pair(result)
return result
class Store(object):
"""A directory for storing implementations."""
def __init__(self, dir, public = False):
"""Create a new Store.
@param dir: directory to contain the implementations
@type dir: str
@param public: deprecated
@type public: bool"""
self.dir = dir
self.dry_run_names = set()
def __str__(self):
return _("Store '%s'") % self.dir
def lookup(self, digest):
"""@type digest: str
@rtype: str"""
alg, value = parse_algorithm_digest_pair(digest)
dir = os.path.join(self.dir, digest)
if os.path.isdir(dir) or digest in self.dry_run_names:
return dir
return None
def get_tmp_dir_for(self, required_digest):
"""Create a temporary directory in the directory where we would store an implementation
	with the given digest. This is used to set up a new implementation, which is
	then renamed into place if it turns out OK.
@type required_digest: str
@rtype: str
@raise NonwritableStore: if we can't create it"""
try:
if not os.path.isdir(self.dir):
os.makedirs(self.dir)
from tempfile import mkdtemp
tmp = mkdtemp(dir = self.dir, prefix = 'tmp-')
os.chmod(tmp, 0o755) # r-x for all; needed by 0store-helper
return tmp
except OSError as ex:
raise NonwritableStore(str(ex))
def add_archive_to_cache(self, required_digest, data, url, extract = None, type = None, start_offset = 0, try_helper = False, dry_run = False):
"""@type required_digest: str
@type data: file
@type url: str
@type extract: str | None
@type type: str | None
@type start_offset: int
@type try_helper: bool
@type dry_run: bool"""
from . import unpack
if self.lookup(required_digest):
logger.info(_("Not adding %s as it already exists!"), required_digest)
return
tmp = self.get_tmp_dir_for(required_digest)
try:
unpack.unpack_archive(url, data, tmp, extract, type = type, start_offset = start_offset)
except:
import shutil
shutil.rmtree(tmp)
raise
try:
self.check_manifest_and_rename(required_digest, tmp, extract, try_helper = try_helper, dry_run = dry_run)
except Exception:
#warn(_("Leaving extracted directory as %s"), tmp)
support.ro_rmtree(tmp)
raise
def add_dir_to_cache(self, required_digest, path, try_helper = False, dry_run = False):
"""Copy the contents of path to the cache.
@param required_digest: the expected digest
@type required_digest: str
@param path: the root of the tree to copy
@type path: str
@param try_helper: attempt to use privileged helper before user cache (since 0.26)
@type try_helper: bool
@type dry_run: bool
@raise BadDigest: if the contents don't match the given digest."""
if self.lookup(required_digest):
logger.info(_("Not adding %s as it already exists!"), required_digest)
return
tmp = self.get_tmp_dir_for(required_digest)
try:
_copytree2(path, tmp)
self.check_manifest_and_rename(required_digest, tmp, try_helper = try_helper, dry_run = dry_run)
except:
logger.warning(_("Error importing directory."))
logger.warning(_("Deleting %s"), tmp)
support.ro_rmtree(tmp)
raise
def _add_with_helper(self, required_digest, path, dry_run):
"""Use 0store-secure-add to copy 'path' to the system store.
@param required_digest: the digest for path
@type required_digest: str
@param path: root of implementation directory structure
@type path: str
@return: True iff the directory was copied into the system cache successfully"""
if required_digest.startswith('sha1='):
return False # Old digest alg not supported
if os.environ.get('ZEROINSTALL_PORTABLE_BASE'):
return False # Can't use helper with portable mode
helper = support.find_in_path('0store-secure-add-helper')
if not helper:
logger.info(_("'0store-secure-add-helper' command not found. Not adding to system cache."))
return False
if dry_run:
print(_("[dry-run] would use {helper} to store {required_digest} in system store").format(
helper = helper,
required_digest = required_digest))
self.dry_run_names.add(required_digest)
return True
import subprocess
env = os.environ.copy()
env['ENV_NOT_CLEARED'] = 'Unclean' # (warn about insecure configurations)
env['HOME'] = 'Unclean' # (warn about insecure configurations)
dev_null = os.open(os.devnull, os.O_RDONLY)
try:
logger.info(_("Trying to add to system cache using %s"), helper)
child = subprocess.Popen([helper, required_digest],
stdin = dev_null,
cwd = path,
env = env)
exit_code = child.wait()
finally:
os.close(dev_null)
if exit_code:
logger.warning(_("0store-secure-add-helper failed."))
return False
		logger.info(_("Added successfully."))
return True
def check_manifest_and_rename(self, required_digest, tmp, extract = None, try_helper = False, dry_run = False):
"""Check that tmp[/extract] has the required_digest.
On success, rename the checked directory to the digest, and
make the whole tree read-only.
@type required_digest: str
@type tmp: str
@type extract: str | None
@param try_helper: attempt to use privileged helper to import to system cache first (since 0.26)
@type try_helper: bool
@param dry_run: just print what we would do to stdout (and delete tmp)
@type dry_run: bool
@raise BadDigest: if the input directory doesn't match the given digest"""
if extract:
extracted = os.path.join(tmp, extract)
if not os.path.isdir(extracted):
raise Exception(_('Directory %s not found in archive') % extract)
else:
extracted = tmp
from . import manifest
manifest.fixup_permissions(extracted)
alg, required_value = manifest.splitID(required_digest)
actual_digest = alg.getID(manifest.add_manifest_file(extracted, alg))
if actual_digest != required_digest:
raise BadDigest(_('Incorrect manifest -- archive is corrupted.\n'
'Required digest: %(required_digest)s\n'
'Actual digest: %(actual_digest)s\n') %
{'required_digest': required_digest, 'actual_digest': actual_digest})
if try_helper:
if self._add_with_helper(required_digest, extracted, dry_run = dry_run):
support.ro_rmtree(tmp)
return
logger.info(_("Can't add to system store. Trying user store instead."))
logger.info(_("Caching new implementation (digest %s) in %s"), required_digest, self.dir)
final_name = os.path.join(self.dir, required_digest)
if os.path.isdir(final_name):
logger.warning(_("Item %s already stored.") % final_name) # not really an error
return
if dry_run:
print(_("[dry-run] would store implementation as {path}").format(path = final_name))
self.dry_run_names.add(required_digest)
support.ro_rmtree(tmp)
return
else:
# If we just want a subdirectory then the rename will change
# extracted/.. and so we'll need write permission on 'extracted'
os.chmod(extracted, 0o755)
os.rename(extracted, final_name)
os.chmod(final_name, 0o555)
if extract:
os.rmdir(tmp)
def __repr__(self):
return "<store: %s>" % self.dir
class Stores(object):
"""A list of L{Store}s. All stores are searched when looking for an implementation.
When storing, we use the first of the system caches (if writable), or the user's
cache otherwise."""
__slots__ = ['stores']
def __init__(self):
# Always add the user cache to have a reliable fallback location for storage
user_store = os.path.join(basedir.xdg_cache_home, '0install.net', 'implementations')
self.stores = [Store(user_store)]
# Add custom cache locations
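		# Each 'implementation-dirs' config file lists one extra store
		# directory per line; blank lines and lines starting with '#' are
		# ignored.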
dirs = []
for impl_dirs in basedir.load_config_paths('0install.net', 'injector', 'implementation-dirs'):
with open(impl_dirs, 'rt') as stream:
dirs.extend(stream.readlines())
for directory in dirs:
directory = directory.strip()
if directory and not directory.startswith('#'):
logger.debug(_("Added system store '%s'"), directory)
self.stores.append(Store(directory))
# Add the system cache when not in portable mode
if not os.environ.get('ZEROINSTALL_PORTABLE_BASE'):
if os.name == "nt":
from win32com.shell import shell, shellcon
commonAppData = shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_APPDATA, 0, 0)
systemCachePath = os.path.join(commonAppData, "0install.net", "implementations")
# Only use shared cache location on Windows if it was explicitly created
if os.path.isdir(systemCachePath):
self.stores.append(Store(systemCachePath))
else:
self.stores.append(Store('/var/cache/0install.net/implementations'))
def lookup(self, digest):
"""@type digest: str
@rtype: str
@deprecated: use lookup_any instead"""
return self.lookup_any([digest])
def lookup_any(self, digests):
"""Search for digest in all stores.
@type digests: [str]
@rtype: str
@raises NotStored: if not found"""
path = self.lookup_maybe(digests)
if path:
return path
raise NotStored(_("Item with digests '%(digests)s' not found in stores. Searched:\n- %(stores)s") %
{'digests': digests, 'stores': '\n- '.join([s.dir for s in self.stores])})
def lookup_maybe(self, digests):
"""Like lookup_any, but return None if it isn't found.
@type digests: [str]
@rtype: str | None
@since: 0.53"""
assert digests
for digest in digests:
assert digest
_validate_pair(digest)
for store in self.stores:
path = store.lookup(digest)
if path:
return path
return None
def add_dir_to_cache(self, required_digest, dir, dry_run = False):
"""Add to the best writable cache.
@type required_digest: str
@type dir: str
@type dry_run: bool
@see: L{Store.add_dir_to_cache}"""
self._write_store(lambda store, **kwargs: store.add_dir_to_cache(required_digest, dir, dry_run = dry_run, **kwargs))
def add_archive_to_cache(self, required_digest, data, url, extract = None, type = None, start_offset = 0, dry_run = False):
"""Add to the best writable cache.
@type required_digest: str
@type data: file
@type url: str
@type extract: str | None
@type type: str | None
@type start_offset: int
@type dry_run: bool
@see: L{Store.add_archive_to_cache}"""
self._write_store(lambda store, **kwargs: store.add_archive_to_cache(required_digest,
data, url, extract, type = type, start_offset = start_offset, dry_run = dry_run, **kwargs))
def _write_store(self, fn):
"""Call fn(first_system_store). If it's read-only, try again with the user store."""
if len(self.stores) > 1:
try:
fn(self.get_first_system_store())
return
except NonwritableStore:
logger.debug(_("%s not-writable. Trying helper instead."), self.get_first_system_store())
pass
fn(self.stores[0], try_helper = True)
def get_first_system_store(self):
"""The first system store is the one we try writing to first.
@rtype: L{Store}
@since: 0.30"""
try:
return self.stores[1]
except IndexError:
raise SafeException(_("No system stores have been configured"))
| slovenwd/0install | zeroinstall/zerostore/__init__.py | Python | lgpl-2.1 | 13,307 |
from __future__ import unicode_literals
import unittest
import spotify
import tests
from tests import mock
@mock.patch('spotify.image.lib', spec=spotify.lib)
class ImageTest(unittest.TestCase):
def setUp(self):
self.session = tests.create_session_mock()
spotify._session_instance = self.session
def tearDown(self):
spotify._session_instance = None
def test_create_without_uri_or_sp_image_fails(self, lib_mock):
with self.assertRaises(AssertionError):
spotify.Image(self.session)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri(self, link_mock, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
link_instance_mock = link_mock.return_value
link_instance_mock.as_image.return_value = spotify.Image(
self.session, sp_image=sp_image)
lib_mock.sp_image_create_from_link.return_value = sp_image
uri = 'spotify:image:foo'
result = spotify.Image(self.session, uri=uri)
link_mock.assert_called_with(self.session, uri=uri)
link_instance_mock.as_image.assert_called_with()
lib_mock.sp_image_add_ref.assert_called_with(sp_image)
self.assertEqual(result._sp_image, sp_image)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_create_from_uri_fail_raises_error(self, link_mock, lib_mock):
link_instance_mock = link_mock.return_value
link_instance_mock.as_image.return_value = None
uri = 'spotify:image:foo'
with self.assertRaises(ValueError):
spotify.Image(self.session, uri=uri)
def test_adds_ref_to_sp_image_when_created(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
spotify.Image(self.session, sp_image=sp_image)
lib_mock.sp_image_add_ref.assert_called_with(sp_image)
def test_releases_sp_image_when_image_dies(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
image = None # noqa
tests.gc_collect()
lib_mock.sp_image_release.assert_called_with(sp_image)
@mock.patch('spotify.Link', spec=spotify.Link)
def test_repr(self, link_mock, lib_mock):
link_instance_mock = link_mock.return_value
link_instance_mock.uri = 'foo'
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = repr(image)
self.assertEqual(result, 'Image(%r)' % 'foo')
def test_eq(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image1 = spotify.Image(self.session, sp_image=sp_image)
image2 = spotify.Image(self.session, sp_image=sp_image)
self.assertTrue(image1 == image2)
self.assertFalse(image1 == 'foo')
def test_ne(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image1 = spotify.Image(self.session, sp_image=sp_image)
image2 = spotify.Image(self.session, sp_image=sp_image)
self.assertFalse(image1 != image2)
def test_hash(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image1 = spotify.Image(self.session, sp_image=sp_image)
image2 = spotify.Image(self.session, sp_image=sp_image)
self.assertEqual(hash(image1), hash(image2))
def test_loaded_event_is_unset_by_default(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
self.assertFalse(image.loaded_event.is_set())
def test_create_with_callback(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
lib_mock.sp_image_remove_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
lib_mock.sp_image_create.return_value = sp_image
callback = mock.Mock()
# Add callback
image = spotify.Image(
self.session, sp_image=sp_image, callback=callback)
lib_mock.sp_image_add_load_callback.assert_called_with(
sp_image, mock.ANY, mock.ANY)
image_load_cb = lib_mock.sp_image_add_load_callback.call_args[0][1]
callback_handle = lib_mock.sp_image_add_load_callback.call_args[0][2]
# Call calls callback, sets event, and removes callback registration
self.assertEqual(callback.call_count, 0)
self.assertEqual(lib_mock.sp_image_remove_load_callback.call_count, 0)
self.assertFalse(image.loaded_event.is_set())
image_load_cb(sp_image, callback_handle)
callback.assert_called_once_with(image)
lib_mock.sp_image_remove_load_callback.assert_called_with(
sp_image, image_load_cb, callback_handle)
self.assertTrue(image.loaded_event.is_set())
def test_create_with_callback_and_throw_away_image_and_call_load_callback(
self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
lib_mock.sp_image_remove_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
lib_mock.sp_image_create.return_value = sp_image
callback = mock.Mock()
# Add callback
image = spotify.Image(
self.session, sp_image=sp_image, callback=callback)
loaded_event = image.loaded_event
# Throw away reference to 'image'
image = None # noqa
tests.gc_collect()
# The mock keeps the handle/userdata alive, thus this test doesn't
# really test that session._callback_handles keeps the handle alive.
# Call callback
image_load_cb = lib_mock.sp_image_add_load_callback.call_args[0][1]
callback_handle = lib_mock.sp_image_add_load_callback.call_args[0][2]
image_load_cb(sp_image, callback_handle)
loaded_event.wait(3)
self.assertEqual(callback.call_count, 1)
self.assertEqual(callback.call_args[0][0]._sp_image, sp_image)
def test_create_with_callback_fails_if_error_adding_callback(
self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.BAD_API_VERSION)
sp_image = spotify.ffi.cast('sp_image *', 42)
callback = mock.Mock()
with self.assertRaises(spotify.Error):
spotify.Image(self.session, sp_image=sp_image, callback=callback)
def test_is_loaded(self, lib_mock):
lib_mock.sp_image_is_loaded.return_value = 1
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.is_loaded
lib_mock.sp_image_is_loaded.assert_called_once_with(sp_image)
self.assertTrue(result)
def test_error(self, lib_mock):
lib_mock.sp_image_error.return_value = int(
spotify.ErrorType.IS_LOADING)
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.error
lib_mock.sp_image_error.assert_called_once_with(sp_image)
self.assertIs(result, spotify.ErrorType.IS_LOADING)
@mock.patch('spotify.utils.load')
def test_load(self, load_mock, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
image.load(10)
load_mock.assert_called_with(self.session, image, timeout=10)
def test_format(self, lib_mock):
lib_mock.sp_image_is_loaded.return_value = 1
lib_mock.sp_image_format.return_value = int(spotify.ImageFormat.JPEG)
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.format
lib_mock.sp_image_format.assert_called_with(sp_image)
self.assertIs(result, spotify.ImageFormat.JPEG)
def test_format_is_none_if_unloaded(self, lib_mock):
lib_mock.sp_image_is_loaded.return_value = 0
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.format
lib_mock.sp_image_is_loaded.assert_called_with(sp_image)
self.assertIsNone(result)
def test_data(self, lib_mock):
lib_mock.sp_image_is_loaded.return_value = 1
size = 20
data = spotify.ffi.new('char[]', size)
data[0:3] = [b'a', b'b', b'c']
def func(sp_image_ptr, data_size_ptr):
data_size_ptr[0] = size
return spotify.ffi.cast('void *', data)
lib_mock.sp_image_data.side_effect = func
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.data
lib_mock.sp_image_data.assert_called_with(sp_image, mock.ANY)
self.assertEqual(result[:5], b'abc\x00\x00')
def test_data_is_none_if_unloaded(self, lib_mock):
lib_mock.sp_image_is_loaded.return_value = 0
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.data
lib_mock.sp_image_is_loaded.assert_called_with(sp_image)
self.assertIsNone(result)
def test_data_uri(self, lib_mock):
lib_mock.sp_image_format.return_value = int(spotify.ImageFormat.JPEG)
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
prop_mock = mock.PropertyMock()
with mock.patch.object(spotify.Image, 'data', prop_mock):
image = spotify.Image(self.session, sp_image=sp_image)
prop_mock.return_value = b'01234\x006789'
result = image.data_uri
self.assertEqual(result, 'data:image/jpeg;base64,MDEyMzQANjc4OQ==')
def test_data_uri_is_none_if_unloaded(self, lib_mock):
lib_mock.sp_image_is_loaded.return_value = 0
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
result = image.data_uri
self.assertIsNone(result)
def test_data_uri_fails_if_unknown_image_format(self, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
image.__dict__['format'] = mock.Mock(
return_value=spotify.ImageFormat.UNKNOWN)
image.__dict__['data'] = mock.Mock(return_value=b'01234\x006789')
with self.assertRaises(ValueError):
image.data_uri
@mock.patch('spotify.Link', spec=spotify.Link)
def test_link_creates_link_to_image(self, link_mock, lib_mock):
lib_mock.sp_image_add_load_callback.return_value = int(
spotify.ErrorType.OK)
sp_image = spotify.ffi.cast('sp_image *', 42)
image = spotify.Image(self.session, sp_image=sp_image)
sp_link = spotify.ffi.cast('sp_link *', 43)
lib_mock.sp_link_create_from_image.return_value = sp_link
link_mock.return_value = mock.sentinel.link
result = image.link
link_mock.assert_called_once_with(
self.session, sp_link=sp_link, add_ref=False)
self.assertEqual(result, mock.sentinel.link)
class ImageFormatTest(unittest.TestCase):
def test_has_constants(self):
self.assertEqual(spotify.ImageFormat.UNKNOWN, -1)
self.assertEqual(spotify.ImageFormat.JPEG, 0)
class ImageSizeTest(unittest.TestCase):
def test_has_size_constants(self):
self.assertEqual(spotify.ImageSize.NORMAL, 0)
self.assertEqual(spotify.ImageSize.SMALL, 1)
self.assertEqual(spotify.ImageSize.LARGE, 2)
| felix1m/pyspotify | tests/test_image.py | Python | apache-2.0 | 13,570 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from defaultmode import DefaultMode
from quicktransmode import QuickTranslateMode
from searchmode import SearchMode
from qualitycheckmode import QualityCheckMode
from workflowmode import WorkflowMode
modeclasses = [DefaultMode, QuickTranslateMode, SearchMode, QualityCheckMode, WorkflowMode]
__all__ = ['modeclasses']
| elric/virtaal-debian | virtaal/modes/__init__.py | Python | gpl-2.0 | 1,103 |
import smtplib
import copy
from email.mime.text import MIMEText
from main import app, db
from flask import jsonify, request
from DB.User import User
from DB.Reservation import Reservation
from flask_jwt_extended import jwt_required, get_jwt_identity
from constants import MAIL_MESSAGES, PASSWORD_MIN_LENGTH
@app.route('/users')
@jwt_required
def show_users():
user_id = get_jwt_identity()
current_user = User.query.get(user_id)
if not current_user or not current_user.admin:
return jsonify({"error": {'msg': 'Operation not permitted', 'code': 14}}), 403
db_users = User.query.all()
users = copy.deepcopy(db_users)
userDict = []
for user in users:
userDict.append(user.to_dict())
return jsonify(userDict)
@app.route('/users/<int:id>', methods=["GET"])
@jwt_required
def show_user(id):
user_id_from_token = get_jwt_identity()
current_user = User.query.get(user_id_from_token)
if not current_user.admin and id != user_id_from_token:
return jsonify({"error": {'msg': 'Operation not permitted', 'code': 14}}), 403
user = User.query.get(id)
if user is not None:
return jsonify(copy.deepcopy(user).to_dict())
else:
return jsonify({'error': {'msg': 'User not found', 'code': 16, 'info': id}}), 404
@app.route('/users/<int:user_id>', methods=["PUT", "PATCH"])
@jwt_required
def edit_user(user_id):
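    # Regular users may only edit their own account and must supply their
    # current password as "oldPassword" to change the password; admins may
    # edit any account, including protected attributes such as "active".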
user_id_from_token = get_jwt_identity()
current_user = User.query.get(user_id_from_token)
if not current_user.admin and user_id != user_id_from_token:
return jsonify({'error': {'msg': 'Operation not permitted', 'code': 14}}), 403
user = User.query.get(user_id)
if not user:
return jsonify({'error': {'msg': 'User not found', 'code': 16, 'info': user_id}}), 404
if "password" in request.json and not current_user.admin:
if "oldPassword" not in request.json:
return jsonify({'error': {'msg': 'Current password must be provided as "oldPassword" within the request body', 'code': 21}}), 400
if not user.verify_password(request.json["oldPassword"]):
return jsonify({'error': {'msg': 'Password missmatch for user', 'code': 22}}), 401
try:
if "password" in request.json:
if len(request.json['password']) < PASSWORD_MIN_LENGTH:
return jsonify({'error': {'msg': 'Password needs to be at least 8 characters long', 'code': 24}}), 400
user.hash_password(request.json["password"])
del request.json["password"]
for attribute in request.json:
if attribute in User.get_protected_attributes() and not current_user.admin:
db.session.rollback()
return jsonify({'error': {'msg': 'Attribute protected', 'code': 23}}), 400
if attribute in User.get_all_attributes():
setattr(user, attribute, request.json[attribute])
if attribute == "active" and request.json[attribute] == 1:
send_activation_mail(user)
db.session.commit()
return '', 204
except Exception:
db.session.rollback()
return jsonify({"error": {'msg': "Failed to update user", 'code': 17}}), 500
@app.route('/users', methods=["POST"])
def add_user():
for attribute in User.get_required_attributes():
if attribute not in request.json:
return jsonify({'error': {'msg': '\'' + attribute + '\' is required', 'code': 2, 'info': attribute}}), 400
if len(request.json['password']) < PASSWORD_MIN_LENGTH:
return jsonify({'error': {'msg': 'Password needs to be at least 8 characters long', 'code': 24}}), 400
data = request.json
new_user = User(data['username'], data['password'], data['email'], data['language'])
db.session.add(new_user)
db.session.commit()
send_new_user_mail(new_user)
return '', 201
@app.route('/users/<int:user_id>', methods=["DELETE"])
@jwt_required
def delete_user(user_id):
user_id_from_token = get_jwt_identity()
current_user = User.query.get(user_id_from_token)
if not current_user.admin:
return jsonify({'error': {'msg': 'Operation not permitted', 'code': 14}}), 403
user = User.query.get(user_id)
if not user:
return jsonify({'error': {'msg': 'User not found', 'code': 16, 'info': user_id}}), 404
try:
db.session.delete(user)
db.session.commit()
return '', 204
    except Exception as error:
        app.logger.error("Failed to delete user %s: %s", user_id, error)
        return jsonify({"error": {'msg': "Cannot delete user", 'code': 18}}), 500
@app.route('/users/checkUnique', methods=["GET"])
def check_unique_attribute():
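    # Illustrative request: GET /users/checkUnique?key=username&value=alice
    # responds with {"unique": true} when no user with that username exists.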
arguments = request.args
possible_keys = ['username', 'email']
if 'key' not in arguments or 'value' not in arguments:
return jsonify({'error': {'msg': '"key" and "value" must be given as query parameters', 'code': 19}}), 400
if not arguments['key'] in possible_keys:
return jsonify({'error': {'msg': '"key" can be one of the following: ' + ",".join(possible_keys), 'code': 20,
'info': ",".join(possible_keys)}}), 400
kwargs = {arguments['key']: arguments['value']}
user = User.query.filter_by(**kwargs).first()
if not user:
return jsonify({'unique': True}), 200
else:
return jsonify({'unique': False}), 200
@app.route('/users/<int:user_id>/nextReservation', methods=["GET"])
def get_next_reservation(user_id):
user = User.query.get(user_id)
if not user:
return jsonify({'error': {'msg': 'User not found', 'code': 16, 'info': user_id}}), 404
next_reservation = user.get_next_reservation()
if next_reservation is None:
return '', 204
return jsonify(next_reservation.to_dict()), 200
def send_new_user_mail(user):
mail_host = app.config['MAIL_HOST']
mail_port = app.config['MAIL_PORT']
mail_user = app.config['MAIL_LOGIN_USER']
mail_pass = app.config['MAIL_LOGIN_PASS']
mailer = smtplib.SMTP_SSL(mail_host, mail_port)
mailer.login(mail_user, mail_pass)
send_new_user_information_mail(user, mailer)
send_new_user_activation_request(user, mailer)
mailer.quit()
def send_new_user_information_mail(user, mailer):
mail_to = user.email
mail_from = app.config['MAIL_FROM']
    mail_message = MAIL_MESSAGES[user.language]['registration']['message'].format(user.username)
    message = MIMEText(mail_message)
message["Subject"] = MAIL_MESSAGES[user.language]['registration']['subject']
message["From"] = mail_from
message["To"] = mail_to
mailer.sendmail(mail_from, mail_to, message.as_string())
def send_new_user_activation_request(user, mailer):
    # Use a concrete list so the recipients survive both the header join below
    # and the sendmail() call.
    mail_to = [admin_account.email for admin_account in User.get_admin_accounts()]
mail_from = app.config['MAIL_FROM']
    mail_message = "Hello admins\n\nA new user with username {} just signed up at zermatt.patklaey.ch. Please verify that this user is allowed to sign up and activate their account accordingly.".format(
        user.username)
    message = MIMEText(mail_message)
message["Subject"] = "New user on zermatt.patklaey.ch"
message["From"] = mail_from
message["To"] = ", ".join(mail_to)
mailer.sendmail(mail_from, mail_to, message.as_string())
def send_activation_mail(user):
mail_host = app.config['MAIL_HOST']
mail_port = app.config['MAIL_PORT']
mail_user = app.config['MAIL_LOGIN_USER']
mail_pass = app.config['MAIL_LOGIN_PASS']
mailer = smtplib.SMTP_SSL(mail_host, mail_port)
mailer.login(mail_user, mail_pass)
mail_to = user.email
mail_from = app.config['MAIL_FROM']
    mail_message = MAIL_MESSAGES[user.language]['activation']['message'].format(user.username)
    message = MIMEText(mail_message)
message["Subject"] = MAIL_MESSAGES[user.language]['activation']['subject']
message["From"] = mail_from
message["To"] = mail_to
mailer.sendmail(mail_from, mail_to, message.as_string())
| patklaey/ZermattReservationAPI | endpoints/user.py | Python | mit | 7,947 |
# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils import uuidutils
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
from pypowervm.tasks import storage as tsk_stg
from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import cluster as pvm_clust
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova import exception
from nova import test
from nova.tests.unit.virt import powervm
from nova.virt.powervm.disk import ssp as ssp_dvr
from nova.virt.powervm import vm
FAKE_INST_UUID = uuidutils.generate_uuid(dashed=True)
FAKE_INST_UUID_PVM = vm.get_pvm_uuid(mock.Mock(uuid=FAKE_INST_UUID))
class TestSSPDiskAdapter(test.NoDBTestCase):
"""Unit Tests for the LocalDisk storage driver."""
def setUp(self):
super(TestSSPDiskAdapter, self).setUp()
self.inst = powervm.TEST_INSTANCE
self.apt = mock.Mock()
self.host_uuid = 'host_uuid'
self.ssp_wrap = mock.create_autospec(pvm_stg.SSP, instance=True)
# SSP.refresh() returns itself
self.ssp_wrap.refresh.return_value = self.ssp_wrap
self.node1 = mock.create_autospec(pvm_clust.Node, instance=True)
self.node2 = mock.create_autospec(pvm_clust.Node, instance=True)
self.clust_wrap = mock.create_autospec(
pvm_clust.Cluster, instance=True)
self.clust_wrap.nodes = [self.node1, self.node2]
self.clust_wrap.refresh.return_value = self.clust_wrap
self.tier_wrap = mock.create_autospec(pvm_stg.Tier, instance=True)
# Tier.refresh() returns itself
self.tier_wrap.refresh.return_value = self.tier_wrap
self.vio_wrap = mock.create_autospec(pvm_vios.VIOS, instance=True)
# For _cluster
self.mock_clust = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.cluster.Cluster', autospec=True)).mock
self.mock_clust.get.return_value = [self.clust_wrap]
# For _ssp
self.mock_ssp_gbhref = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.storage.SSP.get_by_href')).mock
self.mock_ssp_gbhref.return_value = self.ssp_wrap
# For _tier
self.mock_get_tier = self.useFixture(fixtures.MockPatch(
'pypowervm.tasks.storage.default_tier_for_ssp',
autospec=True)).mock
self.mock_get_tier.return_value = self.tier_wrap
# A FeedTask
self.mock_wtsk = mock.create_autospec(
pvm_tx.WrapperTask, instance=True)
self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
self.mock_afs = self.mock_ftsk.add_functor_subtask
self.mock_ftsk.configure_mock(
wrapper_tasks={self.vio_wrap.uuid: self.mock_wtsk})
self.pvm_uuid = self.useFixture(fixtures.MockPatch(
'nova.virt.powervm.vm.get_pvm_uuid')).mock
# Return the mgmt uuid
self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
'nova.virt.powervm.mgmt.mgmt_uuid')).mock
self.mgmt_uuid.return_value = 'mp_uuid'
# The SSP disk adapter
self.ssp_drv = ssp_dvr.SSPDiskAdapter(self.apt, self.host_uuid)
def test_init(self):
self.assertEqual(self.apt, self.ssp_drv._adapter)
self.assertEqual(self.host_uuid, self.ssp_drv._host_uuid)
self.mock_clust.get.assert_called_once_with(self.apt)
self.assertEqual(self.mock_clust.get.return_value,
[self.ssp_drv._clust])
self.mock_ssp_gbhref.assert_called_once_with(
self.apt, self.clust_wrap.ssp_uri)
self.assertEqual(self.mock_ssp_gbhref.return_value, self.ssp_drv._ssp)
self.mock_get_tier.assert_called_once_with(self.ssp_wrap)
self.assertEqual(self.mock_get_tier.return_value, self.ssp_drv._tier)
def test_init_error(self):
# Do these in reverse order to verify we trap all of 'em
for raiser in (self.mock_get_tier, self.mock_ssp_gbhref,
self.mock_clust.get):
raiser.side_effect = pvm_exc.TimeoutError("timed out")
self.assertRaises(exception.NotFound,
ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
raiser.side_effect = ValueError
self.assertRaises(ValueError,
ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
def test_capabilities(self):
self.assertTrue(self.ssp_drv.capabilities.get('shared_storage'))
self.assertFalse(self.ssp_drv.capabilities.get('has_imagecache'))
self.assertTrue(self.ssp_drv.capabilities.get('snapshot'))
@mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
def test_vios_uuids(self, mock_rpu):
mock_rpu.return_value = self.host_uuid
vios_uuids = self.ssp_drv._vios_uuids
self.assertEqual({self.node1.vios_uuid, self.node2.vios_uuid},
set(vios_uuids))
mock_rpu.assert_has_calls(
[mock.call(node.vios_uri, preserve_case=True, root=True)
for node in [self.node1, self.node2]])
mock_rpu.reset_mock()
# Test VIOSes on other nodes, which won't have uuid or url
node1 = mock.Mock(vios_uuid=None, vios_uri='uri1')
node2 = mock.Mock(vios_uuid='2', vios_uri=None)
# This mock is good and should be returned
node3 = mock.Mock(vios_uuid='3', vios_uri='uri3')
self.clust_wrap.nodes = [node1, node2, node3]
self.assertEqual(['3'], self.ssp_drv._vios_uuids)
# get_req_path_uuid was only called on the good one
mock_rpu.assert_called_once_with('uri3', preserve_case=True, root=True)
def test_capacity(self):
self.tier_wrap.capacity = 10
self.assertAlmostEqual(10.0, self.ssp_drv.capacity)
self.tier_wrap.refresh.assert_called_once_with()
def test_capacity_used(self):
self.ssp_wrap.capacity = 4.56
self.ssp_wrap.free_space = 1.23
self.assertAlmostEqual((4.56 - 1.23), self.ssp_drv.capacity_used)
self.ssp_wrap.refresh.assert_called_once_with()
@mock.patch('pypowervm.tasks.cluster_ssp.get_or_upload_image_lu',
autospec=True)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
new_callable=mock.PropertyMock)
@mock.patch('pypowervm.util.sanitize_file_name_for_api', autospec=True)
@mock.patch('pypowervm.tasks.storage.crt_lu', autospec=True)
@mock.patch('nova.image.glance.API.download')
@mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter',
autospec=True)
def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu,
mock_san, mock_vuuid, mock_goru):
img = powervm.TEST_IMAGE1
mock_crt_lu.return_value = self.ssp_drv._ssp, 'boot_lu'
mock_san.return_value = 'disk_name'
mock_vuuid.return_value = ['vuuid']
self.assertEqual('boot_lu', self.ssp_drv.create_disk_from_image(
'context', self.inst, img))
mock_dl.assert_called_once_with('context', img.id)
mock_san.assert_has_calls([
mock.call(img.name, prefix='image_', suffix='_' + img.checksum),
mock.call(self.inst.name, prefix='boot_')])
mock_it2f.assert_called_once_with(mock_dl.return_value)
mock_goru.assert_called_once_with(
self.ssp_drv._tier, 'disk_name', 'vuuid',
mock_it2f.return_value, img.size,
upload_type=tsk_stg.UploadType.IO_STREAM)
mock_crt_lu.assert_called_once_with(
self.mock_get_tier.return_value, mock_san.return_value,
self.inst.flavor.root_gb, typ=pvm_stg.LUType.DISK,
clone=mock_goru.return_value)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
new_callable=mock.PropertyMock)
@mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
autospec=True)
@mock.patch('pypowervm.wrappers.storage.LU', autospec=True)
def test_connect_disk(self, mock_lu, mock_bldmap, mock_addmap,
mock_vio_uuids):
disk_info = mock.Mock()
disk_info.configure_mock(name='dname', udid='dudid')
mock_vio_uuids.return_value = [self.vio_wrap.uuid]
def test_afs(add_func):
# Verify the internal add_func
self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
mock_bldmap.assert_called_once_with(
self.host_uuid, self.vio_wrap, self.pvm_uuid.return_value,
mock_lu.bld_ref.return_value)
mock_addmap.assert_called_once_with(
self.vio_wrap, mock_bldmap.return_value)
self.mock_wtsk.add_functor_subtask.side_effect = test_afs
self.ssp_drv.attach_disk(self.inst, disk_info, self.mock_ftsk)
mock_lu.bld_ref.assert_called_once_with(self.apt, 'dname', 'dudid')
self.pvm_uuid.assert_called_once_with(self.inst)
self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
@mock.patch('pypowervm.tasks.storage.rm_tier_storage', autospec=True)
def test_delete_disks(self, mock_rm_tstor):
self.ssp_drv.delete_disks(['disk1', 'disk2'])
mock_rm_tstor.assert_called_once_with(['disk1', 'disk2'],
tier=self.ssp_drv._tier)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
new_callable=mock.PropertyMock)
@mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
@mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
@mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
autospec=True)
def test_disconnect_disk(self, mock_bld_ftsk, mock_gmf, mock_rmmaps,
mock_findmaps, mock_vio_uuids):
mock_vio_uuids.return_value = [self.vio_wrap.uuid]
mock_bld_ftsk.return_value = self.mock_ftsk
lu1, lu2 = [mock.create_autospec(pvm_stg.LU, instance=True)] * 2
# Two mappings have the same LU, to verify set behavior
mock_findmaps.return_value = [
mock.Mock(spec=pvm_vios.VSCSIMapping, backing_storage=lu)
for lu in (lu1, lu2, lu1)]
def test_afs(rm_func):
# verify the internal rm_func
self.assertEqual(mock_rmmaps.return_value, rm_func(self.vio_wrap))
mock_rmmaps.assert_called_once_with(
self.vio_wrap, self.pvm_uuid.return_value,
match_func=mock_gmf.return_value)
self.mock_wtsk.add_functor_subtask.side_effect = test_afs
self.assertEqual(
{lu1, lu2}, set(self.ssp_drv.detach_disk(self.inst)))
mock_bld_ftsk.assert_called_once_with(
self.apt, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
self.pvm_uuid.assert_called_once_with(self.inst)
mock_gmf.assert_called_once_with(pvm_stg.LU)
self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
mock_findmaps.assert_called_once_with(
self.vio_wrap.scsi_mappings,
client_lpar_id=self.pvm_uuid.return_value,
match_func=mock_gmf.return_value)
self.mock_ftsk.execute.assert_called_once_with()
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
@mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._disk_match_func')
def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
mock_vios):
mock_vios.return_value = self.vio_wrap
# No maps found
mock_findmaps.return_value = None
devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
mock_vios.assert_called_once_with(
self.apt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
mock_findmaps.assert_called_once_with(
self.vio_wrap.scsi_mappings,
client_lpar_id=self.pvm_uuid.return_value,
match_func=mock_match_fn.return_value)
self.assertIsNone(devname)
# Good map
mock_lu = mock.Mock()
mock_lu.server_adapter.backing_dev_name = 'devname'
mock_findmaps.return_value = [mock_lu]
devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
self.assertEqual('devname', devname)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
'_vios_uuids', new_callable=mock.PropertyMock)
@mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
@mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
def test_connect_instance_disk_to_mgmt(self, mock_add, mock_vio_get,
mock_lw, mock_vio_uuids):
inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
mock_lw.return_value = lpar_wrap
mock_vio_uuids.return_value = [1, 2]
# Test with two VIOSes, both of which contain the mapping
mock_vio_get.side_effect = [vio1, vio2]
lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
self.assertEqual('lu_udid', lu.udid)
# Should hit on the first VIOS
self.assertIs(vio1, vios)
mock_add.assert_called_once_with(self.host_uuid, vio1, 'mp_uuid', lu)
# Now the first VIOS doesn't have the mapping, but the second does
mock_add.reset_mock()
mock_vio_get.side_effect = [vio3, vio2]
lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
self.assertEqual('lu_udid', lu.udid)
# Should hit on the second VIOS
self.assertIs(vio2, vios)
self.assertEqual(1, mock_add.call_count)
mock_add.assert_called_once_with(self.host_uuid, vio2, 'mp_uuid', lu)
# No hits
mock_add.reset_mock()
mock_vio_get.side_effect = [vio3, vio3]
self.assertRaises(exception.InstanceDiskMappingFailed,
self.ssp_drv.connect_instance_disk_to_mgmt, inst)
self.assertEqual(0, mock_add.call_count)
        # First add_vscsi_mapping call raises
        mock_vio_get.side_effect = [vio1, vio2]
        mock_add.side_effect = [Exception("mapping failed"), None]
        lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
        # Should hit on the second VIOS
        self.assertIs(vio2, vios)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping', autospec=True)
def test_disconnect_disk_from_mgmt(self, mock_rm_lu_map):
self.ssp_drv.disconnect_disk_from_mgmt('vios_uuid', 'disk_name')
mock_rm_lu_map.assert_called_with(self.apt, 'vios_uuid',
'mp_uuid', disk_names=['disk_name'])
@mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._get_disk_name')
def test_disk_match_func(self, mock_disk_name, mock_gen_match):
mock_disk_name.return_value = 'disk_name'
self.ssp_drv._disk_match_func('disk_type', 'instance')
mock_disk_name.assert_called_once_with('disk_type', 'instance')
mock_gen_match.assert_called_with(pvm_stg.LU, names=['disk_name'])
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
'_vios_uuids', new_callable=mock.PropertyMock)
@mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
mock_lw.return_value = lpar_wrap
mock_vio_uuids.return_value = [1, 2]
# Test with two VIOSes, both of which contain the mapping. Force the
# method to get the lpar_wrap.
mock_vio_get.side_effect = [vio1, vio2]
idi = self.ssp_drv._get_bootdisk_iter(inst)
lu, vios = next(idi)
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios1', vios.name)
mock_vio_get.assert_called_once_with(self.apt, uuid=1,
xag=[pvm_const.XAG.VIO_SMAP])
lu, vios = next(idi)
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios2', vios.name)
mock_vio_get.assert_called_with(self.apt, uuid=2,
xag=[pvm_const.XAG.VIO_SMAP])
self.assertRaises(StopIteration, next, idi)
self.assertEqual(2, mock_vio_get.call_count)
mock_lw.assert_called_once_with(self.apt, inst)
# Same, but prove that breaking out of the loop early avoids the second
# get call. Supply lpar_wrap from here on, and prove no calls to
# get_instance_wrapper
mock_vio_get.reset_mock()
mock_lw.reset_mock()
mock_vio_get.side_effect = [vio1, vio2]
for lu, vios in self.ssp_drv._get_bootdisk_iter(inst):
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios1', vios.name)
break
mock_vio_get.assert_called_once_with(self.apt, uuid=1,
xag=[pvm_const.XAG.VIO_SMAP])
# Now the first VIOS doesn't have the mapping, but the second does
mock_vio_get.reset_mock()
mock_vio_get.side_effect = [vio3, vio2]
idi = self.ssp_drv._get_bootdisk_iter(inst)
lu, vios = next(idi)
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios2', vios.name)
mock_vio_get.assert_has_calls(
[mock.call(self.apt, uuid=uuid, xag=[pvm_const.XAG.VIO_SMAP])
for uuid in (1, 2)])
self.assertRaises(StopIteration, next, idi)
self.assertEqual(2, mock_vio_get.call_count)
# No hits
mock_vio_get.reset_mock()
mock_vio_get.side_effect = [vio3, vio3]
self.assertEqual([], list(self.ssp_drv._get_bootdisk_iter(inst)))
self.assertEqual(2, mock_vio_get.call_count)
def _bld_mocks_for_instance_disk(self):
inst = mock.Mock()
inst.name = 'my-instance-name'
lpar_wrap = mock.Mock()
lpar_wrap.id = 4
lu_wrap = mock.Mock(spec=pvm_stg.LU)
lu_wrap.configure_mock(name='boot_my_instance_name', udid='lu_udid')
smap = mock.Mock(backing_storage=lu_wrap,
server_adapter=mock.Mock(lpar_id=4))
# Build mock VIOS Wrappers as the returns from VIOS.wrap.
# vios1 and vios2 will both have the mapping for client ID 4 and LU
# named boot_my_instance_name.
smaps = [mock.Mock(), mock.Mock(), mock.Mock(), smap]
vios1 = mock.Mock(spec=pvm_vios.VIOS)
vios1.configure_mock(name='vios1', uuid='uuid1', scsi_mappings=smaps)
vios2 = mock.Mock(spec=pvm_vios.VIOS)
vios2.configure_mock(name='vios2', uuid='uuid2', scsi_mappings=smaps)
# vios3 will not have the mapping
vios3 = mock.Mock(spec=pvm_vios.VIOS)
vios3.configure_mock(name='vios3', uuid='uuid3',
scsi_mappings=[mock.Mock(), mock.Mock()])
return inst, lpar_wrap, vios1, vios2, vios3
| openstack/nova | nova/tests/unit/virt/powervm/disk/test_ssp.py | Python | apache-2.0 | 20,150 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DhcpOptions(Model):
"""DhcpOptions contains an array of DNS servers available to VMs deployed in
the virtual network. Standard DHCP option for a subnet overrides VNET DHCP
options.
:param dns_servers: The list of DNS servers IP addresses.
:type dns_servers: list[str]
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
}
def __init__(self, *, dns_servers=None, **kwargs) -> None:
super(DhcpOptions, self).__init__(**kwargs)
self.dns_servers = dns_servers
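# Illustrative usage -- the option set is normally attached to a
# VirtualNetwork model:
#   dhcp = DhcpOptions(dns_servers=['10.0.0.4', '10.0.0.5'])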
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/dhcp_options_py3.py | Python | mit | 1,066 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-13 10:10
from __future__ import unicode_literals
import re
import uuid
import django.core.validators
import django.utils.timezone
import jsonfield.fields
import model_utils.fields
import sortedm2m.fields
from django.db import migrations, models
import django_netjsonconfig.models.config
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Config',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=63)),
('backend', models.CharField(choices=[('netjsonconfig.OpenWrt', 'OpenWRT'), ('netjsonconfig.OpenWisp', 'OpenWISP')], help_text='Select netjsonconfig backend', max_length=128, verbose_name='backend')),
('config', jsonfield.fields.JSONField(default=dict, help_text='configuration in NetJSON DeviceConfiguration format', verbose_name='configuration')),
('key', models.CharField(db_index=True, default=django_netjsonconfig.models.config.get_random_key, help_text='unique key that can be used to download the configuration', max_length=64, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[^\\s/\\.]+$', 32), code='invalid', message='Key must not contain spaces, dots or slashes.')])),
],
options={
'verbose_name_plural': 'configurations',
'verbose_name': 'configuration',
},
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=63)),
('backend', models.CharField(choices=[('netjsonconfig.OpenWrt', 'OpenWRT'), ('netjsonconfig.OpenWisp', 'OpenWISP')], help_text='Select netjsonconfig backend', max_length=128, verbose_name='backend')),
('config', jsonfield.fields.JSONField(default=dict, help_text='configuration in NetJSON DeviceConfiguration format', verbose_name='configuration')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='config',
name='templates',
            field=sortedm2m.fields.SortedManyToManyField(blank=True, help_text='configuration templates, applied from first to last', related_name='config_relations', to='django_netjsonconfig.Template', verbose_name='templates'),
),
]
| cosgrid001/cosgrid_dn | django_netjsonconfig/migrations/0001_initial.py | Python | gpl-3.0 | 3,287 |
# This file is part of TRS (http://math.kompiler.org)
#
# TRS is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TRS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with TRS. If not, see <http://www.gnu.org/licenses/>.
import logging
import logging.config
import sys
try:
import config as cfg
except ImportError:
cfg = object()
import default_config as default
try:
logging.basicConfig(level=logging.DEBUG,
format=getattr(cfg, 'LOG_FORMAT', default.LOG_FORMAT),
filename=getattr(cfg, 'LOG_FILE', default.LOG_FILE),
filemode='a')
except IOError as e: # pragma: no cover
print >>sys.stderr, 'warning: IOError raised: "%s"' % str(e)
def logger(name):
return logging.getLogger(name)
def filter_non_ascii(data):
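    # Replace every character outside the printable range (codes 34-124) with
    # a dot, e.g. filter_non_ascii('a\x00b') -> 'a.b'.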
return ''.join(map(lambda x: 33 < ord(x) < 125 and x or '.', data))
| smvv/trs | src/logger.py | Python | agpl-3.0 | 1,348 |
"""Library for polling dataplanes for statistics."""
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from ryu.lib import hub
from faucet.valve_util import dpid_log
class GaugePoller(object):
"""Abstraction for a poller for statistics."""
def __init__(self, conf, logname, prom_client):
self.dp = conf.dp # pylint: disable=invalid-name
self.conf = conf
self.prom_client = prom_client
self.reply_pending = False
self.logger = logging.getLogger(
logname + '.{0}'.format(self.conf.type)
)
@staticmethod
def start(_ryudp):
"""Start the poller."""
return
@staticmethod
def stop():
"""Stop the poller."""
return
@staticmethod
def running():
"""Return True if the poller is running."""
return True
@staticmethod
def send_req():
"""Send a stats request to a datapath."""
raise NotImplementedError
@staticmethod
def no_response():
"""Called when a polling cycle passes without receiving a response."""
raise NotImplementedError
def update(self, rcv_time, dp_id, msg):
"""Handle the responses to requests.
Called when a reply to a stats request sent by this object is received
by the controller.
It should acknowledge the receipt by setting self.reply_pending to
false.
Arguments:
rcv_time -- the time the response was received
dp_id -- DP ID
msg -- the stats reply message
"""
# TODO: it may be worth while verifying this is the correct stats
# response before doing this
self.reply_pending = False
def _stat_port_name(self, msg, stat, dp_id):
"""Return port name as string based on port number."""
if stat.port_no == msg.datapath.ofproto.OFPP_CONTROLLER:
return 'CONTROLLER'
elif stat.port_no == msg.datapath.ofproto.OFPP_LOCAL:
return 'LOCAL'
elif stat.port_no in self.dp.ports:
return self.dp.ports[stat.port_no].name
self.logger.debug('%s stats for unknown port %u',
dpid_log(dp_id), stat.port_no)
return stat.port_no
@staticmethod
def _format_port_stats(delim, stat):
formatted_port_stats = []
for stat_name_list, stat_val in (
(('packets', 'out'), stat.tx_packets),
(('packets', 'in'), stat.rx_packets),
(('bytes', 'out'), stat.tx_bytes),
(('bytes', 'in'), stat.rx_bytes),
(('dropped', 'out'), stat.tx_dropped),
(('dropped', 'in'), stat.rx_dropped),
(('errors', 'in'), stat.rx_errors)):
# For openvswitch, unsupported statistics are set to
# all-1-bits (UINT64_MAX), skip reporting them
if stat_val != 2**64-1:
stat_name = delim.join(stat_name_list)
formatted_port_stats.append((stat_name, stat_val))
return formatted_port_stats
class GaugeThreadPoller(GaugePoller):
"""A ryu thread object for sending and receiving OpenFlow stats requests.
The thread runs in a loop sending a request, sleeping then checking a
response was received before sending another request.
The methods send_req, update and no_response should be implemented by
subclasses.
"""
def __init__(self, conf, logname, prom_client):
super(GaugeThreadPoller, self).__init__(conf, logname, prom_client)
self.thread = None
self.interval = self.conf.interval
self.ryudp = None
def start(self, ryudp):
self.ryudp = ryudp
self.stop()
self.thread = hub.spawn(self)
def stop(self):
if self.running():
hub.kill(self.thread)
hub.joinall([self.thread])
self.thread = None
def running(self):
return self.thread is not None
def __call__(self):
"""Send request loop.
Delays the initial request for a random interval to reduce load.
Then sends a request to the datapath, waits the specified interval and
checks that a response has been received in a loop."""
# TODO: this should use a deterministic method instead of random
hub.sleep(random.randint(1, self.conf.interval))
while True:
self.send_req()
self.reply_pending = True
hub.sleep(self.conf.interval)
if self.reply_pending:
self.no_response()
@staticmethod
def send_req():
"""Send a stats request to a datapath."""
raise NotImplementedError
@staticmethod
def no_response():
"""Called when a polling cycle passes without receiving a response."""
raise NotImplementedError
class GaugePortStatsPoller(GaugeThreadPoller):
"""Periodically sends a port stats request to the datapath and parses
and outputs the response.
"""
def send_req(self):
if self.ryudp:
ofp = self.ryudp.ofproto
ofp_parser = self.ryudp.ofproto_parser
req = ofp_parser.OFPPortStatsRequest(self.ryudp, 0, ofp.OFPP_ANY)
self.ryudp.send_msg(req)
def no_response(self):
self.logger.info(
'port stats request timed out for %s', self.dp.name)
class GaugeFlowTablePoller(GaugeThreadPoller):
"""Periodically dumps the current datapath flow table as a yaml object.
Includes a timestamp and a reference ($DATAPATHNAME-flowtables). The
flow table is dumped as an OFFlowStatsReply message (in yaml format) that
matches all flows.
"""
def send_req(self):
if self.ryudp:
ofp = self.ryudp.ofproto
ofp_parser = self.ryudp.ofproto_parser
match = ofp_parser.OFPMatch()
req = ofp_parser.OFPFlowStatsRequest(
self.ryudp, 0, ofp.OFPTT_ALL, ofp.OFPP_ANY, ofp.OFPG_ANY,
0, 0, match)
self.ryudp.send_msg(req)
def no_response(self):
self.logger.info(
'flow dump request timed out for %s', self.dp.name)
class GaugePortStateBaseLogger(GaugePoller):
"""Abstraction for port state poller."""
@staticmethod
def send_req():
"""Send a stats request to a datapath."""
raise NotImplementedError
@staticmethod
def no_response():
"""Called when a polling cycle passes without receiving a response."""
raise NotImplementedError
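# Editor's hedged sketch (not part of faucet): the GaugeThreadPoller docstring
# above describes a subclass contract -- implement send_req() and no_response(),
# then let start()/stop() drive the polling loop. `_DemoConf` and `_DemoPoller`
# are invented names used only for illustration, not faucet APIs.
if __name__ == '__main__':
    class _DemoConf(object):
        type = 'demo'
        interval = 10
        dp = None
    class _DemoPoller(GaugeThreadPoller):
        def send_req(self):
            self.logger.info('would send an OpenFlow stats request here')
        def no_response(self):
            self.logger.info('no reply within %s seconds', self.interval)
    demo = _DemoPoller(_DemoConf(), 'gauge-demo', prom_client=None)
    print(demo.running())  # False until start(ryudp) spawns the request loop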
| byllyfish/faucet | faucet/gauge_pollers.py | Python | apache-2.0 | 7,197 |
"""
tests.pytests.unit.beacons.test_memusage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Memory usage beacon test cases
"""
from collections import namedtuple
import pytest
import salt.beacons.memusage as memusage
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {}
@pytest.fixture
def stub_memory_usage():
return namedtuple(
"vmem",
"total available percent used free active inactive buffers cached shared",
)(
15722012672,
9329594368,
40.7,
5137018880,
4678086656,
6991405056,
2078953472,
1156378624,
4750528512,
898908160,
)
def test_non_list_config():
config = {}
ret = memusage.validate(config)
assert ret == (False, "Configuration for memusage beacon must be a list.")
def test_empty_config():
config = [{}]
ret = memusage.validate(config)
assert ret == (False, "Configuration for memusage beacon requires percent.")
def test_memusage_match(stub_memory_usage):
with patch("psutil.virtual_memory", MagicMock(return_value=stub_memory_usage)):
config = [{"percent": "40%"}, {"interval": 30}]
ret = memusage.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = memusage.beacon(config)
assert ret == [{"memusage": 40.7}]
# Test without the percent
config = [{"percent": 40}, {"interval": 30}]
ret = memusage.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = memusage.beacon(config)
assert ret == [{"memusage": 40.7}]
def test_memusage_nomatch(stub_memory_usage):
with patch("psutil.virtual_memory", MagicMock(return_value=stub_memory_usage)):
config = [{"percent": "70%"}]
ret = memusage.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = memusage.beacon(config)
assert ret != [{"memusage": 40.7}]
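# Editor's hedged note: the list-of-dicts configs exercised above correspond to a
# minion beacon configuration roughly like the following (values invented):
#
#     beacons:
#       memusage:
#         - percent: 63%
#         - interval: 30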
| saltstack/salt | tests/pytests/unit/beacons/test_memusage.py | Python | apache-2.0 | 2,027 |
import numpy as np
def topicJSONmaker(malletPath):
# Configuration
inpath = malletPath # Insert the path to the Mallet file here
num_top_words = 100 # Top N number of words in each topic that will appear in output
mallet_vocab = []
word_topic_counts = []
topics = []
# Calculate the number of topics in the model
n_topics = []
with open(inpath) as data:
for line in data:
try:
l = line.rstrip().split(' ') # Split the line on spaces
id = l[0] # Get the word ID from the first item in the list
word = l[1] # Get the word token from the second item in the list
l[0:2] = [] # Delete the ID and word from the list
topic_count_pairs = [pair.split(':') for pair in l] # New list topic-count pairs
for topic, count in topic_count_pairs:
n_topics.append(int(topic)) # New list with topics
except:
raise IOError(
'Your source data cannot be parsed into a regular number of columns. Please ensure that there are no spaces in your file names or file paths. It may be easiest to open the output_state file in a spreadsheet using a space as the delimiter to ensure that there are a regular number of columns. Please fix any misaligned data and upload the data again.')
n_topics.sort() # Sort the topics
num_topics = max(n_topics) + 1 # The number of topics in the model is the highest in the list
# Re-shape the file data
with open(inpath) as f:
for line in f:
l = line.rstrip().split(' ')
id = l[0]
word = l[1]
l[0:2] = []
topic_count_pairs = [pair.split(':') for pair in l]
mallet_vocab.append(word)
counts = np.zeros(num_topics)
for topic, count in topic_count_pairs:
counts[int(topic)] = int(count)
word_topic_counts.append(counts)
word_topic = np.array(word_topic_counts)
word_topic.shape
word_topic = word_topic / np.sum(word_topic, axis=0)
mallet_vocab = np.array(mallet_vocab)
# Generate a topics dictionary
for t in range(num_topics):
top_words_idx = np.argsort(word_topic[:, t])[::-1]
top_words_idx = top_words_idx[:num_top_words]
top_words = mallet_vocab[top_words_idx]
top_words_shares = word_topic[top_words_idx, t]
# print("Topic{}".format(t))
topics.append({})
for word, share in zip(top_words, top_words_shares):
topics[t].update({word: np.round(share, 3)}) # Create the topics dictionary
# print("{} : {}".format(word, np.round(share,3)))
# For Lexos, build the json string
jsonStr = ""
for i in xrange(len(topics)):
jsonStr += '{"name": "Topic' + str(i) + '.txt", "children": ['
children = ""
for name, size in topics[i].iteritems():
children += ', {"text": "%s", "size": %s}' % (name, size * 1000)
children = children.lstrip(', ')
jsonStr += children
jsonStr += ']}, '
jsonStr = jsonStr[:-2]
# Send the jsonStr variable to the template
JSONObj = []
for i in xrange(len(topics)):
newChildrenlist = []
for name, size in topics[i].iteritems():
newChildrenlist.append({"text": name, "size": size * 1000})
JSONObj.append({"name": "Topic" + str(i) + ".txt", "children": newChildrenlist})
return JSONObj
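# Editor's hedged illustration (values invented): each input line is expected to
# look roughly like
#     42 network 3:17 0:2
# i.e. a word id, the token, then topic:count pairs, and the returned JSONObj is
# a list shaped like
#     [{"name": "Topic0.txt", "children": [{"text": "network", "size": 850.0}, ...]}, ...]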
| TheLady/Lexos | processors/visualize/multicloud_topic.py | Python | mit | 3,532 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import json
from default import db, with_context
from nose.tools import assert_equal
from test_api import TestAPI
from pybossa.model.category import Category
from factories import UserFactory, CategoryFactory
class TestCategoryAPI(TestAPI):
@with_context
def test_query_category(self):
"""Test API query for category endpoint works"""
CategoryFactory.create(name='thinking', short_name='thinking')
# Test for real field
url = "/api/category"
res = self.app.get(url + "?short_name=thinking")
data = json.loads(res.data)
# Should return one result
assert len(data) == 1, data
# Correct result
assert data[0]['short_name'] == 'thinking', data
# Valid field but wrong value
res = self.app.get(url + "?short_name=wrongvalue")
data = json.loads(res.data)
assert len(data) == 0, data
# Multiple fields
res = self.app.get(url + '?short_name=thinking&name=thinking')
data = json.loads(res.data)
# One result
assert len(data) == 1, data
# Correct result
assert data[0]['short_name'] == 'thinking', data
assert data[0]['name'] == 'thinking', data
# Limits
res = self.app.get(url + "?limit=1")
data = json.loads(res.data)
for item in data:
assert item['short_name'] == 'thinking', item
assert len(data) == 1, data
# Errors
res = self.app.get(url + "?something")
err = json.loads(res.data)
err_msg = "AttributeError exception should be raised"
assert res.status_code == 415, err_msg
assert err['action'] == 'GET', err_msg
assert err['status'] == 'failed', err_msg
assert err['exception_cls'] == 'AttributeError', err_msg
@with_context
def test_category_post(self):
"""Test API Category creation and auth"""
admin = UserFactory.create()
user = UserFactory.create()
name = u'Category'
category = dict(
name=name,
short_name='category',
description=u'description')
data = json.dumps(category)
# no api-key
url = '/api/category'
res = self.app.post(url, data=data)
err = json.loads(res.data)
err_msg = 'Should not be allowed to create'
assert res.status_code == 401, err_msg
assert err['action'] == 'POST', err_msg
assert err['exception_cls'] == 'Unauthorized', err_msg
# now a real user but not admin
res = self.app.post(url + '?api_key=' + user.api_key, data=data)
err = json.loads(res.data)
err_msg = 'Should not be allowed to create'
assert res.status_code == 403, err_msg
assert err['action'] == 'POST', err_msg
assert err['exception_cls'] == 'Forbidden', err_msg
# now as an admin
res = self.app.post(url + '?api_key=' + admin.api_key,
data=data)
err = json.loads(res.data)
err_msg = 'Admin should be able to create a Category'
assert res.status_code == 200, err_msg
cat = db.session.query(Category)\
.filter_by(short_name=category['short_name']).first()
assert err['id'] == cat.id, err_msg
assert err['name'] == category['name'], err_msg
assert err['short_name'] == category['short_name'], err_msg
assert err['description'] == category['description'], err_msg
# test re-create should fail
res = self.app.post(url + '?api_key=' + admin.api_key,
data=data)
err = json.loads(res.data)
assert res.status_code == 415, err
assert err['status'] == 'failed', err
assert err['action'] == 'POST', err
assert err['exception_cls'] == "IntegrityError", err
# test create with non-allowed fields should fail
data = dict(name='fail', short_name='fail', wrong=15)
res = self.app.post(url + '?api_key=' + admin.api_key,
data=data)
err = json.loads(res.data)
err_msg = "ValueError exception should be raised"
assert res.status_code == 415, err
assert err['action'] == 'POST', err
assert err['status'] == 'failed', err
assert err['exception_cls'] == "ValueError", err_msg
# Now with a JSON object but not valid
data = json.dumps(data)
res = self.app.post(url + '?api_key=' + user.api_key,
data=data)
err = json.loads(res.data)
err_msg = "TypeError exception should be raised"
assert err['action'] == 'POST', err_msg
assert err['status'] == 'failed', err_msg
assert err['exception_cls'] == "TypeError", err_msg
assert res.status_code == 415, err_msg
# test update
data = {'name': 'My New Title'}
datajson = json.dumps(data)
## anonymous
res = self.app.put(url + '/%s' % cat.id,
data=data)
error_msg = 'Anonymous should not be allowed to update'
assert_equal(res.status, '401 UNAUTHORIZED', error_msg)
error = json.loads(res.data)
assert error['status'] == 'failed', error
assert error['action'] == 'PUT', error
assert error['exception_cls'] == 'Unauthorized', error
### real user but not allowed as not admin!
url = '/api/category/%s?api_key=%s' % (cat.id, user.api_key)
res = self.app.put(url, data=datajson)
error_msg = 'Should not be able to update apps of others'
assert_equal(res.status, '403 FORBIDDEN', error_msg)
error = json.loads(res.data)
assert error['status'] == 'failed', error
assert error['action'] == 'PUT', error
assert error['exception_cls'] == 'Forbidden', error
# Now as an admin
res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),
data=datajson)
assert_equal(res.status, '200 OK', res.data)
out2 = db.session.query(Category).get(cat.id)
assert_equal(out2.name, data['name'])
out = json.loads(res.data)
assert out.get('status') is None, error
assert out.get('id') == cat.id, error
# With fake data
data['algo'] = 13
datajson = json.dumps(data)
res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),
data=datajson)
err = json.loads(res.data)
assert res.status_code == 415, err
assert err['status'] == 'failed', err
assert err['action'] == 'PUT', err
assert err['exception_cls'] == 'TypeError', err
# With not JSON data
datajson = data
res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),
data=datajson)
err = json.loads(res.data)
assert res.status_code == 415, err
assert err['status'] == 'failed', err
assert err['action'] == 'PUT', err
assert err['exception_cls'] == 'ValueError', err
# With wrong args in the URL
data = dict(
name='Category3',
short_name='category3',
description=u'description3')
datajson = json.dumps(data)
res = self.app.put('/api/category/%s?api_key=%s&search=select1' % (cat.id, admin.api_key),
data=datajson)
err = json.loads(res.data)
assert res.status_code == 415, err
assert err['status'] == 'failed', err
assert err['action'] == 'PUT', err
assert err['exception_cls'] == 'AttributeError', err
# test delete
## anonymous
res = self.app.delete(url + '/%s' % cat.id, data=data)
error_msg = 'Anonymous should not be allowed to delete'
assert_equal(res.status, '401 UNAUTHORIZED', error_msg)
error = json.loads(res.data)
assert error['status'] == 'failed', error
assert error['action'] == 'DELETE', error
assert error['target'] == 'category', error
### real user but not admin
url = '/api/category/%s?api_key=%s' % (cat.id, user.api_key)
res = self.app.delete(url, data=datajson)
error_msg = 'Should not be able to delete apps of others'
assert_equal(res.status, '403 FORBIDDEN', error_msg)
error = json.loads(res.data)
assert error['status'] == 'failed', error
assert error['action'] == 'DELETE', error
assert error['target'] == 'category', error
# As admin
url = '/api/category/%s?api_key=%s' % (cat.id, admin.api_key)
res = self.app.delete(url, data=datajson)
assert_equal(res.status, '204 NO CONTENT', res.data)
# delete a category that does not exist
url = '/api/category/5000?api_key=%s' % admin.api_key
res = self.app.delete(url, data=datajson)
error = json.loads(res.data)
assert res.status_code == 404, error
assert error['status'] == 'failed', error
assert error['action'] == 'DELETE', error
assert error['target'] == 'category', error
assert error['exception_cls'] == 'NotFound', error
        # delete a category without specifying an id
url = '/api/category/?api_key=%s' % admin.api_key
res = self.app.delete(url, data=datajson)
assert res.status_code == 404, error
| stitchfix/pybossa | test/test_api/test_category_api.py | Python | agpl-3.0 | 10,259 |
import boto.ec2
from datetime import date, timedelta, datetime
from dateutil.parser import parse
import csv
days_counter=45
region="ap-southeast-1"
class Ami:
def __init__(self,ec2_ami):
self.id = ec2_ami.id
self.strpdate = parse(ec2_ami.creationDate).date()
## Computing Older date
old = datetime.today() - timedelta(days_counter)
old_date=old.date()
## Fetching All AMI's
ec2conn = boto.ec2.connect_to_region(region)
amis=ec2conn.get_all_images(owners='self')
print "Today's date : " + str(datetime.today())
print "Finding AMI's older than : " + str(old_date)
older_list = [ami for ami in amis if parse(ami.creationDate).date() < old_date]
for item in older_list:
print item.id + "," + item.name + "," + item.creationDate
print "Total : " + str(len(older_list))
| hiteshBhatia/aws-boto-scripts | amiOlderThanXDays.py | Python | apache-2.0 | 839 |
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''High-level command function for lfconvert, plus the cmdtable.'''
import os, errno
import shutil
from mercurial import util, match as match_, hg, node, context, error, \
cmdutil, scmutil, commands
from mercurial.i18n import _
from mercurial.lock import release
import lfutil
import basestore
# -- Commands ----------------------------------------------------------
def lfconvert(ui, src, dest, *pats, **opts):
'''convert a normal repository to a largefiles repository
Convert repository SOURCE to a new repository DEST, identical to
SOURCE except that certain files will be converted as largefiles:
specifically, any file that matches any PATTERN *or* whose size is
above the minimum size threshold is converted as a largefile. The
size used to determine whether or not to track a file as a
largefile is the size of the first version of the file. The
minimum size can be specified either with --size or in
configuration as ``largefiles.size``.
After running this command you will need to make sure that
largefiles is enabled anywhere you intend to push the new
repository.
Use --to-normal to convert largefiles back to normal files; after
this, the DEST repository can be used without largefiles at all.'''
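    # Editor's hedged examples (paths and the 10 MB threshold are invented):
    #     hg lfconvert --size 10 /path/to/repo /path/to/largefiles-repo
    #     hg lfconvert --to-normal /path/to/largefiles-repo /path/to/normal-repo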
if opts['to_normal']:
tolfile = False
else:
tolfile = True
size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
if not hg.islocal(src):
raise util.Abort(_('%s is not a local Mercurial repo') % src)
if not hg.islocal(dest):
raise util.Abort(_('%s is not a local Mercurial repo') % dest)
rsrc = hg.repository(ui, src)
ui.status(_('initializing destination %s\n') % dest)
rdst = hg.repository(ui, dest, create=True)
success = False
dstwlock = dstlock = None
try:
# Lock destination to prevent modification while it is converted to.
# Don't need to lock src because we are just reading from its history
# which can't change.
dstwlock = rdst.wlock()
dstlock = rdst.lock()
# Get a list of all changesets in the source. The easy way to do this
# is to simply walk the changelog, using changelog.nodesbetween().
# Take a look at mercurial/revlog.py:639 for more details.
# Use a generator instead of a list to decrease memory usage
ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
rsrc.heads())[0])
revmap = {node.nullid: node.nullid}
if tolfile:
lfiles = set()
normalfiles = set()
if not pats:
pats = ui.configlist(lfutil.longname, 'patterns', default=[])
if pats:
matcher = match_.match(rsrc.root, '', list(pats))
else:
matcher = None
lfiletohash = {}
for ctx in ctxs:
ui.progress(_('converting revisions'), ctx.rev(),
unit=_('revision'), total=rsrc['tip'].rev())
_lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
lfiles, normalfiles, matcher, size, lfiletohash)
ui.progress(_('converting revisions'), None)
if os.path.exists(rdst.wjoin(lfutil.shortname)):
shutil.rmtree(rdst.wjoin(lfutil.shortname))
for f in lfiletohash.keys():
if os.path.isfile(rdst.wjoin(f)):
os.unlink(rdst.wjoin(f))
try:
os.removedirs(os.path.dirname(rdst.wjoin(f)))
except OSError:
pass
# If there were any files converted to largefiles, add largefiles
# to the destination repository's requirements.
if lfiles:
rdst.requirements.add('largefiles')
rdst._writerequirements()
else:
for ctx in ctxs:
ui.progress(_('converting revisions'), ctx.rev(),
unit=_('revision'), total=rsrc['tip'].rev())
_addchangeset(ui, rsrc, rdst, ctx, revmap)
ui.progress(_('converting revisions'), None)
success = True
finally:
rdst.dirstate.clear()
release(dstlock, dstwlock)
if not success:
# we failed, remove the new directory
shutil.rmtree(rdst.root)
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
# Convert src parents to dst parents
parents = _convertparents(ctx, revmap)
# Generate list of changed files
files = _getchangedfiles(ctx, parents)
def getfilectx(repo, memctx, f):
if lfutil.standin(f) in files:
# if the file isn't in the manifest then it was removed
# or renamed, raise IOError to indicate this
try:
fctx = ctx.filectx(lfutil.standin(f))
except error.LookupError:
raise IOError
renamed = fctx.renamed()
if renamed:
renamed = lfutil.splitstandin(renamed[0])
hash = fctx.data().strip()
path = lfutil.findfile(rsrc, hash)
# If one file is missing, likely all files from this rev are
if path is None:
cachelfiles(ui, rsrc, ctx.node())
path = lfutil.findfile(rsrc, hash)
if path is None:
raise util.Abort(
_("missing largefile \'%s\' from revision %s")
% (f, node.hex(ctx.node())))
data = ''
fd = None
try:
fd = open(path, 'rb')
data = fd.read()
finally:
if fd:
fd.close()
return context.memfilectx(f, data, 'l' in fctx.flags(),
'x' in fctx.flags(), renamed)
else:
return _getnormalcontext(repo.ui, ctx, f, revmap)
dstfiles = []
for file in files:
if lfutil.isstandin(file):
dstfiles.append(lfutil.splitstandin(file))
else:
dstfiles.append(file)
# Commit
_commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
matcher, size, lfiletohash):
# Convert src parents to dst parents
parents = _convertparents(ctx, revmap)
# Generate list of changed files
files = _getchangedfiles(ctx, parents)
dstfiles = []
for f in files:
if f not in lfiles and f not in normalfiles:
islfile = _islfile(f, ctx, matcher, size)
# If this file was renamed or copied then copy
# the largefile-ness of its predecessor
if f in ctx.manifest():
fctx = ctx.filectx(f)
renamed = fctx.renamed()
renamedlfile = renamed and renamed[0] in lfiles
islfile |= renamedlfile
if 'l' in fctx.flags():
if renamedlfile:
raise util.Abort(
_('renamed/copied largefile %s becomes symlink')
% f)
islfile = False
if islfile:
lfiles.add(f)
else:
normalfiles.add(f)
if f in lfiles:
dstfiles.append(lfutil.standin(f))
# largefile in manifest if it has not been removed/renamed
if f in ctx.manifest():
fctx = ctx.filectx(f)
if 'l' in fctx.flags():
renamed = fctx.renamed()
if renamed and renamed[0] in lfiles:
raise util.Abort(_('largefile %s becomes symlink') % f)
# largefile was modified, update standins
m = util.sha1('')
m.update(ctx[f].data())
hash = m.hexdigest()
if f not in lfiletohash or lfiletohash[f] != hash:
rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
executable = 'x' in ctx[f].flags()
lfutil.writestandin(rdst, lfutil.standin(f), hash,
executable)
lfiletohash[f] = hash
else:
# normal file
dstfiles.append(f)
def getfilectx(repo, memctx, f):
if lfutil.isstandin(f):
# if the file isn't in the manifest then it was removed
# or renamed, raise IOError to indicate this
srcfname = lfutil.splitstandin(f)
try:
fctx = ctx.filectx(srcfname)
except error.LookupError:
raise IOError
renamed = fctx.renamed()
if renamed:
# standin is always a largefile because largefile-ness
# doesn't change after rename or copy
renamed = lfutil.standin(renamed[0])
return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
fctx.flags(), 'x' in fctx.flags(), renamed)
else:
return _getnormalcontext(repo.ui, ctx, f, revmap)
# Commit
_commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
getfilectx, ctx.user(), ctx.date(), ctx.extra())
ret = rdst.commitctx(mctx)
rdst.setparents(ret)
revmap[ctx.node()] = rdst.changelog.tip()
# Generate list of changed files
def _getchangedfiles(ctx, parents):
files = set(ctx.files())
if node.nullid not in parents:
mc = ctx.manifest()
mp1 = ctx.parents()[0].manifest()
mp2 = ctx.parents()[1].manifest()
files |= (set(mp1) | set(mp2)) - set(mc)
for f in mc:
if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
files.add(f)
return files
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
parents = []
for p in ctx.parents():
parents.append(revmap[p.node()])
while len(parents) < 2:
parents.append(node.nullid)
return parents
# Get memfilectx for a normal file
def _getnormalcontext(ui, ctx, f, revmap):
try:
fctx = ctx.filectx(f)
except error.LookupError:
raise IOError
renamed = fctx.renamed()
if renamed:
renamed = renamed[0]
data = fctx.data()
if f == '.hgtags':
data = _converttags (ui, revmap, data)
return context.memfilectx(f, data, 'l' in fctx.flags(),
'x' in fctx.flags(), renamed)
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
newdata = []
for line in data.splitlines():
try:
id, name = line.split(' ', 1)
except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                    % line)
continue
try:
newid = node.bin(id)
except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                    % id)
continue
try:
newdata.append('%s %s\n' % (node.hex(revmap[newid]),
name))
except KeyError:
ui.warn(_('no mapping for id %s\n') % id)
continue
return ''.join(newdata)
def _islfile(file, ctx, matcher, size):
'''Return true if file should be considered a largefile, i.e.
matcher matches it or it is larger than size.'''
# never store special .hg* files as largefiles
if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
return False
if matcher and matcher(file):
return True
try:
return ctx.filectx(file).size() >= size * 1024 * 1024
except error.LookupError:
return False
def uploadlfiles(ui, rsrc, rdst, files):
'''upload largefiles to the central store'''
if not files:
return
store = basestore._openstore(rsrc, rdst, put=True)
at = 0
ui.debug("sending statlfile command for %d largefiles\n" % len(files))
retval = store.exists(files)
files = filter(lambda h: not retval[h], files)
ui.debug("%d largefiles need to be uploaded\n" % len(files))
for hash in files:
ui.progress(_('uploading largefiles'), at, unit='largefile',
total=len(files))
source = lfutil.findfile(rsrc, hash)
if not source:
raise util.Abort(_('largefile %s missing from store'
' (needs to be uploaded)') % hash)
# XXX check for errors here
store.put(source, hash)
at += 1
ui.progress(_('uploading largefiles'), None)
def verifylfiles(ui, repo, all=False, contents=False):
'''Verify that every largefile revision in the current changeset
exists in the central store. With --contents, also verify that
the contents of each local largefile file revision are correct (SHA-1 hash
matches the revision ID). With --all, check every changeset in
this repository.'''
if all:
# Pass a list to the function rather than an iterator because we know a
# list will work.
revs = range(len(repo))
else:
revs = ['.']
store = basestore._openstore(repo)
return store.verify(revs, contents=contents)
def debugdirstate(ui, repo):
'''Show basic information for the largefiles dirstate'''
lfdirstate = lfutil.openlfdirstate(ui, repo)
for file_, ent in sorted(lfdirstate._map.iteritems()):
mode = '%3o' % (ent[1] & 0777 & ~util.umask)
ui.write("%c %s %10d %s\n" % (ent[0], mode, ent[2], file_))
def cachelfiles(ui, repo, node, filelist=None):
'''cachelfiles ensures that all largefiles needed by the specified revision
are present in the repository's largefile cache.
returns a tuple (cached, missing). cached is the list of files downloaded
by this operation; missing is the list of files that were needed but could
not be found.'''
lfiles = lfutil.listlfiles(repo, node)
if filelist:
lfiles = set(lfiles) & set(filelist)
toget = []
for lfile in lfiles:
try:
expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
except IOError, err:
if err.errno == errno.ENOENT:
continue # node must be None and standin wasn't found in wctx
raise
if not lfutil.findfile(repo, expectedhash):
toget.append((lfile, expectedhash))
if toget:
store = basestore._openstore(repo)
ret = store.get(toget)
return ret
return ([], [])
def downloadlfiles(ui, repo, rev=None):
matchfn = scmutil.match(repo[None],
[repo.wjoin(lfutil.shortname)], {})
def prepare(ctx, fns):
pass
totalsuccess = 0
totalmissing = 0
if rev != []: # walkchangerevs on empty list would return all revs
for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
prepare):
success, missing = cachelfiles(ui, repo, ctx.node())
totalsuccess += len(success)
totalmissing += len(missing)
ui.status(_("%d additional largefiles cached\n") % totalsuccess)
if totalmissing > 0:
ui.status(_("%d largefiles failed to download\n") % totalmissing)
return totalsuccess, totalmissing
def updatelfiles(ui, repo, filelist=None, printmessage=True):
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
if filelist is not None:
lfiles = [f for f in lfiles if f in filelist]
printed = False
if printmessage and lfiles:
ui.status(_('getting changed largefiles\n'))
printed = True
cachelfiles(ui, repo, None, lfiles)
updated, removed = 0, 0
for f in lfiles:
i = _updatelfile(repo, lfdirstate, f)
if i:
if i > 0:
updated += i
else:
removed -= i
if printmessage and (removed or updated) and not printed:
ui.status(_('getting changed largefiles\n'))
printed = True
lfdirstate.write()
if printed and printmessage:
ui.status(_('%d largefiles updated, %d removed\n') % (updated,
removed))
finally:
wlock.release()
def _updatelfile(repo, lfdirstate, lfile):
'''updates a single largefile and copies the state of its standin from
the repository's dirstate to its state in the lfdirstate.
returns 1 if the file was modified, -1 if the file was removed, 0 if the
file was unchanged, and None if the needed largefile was missing from the
cache.'''
ret = 0
abslfile = repo.wjoin(lfile)
absstandin = repo.wjoin(lfutil.standin(lfile))
if os.path.exists(absstandin):
if os.path.exists(absstandin + '.orig') and os.path.exists(abslfile):
shutil.copyfile(abslfile, abslfile + '.orig')
expecthash = lfutil.readstandin(repo, lfile)
if (expecthash != '' and
(not os.path.exists(abslfile) or
expecthash != lfutil.hashfile(abslfile))):
if not lfutil.copyfromcache(repo, expecthash, lfile):
# use normallookup() to allocate entry in largefiles dirstate,
# because lack of it misleads lfilesrepo.status() into
# recognition that such cache missing files are REMOVED.
if lfile not in repo[None]: # not switched to normal file
util.unlinkpath(abslfile, ignoremissing=True)
lfdirstate.normallookup(lfile)
return None # don't try to set the mode
else:
# Synchronize largefile dirstate to the last modified time of
# the file
lfdirstate.normal(lfile)
ret = 1
mode = os.stat(absstandin).st_mode
if mode != os.stat(abslfile).st_mode:
os.chmod(abslfile, mode)
ret = 1
else:
# Remove lfiles for which the standin is deleted, unless the
# lfile is added to the repository again. This happens when a
# largefile is converted back to a normal file: the standin
# disappears, but a new (normal) file appears as the lfile.
if (os.path.exists(abslfile) and
repo.dirstate.normalize(lfile) not in repo[None]):
util.unlinkpath(abslfile)
ret = -1
state = repo.dirstate[lfutil.standin(lfile)]
if state == 'n':
# When rebasing, we need to synchronize the standin and the largefile,
# because otherwise the largefile will get reverted. But for commit's
# sake, we have to mark the file as unclean.
if getattr(repo, "_isrebasing", False):
lfdirstate.normallookup(lfile)
else:
lfdirstate.normal(lfile)
elif state == 'r':
lfdirstate.remove(lfile)
elif state == 'a':
lfdirstate.add(lfile)
elif state == '?':
lfdirstate.drop(lfile)
return ret
def lfpull(ui, repo, source="default", **opts):
"""pull largefiles for the specified revisions from the specified source
Pull largefiles that are referenced from local changesets but missing
locally, pulling from a remote repository to the local cache.
If SOURCE is omitted, the 'default' path will be used.
See :hg:`help urls` for more information.
.. container:: verbose
Some examples:
- pull largefiles for all branch heads::
hg lfpull -r "head() and not closed()"
- pull largefiles on the default branch::
hg lfpull -r "branch(default)"
"""
repo.lfpullsource = source
revs = opts.get('rev', [])
if not revs:
raise util.Abort(_('no revisions specified'))
revs = scmutil.revrange(repo, revs)
numcached = 0
for rev in revs:
ui.note(_('pulling largefiles for revision %s\n') % rev)
(cached, missing) = cachelfiles(ui, repo, rev)
numcached += len(cached)
ui.status(_("%d largefiles cached\n") % numcached)
# -- hg commands declarations ------------------------------------------------
cmdtable = {
'lfconvert': (lfconvert,
[('s', 'size', '',
_('minimum size (MB) for files to be converted '
'as largefiles'),
'SIZE'),
('', 'to-normal', False,
_('convert from a largefiles repo to a normal repo')),
],
_('hg lfconvert SOURCE DEST [FILE ...]')),
'lfpull': (lfpull,
[('r', 'rev', [], _('pull largefiles for these revisions'))
] + commands.remoteopts,
_('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]')
),
}
commands.inferrepo += " lfconvert"
| iaddict/mercurial.rb | vendor/mercurial/hgext/largefiles/lfcommands.py | Python | mit | 21,474 |
''' {Underdog Militia, a text based adventure.}
Copyright (C) {2015} {Michael G Zigler Jr}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Mr Zigler's contact bitmessage BM-NB5C7KBGPPxjEFSuHTqJwsVzFGELJfVZ
'''
import random
import time
from landplots import *
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
PURPLE = (255, 0, 255)
BROWN = (160, 82, 45)
zards = 1
class Livingbeing():
def __init__(self, name, health, armor, mindset, emotion, skills, inventory):
self.name = name
self.health = health
self.armor = armor
self.mindset = mindset
self.emotion = emotion
self.skills = skills
self.inventory = inventory
self.hunger = random.randrange(1, 5)
self.thirst = 0
def takeDamage(self, dmgAmount):
if dmgAmount > self.armor:
self.health = self.health - (dmgAmount - self.armor)
else:
pass
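    # Editor's note, grounded in the method above: damage is reduced by armor and
    # anything at or below the armor value is absorbed; e.g. with armor 5, a
    # takeDamage(12) call removes 7 health while takeDamage(3) removes none.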
''' mindset (1 Liberty minded - 100 authoritarian psycopath), emotion (1 happy - 100 blind rage)
patriots, hackers, writers (authors), artists, snitches, mercs, breaucrats
CFR, Executives, Landlords
'''
''' skills (leatherworking, blacksmith, woodworking, carpentry, chemistry, engineering, machining, disernment, discretion)
'''
landplot1 = Landplots('none', 5, 7, 4, 37, {})
#create a player
playerx = Livingbeing('Platinum Falcon', 72, 5, 2, 22, {}, {})
dogs = 1
while dogs < 9:
dmbgz = int(input('damage: '))
playerx.takeDamage(dmbgz)
print(playerx.health)
dogs += 1
else:
quit()
time.sleep(9)
quit()
| MrZigler/UnderdogMilitia | main.py | Python | gpl-3.0 | 2,475 |
#!/usr/bin/env python3
#######################################################################
# This file is part of JMdictDB.
# Copyright (c) 2008-2012 Stuart McGraw
#
# JMdictDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# JMdictDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JMdictDB; if not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#######################################################################
# This program will read an Examples file containing paired
# English and Japanese sentences and available for download
# at
# ftp://ftp.monash.edu.au/pub/nihongo/examples.utf.gz
# (This file is derived from data from the Tatoeba
# project: http://tatoeba.org)
# and create an output file containing postgresql data COPY
# commands. This file can be loaded into a Postgresql JMdictDB
# database after subsequent processing with jmload.pl.
#
# The Example file "A" lines create database entries with
# a entr.src=3 which identifies them as from the Examples
# files. These entries will have an single kanji, single
# sense, and single gloss. They may have a misc tag and
# sense note if there was a parsable "[...]" comment on the
# line.
#
# "B" line items create database xref table rows. However,
# like jmparse, we do not create the xrefs directly from
# within this program, but instead write pseudo-xref records
# that contain the target reading and kanji text, to the
# xrslv table, and generate the resolved xrefs later by
# running insert queries based on joins of xrslv and the
# jmdict entries. All the pseudo-xref genereated by this
# program will have a typ=6.
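# Editor's hedged illustration of the two line types (sentences invented, tab
# shown as <TAB>; they are not taken from the real Examples file):
#
#   A: 彼は学生です。<TAB>He is a student.#ID=12345_67890
#   B: 彼(かれ)[01]{彼は} 学生 です
#
# parsea() splits the A line into its Japanese and English halves, and parseb()
# resolves each B-line item into (kanji, reading, sense list, surface form, prio).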
import sys, os, io, inspect, pdb
_ = os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])
_ = os.path.join (os.path.dirname(_), 'python', 'lib')
if _ not in sys.path: sys.path.insert(0, _)
import re, datetime
import jdb, pgi
from pylib import diagnum
Seq = None
Lnnum = None
Opts = None
class ParseError (Exception): pass
def main (args, opts):
global msg
global Opts; Opts = opts
global KW; jdb.KW = KW = jdb.Kwds (jdb.std_csv_dir())
# Create a globally accessible function, msg() that has
# has 'logfile' and 'opts.verbose' already bound and
# which will be called elsewhere when there is a need to
# write a message to the logfile.
logfile = sys.stderr
if opts.logfile:
logfile = open (opts.logfile, "w", encoding=opts.encoding)
def msg (message): _msg (logfile, opts.verbose, message)
fin = ABPairReader (args[0], encoding='utf-8')
# FIXME: following gives localtime, change to utc or lt+tz.
mtime = datetime.date.fromtimestamp(os.stat(args[0])[8])
corpid, corprec \
= pgi.parse_corpus_opt (opts.corpus, "examples", mtime, KW.SRCT['examples'].id)
if not opts.noaction:
tmpfiles = pgi.initialize (opts.tempdir)
if corprec: pgi.wrcorp (corprec, tmpfiles)
for eid, entr in enumerate (parse_ex (fin, opts.begin)):
if not opts.noaction:
entr.src = corpid
jdb.setkeys (entr, eid+1)
pgi.wrentr (entr, tmpfiles)
if not (eid % 2000):
sys.stdout.write ('.'); sys.stdout.flush()
if opts.count and eid+1 >= opts.count: break
sys.stdout.write ('\n')
if not opts.noaction: pgi.finalize (tmpfiles, opts.output, not opts.keep)
def parse_ex (fin, begin):
# This is a generator function that will process one (A and B) pair
# of lines from open file object 'fin' each time it is called.
#
# fin -- An open Examples file.
# begin -- Line number at which to begin processing. Lines
# before that are skipped.
global Lnnum, Seq
seq_cache = set()
for aln, bln in fin:
if fin.lineno < begin: continue
Lnnum = fin.lineno
mo = re.search (r'(\s*#\s*ID\s*=\s*(\d+)_(\d+)\s*)$', aln)
if mo:
aln = aln[:mo.start(1)]
# The ID number is of the form "nnnn_mmmm" where "nnnn" is
# the Tatoeba English sentence id number, and "mmmm" is the
# Japanese id number. Generate a seq number by mapping each
# pair to a "square number". These are numbers generated
# by assigning sequential numbers on a grid (x>=0, y>=0)
# starting at the origin proceeding down the diagonal,
# assigning number to each cell on the column and row at
# the diagonal cell.
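            # Illustration (hedged -- the exact enumeration is whatever
            # diagnum.xy2sq1 implements): shell m = max(x, y) holds the
            # 2*m + 1 cells (m, 0..m) and (0..m-1, m), so shells of size
            # 1, 3, 5, ... tile the quadrant and every (id_en, id_jp)
            # pair maps to a distinct sequence number.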
id_en, id_jp = int(mo.group(2)), int(mo.group(3))
Seq = diagnum.xy2sq1 (id_en, id_jp)
else:
msg ("No ID number found"); continue
try:
jtxt, etxt = parsea (aln)
idxlist = parseb (bln, jtxt)
except ParseError as e:
msg (e.args[0]); continue
if not idxlist: continue
# Turns out some of the entries in the examples file are duplicates
# (including the ID#) so we check the seq#
if Seq in seq_cache:
msg ("Duplicate id#: %s_%s" % (id_en, id_jp))
continue
seq_cache.add (Seq)
entr = mkentr (jtxt, etxt)
entr.seq = Seq
entr._sens[0]._xrslv = mkxrslv (idxlist)
yield entr
def parsea (aln):
# When we're called, 'aln' has had the "A: " stripped from
# its start, and the ID field stripped from it's end.
mo = re.search (r'^\s*(.+)\t(.+?)\s*$', aln)
if not mo: raise ParseError ('"A" line parse error')
jp, en = mo.group (1,2)
kws = []
return jp, en
def parseb (bln, jtxt):
parts = bln.split()
res = []
for n,x in enumerate (parts):
try: res.append (parsebitem (x, n, jtxt))
except ParseError as e: msg (e.args[0])
return res
def parsebitem (s, n, jtxt):
mo = re.search (r'^([^([{~]+)(\((\S+)\))?(\[\d+\])*(\{(\S+)\})?(~)?\s*$', s)
if not mo:
raise ParseError ("\"B\" line parse error in item %d: '%s'" % (n, s))
ktxt,rtxt,sens,atxt,prio = mo.group (1,3,4,6,7)
if rtxt and not jdb.jstr_reb (rtxt):
raise ParseError ("Expected kana in item %d: '%s'" % (n, rtxt))
if kana_only (ktxt):
if rtxt: raise ParseError ("Double kana in item %d: '%s', '%s'" % (n, ktxt, rtxt))
rtxt = ktxt; ktxt = None
if sens:
sens = sens.replace(']', '')
sens = [x for x in sens.split ('[') if len(x)>0]
if atxt and jtxt.find (atxt) < 0:
raise ParseError ("{%s} not in A line in item %d" % (atxt, n))
return ktxt, rtxt, sens, atxt, not not prio
def hw (ktxt, rtxt):
if ktxt and rtxt: return "%s(%s)" % (ktxt,rtxt)
return ktxt or rtxt
def mkentr (jtxt, etxt):
global Lnnum
# Create an entry object to represent the "A" line text of the
# example sentence.
e = jdb.Entr (stat=KW.STAT_A, unap=False)
e.srcnote = str (Lnnum)
if jdb.jstr_reb (jtxt): e._rdng = [jdb.Rdng (txt=jtxt)]
else: e._kanj = [jdb.Kanj (txt=jtxt)]
e._sens = [jdb.Sens (_gloss=[jdb.Gloss (txt=etxt, ginf=KW.GINF_equ,
lang=KW.LANG_eng)])]
return e
def mkxrslv (idxlist):
# Convert the $@indexlist that was created by bparse() into a
# list of database xrslv table records. The fk fields "entr"
# and "sens" are not set in the xrslv records; they are set
# by setids() just prior to writing to the database.
res = []
for ktxt, rtxt, senslst, note, prio in idxlist:
if senslst:
# A list of explicit sens were give in the B line,
# create an xrslv record for each.
res.extend ([jdb.Obj (ktxt=ktxt, rtxt=rtxt, tsens=s,
typ=KW.XREF_uses, notes=note, prio=prio)
for s in senslst])
else:
# This is not list of senses so this cross-ref will
# apply to all the target's senses. Don't set a "sens"
# field in the xrslv record which will result in a NULL
# in the database record.
res.append (jdb.Obj (ktxt=ktxt, rtxt=rtxt,
typ=KW.XREF_uses, notes=note, prio=prio))
for n,r in enumerate (res): r.ord = n + 1
return res
def kana_only (txt):
v = jdb.jstr_reb (txt)
return (v & jdb.KANA) and not (v & jdb.KANJI)
def _msg (logfile, verbose, message):
# This function should not be called directly. It is called
# by the global function, msg(), which is a closure with 'logfile'
# and 'verbose' already bound, created in main() and which should
# be called instead of calling _msg() directly.
global Seq, Lnnum
m = "Seq %d (line %s): %s" % (Seq, Lnnum, message)
if verbose and logfile !=sys.stderr:
print (m, file=sys.stderr)
if logfile: print (m, file=logfile)
class ABPairReader:
def __init__ (self, *args, **kwds):
self.__dict__['stream'] = open (*args, **kwds)
self.lineno = 0 # This creates attribute on self.stream object.
def readpair( self ):
aline = self.getline ('A: ')
bline = self.getline ('B: ')
return aline, bline
def getline( self, key ):
didmsg = False
while 1:
line = self.stream.readline(); self.lineno += 1
if not line: return None
if line.startswith (key) \
or (line[1:].startswith(key) and line[0]=='\uFEFF'):
if didmsg:
msg ("Line %d: resyncronised." % self.lineno)
didmsg = False
return line[len(key):].strip()
else:
if not didmsg:
msg ("Line %d: expected '%s' line not found, resyncronising..."
% (self.lineno, key.strip()))
didmsg = True
def __next__( self ):
a, b = self.readpair()
if not a: raise StopIteration
return a, b
def __iter__ (self): return self
# Delegate all other method calls to the stream.
def __getattr__(self, attr):
return getattr(self.stream, attr)
def __setattr__(self, attr, value):
return setattr(self.stream, attr, value)
from optparse import OptionParser
def parse_cmdline ():
u = \
"""\n\t%prog [options] [filename]
%prog will read a file containing Tanaka corpus example sentence pairs
(as described at http://www.edrdg.org/wiki/index.php/Tanaka_Corpus) and
create a data file that can be subsequently loaded into a jmdict Postgresql
database (usually after pre-processing by jmload.pl).
Arguments:
filename -- Name of input examples file. Default is
"examples.txt"."""
p = OptionParser (usage=u, add_help_option=False)
p.add_option ("--help",
action="help", help="Print this help message.")
p.add_option ("-o", "--output", default="examples.pgi",
dest="output", metavar="FILENAME",
help="Name of output postgresql rebasable dump file. "
"By convention this is usually given the suffix \".pgi\".")
p.add_option ("-b", "--begin", default=0,
dest="begin", type="int", metavar="SEQNUM",
help="Line number of first entry to process. If not "
"given or 0, processing will start with the first entry.")
p.add_option ("-c", "--count", default=0,
dest="count", type="int", metavar="NUM",
help="Number of entries to process. If not given or 0, "
"all entries in the file will be processed.")
p.add_option ("-s", "--corpus",
dest="corpus", default=None,
help="""\
CORPUS defines a corpus record (in table kwsrc) to which all
entries in the input file will be assigned. It is set of one
to four comma separated items. Spaces are not permitted within
the string.
The CORPUS items are:
id -- Id number of the corpus record.
kw -- A short string used as an identifier for the corpus.
Must start with a lowercase letter followed by zero or
more lowercase letters, digits, or underscore ("_")
characters. Must not already be used in the database.
dt -- The corpus' date in the form: "yyyy-mm-dd".
seq -- The name of a Postgresql sequence that will be used
to assign sequence numbers of entries of this corpus when
those entries have no explicit sequence number. Note that
this does not affect entries loaded by jmdict which always
assigns explicit seq numbers to entries it generates.
There are five predefined sequences:
jmdict_seq, jmnedict_seq, examples_seq, test_seq, seq.
You can create additional sequences if required.
[N.B. that the corpus table ("kwsrc") also has two other columns,
'descr' and 'notes' but exparse.py provides no means for setting
their values. They can be updated in the database table after
kwsrc is loaded, using standard Postgresql tools like "psql".]
Unless only 'id' is given in the CORPUS string, a corpus record
will be written to the output .pgi file. A record with this 'id'
number or 'kw' must not exist in the database when the output
file is later loaded.
If only 'id' is given in CORPUS, a new corpus record will not
be created; rather, all enties will be assigned the given corpus
id number and it will be assumed that a corpus record with that
id number already exists when the output file is later loaded.
If this option is not given at all, exparse.py will use "3",
"examples", and "examples_seq", and the last-modified date of
the input file (or null if not available) for 'id', 'kw', and
'seq', and 'dt' respectively.""")
p.add_option ("-k", "--keep", default=False,
dest="keep", action="store_true",
help="Do not delete temporary files after program exits.")
p.add_option ("-l", "--logfile",
dest="logfile", metavar="FILENAME",
help="Name of file to write log messages to.")
p.add_option ("-t", "--tempdir", default=".",
dest="tempdir", metavar="DIRPATH",
help="Directory in which to create temporary files.")
p.add_option ("-e", "--encoding", default='utf-8',
type="str", dest="encoding",
help="Encoding for error and logfile messages (typically "
"\"sjis\", \"utf8\", or \"euc-jp\"). This does not "
"affect the output .pgi file or the temp files which "
"are always written with utf-8 encoding.")
p.add_option ("-n", "--noaction", default=False,
dest="noaction", action="store_true",
help="Parse only, no database access used: do not resolve "
"index words from it.")
p.add_option ("-v", "--verbose", default=None,
dest="verbose", action="store_true",
help="Write log messages to stderr. Default is true if "
"--logfile was not given, or false if it was.")
opts, args = p.parse_args ()
if opts.verbose is None: opts.verbose = not bool (opts.logfile)
    if len (args) > 1: print ("%d arguments given, expected at most one" % len (args), file=sys.stderr)
if len (args) < 1: args = ["examples.txt"]
return args, opts
if __name__ == '__main__':
args, opts = parse_cmdline()
main (args, opts)
| tatsuhirosatou/JMdictDB | python/exparse.py | Python | gpl-2.0 | 16,713 |
def foo(a_new, b_new):
print(a_new + b_new * 123)
def f():
a = 1
b = 1
foo(a, b) | IllusionRom-deprecated/android_platform_tools_idea | python/testData/refactoring/extractmethod/Statement.after.py | Python | apache-2.0 | 98 |
import os
import pickle
import signal
import time
from gppylib.mainUtils import *
from gppylib.utils import checkNotNone, appendNewEntriesToHbaFile
from gppylib.db import dbconn
from gppylib import gparray, gplog
from gppylib.gplog import *
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands import base
from gppylib.gparray import GpArray
from gppylib import gphostcache
from gppylib.testold.testUtils import *
from gppylib.operations import startSegments, Operation
from gppylib.gp_era import read_era
from gppylib.operations.utils import ParallelOperation, RemoteOperation
from gppylib.operations.unix import CleanSharedMem
from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetMoveOperationList, GetFilespaceEntriesDict, GetFilespaceEntries, GetCurrentFilespaceEntries, RollBackFilespaceChanges, UpdateFlatFiles, FileType, MoveFilespaceError
from gppylib.commands.gp import is_pid_postmaster, get_pid_from_remotehost
from gppylib.commands.unix import check_pid_on_remotehost
from gppylib.system import faultProberInterface  # used by MPP_12038_fault_injection() below
logger = get_default_logger()
gDatabaseDirectories = [
# this list and the gDatabaseSubDirectories occur combined inside initdb.c
"global",
"pg_log",
"pg_xlog",
"pg_clog",
"pg_changetracking",
"pg_subtrans",
"pg_twophase",
"pg_multixact",
"pg_distributedxidmap",
"pg_distributedlog",
"pg_utilitymodedtmredo",
"base",
"pg_tblspc",
"pg_stat_tmp"
]
gDatabaseSubDirectories = [
"pg_xlog/archive_status",
"pg_multixact/members",
"pg_multixact/offsets",
"base/1"
]
#
# Database files that may exist in the root directory and need deleting
#
gDatabaseFiles = [
"PG_VERSION",
"pg_hba.conf",
"pg_ident.conf",
"postgresql.conf",
"postmaster.log",
"postmaster.opts",
"postmaster.pid",
"gp_dbid"
]
def MPP_12038_fault_injection():
"""This function will check for the environment variable
    GP_MPP_12038_INJECT_DELAY and if it is set will sleep for 2 * gp_fts_probe_interval.
This is used in this module to check interaction with the FTS prober and
should only be used for testing. Note this delay is long enough for a
small test installation but would likely not be long enough for a large
cluster."""
if os.getenv("GP_MPP_12038_INJECT_DELAY", None):
faultProber = faultProberInterface.getFaultProber()
probe_interval_secs = faultProber.getFaultProberInterval()
logger.info("Sleeping for %d seconds for MPP-12038 test..." % (probe_interval_secs * 2))
time.sleep(probe_interval_secs * 2)
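# Editor's hedged usage note: the delay above is switched on from the environment
# of the calling utility, e.g. (illustrative invocation only):
#     GP_MPP_12038_INJECT_DELAY=1 gprecoverseg ...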
#
# note: it's a little quirky that caller must set up failed/failover so that failover is in gparray but
# failed is not (if both set)...change that, or at least protect against problems
#
class GpMirrorToBuild:
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization):
checkNotNone("liveSegment", liveSegment)
checkNotNone("forceFullSynchronization", forceFullSynchronization)
if failedSegment is None and failoverSegment is None:
raise Exception( "No mirror passed to GpMirrorToBuild")
if not liveSegment.isSegmentQE():
raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment " \
"(it is a master or standby master)" % liveSegment.getSegmentContentId())
if not liveSegment.isSegmentPrimary(True):
raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
if not liveSegment.isSegmentUp():
raise ExceptionNoStackTraceNeeded("Primary segment is not up for content %s" % liveSegment.getSegmentContentId())
if failedSegment is not None:
if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
raise ExceptionNoStackTraceNeeded("The primary is not of the same content as the failed mirror. Primary content %d, " \
"mirror content %d" % (liveSegment.getSegmentContentId(), failedSegment.getSegmentContentId()))
if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. " \
"A segment may not be recovered from itself" % liveSegment.getSegmentDbId())
if failoverSegment is not None:
if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
raise ExceptionNoStackTraceNeeded("The primary is not of the same content as the mirror. Primary content %d, " \
"mirror content %d" % (liveSegment.getSegmentContentId(), failoverSegment.getSegmentContentId()))
if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. " \
"A segment may not be built from itself" % liveSegment.getSegmentDbId())
if failedSegment is not None and failoverSegment is not None:
# for now, we require the code to have produced this -- even when moving the segment to another
# location, we preserve the directory
assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()
self.__failedSegment = failedSegment
self.__liveSegment = liveSegment
self.__failoverSegment = failoverSegment
"""
__forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
existing segment will be cleared and all objects will be transferred by the file resynchronization
process on the server
"""
self.__forceFullSynchronization = forceFullSynchronization
def getFailedSegment(self):
"""
returns the segment that failed. This can be None, for example when adding mirrors
"""
return self.__failedSegment
def getLiveSegment(self):
"""
returns the primary segment from which the recovery will take place. Will always be non-None
"""
return self.__liveSegment
def getFailoverSegment(self):
"""
returns the target segment to which we will copy the data, or None
if we will recover in place. Note that __failoverSegment should refer to the same dbid
as __failedSegment, but should have updated path + file information.
"""
return self.__failoverSegment
def isFullSynchronization(self):
"""
Returns whether or not this segment to recover needs to recover using full resynchronization
"""
if self.__forceFullSynchronization:
return True
# if we are failing over to a new segment location then we must fully resync
if self.__failoverSegment is not None:
return True
return False
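# Illustrative sketch (not part of the original module): the decision made by
# GpMirrorToBuild.isFullSynchronization() above, restated over plain values so
# the two conditions are easy to see (the function name is made up).
def _example_needs_full_resync(force_full_sync, failover_segment):
    # a forced full sync, or failing over to a new segment location, both
    # require the entire data directory to be rebuilt from the primary
    return bool(force_full_sync) or failover_segment is not None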
class GpMirrorListToBuild:
def __init__(self, toBuild, pool, quiet, parallelDegree, additionalWarnings=None):
self.__mirrorsToBuild = toBuild
self.__pool = pool
self.__quiet = quiet
self.__parallelDegree = parallelDegree
self.__additionalWarnings = additionalWarnings or []
def getMirrorsToBuild(self):
"""
Returns a newly allocated list
"""
return [m for m in self.__mirrorsToBuild]
def getAdditionalWarnings(self):
"""
Returns any additional warnings generated during building of list
"""
return self.__additionalWarnings
def __moveFilespaces(self, gparray, target_segment):
"""
Moves filespaces for temporary and transaction files to a particular location.
"""
master_seg = gparray.master
default_filespace_dir = master_seg.getSegmentDataDirectory()
cur_filespace_entries = GetFilespaceEntriesDict(GetFilespaceEntries(gparray,
PG_SYSTEM_FILESPACE).run()).run()
pg_system_filespace_entries = GetFilespaceEntriesDict(GetFilespaceEntries(gparray,
PG_SYSTEM_FILESPACE).run()).run()
cur_filespace_name = gparray.getFileSpaceName(int(cur_filespace_entries[1][0]))
segments = [target_segment] + [seg for seg in gparray.getDbList() if seg.getSegmentContentId() == target_segment.getSegmentContentId() and seg.getSegmentDbId() != target_segment.getSegmentDbId()]
logger.info('Starting file move procedure for %s' % target_segment)
if os.path.exists(os.path.join(default_filespace_dir, GP_TRANSACTION_FILES_FILESPACE)):
#On the expansion segments, the current filespace used by existing nodes will be the
#new filespace to which we want to move the transaction and temp files.
#The filespace directories which have to be moved will be the default pg_system directories.
new_filespace_entries = GetFilespaceEntriesDict(GetCurrentFilespaceEntries(gparray,
FileType.TRANSACTION_FILES).run()).run()
logger.info('getting filespace information')
new_filespace_name = gparray.getFileSpaceName(int(new_filespace_entries[1][0]))
logger.info('getting move operations list for filespace %s' % new_filespace_name)
operation_list = GetMoveOperationList(segments,
FileType.TRANSACTION_FILES,
new_filespace_name,
new_filespace_entries,
cur_filespace_entries,
pg_system_filespace_entries).run()
logger.info('Starting transaction files move')
ParallelOperation(operation_list).run()
logger.debug('Checking transaction files move')
try:
for operation in operation_list:
operation.get_ret()
pass
except Exception, e:
logger.info('Failed to move transaction filespace. Rolling back changes ...')
RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
FileType.TRANSACTION_FILES,
cur_filespace_name,
cur_filespace_entries,
new_filespace_entries,
pg_system_filespace_entries).run()
raise
if os.path.exists(os.path.join(default_filespace_dir, GP_TEMPORARY_FILES_FILESPACE)):
new_filespace_entries = GetFilespaceEntriesDict(GetCurrentFilespaceEntries(gparray,
FileType.TEMPORARY_FILES).run()).run()
new_filespace_name = gparray.getFileSpaceName(int(new_filespace_entries[1][0]))
operation_list = GetMoveOperationList(segments,
FileType.TEMPORARY_FILES,
new_filespace_name,
new_filespace_entries,
cur_filespace_entries,
pg_system_filespace_entries).run()
logger.info('Starting temporary files move')
ParallelOperation(operation_list).run()
logger.debug('Checking temporary files move')
try:
for operation in operation_list:
operation.get_ret()
pass
except Exception, e:
logger.info('Failed to move temporary filespace. Rolling back changes ...')
RollBackFilespaceChanges(gparray.getExpansionDbList(),
FileType.TRANSACTION_FILES,
cur_filespace_name,
cur_filespace_entries,
new_filespace_entries,
pg_system_filespace_entries).run()
raise
def buildMirrors(self, actionName, gpEnv, gpArray):
"""
Build the mirrors.
gpArray must have already been altered to have updated directories -- that is, the failoverSegments
from the mirrorsToBuild must be present in gpArray.
"""
testOutput("building %s segment(s)" % len(self.__mirrorsToBuild))
if len(self.__mirrorsToBuild) == 0:
logger.info("No segments to " + actionName)
return
self.checkForPortAndDirectoryConflicts(gpArray)
logger.info("%s segment(s) to %s" % (len(self.__mirrorsToBuild), actionName))
self.__verifyGpArrayContents(gpArray)
# make sure the target directories are up-to-date
# by cleaning them, if needed, and then copying a basic directory there
# the postgresql.conf in that basic directory will need updating (to change the port)
toStopDirectives = []
toEnsureMarkedDown = []
cleanupDirectives = []
copyDirectives = []
for toRecover in self.__mirrorsToBuild:
if toRecover.getFailedSegment() is not None:
# will stop the failed segment. Note that we do this even if we are recovering to a different location!
toStopDirectives.append(GpStopSegmentDirectoryDirective(toRecover.getFailedSegment()))
if toRecover.getFailedSegment().getSegmentStatus() == gparray.STATUS_UP:
toEnsureMarkedDown.append(toRecover.getFailedSegment())
if toRecover.isFullSynchronization():
isTargetReusedLocation = False
if toRecover.getFailedSegment() is not None and \
toRecover.getFailoverSegment() is None:
#
# We are recovering a failed segment in-place
#
cleanupDirectives.append(GpCleanupSegmentDirectoryDirective(toRecover.getFailedSegment()))
isTargetReusedLocation = True
if toRecover.getFailoverSegment() is not None:
targetSegment = toRecover.getFailoverSegment()
else: targetSegment = toRecover.getFailedSegment()
d = GpCopySegmentDirectoryDirective(toRecover.getLiveSegment(), targetSegment, isTargetReusedLocation)
copyDirectives.append(d)
self.__ensureStopped(gpEnv, toStopDirectives)
self.__ensureSharedMemCleaned(gpEnv, toStopDirectives)
self.__ensureMarkedDown(gpEnv, toEnsureMarkedDown)
self.__cleanUpSegmentDirectories(cleanupDirectives)
self.__copySegmentDirectories(gpEnv, gpArray, copyDirectives)
#Move the filespace for transaction and temporary files
for toRecover in self.__mirrorsToBuild:
target_segment = None
if toRecover.getFailoverSegment() is not None:
target_segment = toRecover.getFailoverSegment()
elif toRecover.isFullSynchronization():
target_segment = toRecover.getFailedSegment()
if target_segment is not None:
self.__moveFilespaces(gpArray, target_segment)
#If we are adding mirrors, we need to update the flat files on the primaries as well
if actionName == "add":
try:
UpdateFlatFiles(gpArray, primaries=True).run()
except MoveFilespaceError, e:
logger.error(str(e))
raise
else:
try:
print 'updating flat files'
UpdateFlatFiles(gpArray, primaries=False).run()
except MoveFilespaceError, e:
logger.error(str(e))
raise
# update and save metadata in memory
for toRecover in self.__mirrorsToBuild:
if toRecover.getFailoverSegment() is None:
# we are recovering the lost segment in place
seg = toRecover.getFailedSegment()
else:
seg = toRecover.getFailedSegment()
# no need to update the failed segment's information -- it is
# being overwritten in the configuration with the failover segment
for gpArraySegment in gpArray.getDbList():
if gpArraySegment is seg:
raise Exception("failed segment should not be in the new configuration if failing over to new segment")
seg = toRecover.getFailoverSegment()
seg.setSegmentStatus(gparray.STATUS_DOWN) # down initially, we haven't started it yet
seg.setSegmentMode(gparray.MODE_RESYNCHRONIZATION)
# figure out what needs to be started or transitioned
mirrorsToStart = []
primariesToConvert = []
convertPrimaryUsingFullResync = []
fullResyncMirrorDbIds = {}
for toRecover in self.__mirrorsToBuild:
seg = toRecover.getFailoverSegment()
if seg is None:
seg = toRecover.getFailedSegment() # we are recovering in place
mirrorsToStart.append(seg)
primarySeg = toRecover.getLiveSegment()
# Marking the mirror down in the configuration requires that the
# primary also be switched to change-tracking mode, if it is not already.
if primarySeg.getSegmentMode() != gparray.MODE_CHANGELOGGING:
primarySeg.setSegmentMode(gparray.MODE_CHANGELOGGING)
primariesToConvert.append(primarySeg)
convertPrimaryUsingFullResync.append(toRecover.isFullSynchronization())
if toRecover.isFullSynchronization() and seg.getSegmentDbId() > 0:
fullResyncMirrorDbIds[seg.getSegmentDbId()] = True
# should use mainUtils.getProgramName but I can't make it work!
programName = os.path.split(sys.argv[0])[-1]
# Disable Ctrl-C, going to save metadata in database and transition segments
signal.signal(signal.SIGINT,signal.SIG_IGN)
try:
logger.info("Updating configuration with new mirrors")
configInterface.getConfigurationProvider().updateSystemConfig(
gpArray,
"%s: segment config for resync" % programName,
dbIdToForceMirrorRemoveAdd = fullResyncMirrorDbIds,
useUtilityMode = False,
allowPrimary = False
)
MPP_12038_fault_injection()
logger.info("Updating mirrors")
self.__updateGpIdFile(gpEnv, gpArray, mirrorsToStart)
logger.info("Starting mirrors")
self.__startAll(gpEnv, gpArray, mirrorsToStart)
logger.info("Updating configuration to mark mirrors up")
for seg in mirrorsToStart:
seg.setSegmentStatus(gparray.STATUS_UP)
for seg in primariesToConvert:
seg.setSegmentMode(gparray.MODE_RESYNCHRONIZATION)
configInterface.getConfigurationProvider().updateSystemConfig(
gpArray,
"%s: segment resync marking mirrors up and primaries resync" % programName,
dbIdToForceMirrorRemoveAdd = {},
useUtilityMode = True,
allowPrimary = False
)
MPP_12038_fault_injection()
#
# note: converting the primaries may take a really long time to complete because of initializing
# resynchronization
#
logger.info("Updating primaries")
self.__convertAllPrimaries(gpEnv, gpArray, primariesToConvert, convertPrimaryUsingFullResync)
logger.info("Done updating primaries")
finally:
# Reenable Ctrl-C
signal.signal(signal.SIGINT,signal.default_int_handler)
def __verifyGpArrayContents(self, gpArray):
"""
Run some simple assertions against gpArray contents
"""
for seg in gpArray.getDbList():
if seg.getSegmentDataDirectory() != seg.getSegmentFilespaces()[gparray.SYSTEM_FILESPACE]:
raise Exception("Mismatch between segment data directory and filespace entry for segment %s" %
seg.getSegmentDbId())
def checkForPortAndDirectoryConflicts(self, gpArray):
"""
Check gpArray for internal consistency -- no duplicate ports or directories on the same host, for example
A detected problem causes an Exception to be raised
"""
for hostName, segmentArr in GpArray.getSegmentsByHostName(gpArray.getDbList()).iteritems():
usedPorts = {}
usedDataDirectories = {}
for segment in segmentArr:
# check for port conflict
replicationPort = segment.getSegmentReplicationPort()
port = segment.getSegmentPort()
dbid = segment.getSegmentDbId()
if port in usedPorts:
raise Exception("On host %s, a port for segment with dbid %s conflicts with a port for segment dbid %s" \
% (hostName, dbid, usedPorts.get(port)))
if segment.isSegmentQE():
if replicationPort is None:
raise Exception("On host %s, the replication port is not set for segment with dbid %s" \
% (hostName, dbid))
if replicationPort in usedPorts:
raise Exception("On host %s, a port for segment with dbid %s conflicts with a port for segment dbid %s" \
% (hostName, dbid, usedPorts.get(replicationPort)))
if port == replicationPort:
raise Exception("On host %s, segment with dbid %s has equal port and replication port" \
% (hostName, dbid))
usedPorts[port] = dbid
usedPorts[replicationPort] = dbid
# check for directory conflict; could improve this by reporting the conflicts more clearly
paths = [path for oid, path in segment.getSegmentFilespaces().items() if oid != gparray.SYSTEM_FILESPACE]
paths.append(segment.getSegmentDataDirectory())
for path in paths:
if path in usedDataDirectories:
raise Exception("On host %s, directory (base or filespace) for segment with dbid %s conflicts with a " \
"directory (base or filespace) for segment dbid %s; directory: %s" % \
(hostName, dbid, usedDataDirectories.get(path), path))
usedDataDirectories[path] = dbid
def __runWaitAndCheckWorkerPoolForErrorsAndClear(self, cmds, actionVerb, suppressErrorCheck=False):
for cmd in cmds:
self.__pool.addCommand(cmd)
self.__pool.wait_and_printdots(len(cmds), self.__quiet)
if not suppressErrorCheck:
self.__pool.check_results()
self.__pool.empty_completed_items()
def __copyFiles(self, srcDir, destDir, fileNames):
for name in fileNames:
cmd = gp.LocalCopy("copy file for segment", srcDir + "/" + name, destDir + "/" + name)
cmd.run(validateAfter=True)
def __createEmptyDirectories( self, dir, newDirectoryNames ):
for name in newDirectoryNames:
subDir = os.path.join(dir, name)
unix.MakeDirectory("create blank directory for segment", subDir).run(validateAfter=True)
unix.Chmod.local('set permissions on blank dir', subDir, '0700')
def __buildTarFileForTransfer(self, gpEnv, masterSegment, sampleSegment, newSegments):
"""
Returns the file for the tarfile that should be transferred and used
for building the blank segment
"""
masterDir = gpEnv.getMasterDataDir()
# note that this tempdir will be left around on the system (this is what other scripts do currently)
tempDir = gp.createTempDirectoryName(gpEnv.getMasterDataDir(), "gpbuildingsegment")
unix.MakeDirectory("create temp directory for segment", tempDir ).run(validateAfter=True)
schemaDir = tempDir + "/schema"
unix.MakeDirectory("create temp schema directory for segment", schemaDir ).run(validateAfter=True)
unix.Chmod.local('set permissions on schema dir', schemaDir, '0700') # set perms so postgres can start
#
# Copy remote files from the sample segment to the master
#
for toCopyFromRemote in ["postgresql.conf", "pg_hba.conf"]:
cmd = gp.RemoteCopy('copying %s from a segment' % toCopyFromRemote,
sampleSegment.getSegmentDataDirectory() + '/' + toCopyFromRemote,
masterSegment.getSegmentHostName(), schemaDir, ctxt=base.REMOTE,
remoteHost=sampleSegment.getSegmentAddress())
cmd.run(validateAfter=True)
appendNewEntriesToHbaFile( schemaDir + "/pg_hba.conf", newSegments)
#
# Use the master's version of other files, and build
#
self.__createEmptyDirectories( schemaDir, gDatabaseDirectories )
self.__createEmptyDirectories( schemaDir, gDatabaseSubDirectories )
self.__copyFiles(masterDir, schemaDir, ["PG_VERSION", "pg_ident.conf"])
#
# Build final tar
#
tarFileName = "gp_emptySegmentSchema.tar"
tarFile = tempDir + "/" + tarFileName
cmd = gp.CreateTar('gpbuildingmirrorsegment tar segment template', schemaDir, tarFile)
cmd.run(validateAfter=True)
return (tempDir, tarFile, tarFileName)
def __copySegmentDirectories(self, gpEnv, gpArray, directives):
"""
directives should be composed of GpCopySegmentDirectoryDirective values
"""
if len(directives) == 0:
return
srcSegments = [d.getSrcSegment() for d in directives]
destSegments = [d.getDestSegment() for d in directives]
isTargetReusedLocation = [d.isTargetReusedLocation() for d in directives]
destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)
logger.info('Building template directory')
(tempDir, blankTarFile, tarFileName) = self.__buildTarFileForTransfer(gpEnv, gpArray.master, srcSegments[0], destSegments)
def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
segmentInfo = newSegmentInfo[hostName]
checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
return gp.ConfigureNewSegment(cmdLabel,
segmentInfo,
tarFile=tarFileName,
newSegments=True,
verbose=gplog.logging_is_verbose(),
batchSize=self.__parallelDegree,
ctxt=gp.REMOTE,
remoteHost=hostName,
validationOnly=validationOnly)
#
# validate directories for target segments
#
logger.info('Validating remote directories')
cmds = []
for hostName in destSegmentByHost.keys():
cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
for cmd in cmds:
self.__pool.addCommand(cmd)
self.__pool.wait_and_printdots(len(cmds), self.__quiet)
validationErrors = []
for item in self.__pool.getCompletedItems():
results = item.get_results()
if not results.wasSuccessful():
if results.rc == 1:
# stdoutFromFailure = results.stdout.replace("\n", " ").strip()
lines = results.stderr.split("\n")
for line in lines:
if len(line.strip()) > 0:
validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
else:
validationErrors.append(str(item))
self.__pool.empty_completed_items()
if validationErrors:
raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))
#
# copy tar from master to target hosts
#
logger.info('Copying template directory file')
cmds = []
for hostName in destSegmentByHost.keys():
cmds.append( gp.RemoteCopy("copy segment tar", blankTarFile, hostName, tarFileName ))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "building and transferring basic segment directory")
#
# unpack and configure new segments
#
logger.info('Configuring new segments')
cmds = []
for hostName in destSegmentByHost.keys():
cmds.append(createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "unpacking basic segment directory")
#
# Clean up copied tar from each remote host
#
logger.info('Cleaning files')
cmds = []
for hostName, segments in destSegmentByHost.iteritems():
cmds.append(unix.RemoveFiles('remove tar file', tarFileName, ctxt=gp.REMOTE, remoteHost=hostName))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning up tar file on segment hosts")
#
# clean up the local temp directory
#
unix.RemoveFiles.local('remove temp directory', tempDir)
def _get_running_postgres_segments(self, segments):
running_segments = []
for seg in segments:
datadir = self.dereference_remote_symlink(seg.getSegmentDataDirectory(), seg.getSegmentHostName())
pid = get_pid_from_remotehost(seg.getSegmentHostName(), datadir)
if pid is not None:
if check_pid_on_remotehost(pid, seg.getSegmentHostName()):
if is_pid_postmaster(datadir, pid, seg.getSegmentHostName()):
running_segments.append(seg)
else:
logger.info("Skipping to stop segment %s on host %s since it is not a postgres process" % (seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
else:
logger.debug("Skipping to stop segment %s on host %s since process with pid %s is not running" % (seg.getSegmentDataDirectory(), seg.getSegmentHostName(), pid))
else:
logger.debug("Skipping to stop segment %s on host %s since pid could not be found" % (seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
return running_segments
def dereference_remote_symlink(self, datadir, host):
cmdStr = """python -c 'import os; print os.path.realpath("%s")'""" % datadir
cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr, ctxt=base.REMOTE, remoteHost=host)
cmd.run()
results = cmd.get_results()
if results.rc != 0:
logger.warning('Unable to determine if %s is symlink. Assuming it is not symlink' % (datadir))
return datadir
return results.stdout.strip()
def __ensureSharedMemCleaned(self, gpEnv, directives):
"""
@param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to cleanup
"""
if len(directives) == 0:
return
logger.info('Ensuring that shared memory is cleaned up for stopped segments')
segments = [d.getSegment() for d in directives]
segmentsByHost = GpArray.getSegmentsByHostName(segments)
operation_list = [RemoteOperation(CleanSharedMem(segments), host=hostName) for hostName, segments in segmentsByHost.items()]
ParallelOperation(operation_list).run()
for operation in operation_list:
try:
operation.get_ret()
except Exception as e:
logger.warning('Unable to clean up shared memory for stopped segments on host (%s)' % operation.host)
def __ensureStopped(self, gpEnv, directives):
"""
@param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to stop
"""
if len(directives) == 0:
return
logger.info("Ensuring %d failed segment(s) are stopped" % (len(directives)))
segments = [d.getSegment() for d in directives]
segments = self._get_running_postgres_segments(segments)
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.iteritems():
cmd=gp.GpSegStopCmd("remote segment stop on host '%s'" % hostName,
gpEnv.getGpHome(), gpEnv.getGpVersion(),
mode='fast', dbs=segments, verbose=logging_is_verbose(),
ctxt=base.REMOTE, remoteHost=hostName)
cmds.append( cmd)
# we suppress checking for the error. This is because gpsegstop will actually error
# in many cases where the stop is actually done (that is, for example, the segment is
# running but slow to shutdown so gpsegstop errors after whacking it with a kill)
#
# Perhaps we should make it check whether the segment is running and only attempt the stop
# if it is running? In that case, we could propagate the error
#
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "stopping segments", suppressErrorCheck=True)
def __ensureMarkedDown(self, gpEnv, toEnsureMarkedDown):
"""Waits for FTS prober to mark segments as down"""
wait_time = 60 * 30 # Wait up to 30 minutes to handle very large, busy
# clusters that may have faults. In most cases the
# actual time to wait will be small and this operation
# is only needed when moving mirrors that are up and
# needed to be stopped, an uncommon operation.
dburl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1')
time_elapsed = 0
seg_up_count = 0
initial_seg_up_count = len(toEnsureMarkedDown)
last_seg_up_count = initial_seg_up_count
if initial_seg_up_count == 0:
# Nothing to wait on
return
logger.info("Waiting for segments to be marked down.")
logger.info("This may take up to %d seconds on large clusters." % wait_time)
# wait for all needed segments to be marked down by the prober. We'll wait
# a max time of double the interval
while wait_time > time_elapsed:
seg_up_count = 0
current_gparray = GpArray.initFromCatalog(dburl, True)
seg_db_map = current_gparray.getSegDbMap()
# go through and get the status of each segment we need to be marked down
for segdb in toEnsureMarkedDown:
if segdb.getSegmentDbId() in seg_db_map and seg_db_map[segdb.getSegmentDbId()].isSegmentUp() == True:
seg_up_count += 1
if seg_up_count == 0:
break
else:
if last_seg_up_count != seg_up_count:
print "\n",
logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count - seg_up_count, initial_seg_up_count))
last_seg_up_count = seg_up_count
for _i in range(1,5):
time.sleep(1)
sys.stdout.write(".")
sys.stdout.flush()
time_elapsed += 5
if seg_up_count == 0:
print "\n",
logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count, initial_seg_up_count))
else:
raise Exception("%d segments were not marked down by FTS" % seg_up_count)
def __cleanUpSegmentDirectories(self, directives):
if len(directives) == 0:
return
logger.info("Cleaning files from %d segment(s)" % (len(directives)))
segments = [d.getSegment() for d in directives]
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.iteritems():
cmds.append( gp.GpCleanSegmentDirectories("clean segment directories on %s" % hostName, \
segments, gp.REMOTE, hostName))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning existing directories")
def __createStartSegmentsOp(self, gpEnv):
return startSegments.StartSegmentsOperation(self.__pool, self.__quiet,
gpEnv.getLocaleData(), gpEnv.getGpVersion(),
gpEnv.getGpHome(), gpEnv.getMasterDataDir()
)
def __updateGpIdFile(self, gpEnv, gpArray, segments):
segmentByHost = GpArray.getSegmentsByHostName(segments)
newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)
cmds = []
for hostName in segmentByHost.keys():
segmentInfo = newSegmentInfo[hostName]
checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
cmd = gp.ConfigureNewSegment("update gpid file",
segmentInfo,
newSegments=False,
verbose=gplog.logging_is_verbose(),
batchSize=self.__parallelDegree,
ctxt=gp.REMOTE,
remoteHost=hostName,
validationOnly=False,
writeGpIdFileOnly=True)
cmds.append(cmd)
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
def __startAll(self, gpEnv, gpArray, segments):
# the newly started segments should belong to the current era
era = read_era(gpEnv.getMasterDataDir(), logger=gplog.get_logger_if_verbose())
segmentStartResult = self.__createStartSegmentsOp(gpEnv).startSegments(gpArray, segments, startSegments.START_AS_PRIMARY_OR_MIRROR, era)
for failure in segmentStartResult.getFailedSegmentObjs():
failedSeg = failure.getSegment()
failureReason = failure.getReason()
logger.warn("Failed to start segment. The fault prober will shortly mark it as down. Segment: %s: REASON: %s" % (failedSeg, failureReason))
pass
def __convertAllPrimaries(self, gpEnv, gpArray, segments, convertUsingFullResync):
segmentStartResult = self.__createStartSegmentsOp(gpEnv).transitionSegments(gpArray, segments, convertUsingFullResync, startSegments.MIRROR_MODE_PRIMARY)
for failure in segmentStartResult.getFailedSegmentObjs():
failedSeg = failure.getSegment()
failureReason = failure.getReason()
logger.warn("Failed to inform primary segment of updated mirroring state. Segment: %s: REASON: %s" % (failedSeg, failureReason))
class GpCleanupSegmentDirectoryDirective:
def __init__(self, segment):
checkNotNone("segment", segment)
self.__segment = segment
def getSegment(self):
return self.__segment
class GpStopSegmentDirectoryDirective:
def __init__(self, segment):
checkNotNone("segment", segment)
self.__segment = segment
def getSegment(self):
return self.__segment
class GpCopySegmentDirectoryDirective:
def __init__(self, source, dest, isTargetReusedLocation ):
"""
@param isTargetReusedLocation if True then the dest location is a cleaned-up location
"""
checkNotNone("source", source)
checkNotNone("dest", dest)
self.__source = source
self.__dest = dest
self.__isTargetReusedLocation = isTargetReusedLocation
def getSrcSegment(self):
return self.__source
def getDestSegment(self):
return self.__dest
def isTargetReusedLocation(self):
return self.__isTargetReusedLocation
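# Illustrative sketch (not part of the original module): the directive classes
# above are plain value holders consumed by GpMirrorListToBuild.buildMirrors();
# a rough pairing for an in-place recovery (the segment objects are assumed to
# come from a gparray loaded elsewhere, and the helper name is made up).
def _example_build_directives(failed_segment, live_segment):
    stop = GpStopSegmentDirectoryDirective(failed_segment)
    cleanup = GpCleanupSegmentDirectoryDirective(failed_segment)
    # an in-place recovery reuses the cleaned-up target location
    copy = GpCopySegmentDirectoryDirective(live_segment, failed_segment, True)
    return stop, cleanup, copy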
| foyzur/gpdb | gpMgmt/bin/gppylib/operations/buildMirrorSegments.py | Python | apache-2.0 | 41,655 |
from tinymce.widgets import TinyMCE, get_language_config
from django.conf import settings
from django.utils.translation import get_language
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from os.path import join
from django.utils.encoding import smart_unicode
import tinymce.settings
from django.utils import simplejson
from django.template.defaultfilters import escape
from django.forms.widgets import flatatt
import cms.plugins.text.settings
class TinyMCEEditor(TinyMCE):
def __init__(self, installed_plugins=None, **kwargs):
super(TinyMCEEditor, self).__init__(**kwargs)
self.installed_plugins = installed_plugins
def render_additions(self, name, value, attrs=None):
language = get_language()
context = {
'name': name,
'language': language,
'CMS_MEDIA_URL': settings.CMS_MEDIA_URL,
'installed_plugins': self.installed_plugins,
}
return mark_safe(render_to_string(
'cms/plugins/widgets/tinymce.html', context))
def _media(self):
media = super(TinyMCEEditor, self)._media()
media.add_js([join(settings.CMS_MEDIA_URL, path) for path in (
'js/tinymce.placeholdereditor.js',
'js/lib/jquery-ui.custom.min.js',
'js/placeholder_editor_registry.js',
)])
media.add_css({"all":[join(settings.CMS_MEDIA_URL, path) for path in ('css/jquery/cupertino/jquery-ui.css',
'css/tinymce_toolbar.css')]})
return media
media = property(_media)
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, "TinyMCE widget attributes must contain 'id'"
mce_config = cms.plugins.text.settings.TINYMCE_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
mce_config['mode'] = 'exact'
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
plugins = mce_config.get("plugins", "")
if len(plugins):
plugins += ","
plugins += "-cmsplugins"
mce_config['plugins'] = plugins
if mce_config['theme'] == "simple":
mce_config['theme'] = "advanced"
mce_config['theme_advanced_buttons1_add_before'] = "cmsplugins,cmspluginsedit"
json = simplejson.dumps(mce_config)
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
c_json = simplejson.dumps(compressor_config)
html.append(u'<script type="text/javascript">tinyMCE_GZ.init(%s);</script>' % (c_json))
html.append(u'<script type="text/javascript">%s;\ntinyMCE.init(%s);</script>' % (self.render_additions(name, value, attrs), json))
return mark_safe(u'\n'.join(html))
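# Illustrative sketch (not part of the original module): constructing the
# widget with no extra CMS text plugins. This assumes Django settings are
# already configured; in practice django-cms passes the installed text plugins
# so the toolbar template can render their buttons.
def _example_build_widget():
    return TinyMCEEditor(installed_plugins=[])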
| jalaziz/django-cms-grappelli-old | cms/plugins/text/widgets/tinymce_widget.py | Python | bsd-3-clause | 3,625 |
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
from __future__ import (absolute_import, print_function)
import socket
import subprocess
import sys
import os
import logging
import yaml
from ooinstall.variants import find_variant
from ooinstall.utils import debug_env
installer_log = logging.getLogger('installer')
CFG = None
ROLES_TO_GROUPS_MAP = {
'master': 'masters',
'node': 'nodes',
'etcd': 'etcd',
'storage': 'nfs',
'master_lb': 'lb'
}
VARIABLES_MAP = {
'ansible_ssh_user': 'ansible_ssh_user',
'deployment_type': 'deployment_type',
'variant_subtype': 'deployment_subtype',
'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
'proxy_http': 'openshift_http_proxy',
'proxy_https': 'openshift_https_proxy',
'proxy_exclude_hosts': 'openshift_no_proxy',
}
HOST_VARIABLES_MAP = {
'ip': 'openshift_ip',
'public_ip': 'openshift_public_ip',
'hostname': 'openshift_hostname',
'public_hostname': 'openshift_public_hostname',
'containerized': 'containerized',
}
def set_config(cfg):
global CFG
CFG = cfg
def generate_inventory(hosts):
global CFG
new_nodes = [host for host in hosts if host.is_node() and host.new_host]
scaleup = len(new_nodes) > 0
lb = determine_lb_configuration(hosts)
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
write_inventory_children(base_inventory, scaleup)
write_inventory_vars(base_inventory, lb)
# write_inventory_hosts
for role in CFG.deployment.roles:
# write group block
group = ROLES_TO_GROUPS_MAP.get(role, role)
base_inventory.write("\n[{}]\n".format(group))
# write each host
group_hosts = [host for host in hosts if role in host.roles]
for host in group_hosts:
schedulable = host.is_schedulable_node(hosts)
write_host(host, role, base_inventory, schedulable)
if scaleup:
base_inventory.write('\n[new_nodes]\n')
for node in new_nodes:
write_host(node, 'new_nodes', base_inventory)
base_inventory.close()
return base_inventory_path
def determine_lb_configuration(hosts):
lb = next((host for host in hosts if host.is_master_lb()), None)
if lb:
if lb.hostname is None:
lb.hostname = lb.connect_to
lb.public_hostname = lb.connect_to
return lb
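# Illustrative sketch (not part of the original module): determine_lb_configuration()
# above relies on next() with a default of None to find at most one matching host;
# the same pattern over plain data (the helper name is made up).
def _example_first_match(items, predicate):
    return next((item for item in items if predicate(item)), None)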
def write_inventory_children(base_inventory, scaleup):
global CFG
base_inventory.write('\n[OSEv3:children]\n')
for role in CFG.deployment.roles:
child = ROLES_TO_GROUPS_MAP.get(role, role)
base_inventory.write('{}\n'.format(child))
if scaleup:
base_inventory.write('new_nodes\n')
# pylint: disable=too-many-branches
def write_inventory_vars(base_inventory, lb):
global CFG
base_inventory.write('\n[OSEv3:vars]\n')
for variable, value in CFG.settings.items():
inventory_var = VARIABLES_MAP.get(variable, None)
if inventory_var and value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
for variable, value in CFG.deployment.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
if CFG.deployment.variables['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
base_inventory.write('openshift_override_hostname_check=true\n')
if lb is not None:
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
base_inventory.write(
"openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
if CFG.settings.get('variant_version', None) == '3.1':
# base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))
write_proxy_settings(base_inventory)
# Find the correct deployment type for ansible:
ver = find_variant(CFG.settings['variant'],
version=CFG.settings.get('variant_version', None))[1]
base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
if getattr(ver, 'variant_subtype', False):
base_inventory.write('deployment_subtype={}\n'.format(ver.deployment_subtype))
if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
base_inventory.write('openshift_docker_additional_registries={}\n'.format(
os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
base_inventory.write('openshift_docker_insecure_registries={}\n'.format(
os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
# We have to double the '{' here for literals
base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
"'name': 'ose-devel', "
"'baseurl': '{}', "
"'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
for name, role_obj in CFG.deployment.roles.items():
if role_obj.variables:
group_name = ROLES_TO_GROUPS_MAP.get(name, name)
base_inventory.write("\n[{}:vars]\n".format(group_name))
for variable, value in role_obj.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
base_inventory.write("\n")
def write_proxy_settings(base_inventory):
try:
base_inventory.write("openshift_http_proxy={}\n".format(
CFG.settings['openshift_http_proxy']))
except KeyError:
pass
try:
base_inventory.write("openshift_https_proxy={}\n".format(
CFG.settings['openshift_https_proxy']))
except KeyError:
pass
try:
base_inventory.write("openshift_no_proxy={}\n".format(
CFG.settings['openshift_no_proxy']))
except KeyError:
pass
def write_host(host, role, inventory, schedulable=None):
global CFG
if host.preconfigured:
return
facts = ''
for prop in HOST_VARIABLES_MAP:
if getattr(host, prop):
facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
if host.other_variables:
for variable, value in host.other_variables.items():
facts += " {}={}".format(variable, value)
if host.node_labels and role == 'node':
facts += ' openshift_node_labels="{}"'.format(host.node_labels)
# Distinguish between three states, no schedulability specified (use default),
# explicitly set to True, or explicitly set to False:
if role != 'node' or schedulable is None:
pass
else:
facts += " openshift_schedulable={}".format(schedulable)
installer_host = socket.gethostname()
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n'])
if no_pwd_sudo == 1:
print('The atomic-openshift-installer requires sudo access without a password.')
sys.exit(1)
facts += ' ansible_become=yes'
inventory.write('{} {}\n'.format(host.connect_to, facts))
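# Illustrative sketch (not part of the original module): the inventory line
# emitted by write_host() above is "<connect_to> <space-separated facts>"; this
# standalone helper rebuilds that shape from plain values (hostname, IP and the
# helper name are made up for illustration).
def _example_host_line(connect_to='node1.example.com',
                       openshift_ip='192.0.2.10', schedulable=False):
    facts = ' openshift_ip={}'.format(openshift_ip)
    facts += ' openshift_schedulable={}'.format(schedulable)
    return '{} {}\n'.format(connect_to, facts)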
def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
installer_log.debug("Inside load_system_facts")
installer_log.debug("load_system_facts will run with Ansible/Openshift environment variables:")
debug_env(env_vars)
FNULL = open(os.devnull, 'w')
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory_file),
os_facts_path])
installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
installer_log.debug("Subprocess will run with Ansible/Openshift environment variables:")
debug_env(env_vars)
status = subprocess.call(args, env=env_vars, stdout=FNULL)
if status != 0:
installer_log.debug("Exit status from subprocess was not 0")
return [], 1
with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
try:
callback_facts = yaml.safe_load(callback_facts_file)
except yaml.YAMLError as exc:
print("Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc)
print("Try deleting and rerunning the atomic-openshift-installer")
sys.exit(1)
return callback_facts, 0
def default_facts(hosts, verbose=False):
global CFG
installer_log.debug("Current global CFG vars here: %s", CFG)
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
facts_env = os.environ.copy()
facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
if 'ansible_log_path' in CFG.settings:
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
installer_log.debug("facts_env: %s", facts_env)
installer_log.debug("Going to 'load_system_facts' next")
return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False):
global CFG
if len(hosts_to_run_on) != len(hosts):
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/openshift-node/scaleup.yml')
else:
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/byo/openshift-cluster/config.yml')
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
# override the ansible config for our main playbook run
if 'ansible_quiet_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
def run_ansible(playbook, inventory, env_vars, verbose=False):
installer_log.debug("run_ansible will run with Ansible/Openshift environment variables:")
debug_env(env_vars)
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory),
playbook])
installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
return subprocess.call(args, env=env_vars)
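# Illustrative sketch (not part of the original module): the command line
# assembled by run_ansible() above, returned as a list instead of executed so
# its shape is easy to see (the paths are made up).
def _example_ansible_args(inventory='/tmp/hosts', playbook='site.yml', verbose=False):
    args = ['ansible-playbook', '-v'] if verbose else ['ansible-playbook']
    args.extend(['--inventory-file={}'.format(inventory), playbook])
    return args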
def run_uninstall_playbook(hosts, verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/uninstall.yml')
inventory_file = generate_inventory(hosts)
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
# override the ansible config for our main playbook run
if 'ansible_quiet_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
def run_upgrade_playbook(hosts, playbook, verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
# TODO: Upgrade inventory for upgrade?
inventory_file = generate_inventory(hosts)
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
# override the ansible config for our main playbook run
if 'ansible_quiet_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
| zhiwliu/openshift-ansible | utils/src/ooinstall/openshift_ansible.py | Python | apache-2.0 | 13,004 |
# Legendre polynomials P_n(x) on [-1,1] for n=0,1,2,3,4
f0 = lambda x: legendre(0,x)
f1 = lambda x: legendre(1,x)
f2 = lambda x: legendre(2,x)
f3 = lambda x: legendre(3,x)
f4 = lambda x: legendre(4,x)
plot([f0,f1,f2,f3,f4],[-1,1])
| pducks32/intergrala | python/sympy/doc/src/modules/mpmath/plots/legendre.py | Python | mit | 230
# -*- coding: utf-8 -*-
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from __future__ import print_function
from datetime import datetime
import itertools
from operator import methodcaller
import os
import re
import sys
import textwrap
import warnings
import dateutil
import numpy as np
import pytest
import pytz
import pandas.compat as compat
from pandas.compat import (
PY3, StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range,
u, zip)
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, NaT, Series, Timestamp, date_range, read_csv)
from pandas.core.config import (
get_option, option_context, reset_option, set_option)
import pandas.util.testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
from pandas.io.formats.terminal import get_terminal_size
use_32bit_repr = is_platform_windows() or is_platform_32bit()
_frame = DataFrame(tm.getSeriesData())
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split('\n')[0].startswith("<class")
c2 = r.split('\n')[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split('\n')) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == '...')[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == '...':
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r'^[\.\ ]+$', row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(
df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(
df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split('\n'):
if line.endswith('\\'):
return True
return False
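# Illustrative sketch (not part of the original test module): the predicates
# above only inspect the string repr; this helper forces vertical truncation
# and checks that it is detected (it reuses the imports at the top of this
# module and is not one of the original helpers).
def _example_vertical_truncation():
    df = DataFrame({'x': np.arange(100)})
    with option_context('display.max_rows', 4):
        return has_vertically_truncated_repr(df)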
class TestDataFrameFormatting(object):
def setup_method(self, method):
self.warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
self.frame = _frame.copy()
def teardown_method(self, method):
warnings.filters = self.warn_filters
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self):
self.frame.loc[5] = 0
fmt.set_eng_float_format()
repr(self.frame)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(self.frame)
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
assert ('non-null' in buf.getvalue()) is result
with option_context('display.max_info_rows', 20,
'display.max_info_columns', 20):
check(None, True)
check(True, True)
check(False, False)
with option_context('display.max_info_rows', 5,
'display.max_info_columns', 5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': lzip(range(10), range(10))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(
max_len - 1, max_len + 1)) for i in range(10)
]})
r = repr(df)
r = r[r.find('\n') + 1:]
adj = fmt._get_adjustment()
for line, value in lzip(r.split('\n'), df['B']):
if adj.len(value) + 1 > max_len:
assert '...' in line
else:
assert '...' not in line
with option_context("display.max_colwidth", 999999):
assert '...' not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert '...' not in repr(df)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
with option_context("display.chop_threshold", 0.2):
assert repr(df) == ' 0 1\n0 0.0 0.5\n1 0.5 0.0'
with option_context("display.chop_threshold", 0.6):
assert repr(df) == ' 0 1\n0 0.0 0.0\n1 0.0 0.0'
with option_context("display.chop_threshold", None):
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = pd.DataFrame([[10, 20, 30, 40],
[8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 -1.000000e-11\n'
'2 30.0 2.000000e-09\n'
'3 40.0 -2.000000e-11')
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (' 0 1\n'
'0 10.0 0.000000e+00\n'
'1 20.0 0.000000e+00\n'
'2 30.0 0.000000e+00\n'
'3 40.0 0.000000e+00')
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 0.000000e+00\n'
'2 30.0 2.000000e-09\n'
'3 40.0 0.000000e+00')
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(lrange(1000))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(lrange(1000))) < 100
def test_repr_set(self):
assert printing.pprint_thing({1}) == '{1}'
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(['a', 'b'])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
assert '\\' not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame('hello', [0], [0])
df_wide = DataFrame('hello', [0], lrange(10))
df_tall = DataFrame('hello', lrange(30), lrange(5))
with option_context('mode.sim_interactive', True):
with option_context('display.max_columns', 10, 'display.width', 20,
'display.max_rows', 20,
'display.show_dimensions', True):
with option_context('display.expand_frame_repr', True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context('display.expand_frame_repr', False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame('hello', lrange(1000), lrange(5))
with option_context('mode.sim_interactive', False, 'display.width', 0,
'display.max_rows', 5000):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr('pandas.io.formats.console.get_terminal_size',
lambda: terminal_size)
monkeypatch.setattr('pandas.io.formats.format.get_terminal_size',
lambda: terminal_size)
index = range(5)
columns = pd.MultiIndex.from_tuples([
('This is a long title with > 37 chars.', 'cat'),
('This is a loooooonger title with > 43 chars.', 'dog'),
])
df = pd.DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split('\n')[:2]
assert 'long' in h1
assert 'loooooonger' in h1
assert 'cat' in h2
assert 'dog' in h2
# regular columns
df2 = pd.DataFrame({"A" * 41: [1, 2], 'B' * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split('\n')[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = pd.DataFrame(np.random.rand(1, 7))
monkeypatch.setattr('pandas.io.formats.console.get_terminal_size',
lambda: terminal_size)
monkeypatch.setattr('pandas.io.formats.format.get_terminal_size',
lambda: terminal_size)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = pd.DataFrame({'a': [108480, 30830], 'b': [12345, 12345],
'c': [12345, 12345], 'd': [12345, 12345],
'e': ['a' * 50] * 2})
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip("terminal size too small, "
"{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['{i:05d}'.format(i=i) for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context('mode.sim_interactive', True):
with option_context('display.width', term_width * 2):
with option_context('display.max_rows', 5,
'display.max_columns', 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context('display.max_rows', 20,
'display.max_columns', 10):
# Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context('display.max_rows', 9,
'display.max_columns', 10):
# exceeding vertical bounds cannot result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context('display.max_columns', 100, 'display.max_rows',
term_width * 20, 'display.width', None):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame([{'a': 'foo',
'b': 'bar',
'c': 'uncomfortably long line with lots of stuff',
'd': 1}, {'a': 'foo',
'b': 'bar',
'c': 'stuff',
'd': 1}])
df.set_index(['a', 'b', 'c'])
assert str(df) == (
' a b c d\n'
'0 foo bar uncomfortably long line with lots of stuff 1\n'
'1 foo bar stuff 1')
with option_context('max_colwidth', 20):
assert str(df) == (' a b c d\n'
'0 foo bar uncomfortably lo... 1\n'
'1 foo bar stuff 1')
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context('mode.sim_interactive', True):
with option_context('max_rows', None):
with option_context('max_columns', None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context('max_rows', 0):
with option_context('max_columns', 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context('max_rows', 0):
with option_context('max_columns', None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context('max_rows', None):
with option_context('max_columns', 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = [u('\u03c3')] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({'unicode': unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(['abc', u('\u03c3a'), 'aegdvg'])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split('\n')
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith('dtype:'):
assert len(line) == line_len
# it works even if sys.stdin is None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = self.frame.to_string()
assert isinstance(result, compat.text_type)
def test_to_string_utf8_columns(self):
n = u("\u05d0").encode('utf-8')
with option_context('display.max_rows', 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({u('c/\u03c3'): []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(['\xc2'])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False]},
columns=['int', 'float', 'object'])
formatters = [('int', lambda x: '0x{x:x}'.format(x=x)),
('float', lambda x: '[{x: 4.1f}]'.format(x=x)),
('object', lambda x: '-{x!s}-'.format(x=x))]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=lzip(*formatters)[1])
assert result == (' int float object\n'
'0 0x1 [ 1.0] -(1, 2)-\n'
'1 0x2 [ 2.0] -True-\n'
'2 0x3 [ 3.0] -False-')
assert result == result2
def test_to_string_with_datetime64_monthformatter(self):
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({'months': months})
def format_func(x):
return x.strftime('%Y-%m')
result = x.to_string(formatters={'months': format_func})
expected = 'months\n0 2016-01\n1 2016-02'
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter(self):
x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')})
def format_func(x):
return x.strftime('%H:%M')
result = x.to_string(formatters={'hod': format_func})
expected = 'hod\n0 10:10\n1 12:12'
assert result.strip() == expected
def test_to_string_with_formatters_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
result = df.to_string(
formatters={u('c/\u03c3'): lambda x: '{x}'.format(x=x)})
assert result == u(' c/\u03c3\n') + '0 1\n1 2\n2 3'
def test_east_asian_unicode_false(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
# not aligned properly because of east asian width
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
assert _rep(df) == expected
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あああああ あ\n"
u"bb い いいい\nc う う\n"
u"ddd えええ ええええええ")
assert _rep(df) == expected
# column name
df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'],
u'あああああ': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\nあああ あああああ あ\n"
u"いいいいいい い いいい\nうう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'],
name=u'おおおお'))
expected = (u" a b\n"
u"おおおお \n"
u"あ あああああ あ\n"
u"い い いいい\n"
u"うう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
name=u'お'))
expected = (u" あああ いいいいい\n"
u"お \n"
u"あ あああ あ\n"
u"いいい い いいい\n"
u"うう う う\n"
u"え えええええ ええ")
assert _rep(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=idx)
expected = (u" a b\n"
u"あ いい あああああ あ\n"
u"う え い いいい\n"
u"おおお かかかか う う\n"
u"き くく えええ ええええええ")
assert _rep(df) == expected
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n0 あああああ ... さ\n"
u".. ... ... ...\n3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
u".. ... ... ...\naaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
def test_east_asian_unicode_true(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
# Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
assert _rep(df) == expected
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\n"
u"a あああああ あ\n"
u"bb い いいい\n"
u"c う う\n"
u"ddd えええ ええええええ")
assert _rep(df) == expected
# column name
df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'],
u'あああああ': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\n"
u"a あ 1\n"
u"bb いいい 222\n"
u"c う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\n"
u"あああ あああああ あ\n"
u"いいいいいい い いいい\n"
u"うう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'],
name=u'おおおお'))
expected = (u" a b\n"
u"おおおお \n"
u"あ あああああ あ\n"
u"い い いいい\n"
u"うう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
name=u'お'))
expected = (u" あああ いいいいい\n"
u"お \n"
u"あ あああ あ\n"
u"いいい い いいい\n"
u"うう う う\n"
u"え えええええ ええ")
assert _rep(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=idx)
expected = (u" a b\n"
u"あ いい あああああ あ\n"
u"う え い いいい\n"
u"おおお かかかか う う\n"
u"き くく えええ ええええええ")
assert _rep(df) == expected
# truncate
with option_context('display.max_rows', 3, 'display.max_columns',
3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n"
u"0 あああああ ... さ\n"
u".. ... ... ...\n"
u"3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\n"
u"あああ あああああ ... さ\n"
u"... ... ... ...\n"
u"aaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
# ambiguous unicode
df = DataFrame({'b': [u'あ', u'いいい', u'¡¡', u'ええええええ'],
u'あああああ': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', '¡¡¡'])
expected = (u" b あああああ\n"
u"a あ 1\n"
u"bb いいい 222\n"
u"c ¡¡ 33333\n"
u"¡¡¡ ええええええ 4")
assert _rep(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({u('c/\u03c3'): Series()})
nonempty = DataFrame({u('c/\u03c3'): Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_truncate_indices(self):
for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr",
False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(
df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (
has_horizontally_truncated_repr(df))
with option_context("display.max_rows", 15,
"display.max_columns", 15):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(
df)
def test_to_string_truncate_multilevel(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series([datetime.datetime(2012, 1, 1)] * 10 +
[datetime.datetime(1012, 1, 2)] + [
datetime.datetime(2012, 1, 3)] * 10)
with pd.option_context('display.max_rows', 8):
result = str(s)
assert 'object' in result
# 12045
df = DataFrame({'text': ['some words'] + [None] * 9})
with pd.option_context('display.max_rows', 8,
'display.max_columns', 3):
result = str(df)
assert 'None' in result
assert 'NaN' not in result
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame(
{'date': [pd.Timestamp('20130101').tz_localize('UTC')] +
[pd.NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert '2013-01-01 00:00:00+00:00' in result
assert 'NaT' in result
assert '...' in result
assert '[6 rows x 1 columns]' in result
dts = [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5 + [pd.NaT] * 5
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 2011-01-01 00:00:00-05:00 1\n'
'1 2011-01-01 00:00:00-05:00 2\n'
'.. ... ..\n'
'8 NaT 9\n'
'9 NaT 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
dts = [pd.NaT] * 5 + [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 NaT 1\n'
'1 NaT 2\n'
'.. ... ..\n'
'8 2011-01-01 00:00:00-05:00 9\n'
'9 2011-01-01 00:00:00-05:00 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
dts = ([pd.Timestamp('2011-01-01', tz='Asia/Tokyo')] * 5 +
[pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5)
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 2011-01-01 00:00:00+09:00 1\n'
'1 2011-01-01 00:00:00+09:00 2\n'
'.. ... ..\n'
'8 2011-01-01 00:00:00-05:00 9\n'
'9 2011-01-01 00:00:00-05:00 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
@pytest.mark.parametrize('start_date', [
'2017-01-01 23:59:59.999999999',
'2017-01-01 23:59:59.99999999',
'2017-01-01 23:59:59.9999999',
'2017-01-01 23:59:59.999999',
'2017-01-01 23:59:59.99999',
'2017-01-01 23:59:59.9999',
])
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({'A': date_range(start=start_date,
freq='D', periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date,
freq='D', periods=5)
df = DataFrame({'A': range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split('\n')
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({u('c/\u03c3'): Series({'test': np.nan})})
compat.text_type(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath('io', 'parser', 'data', 'unicode_series.csv')
df = pd.read_csv(filepath, header=None, encoding='latin1')
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({'foo': [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
fmt.set_option('display.max_rows', 200)
def test_pprint_thing(self):
from pandas.io.formats.printing import pprint_thing as pp_t
if PY3:
pytest.skip("doesn't work on Python 3")
assert pp_t('a') == u('a')
assert pp_t(u('a')) == u('a')
assert pp_t(None) == 'None'
assert pp_t(u('\u05d0'), quote_strings=True) == u("u'\u05d0'")
assert pp_t(u('\u05d0'), quote_strings=False) == u('\u05d0')
assert (pp_t((u('\u05d0'), u('\u05d1')), quote_strings=True) ==
u("(u'\u05d0', u'\u05d1')"))
assert (pp_t((u('\u05d0'), (u('\u05d1'), u('\u05d2'))),
quote_strings=True) == u("(u'\u05d0', "
"(u'\u05d1', u'\u05d2'))"))
assert (pp_t(('foo', u('\u05d0'), (u('\u05d0'), u('\u05d0'))),
quote_strings=True) == u("(u'foo', u'\u05d0', "
"(u'\u05d0', u'\u05d0'))"))
# gh-2038: escape embedded tabs in string
assert "\t" not in pp_t("a\tb", escape_chars=("\t", ))
def test_wide_repr(self):
with option_context('mode.sim_interactive', True,
'display.show_dimensions', True,
'display.max_columns', 20):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
assert "10 rows x {c} columns".format(c=max_cols - 1) in rep_str
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_columns(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
df = DataFrame(np.random.randn(5, 3),
columns=['a' * 90, 'b' * 90, 'c' * 90])
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'DataFrame Index' in line
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'Level 0 Level 1' in line
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
max_cols = get_option('display.max_columns')
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(
tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150, 'display.max_columns', 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_long_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame({'a': ['a' * 30, 'b' * 30],
'b': ['c' * 70, 'd' * 80]})
result = repr(df)
assert 'ccccc' in result
assert 'ddddd' in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=['s{x:04d}'.format(x=x) for x in range(n)], dtype='int64')
import re
str_rep = str(s)
nmatches = len(re.findall('dtype', str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame({'id1': {0: '1a3',
1: '9h4'},
'id2': {0: np.nan,
1: 'd67'},
'id3': {0: '78d',
1: '79d'},
'value': {0: 123,
1: 64}})
# multi-index
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(
' value\nid1 id2 id3 \n'
'1a3 NaN 78d 123\n9h4 d67 79d 64')
assert result == expected
# index
y = df.set_index('id2')
result = y.to_string()
expected = u(
' id1 id3 value\nid2 \n'
'NaN 1a3 78d 123\nd67 9h4 79d 64')
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
result = y.to_string()
expected = u(
' value\nid1 id2 id3 \n'
'1a3 NaN 78d 123\n9h4 d67 79d 64')
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, 'id2'] = np.nan
y = df2.set_index('id2')
result = y.to_string()
expected = u(
' id1 id3 value\nid2 \n'
'NaN 1a3 78d 123\nNaN 9h4 79d 64')
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, 'id2'] = np.nan
y = df2.set_index(['id2', 'id3'])
result = y.to_string()
expected = u(
' id1 value\nid2 id3 \n'
'NaN 78d 1a3 123\n 79d 9h4 64')
assert result == expected
df = DataFrame({'id1': {0: np.nan,
1: '9h4'},
'id2': {0: np.nan,
1: 'd67'},
'id3': {0: np.nan,
1: '79d'},
'value': {0: 123,
1: 64}})
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(
' value\nid1 id2 id3 \n'
'NaN NaN NaN 123\n9h4 d67 79d 64')
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20, 'A'] = np.nan
biggie.loc[:20, 'B'] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, compat.string_types)
# print in right order
result = biggie.to_string(columns=['B', 'A'], col_space=17,
float_format='%.5f'.__mod__)
lines = result.split('\n')
header = lines[0].strip().split()
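# round-trip the rendered frame through read_csv to check that column order and values survive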
joined = '\n'.join(re.sub(r'\s+', ' ', x).strip() for x in lines[1:])
recons = read_csv(StringIO(joined), names=header,
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
assert recons['A'].count() == biggie['A'].count()
assert (np.abs(recons['A'].dropna() -
biggie['A'].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=['A'], col_space=17)
header = result.split('\n')[0].strip().split()
expected = ['A']
assert header == expected
biggie.to_string(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
biggie.to_string(columns=['B', 'A'], float_format=str)
biggie.to_string(columns=['B', 'A'], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(header=['X', 'Y'])
expected = ' X Y\n0 1 4\n1 2 5\n2 3 6'
assert df_s == expected
with pytest.raises(ValueError):
df.to_string(header=['X'])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({'x': [11, 22], 'y': [33, -44], 'z': ['AAA', ' ']})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = (" x y z\n"
" 11 33 AAA\n"
" 22 -44 ")
assert df_s == expected
df_s = df[['y', 'x', 'z']].to_string(index=False)
expected = (" y x z\n"
" 33 11 AAA\n"
"-44 22 ")
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({'x': [11, 22, 33], 'y': [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n 33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({'x': [11, 22, -33], 'y': [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option('display.precision', 5, 'display.column_space', 12,
'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6, 1.7e+8,
1.253456, np.pi, -1e6]})
df_s = df.to_string()
if _three_digit_exp():
expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n'
'2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n'
'5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n'
'8 -1.00000e+006')
else:
expected = (' x\n0 0.00000e+00\n1 2.50000e-01\n'
'2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n'
'5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n'
'8 -1.00000e+06')
assert df_s == expected
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string()
expected = (' x\n' '0 3234.000\n' '1 0.253')
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = (' x\n'
'0 1.000000e+009\n'
'1 2.512000e-001')
else:
expected = (' x\n'
'0 1.000000e+09\n'
'1 2.512000e-01')
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({'x': [0.19999]})
expected = ' x\n0 0.200'
assert df.to_string(float_format='%.3f') == expected
# GH 22270
df = DataFrame({'x': [100.0]})
expected = ' x\n0 100'
assert df.to_string(float_format='%.0f') == expected
def test_to_string_small_float_values(self):
df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# expected output depends on the platform's exponent width (same check as _three_digit_exp)
if '{x:.4g}'.format(x=1.7e8) == '1.7e+008':
expected = (' a\n'
'0 1.500000e+000\n'
'1 1.000000e-017\n'
'2 -5.500000e-007')
else:
expected = (' a\n'
'0 1.500000e+00\n'
'1 1.000000e-17\n'
'2 -5.500000e-07')
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = (' 0\n' '0 0\n' '1 0\n' '2 -0')
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(lrange(5), index=index)
result = df.to_string()
expected = (' 0\n'
'1.5 0\n'
'2.0 1\n'
'3.0 2\n'
'4.0 3\n'
'5.0 4')
assert result == expected
def test_to_string_ascii_error(self):
data = [('0 ', u(' .gitignore '), u(' 5 '),
' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
'\xa2\xe2\x80\xa2\xe2\x80\xa2')]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({'x': [-15, 20, 25, -35]})
assert issubclass(df['x'].dtype.type, np.integer)
output = df.to_string()
expected = (' x\n' '0 -15\n' '1 20\n' '2 25\n' '3 -35')
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])
rs = df.to_string(formatters={'__index__': lambda x: 'abc'[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n' '0 3234.000\n' '1 0.253')
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0000 foo\n'
'2 -2.1234 foooo\n'
'3 3.0000 fooooo\n'
'4 4.0000 bar')
assert result == expected
df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0 foo\n'
'2 -2.0 foooo\n'
'3 3.0 fooooo\n'
'4 4.0 bar')
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame({
'A': [-np.inf, np.inf, -1, -2.1234, 3, 4],
'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
})
result = df.to_string()
expected = (' A B\n'
'0 -inf -inf\n'
'1 inf inf\n'
'2 -1.0000 foo\n'
'3 -2.1234 foooo\n'
'4 3.0000 fooooo\n'
'5 4.0000 bar')
assert result == expected
df = DataFrame({
'A': [-np.inf, np.inf, -1., -2., 3., 4.],
'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
})
result = df.to_string()
expected = (' A B\n'
'0 -inf -inf\n'
'1 inf inf\n'
'2 -1.0 foo\n'
'3 -2.0 foooo\n'
'4 3.0 fooooo\n'
'5 4.0 bar')
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({'A': [6.0, 3.1, 2.2]})
expected = ' A\n0 6,0\n1 3,1\n2 2,2'
assert df.to_string(decimal=',') == expected
def test_to_string_line_width(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
s = df.to_string(line_width=80)
assert max(len(l) for l in s.split('\n')) == 80
def test_show_dimensions(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', True):
assert '5 rows' in str(df)
assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', False):
assert '5 rows' not in str(df)
assert '5 rows' not in df._repr_html_()
with option_context('display.max_rows', 2, 'display.max_columns', 2,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
assert '5 rows' in str(df)
assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
assert '5 rows' not in str(df)
assert '5 rows' not in df._repr_html_()
def test_repr_html(self):
self.frame._repr_html_()
fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
self.frame._repr_html_()
fmt.set_option('display.notebook_repr_html', False)
self.frame._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option('display.show_dimensions', True)
assert '2 rows' in df._repr_html_()
fmt.set_option('display.show_dimensions', False)
assert '2 rows' not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert 'tex2jax_ignore' not in df._repr_html_()
with pd.option_context('display.html.use_mathjax', False):
assert 'tex2jax_ignore' in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product([np.arange(max_cols // 2),
['foo', 'bar']],
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
reg_repr = df._repr_html_()
assert '...' not in reg_repr
mcols = MultiIndex.from_product((np.arange(1 + (max_cols // 2)),
['foo', 'bar']),
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert '...' in df._repr_html_()
def test_repr_html_long(self):
with option_context('display.max_rows', 60):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert '..' in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert u('{h} rows ').format(h=h) in long_repr
assert u('2 columns') in long_repr
def test_repr_html_float(self):
with option_context('display.max_rows', 60):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert '<td>{val}</td>'.format(val=str(40 + h)) in reg_repr
h = max_rows + 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
long_repr = df._repr_html_()
assert '..' in long_repr
assert '<td>{val}</td>'.format(val='31') not in long_repr
assert u('{h} rows ').format(h=h) in long_repr
assert u('2 columns') in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx,
columns=['A', 'B'])
with option_context('display.max_rows', 60, 'display.max_columns', 20):
reg_repr = df._repr_html_()
assert '...' not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn((max_L1 + 1) * 2, 2), index=idx,
columns=['A', 'B'])
long_repr = df._repr_html_()
assert '...' in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert '...' not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert '...' in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests run inside a terminal (i.e. not on CI) we need to detect
# the terminal size so that we actually try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
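# allow one extra column for every four terminal columns beyond the default width of 80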
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context('display.large_repr', 'info',
'display.max_columns', max_cols):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 4):
assert has_non_verbose_info_repr(df)
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 5):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r'<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert r'<class' in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert '<class' not in df._repr_html_()
with option_context('display.large_repr', 'info',
'display.max_columns', max_cols):
assert '<class' in df._repr_html_()
def test_fake_qtconsole_repr_html(self):
def get_ipython():
return {'config': {'KernelApp':
{'parent_appname': 'ipython-qtconsole'}}}
repstr = self.frame._repr_html_()
assert repstr is not None
fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = self.frame._repr_html_()
assert 'class' in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A(object):
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
skip = True
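# the first repr line is the column header and carries no exponent; 'skip' lets it pass once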
for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
if line.startswith('dtype:'):
continue
if _three_digit_exp():
assert ('+010' in line) or skip
else:
assert ('+10' in line) or skip
skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_period(self):
# GH 12615
df = pd.DataFrame({'A': pd.period_range('2013-01',
periods=4, freq='M'),
'B': [pd.Period('2011-01', freq='M'),
pd.Period('2011-02-01', freq='D'),
pd.Period('2011-03-01 09:00', freq='H'),
pd.Period('2011-04', freq='M')],
'C': list('abcd')})
exp = (" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d")
assert str(df) == exp
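# Helper: Series of one-letter and two-letter strings plus ascending- and
# descending-length strings, shared by the TestSeriesFormatting cases below.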
def gen_series_formatting():
s1 = pd.Series(['a'] * 100)
s2 = pd.Series(['ab'] * 100)
s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])
s4 = s3[::-1]
test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4}
return test_sers
class TestSeriesFormatting(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series([u('\u03c3')] * 10)
repr(s)
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == 'Series([], Freq: B)'
result = self.ts[:0].to_string(length=0)
assert result == 'Series([], Freq: B)'
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split('\n')[-1].strip()
assert last_line == ("Freq: B, Name: foo, "
"Length: {cp}, dtype: float64".format(cp=len(cp)))
def test_freq_name_separation(self):
s = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10), name=0)
result = repr(s)
assert 'Freq: D, Name: 0' in result
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = (u('0 foo\n') + u('1 NaN\n') + u('2 -1.23\n') +
u('3 4.56'))
assert result == expected
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') + '1 NaN\n' + '2 bar\n' + '3 baz')
assert result == expected
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') + '1 5\n' + '2 bar\n' + '3 baz')
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = (u('0 NaN\n') + '1 1.5678\n' + '2 NaN\n' +
'3 -3.0000\n' + '4 NaN')
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = (u(' 1\n') + ' 2\n' + ' 3\n' + ' 4')
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf = fmt.SeriesFormatter(s, name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
# not aligned properly because of east asian width
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
assert _rep(s) == expected
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
assert _rep(s) == expected
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\ndtype: object")
assert _rep(s) == expected
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
assert _rep(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\n"
u"う え 22\n"
u"おおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
assert _rep(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\n"
u"AB 22\n"
u"2011-01-01 00:00:00 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n ... \n"
u"えええ ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
# Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
assert _rep(s) == expected
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
assert _rep(s) == expected
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\n"
u"いいいい いい\n"
u"う ううう\n"
u"えええ ええええ\ndtype: object")
assert _rep(s) == expected
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\n"
u"いいいい いい\n"
u"う ううう\n"
u"えええ ええええ\n"
u"Name: おおおおおおお, dtype: object")
assert _rep(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\n"
u"う え 22\n"
u"おおお かかかか 3333\n"
u"き くく 44444\n"
u"dtype: int64")
assert _rep(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\n"
u"AB 22\n"
u"2011-01-01 00:00:00 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n"
u" ... \n"
u"えええ ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
# ambiguous unicode
s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],
index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])
expected = (u"ああ ¡¡\n"
u"¡¡¡¡いい い¡¡\n"
u"¡¡ ううう\n"
u"えええ ええええ\ndtype: object")
assert _rep(s) == expected
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
for line in repr(Series(vals)).split('\n'):
if line.startswith('dtype:'):
continue
if _three_digit_exp():
assert '+010' in line
else:
assert '+10' in line
def test_datetimeindex(self):
index = date_range('20130102', periods=6)
s = Series(1, index=index)
result = s.to_string()
assert '2013-01-02' in result
# NaT in index
s2 = Series(2, index=[Timestamp('20130111'), NaT])
s = s2.append(s)
result = s.to_string()
assert 'NaT' in result
# NaT in summary
result = str(s2.index)
assert 'NaT' in result
@pytest.mark.parametrize('start_date', [
'2017-01-01 23:59:59.999999999',
'2017-01-01 23:59:59.99999999',
'2017-01-01 23:59:59.9999999',
'2017-01-01 23:59:59.999999',
'2017-01-01 23:59:59.99999',
'2017-01-01 23:59:59.9999'
])
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq='D', periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq='D', periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
s = Series(date_range('2012-1-1', periods=3, freq='D'))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert '1 days' in result
assert '00:00:00' not in result
assert 'NaT' in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +23:59:59.999850' in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +23:00:00' in result
assert '1 days 23:00:00' in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +22:59:00' in result
assert '1 days 22:59:00' in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +22:58:59.999850' in result
assert '0 days 22:58:59.999850' in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
assert '-1 days +23:54:57' in result
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - td
result = y.to_string()
assert '2012-01-01 23:59:59.999450' in result
# no boxing of the actual elements
td = Series(pd.timedelta_range('1 days', periods=3))
result = td.to_string()
assert result == u("0 1 days\n1 2 days\n2 3 days")
def test_mixed_datetime64(self):
df = DataFrame({'A': [1, 2], 'B': ['2012-01-01', '2012-01-02']})
df['B'] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert '2012-01-01' in result
def test_period(self):
# GH 12615
index = pd.period_range('2013-01', periods=6, freq='M')
s = Series(np.arange(6, dtype='int64'), index=index)
exp = ("2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64")
assert str(s) == exp
s = Series(index)
exp = ("0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]")
assert str(s) == exp
# periods with mixed freq
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('2011-02-01', freq='D'),
pd.Period('2011-03-01 09:00', freq='H')])
exp = ("0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object")
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split('\n')) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split('\n')) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split('\n')) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split('\n')) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split('\n')) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split('\n')) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split('\n')) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10,
"display.show_dimensions", False):
res = repr(s)
exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 '
'1.0000\n4 1.0000\n ... \n125 '
'1.0000\n126 1.0000\n127 0.9999\n128 '
'1.0000\n129 1.0000\ndtype: float64')
assert res == exp
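# helper: every non-truncated line of a Series repr should render at the same width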
def check_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split('\n')
lines = [line for line in repr(s).split('\n')
if not re.match(r'[^\.]*\.+', line)][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4,
"display.show_dimensions", False):
res = repr(test_sers['onel'])
exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object'
assert exp == res
res = repr(test_sers['twol'])
exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:'
' object')
assert exp == res
res = repr(test_sers['asc'])
exp = ('0 a\n1 ab\n ... \n4 abcde\n5'
' abcdef\ndtype: object')
assert exp == res
res = repr(test_sers['desc'])
exp = ('5 abcdef\n4 abcde\n ... \n1 ab\n0'
' a\ndtype: object')
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.check_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype='int64')
with option_context("display.max_rows", 1):
strrepr = repr(s).split('\n')
exp1 = ['0', '0']
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = ['..']
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
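# count the dots of the truncation marker in the flattened repr; the marker
# widens with the column width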
def getndots(s):
return len(re.match(r'[^\.]*(\.*)', s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert 'Length' not in repr(s)
with option_context("display.max_rows", 4):
assert 'Length' in repr(s)
with option_context("display.show_dimensions", True):
assert 'Length' in repr(s)
with option_context("display.max_rows", 4,
"display.show_dimensions", False):
assert 'Length' not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype='int64')
s.name = 'myser'
res = s.to_string(max_rows=2, name=True)
exp = '0 0\n ..\n99 99\nName: myser'
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = '0 0\n ..\n99 99'
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype='int64')
res = s.to_string(max_rows=2, dtype=True)
exp = '0 0\n ..\n99 99\ndtype: int64'
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = '0 0\n ..\n99 99'
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype='int64')
res = s.to_string(max_rows=2, length=True)
exp = '0 0\n ..\n99 99\nLength: 100'
assert res == exp
def test_to_string_na_rep(self):
s = pd.Series(index=range(100))
res = s.to_string(na_rep='foo', max_rows=2)
exp = '0 foo\n ..\n99 foo'
assert res == exp
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype='float64')
res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),
max_rows=2)
exp = '0 0.0\n ..\n9 9.0'
assert res == exp
def test_to_string_header(self):
s = pd.Series(range(10), dtype='int64')
s.index.name = 'foo'
res = s.to_string(header=True, max_rows=2)
exp = 'foo\n0 0\n ..\n9 9'
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = '0 0\n ..\n9 9'
assert res == exp
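# Some platforms (notably older MSVC runtimes on Windows) format float
# exponents with three digits, e.g. '1.7e+008'; tests branch on this helper.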
def _three_digit_exp():
return '{x:.4g}'.format(x=1.7e8) == '1.7e+008'
class TestFloatArrayFormatter(object):
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context('display.precision', 6):
# DataFrame example from issue #9764
d = pd.DataFrame(
{'col1': [9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7,
5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6,
4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
expected_output = {
(0, 6):
' col1\n'
'0 9.999000e-08\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07',
(1, 6):
' col1\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07',
(1, 8):
' col1\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07\n'
'6 5.000100e-07\n'
'7 6.000000e-07',
(8, 16):
' col1\n'
'8 9.999000e-07\n'
'9 1.000000e-06\n'
'10 1.000100e-06\n'
'11 2.000000e-06\n'
'12 4.999000e-06\n'
'13 5.000000e-06\n'
'14 5.000100e-06\n'
'15 6.000000e-06',
(9, 16):
' col1\n'
'9 0.000001\n'
'10 0.000001\n'
'11 0.000002\n'
'12 0.000005\n'
'13 0.000005\n'
'14 0.000005\n'
'15 0.000006'
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with pd.option_context('display.precision', 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
assert str(df) == ' x\n0 12345.6789'
df = pd.DataFrame(dict(x=[2e6]))
assert str(df) == ' x\n0 2000000.0'
df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
assert str(df) == ' x\n0 1.2346e+04\n1 2.0000e+06'
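# Timedelta._repr_base accepts an optional format ('sub_day', 'long', 'all');
# each test below pins down one of these renderings.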
class TestRepr_timedelta64(object):
def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='sub_day')
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='long')
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1ns = pd.to_timedelta(1, unit='ns')
drepr = lambda x: x._repr_base(format='all')
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(-delta_1d) == "-1 days +00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
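# Timedelta64Formatter: box=True quotes each rendered value, box=False leaves it bare.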
class TestTimedelta64Formatter(object):
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
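# Datetime64Formatter picks one precision for the whole column (driven by the
# data's resolution) and also honours an explicit formatter callable.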
class TestDatetime64Formatter(object):
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 00:00:00"
assert result[1].strip() == "2013-01-01 12:00:00"
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01"
assert result[1].strip() == "2013-01-02"
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
# make sure that we display date formatting consistently
x = Series(date_range('20130101 09:00:00', periods=5, freq='D'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range('20130101 09:00:00', periods=5, freq='s'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range('20130101 09:00:00', periods=5, freq='ms'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='us'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='N'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime('%Y-%m')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['2016-01', '2016-02']
def test_datetime64formatter_hoursecond(self):
x = Series(pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f'))
def format_func(x):
return x.strftime('%H:%M')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['10:10', '12:12']
class TestNaTFormatting(object):
def test_repr(self):
assert repr(pd.NaT) == "NaT"
def test_str(self):
assert str(pd.NaT) == "NaT"
class TestDatetimeIndexFormat(object):
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
assert formatted[0] == "2003-01-01 12:00:00"
assert formatted[1] == "NaT"
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
assert formatted[0] == "2003-01-01"
assert formatted[1] == "NaT"
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
formatted = pd.to_datetime(
[datetime(2013, 1, 1), pd.NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
date_format="%m-%d-%Y", na_rep="UT")
assert formatted[0] == "02-01-2003"
assert formatted[1] == "UT"
class TestDatetimeIndexUnicode(object):
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)
]))
assert "['2013-01-01'," in text
assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(
2014, 1, 1, 12), datetime(2014, 1, 1)]))
assert "'2013-01-01 00:00:00'," in text
assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp(object):
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_tz_dateutil(self):
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_nat_representations(self):
for f in (str, repr, methodcaller('isoformat')):
assert f(pd.NaT) == 'NaT'
def test_format_percentiles():
result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
expected = ['1.999%', '2.001%', '50%', '66.667%', '99.99%']
assert result == expected
result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
assert result == expected
msg = r"percentiles should all be in the interval \[0,1\]"
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, np.nan, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([-0.001, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([2, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, 0.5, 'a'])
def test_repr_html_ipython_config(ip):
code = textwrap.dedent("""\
import pandas as pd
df = pd.DataFrame({"A": [1, 2]})
df._repr_html_()
cfg = get_ipython().config
cfg['IPKernelApp']['parent_appname']
df._repr_html_()
""")
result = ip.run_cell(code)
assert not result.error_in_exec
| GuessWhoSamFoo/pandas | pandas/tests/io/formats/test_format.py | Python | bsd-3-clause | 110,932 |
class WriteAroundCacheMixin(object):
cache_modes = {'*': 'standard'}
def get_context_data(self, *args, **kwargs):
context = super(WriteAroundCacheMixin, self).get_context_data(*args, **kwargs)
context['cache_modes'] = self.cache_modes
return context
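# Illustrative usage sketch (not part of the original module; the view, template and
# cache-mode values below are hypothetical):
#
#   class ArticleView(WriteAroundCacheMixin, TemplateView):
#       template_name = 'article.html'
#       cache_modes = {'comments': 'write-around', '*': 'standard'}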
| gizmag/django-write-around-cache | django_write_around_cache/view_mixins.py | Python | mit | 283 |
# -*- coding: utf8 -*-
"""
Script baseado no arquivo decorators.py do django 1.3.
Ele foi copiado para usar o decorador ``login_required``
que possui o argumento ``login_url``, responsável por
redirecionar ao template de login desejado.
No ato de atualizar o framework, esse script torna-se
obsoleto.
"""
import urlparse
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.decorators import available_attrs
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse.urlparse(login_url or
settings.LOGIN_URL)[:2]
current_scheme, current_netloc = urlparse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(path, login_url, redirect_field_name)
return _wrapped_view
return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def permission_required(perm, login_url=None):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
"""
return user_passes_test(lambda u: u.has_perm(perm), login_url=login_url)
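# Illustrative usage sketch (not part of the original module; the view names,
# permission codename and login URL below are hypothetical):
#
#   @login_required(login_url='/accounts/login/')
#   def my_view(request):
#       ...
#
#   @permission_required('app.can_edit', login_url='/accounts/login/')
#   def another_view(request):
#       ...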
| brenotx/SIGI-1.6 | sigi/apps/utils/decorators.py | Python | gpl-2.0 | 2,697 |
#!/usr/bin/python
#
# Test for tempfile creation
# Use lsof to see how many open files we have
#
import sys
import os
import glob
sys.path.append('..')
from rhn.SmartIO import _tempfile
def t():
f = _tempfile()
for i in range(1024):
f.write(("%s" % (i % 10)) * 1023 + "\n")
f.seek(0, 2)
assert(f.tell() == 1048576)
return f
def openedFiles():
global pid
path = '/proc/' + pid + '/fd/';
return len(glob.glob(os.path.join(path, '*')));
if __name__ == '__main__':
global pid
pid = str(os.getpid());
print "PID: ", pid;
failed = False;
print "Running and saving stream object references"
ret = []
for i in range(100):
print "Saving", i
ret.append(t())
if openedFiles() != i + 5:
print "FAIL: Opened files: ", openedFiles(), "but expected: ", str(i + 5);
failed = True;
del ret
print "Running without saving object references"
for i in range(1000):
print "Running", i
t()
if openedFiles() not in [4, ]:
print "FAIL: Opened files: ", openedFiles(), "but expected 4!";
failed = True;
if failed:
print "Test FAILS!"
sys.exit(1);
else:
print "Test PASSES!"
sys.exit(0);
| aronparsons/spacewalk | client/rhel/rhnlib/test/10-tempfile.py | Python | gpl-2.0 | 1,289 |
#
#
#------------------------------------------------------------------------------
# Copyright (C) 2006-2009 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------------
###
#
# Utility methods for dealing with scripts.
#
# @author Jean-Marie Burel
# <a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
# @author Donald MacDonald
# <a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk</a>
# @author Will Moore
# <a href="mailto:will@lifesci.dundee.ac.uk">will@lifesci.dundee.ac.uk</a>
# @version 3.0
# <small>
# (<b>Internal version:</b> $Revision: $Date: $)
# </small>
# @since 3.0-Beta4
#
import logging
import getopt, sys, os, subprocess
from struct import *
import omero.clients
from omero.rtypes import *
import omero.util.pixelstypetopython as pixelstypetopython
try:
import hashlib
hash_sha1 = hashlib.sha1
except:
import sha
hash_sha1 = sha.new
# r,g,b,a colours for use in scripts.
COLOURS = {'Red': (255,0,0,255), 'Green': (0,255,0,255), 'Blue': (0,0,255,255), 'Yellow': (255,255,0,255),
'White': (255,255,255,255), }
EXTRA_COLOURS = {'Violet': (238,133,238,255), 'Indigo': (79,6,132,255),
'Black': (0,0,0,255), 'Orange': (254,200,6,255), 'Gray': (130,130,130,255),}
CSV_NS = 'text/csv';
CSV_FORMAT = 'text/csv';
SU_LOG = logging.getLogger("omero.util.script_utils")
def drawTextOverlay(draw, x, y, text, colour='0xffffff'):
"""
    Draw text on the image.
@param draw The PIL Draw class.
@param x The x-coord to draw.
@param y The y-coord to draw.
@param text The text to render.
@param colour The colour as a PIL colour string to draw the text in.
"""
draw.text((x,y),text, fill=colour)
def drawLineOverlay(draw, x0, y0, x1, y1, colour='0xffffff'):
"""
Draw line on image.
@param draw The PIL Draw class.
@param x0 The x0-coord of line.
@param y0 The y0-coord of line.
@param x1 The x1-coord of line.
@param y1 The y1-coord of line.
    @param colour    The colour as a PIL colour string to draw the line in.
    """
    draw.line([(x0, y0), (x1, y1)], fill=colour)
def rgbToRGBInt(red, green, blue):
"""
Convert an R,G,B value to an int.
@param R the Red value.
@param G the Green value.
@param B the Blue value.
@return See above.
"""
RGBInt = (red<<16)+(green<<8)+blue;
return int(RGBInt);
def RGBToPIL(RGB):
"""
Convert an RGB value to a PIL colour value.
@param RGB the RGB value.
@return See above.
"""
hexval = hex(int(RGB));
return '#'+(6-len(hexval[2:]))*'0'+hexval[2:];
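# Illustrative examples (not part of the original module):
#   rgbToRGBInt(255, 0, 0) == 0xFF0000 == 16711680
#   rgbToRGBInt(0, 255, 0) == 0x00FF00 == 65280
#   RGBToPIL(16711680) == '#ff0000'
#   RGBToPIL(65280)    == '#00ff00'   (note the zero-padding to six hex digits)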
def rangeToStr(range):
"""
Map a range to a string of numbers
@param range See above.
@return See above.
"""
first = 1;
string = "";
for value in range:
if(first==1):
string = str(value);
first = 0;
else:
string = string + ','+str(value)
return string;
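# Illustrative example (not part of the original module):
#   rangeToStr(range(3)) == "0,1,2"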
def rmdir_recursive(dir):
for name in os.listdir(dir):
full_name = os.path.join(dir, name)
# on Windows, if we don't have write permission we can't remove
# the file/directory either, so turn that on
if not os.access(full_name, os.W_OK):
os.chmod(full_name, 0600)
if os.path.isdir(full_name):
rmdir_recursive(full_name)
else:
os.remove(full_name)
os.rmdir(dir)
def calcSha1(filename):
"""
Returns a hash of the file identified by filename
@param filename: pathName of the file
@return: The hash of the file
"""
fileHandle = open(filename)
h = hash_sha1()
h.update(fileHandle.read())
hash = h.hexdigest()
fileHandle.close()
return hash;
def calcSha1FromData(data):
"""
Calculate the Sha1 Hash from a data array
@param data The data array.
@return The Hash
"""
h = hash_sha1()
h.update(data)
hash = h.hexdigest()
return hash;
def getFormat(queryService, format):
return queryService.findByQuery("from Format as f where f.value='"+format+"'", None)
def createFile(updateService, filename, mimetype=None, origFilePathName=None):
"""
Creates an original file, saves it to the server and returns the result
@param queryService: The query service E.g. session.getQueryService()
@param updateService: The update service E.g. session.getUpdateService()
@param filename: The file path and name (or name if in same folder). String
@param mimetype: The mimetype (string) or Format object representing the file format
@param origFilePathName: Optional path/name for the original file
@return: The saved OriginalFileI, as returned from the server
"""
originalFile = omero.model.OriginalFileI();
if(origFilePathName == None):
origFilePathName = filename;
path, name = os.path.split(origFilePathName)
originalFile.setName(omero.rtypes.rstring(name));
originalFile.setPath(omero.rtypes.rstring(path));
# just in case we are passed a FormatI object
try:
v = mimetype.getValue()
mt = v.getValue()
except:
# handle the string we expect
mt = mimetype
if mt:
originalFile.mimetype = omero.rtypes.rstring(mt)
originalFile.setSize(omero.rtypes.rlong(os.path.getsize(filename)));
originalFile.setSha1(omero.rtypes.rstring(calcSha1(filename)));
return updateService.saveAndReturnObject(originalFile);
def uploadFile(rawFileStore, originalFile, filePath=None):
"""
Uploads an OriginalFile to the server
@param rawFileStore: The Omero rawFileStore
@param originalFile: The OriginalFileI
@param filePath: Where to find the file to upload. If None, use originalFile.getName().getValue()
"""
rawFileStore.setFileId(originalFile.getId().getValue());
fileSize = originalFile.getSize().getValue();
increment = 10000;
cnt = 0;
if filePath == None:
filePath = originalFile.getName().getValue()
fileHandle = open(filePath, 'rb');
done = 0
while(done!=1):
if(increment+cnt<fileSize):
blockSize = increment;
else:
blockSize = fileSize-cnt;
done = 1;
fileHandle.seek(cnt);
block = fileHandle.read(blockSize);
rawFileStore.write(block, cnt, blockSize);
cnt = cnt+blockSize;
fileHandle.close();
def downloadFile(rawFileStore, originalFile, filePath=None):
"""
Downloads an OriginalFile from the server.
@param rawFileStore: The Omero rawFileStore
@param originalFile: The OriginalFileI
@param filePath: Where to download the file. If None, use originalFile.getName().getValue()
"""
fileId = originalFile.getId().getValue()
rawFileStore.setFileId(fileId)
fileSize = originalFile.getSize().getValue()
maxBlockSize = 10000
cnt = 0
if filePath == None:
filePath = originalFile.getName().getValue()
# don't overwrite. Add number before extension
i = 1
path, ext = filePath.rsplit(".", 1)
while os.path.exists(filePath):
filePath = "%s_%s.%s" % (path,i,ext)
i +=1
fileHandle = open(filePath, 'w')
    while(cnt<fileSize):
        blockSize = min(maxBlockSize, fileSize-cnt)
block = rawFileStore.read(cnt, blockSize)
cnt = cnt+blockSize
fileHandle.write(block)
fileHandle.close()
return filePath
def attachFileToParent(updateService, parent, originalFile, description=None, namespace=None):
"""
Attaches the original file (file) to a Project, Dataset or Image (parent)
@param updateService: The update service
@param parent: A ProjectI, DatasetI or ImageI to attach the file to
@param originalFile: The OriginalFileI to attach
@param description: Optional description for the file annotation. String
    @param namespace: Optional namespace for file annotation. String
@return: The saved and returned *AnnotationLinkI (* = Project, Dataset or Image)
"""
fa = omero.model.FileAnnotationI();
fa.setFile(originalFile);
if description:
fa.setDescription(omero.rtypes.rstring(description))
if namespace:
fa.setNs(omero.rtypes.rstring(namespace))
if type(parent) == omero.model.DatasetI:
l = omero.model.DatasetAnnotationLinkI()
elif type(parent) == omero.model.ProjectI:
l = omero.model.ProjectAnnotationLinkI()
elif type(parent) == omero.model.ImageI:
l = omero.model.ImageAnnotationLinkI()
else:
return
parent = parent.__class__(parent.id.val, False) # use unloaded object to avoid update conflicts
l.setParent(parent);
l.setChild(fa);
return updateService.saveAndReturnObject(l);
def uploadAndAttachFile(queryService, updateService, rawFileStore, parent, localName, mimetype, description=None, namespace=None, origFilePathName=None):
"""
Uploads a local file to the server, as an Original File and attaches it to the
parent (Project, Dataset or Image)
@param queryService: The query service
@param updateService: The update service
@param rawFileStore: The rawFileStore
@param parent: The ProjectI or DatasetI or ImageI to attach file to
@param localName: Full Name (and path) of the file location to upload. String
@param mimetype: The original file mimetype. E.g. "PNG". String
@param description: Optional description for the file annotation. String
@param namespace: Namespace to set for the original file
@param origFilePathName: The /path/to/file/fileName.ext you want on the server. If none, use output as name
@return: The originalFileLink child. (FileAnnotationI)
"""
filename = localName
if origFilePathName == None:
origFilePathName = localName
originalFile = createFile(updateService, filename, mimetype, origFilePathName);
uploadFile(rawFileStore, originalFile, localName)
fileLink = attachFileToParent(updateService, parent, originalFile, description, namespace)
return fileLink.getChild()
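# Illustrative usage sketch (not part of the original module). 'session' is assumed to
# be an already-connected OMERO service factory and 'dataset' a loaded omero.model.DatasetI;
# the file name and description below are hypothetical.
#
#   queryService = session.getQueryService()
#   updateService = session.getUpdateService()
#   rawFileStore = session.createRawFileStore()
#   fileAnn = uploadAndAttachFile(queryService, updateService, rawFileStore, dataset,
#                                 "results.csv", CSV_FORMAT,
#                                 description="Analysis output", namespace=CSV_NS)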
def createLinkFileAnnotation(conn, localPath, parent, output="Output", parenttype="Image", mimetype=None, desc=None, ns=None, origFilePathAndName=None):
"""
Uploads a local file to the server, as an Original File and attaches it to the
parent (Project, Dataset or Image)
    @param conn:                    The L{omero.gateway.BlitzGateway} connection.
    @param localPath:               Full Name (and path) of the file location to upload. String
    @param parent:                  The ProjectI or DatasetI or ImageI to attach file to
    @param output:                  Name used in the log message for the created file. String
    @param parenttype:              Not used by this function
    @param mimetype:                The original file mimetype. E.g. "PNG". String
    @param desc:                    Optional description for the file annotation. String
    @param ns:                      Optional namespace for the file annotation
    @param origFilePathAndName:     The /path/to/file/fileName.ext you want on the server. If None, localPath is used
    @return:                        The originalFileLink child (FileAnnotationI) and a log message
"""
if os.path.exists(localPath):
fileAnnotation = conn.createFileAnnfromLocalFile(localPath, origFilePathAndName=origFilePathAndName, mimetype=mimetype, ns=ns, desc=desc)
message = "%s created" % output
if parent is not None:
if parent.canAnnotate():
parentClass = parent.OMERO_CLASS
message += " and attached to %s%s %s." % (parentClass[0].lower(), parentClass[1:], parent.getName())
parent.linkAnnotation(fileAnnotation)
else:
message += " but could not be attached."
else:
message = "%s not created." % output
fileAnnotation = None
return fileAnnotation, message
def getObjects(conn, params):
"""
Get the objects specified by the script parameters.
Assume the parameters contain the keys IDs and Data_Type
@param conn: The L{omero.gateway.BlitzGateway} connection.
@param params: The script parameters
@return: The valid objects and a log message
"""
dataType = params["Data_Type"]
ids = params["IDs"]
objects = list(conn.getObjects(dataType,ids))
message = ""
if not objects:
message += "No %s%s found. " % (dataType[0].lower(), dataType[1:])
else:
if not len(objects) == len(ids):
message += "Found %s out of %s %s%s(s). " % (len(objects), len(ids), dataType[0].lower(), dataType[1:])
return objects, message
def addAnnotationToImage(updateService, image, annotation):
"""
Add the annotation to an image.
@param updateService The update service to create the annotation link.
@param image The ImageI object that should be annotated.
@param annotation The annotation object
@return The new annotationlink object
"""
l = omero.model.ImageAnnotationLinkI();
l.setParent(image);
l.setChild(annotation);
return updateService.saveAndReturnObject(l);
def readFromOriginalFile(rawFileService, iQuery, fileId, maxBlockSize = 10000):
"""
Read the OriginalFile with fileId and return it as a string.
@param rawFileService The RawFileService service to read the originalfile.
@param iQuery The Query Service.
@param fileId The id of the originalFile object.
@param maxBlockSize The block size of each read.
@return The OriginalFile object contents as a string
"""
fileDetails = iQuery.findByQuery("from OriginalFile as o where o.id = " + str(fileId) , None);
rawFileService.setFileId(fileId);
data = '';
cnt = 0;
fileSize = fileDetails.getSize().getValue();
while(cnt<fileSize):
blockSize = min(maxBlockSize, fileSize);
block = rawFileService.read(cnt, blockSize);
data = data + block;
cnt = cnt+blockSize;
return data[0:fileSize];
def readFileAsArray(rawFileService, iQuery, fileId, row, col, separator = ' '):
"""
Read an OriginalFile with id and column separator and return it as an array.
@param rawFileService The RawFileService service to read the originalfile.
@param iQuery The Query Service.
@param fileId The id of the originalFile object.
@param row The number of rows in the file.
@param col The number of columns in the file.
@param sep the column separator.
@return The file as an NumPy array.
"""
from numpy import fromstring, reshape
textBlock = readFromOriginalFile(rawFileService, iQuery, fileId);
arrayFromFile = fromstring(textBlock,sep = separator);
return reshape(arrayFromFile, (row, col));
def readFlimImageFile(rawPixelsStore, pixels):
"""
Read the RawImageFlimFile with fileId and return it as an array [c, x, y]
@param rawPixelsStore The rawPixelStore service to get the image.
@param pixels The pixels of the image.
@return The Contents of the image for z = 0, t = 0, all channels;
"""
from numpy import zeros
sizeC = pixels.getSizeC().getValue();
sizeX = pixels.getSizeX().getValue();
sizeY = pixels.getSizeY().getValue();
id = pixels.getId().getValue();
pixelsType = pixels.getPixelsType().getValue().getValue();
rawPixelsStore.setPixelsId(id , False);
cRange = range(0, sizeC);
stack = zeros((sizeC, sizeX, sizeY),dtype=pixelstypetopython.toNumpy(pixelsType));
for c in cRange:
plane = downloadPlane(rawPixelsStore, pixels, 0, c, 0);
stack[c,:,:]=plane;
return stack;
def downloadPlane(rawPixelsStore, pixels, z, c, t):
"""
Download the plane [z,c,t] for image pixels. Pixels must have pixelsType loaded.
N.B. The rawPixelsStore must have already been initialised by setPixelsId()
@param rawPixelsStore The rawPixelStore service to get the image.
@param pixels The pixels of the image.
@param z The Z-Section to retrieve.
@param c The C-Section to retrieve.
@param t The T-Section to retrieve.
@return The Plane of the image for z, c, t
"""
from numpy import array
rawPlane = rawPixelsStore.getPlane(z, c, t);
sizeX = pixels.getSizeX().getValue();
sizeY = pixels.getSizeY().getValue();
pixelsId = pixels.getId().getValue();
pixelType = pixels.getPixelsType().getValue().getValue();
convertType ='>'+str(sizeX*sizeY)+pixelstypetopython.toPython(pixelType);
convertedPlane = unpack(convertType, rawPlane);
numpyType = pixelstypetopython.toNumpy(pixelType)
remappedPlane = array(convertedPlane, numpyType);
remappedPlane.resize(sizeY, sizeX);
return remappedPlane;
def getPlaneFromImage(imagePath, rgbIndex=None):
"""
Reads a local image (E.g. single plane tiff) and returns it as a numpy 2D array.
@param imagePath Path to image.
"""
from numpy import asarray
try:
from PIL import Image # see ticket:2597
except ImportError:
import Image # see ticket:2597
i = Image.open(imagePath)
a = asarray(i)
if rgbIndex == None:
return a
else:
return a[:, :, rgbIndex]
def uploadDirAsImages(sf, queryService, updateService, pixelsService, path, dataset = None):
"""
Reads all the images in the directory specified by 'path' and uploads them to OMERO as a single
multi-dimensional image, placed in the specified 'dataset'
Uses regex to determine the Z, C, T position of each image by name,
and therefore determines sizeZ, sizeC, sizeT of the new Image.
@param path the path to the directory containing images.
@param dataset the OMERO dataset, if we want to put images somewhere. omero.model.DatasetI
"""
import re
from numpy import zeros
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
# assume 1 image in this folder for now.
# Make a single map of all images. key is (z,c,t). Value is image path.
imageMap = {}
channelSet = set()
tokens = []
# other parameters we need to determine
sizeZ = 1
sizeC = 1
sizeT = 1
zStart = 1 # could be 0 or 1 ?
tStart = 1
fullpath = None
rgb = False
# process the names and populate our imagemap
for f in os.listdir(path):
fullpath = os.path.join(path, f)
tSearch = regex_time.search(f)
cSearch = regex_channel.search(f)
zSearch = regex_zslice.search(f)
tokSearch = regex_token.search(f)
if f.endswith(".jpg"):
rgb = True
if tSearch == None:
theT = 0
else:
theT = int(tSearch.group('T'))
if cSearch == None:
cName = "0"
else:
cName = cSearch.group('C')
if zSearch == None:
theZ = 0
else:
theZ = int(zSearch.group('Z'))
channelSet.add(cName)
sizeZ = max(sizeZ, theZ)
zStart = min(zStart, theZ)
sizeT = max(sizeT, theT)
tStart = min(tStart, theT)
if tokSearch != None:
tokens.append(tokSearch.group('Token'))
imageMap[(theZ, cName, theT)] = fullpath
colourMap = {}
if not rgb:
channels = list(channelSet)
# see if we can guess what colour the channels should be, based on name.
for i, c in enumerate(channels):
if c == 'rfp':
colourMap[i] = (255, 0, 0, 255)
if c == 'gfp':
colourMap[i] = (0, 255, 0, 255)
else:
channels = ("red", "green", "blue")
colourMap[0] = (255, 0, 0, 255)
colourMap[1] = (0, 255, 0, 255)
colourMap[2] = (0, 0, 255, 255)
sizeC = len(channels)
# use the common stem as the image name
imageName = os.path.commonprefix(tokens).strip('0T_')
description = "Imported from images in %s" % path
SU_LOG.info("Creating image: %s" % imageName)
# use the last image to get X, Y sizes and pixel type
if rgb:
plane = getPlaneFromImage(fullpath, 0)
else:
plane = getPlaneFromImage(fullpath)
pType = plane.dtype.name
# look up the PixelsType object from DB
pixelsType = queryService.findByQuery(\
"from PixelsType as p where p.value='%s'" % pType, None) # omero::model::PixelsType
if pixelsType == None and pType.startswith("float"): # e.g. float32
pixelsType = queryService.findByQuery(\
"from PixelsType as p where p.value='%s'" % "float", None) # omero::model::PixelsType
if pixelsType == None:
SU_LOG.warn("Unknown pixels type for: %s" % pType)
return
sizeY, sizeX = plane.shape
SU_LOG.debug("sizeX: %s sizeY: %s sizeZ: %s sizeC: %s sizeT: %s" % (sizeX, sizeY, sizeZ, sizeC, sizeT))
# code below here is very similar to combineImages.py
# create an image in OMERO and populate the planes with numpy 2D arrays
channelList = range(sizeC)
imageId = pixelsService.createImage(sizeX, sizeY, sizeZ, sizeT, channelList, pixelsType, imageName, description)
params = omero.sys.ParametersI()
params.addId(imageId)
pixelsId = queryService.projection(\
"select p.id from Image i join i.pixels p where i.id = :id",\
params)[0][0].val
rawPixelStore = sf.createRawPixelsStore()
rawPixelStore.setPixelsId(pixelsId, True)
try:
for theC in range(sizeC):
minValue = 0
maxValue = 0
for theZ in range(sizeZ):
zIndex = theZ + zStart
for theT in range(sizeT):
tIndex = theT + tStart
if rgb:
c = "0"
else:
c = channels[theC]
if (zIndex, c, tIndex) in imageMap:
imagePath = imageMap[(zIndex, c, tIndex)]
if rgb:
SU_LOG.debug("Getting rgb plane from: %s" % imagePath)
plane2D = getPlaneFromImage(imagePath, theC)
else:
SU_LOG.debug("Getting plane from: %s" % imagePath)
plane2D = getPlaneFromImage(imagePath)
else:
SU_LOG.debug("Creating blank plane for .", theZ, channels[theC], theT)
plane2D = zeros((sizeY, sizeX))
SU_LOG.debug("Uploading plane: theZ: %s, theC: %s, theT: %s" % (theZ, theC, theT))
uploadPlane(rawPixelStore, plane2D, theZ, theC, theT)
minValue = min(minValue, plane2D.min())
maxValue = max(maxValue, plane2D.max())
pixelsService.setChannelGlobalMinMax(pixelsId, theC, float(minValue), float(maxValue))
rgba = None
if theC in colourMap:
rgba = colourMap[theC]
try:
renderingEngine = sf.createRenderingEngine()
resetRenderingSettings(renderingEngine, pixelsId, theC, minValue, maxValue, rgba)
finally:
renderingEngine.close()
finally:
rawPixelStore.close()
# add channel names
pixels = pixelsService.retrievePixDescription(pixelsId)
i = 0
for c in pixels.iterateChannels(): # c is an instance of omero.model.ChannelI
lc = c.getLogicalChannel() # returns omero.model.LogicalChannelI
lc.setName(rstring(channels[i]))
updateService.saveObject(lc)
i += 1
# put the image in dataset, if specified.
if dataset:
link = omero.model.DatasetImageLinkI()
link.parent = omero.model.DatasetI(dataset.id.val, False)
link.child = omero.model.ImageI(imageId, False)
updateService.saveAndReturnObject(link)
return imageId
def uploadCecogObjectDetails(updateService, imageId, filePath):
"""
Parses a single line of cecog output and saves as a roi.
Adds a Rectangle (particle) to the current OMERO image, at point x, y.
Uses the self.image (OMERO image) and self.updateService
"""
objects = {}
roi_ids = []
import fileinput
for line in fileinput.input([filePath]):
theZ = 0
theT = None
x = None
y = None
parts = line.split("\t")
names = ("frame", "objID", "primaryClassLabel", "primaryClassName", "centerX", "centerY", "mean", "sd", "secondaryClassabel", "secondaryClassName", "secondaryMean", "secondarySd")
values = {}
for idx, name in enumerate(names):
if len(parts) >= idx:
values[name] = parts[idx]
frame = values["frame"]
try:
frame = long(frame)
except ValueError:
SU_LOG.debug("Non-roi line: %s " % line)
continue
theT = frame - 1
objID = values["objID"]
className = values["primaryClassName"]
x = float(values["centerX"])
y = float(values["centerY"])
description = ""
for name in names:
description += ("%s=%s\n" % (name, values.get(name, "(missing)")))
if theT and x and y:
SU_LOG.debug("Adding point '%s' to frame: %s, x: %s, y: %s" % (className, theT, x, y))
try:
shapes = objects[objID]
except KeyError:
shapes = []
objects[objID] = shapes
shapes.append( (theT, className, x, y, values, description) )
for object, shapes in objects.items():
# create an ROI, add the point and save
roi = omero.model.RoiI()
roi.setImage(omero.model.ImageI(imageId, False))
roi.setDescription(omero.rtypes.rstring("objID: %s" % object))
# create and save a point
for shape in shapes:
theT, className, x, y, values, description = shape
point = omero.model.PointI()
point.cx = rdouble(x)
point.cy = rdouble(y)
point.theT = rint(theT)
point.theZ = rint(0) # Workaround for shoola:ticket:1596
if className:
point.setTextValue(rstring(className)) # for display only
# link the point to the ROI and save it
roi.addShape(point)
            roi = updateService.saveAndReturnObject(roi)
roi_ids.append(roi.id.val)
return roi_ids
def split_image(client, imageId, dir, unformattedImageName = "tubulin_P037_T%05d_C%s_Z%d_S1.tif", dims = ('T', 'C', 'Z')):
"""
Splits the image into component planes, which are saved as local tiffs according to unformattedImageName.
E.g. myLocalDir/tubulin_P037_T%05d_C%s_Z%d_S1.tif which will be formatted according to dims, E.g. ('T', 'C', 'Z')
Channel will be formatted according to channel name, not index.
@param rawPixelsStore The rawPixelStore
@param queryService
@param c The C-Section to retrieve.
@param t The T-Section to retrieve.
@param imageName the local location to save the image.
"""
unformattedImageName = os.path.join(dir, unformattedImageName)
session = client.getSession()
queryService = session.getQueryService()
rawPixelsStore = session.createRawPixelsStore()
pixelsService = session.getPixelsService()
try:
from PIL import Image
except:
import Image
query_string = "select p from Pixels p join fetch p.image as i join fetch p.pixelsType where i.id='%s'" % imageId
pixels = queryService.findByQuery(query_string, None)
sizeX = pixels.getSizeX().getValue()
sizeY = pixels.getSizeY().getValue()
sizeZ = pixels.getSizeZ().getValue()
sizeC = pixels.getSizeC().getValue()
sizeT = pixels.getSizeT().getValue()
rawPixelsStore.setPixelsId(pixels.getId().getValue(), True)
channelMap = {}
cIndex = 0
pixels = pixelsService.retrievePixDescription(pixels.id.val) # load channels
for c in pixels.iterateChannels():
lc = c.getLogicalChannel()
channelMap[cIndex] = lc.getName() and lc.getName().getValue() or str(cIndex)
cIndex += 1
def formatName(unformatted, z, c, t):
# need to turn dims E.g. ('T', 'C', 'Z') into tuple, E.g. (t, c, z)
dimMap = {'T': t, 'C':channelMap[c], 'Z': z}
dd = tuple([dimMap[d] for d in dims])
return unformatted % dd
# cecog does this, but other formats may want to start at 0
zStart = 1
tStart = 1
# loop through dimensions, saving planes as tiffs.
for z in range(sizeZ):
for c in range(sizeC):
for t in range(sizeT):
imageName = formatName(unformattedImageName, z+zStart, c, t+tStart)
SU_LOG.debug("downloading plane z: %s c: %s t: %s to %s" % (z, c, t, imageName))
plane = downloadPlane(rawPixelsStore, pixels, z, c, t)
i = Image.fromarray(plane)
i.save(imageName)
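# Illustrative note (not part of the original module): with the default
# unformattedImageName and dims=('T', 'C', 'Z'), the plane z=0, c=0, t=0 of an image
# whose first channel is named e.g. "GFP" is saved (using zStart=tStart=1) under 'dir' as
#   tubulin_P037_T00001_CGFP_Z1_S1.tif
# i.e. the %-placeholders are filled in the order given by 'dims', with the channel
# substituted by its name rather than its index.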
def createFileFromData(updateService, queryService, filename, data):
"""
    Create an OriginalFile object from the data, setting its size, sha1 and a CSV mimetype.
    @param updateService The update service used to save the OriginalFile.
    @param queryService The query service (not used by this function).
    @param filename The name of the file.
    @param data The data to save.
    @return The newly created OriginalFile.
"""
tempFile = omero.model.OriginalFileI();
tempFile.setName(omero.rtypes.rstring(filename));
tempFile.setPath(omero.rtypes.rstring(filename));
tempFile.setMimetype(omero.rtypes.rstring(CSV_FORMAT));
tempFile.setSize(omero.rtypes.rlong(len(data)));
tempFile.setSha1(omero.rtypes.rstring(calcSha1FromData(data)));
return updateService.saveAndReturnObject(tempFile);
def attachArrayToImage(updateService, image, file, nameSpace):
"""
Attach an array, stored as a csv file to an image. Returns the annotation.
@param updateService The updateService to create the annotation link.
@param image The image to attach the data to.
    @param file The OriginalFile to attach.
    @param nameSpace The namespace of the file annotation.
    @return The FileAnnotation linked to the image.
"""
fa = omero.model.FileAnnotationI();
fa.setFile(file);
fa.setNs(omero.rtypes.rstring(nameSpace))
l = omero.model.ImageAnnotationLinkI();
l.setParent(image);
l.setChild(fa);
l = updateService.saveAndReturnObject(l);
return l.getChild();
def uploadArray(rawFileStore, updateService, queryService, image, filename, namespace, array):
"""
Upload the data to the server, creating the OriginalFile Object and attaching it to the image.
@param rawFileStore The rawFileStore used to create the file.
@param updateService The updateService to create the annotation link.
@param image The image to attach the data to.
@param filename The name of the file.
@param namespace The name space associated to the annotation.
@param data The data to save.
@return The newly created file.
"""
data = arrayToCSV(array);
file = createFileFromData(updateService, queryService, filename, data);
rawFileStore.setFileId(file.getId().getValue());
fileSize = len(data);
increment = 10000;
cnt = 0;
done = 0
while(done!=1):
if(increment+cnt<fileSize):
blockSize = increment;
else:
blockSize = fileSize-cnt;
done = 1;
block = data[cnt:cnt+blockSize];
rawFileStore.write(block, cnt, blockSize);
cnt = cnt+blockSize;
return attachArrayToImage(updateService, image, file, namespace);
def arrayToCSV(data):
"""
Convert the numpy array data to a csv file.
@param data the Numpy Array
@return The CSV string.
"""
size = data.shape;
row = size[0];
col = size[1];
strdata ="";
for r in range(0,row):
for c in range(0, col):
strdata = strdata + str(data[r,c])
if(c<col-1):
strdata = strdata+',';
strdata = strdata + '\n';
return strdata;
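# Illustrative example (not part of the original module): for the 2x2 numpy array
#   [[1, 2],
#    [3, 4]]
# arrayToCSV returns "1,2\n3,4\n".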
def uploadPlane(rawPixelsStore, plane, z, c, t):
"""
    Upload the plane to the server, attaching it to the current z,c,t of the already instantiated rawPixelStore.
@param rawPixelsStore The rawPixelStore which is already pointing to the data.
@param plane The data to upload
@param z The Z-Section of the plane.
@param c The C-Section of the plane.
@param t The T-Section of the plane.
"""
byteSwappedPlane = plane.byteswap();
convertedPlane = byteSwappedPlane.tostring();
rawPixelsStore.setPlane(convertedPlane, z, c, t)
def uploadPlaneByRow(rawPixelsStore, plane, z, c, t):
"""
Upload the plane to the server one row at a time,
    attaching it to the current z,c,t of the already instantiated rawPixelStore.
@param rawPixelsStore The rawPixelStore which is already pointing to the data.
@param plane The data to upload
@param z The Z-Section of the plane.
@param c The C-Section of the plane.
@param t The T-Section of the plane.
"""
byteSwappedPlane = plane.byteswap()
rowCount, colCount = plane.shape
for y in range(rowCount):
row = byteSwappedPlane[y:y+1, :] # slice y axis into rows
convertedRow = row.tostring()
rawPixelsStore.setRow(convertedRow, y, z, c, t)
def getRenderingEngine(session, pixelsId):
"""
Create the renderingEngine for the pixelsId.
@param session The current session to create the renderingEngine from.
@return The renderingEngine Service for the pixels.
"""
renderingEngine = session.createRenderingEngine();
renderingEngine.lookupPixels(pixelsId);
if(renderingEngine.lookupRenderingDef(pixelsId)==0):
renderingEngine.resetDefaults();
renderingEngine.lookupRenderingDef(pixelsId);
renderingEngine.load();
return renderingEngine;
def createPlaneDef(z,t):
"""
Create the plane rendering def, for z,t
@param Z the Z-Section
@param T The T-Point.
@return The RenderingDef Object.
"""
planeDef = omero.romio.PlaneDef()
planeDef.t = t;
planeDef.z = z;
planeDef.x = 0;
planeDef.y = 0;
planeDef.slice = 0;
return planeDef;
def getPlaneAsPackedInt(renderingEngine, z, t):
"""
Get the rendered Image of the plane for the z, t with the default channels.
@param renderingEngine The already instantiated renderEngine.
@param z The Z-section.
@param t The Timepoint.
"""
planeDef = createPlaneDef(z, t);
return renderingEngine.renderAsPackedInt(planeDef);
def getRawPixelsStore(session, pixelsId):
"""
Get the rawPixelsStore for the Image with pixelsId
@param pixelsId The pixelsId of the object to retrieve.
@return The rawPixelsStore service.
"""
rawPixelsStore = session.createRawPixelsStore();
rawPixelsStore.setPixelsId(pixelsId);
return rawPixelsStore;
def getRawFileStore(session, fileId):
"""
Get the rawFileStore for the file with fileId
@param fileId The fileId of the object to retrieve.
@return The rawFileStore service.
"""
rawFileStore = session.createRawFileStore();
rawFileStore.setFileId(fileId);
return rawFileStore;
def getPlaneInfo(iQuery, pixelsId, asOrderedList = True):
"""
Get the plane info for the pixels object returning it in order of z,t,c
@param iQuery The query service.
@param pixelsId The pixels for Id.
    @param asOrderedList If True, return a map of deltaT values keyed by "z:<z>t:<t>c:<c>"; otherwise return the raw PlaneInfo list
    @return See above.
    """
    query = "from PlaneInfo as info where pixels.id='"+str(pixelsId)+"' order by info.deltaT"
infoList = iQuery.findAllByQuery(query,None)
if(asOrderedList):
map = {}
for info in infoList:
key = "z:"+str(info.theZ.getValue())+"t:"+str(info.theT.getValue())+"c:"+str(info.theC.getValue());
map[key] = info.deltaT.getValue();
return map;
else:
return infoList;
def IdentityFn(commandArgs):
return commandArgs;
def resetRenderingSettings(renderingEngine, pixelsId, cIndex, minValue, maxValue, rgba=None):
"""
    Simply resets the rendering settings for a pixel set, according to the min and max values
The rendering engine does NOT have to be primed with pixelsId, as that is handled by this method.
@param renderingEngine The OMERO rendering engine
@param pixelsId The Pixels ID
@param minValue Minimum value of rendering window
@param maxValue Maximum value of rendering window
@param rgba Option to set the colour of the channel. (r,g,b,a) tuple.
"""
renderingEngine.lookupPixels(pixelsId)
if not renderingEngine.lookupRenderingDef(pixelsId):
renderingEngine.resetDefaults()
if rgba == None:
rgba=(255,255,255,255) # probably don't want E.g. single channel image to be blue!
if not renderingEngine.lookupRenderingDef(pixelsId):
raise "Still No Rendering Def"
renderingEngine.load()
renderingEngine.setChannelWindow(cIndex, float(minValue), float(maxValue))
if rgba:
red, green, blue, alpha = rgba
renderingEngine.setRGBA(cIndex, red, green, blue, alpha)
renderingEngine.saveCurrentSettings()
def createNewImage(session, plane2Dlist, imageName, description, dataset=None):
"""
Creates a new single-channel, single-timepoint image from the list of 2D numpy arrays in plane2Dlist
with each numpy 2D plane becoming a Z-section.
@param session An OMERO service factory or equivalent with getQueryService() etc.
@param plane2Dlist A list of numpy 2D arrays, corresponding to Z-planes of new image.
@param imageName Name of new image
@param description Description for the new image
@param dataset If specified, put the image in this dataset. omero.model.Dataset object
@return The new OMERO image: omero.model.ImageI
"""
queryService = session.getQueryService()
pixelsService = session.getPixelsService()
rawPixelStore = session.createRawPixelsStore()
renderingEngine = session.createRenderingEngine()
containerService = session.getContainerService()
pType = plane2Dlist[0].dtype.name
pixelsType = queryService.findByQuery("from PixelsType as p where p.value='%s'" % pType, None) # omero::model::PixelsType
theC, theT = (0,0)
# all planes in plane2Dlist should be same shape.
shape = plane2Dlist[0].shape
sizeY, sizeX = shape
minValue = plane2Dlist[0].min()
maxValue = plane2Dlist[0].max()
# get some other dimensions and create the image.
channelList = [theC] # omero::sys::IntList
sizeZ, sizeT = (len(plane2Dlist),1)
iId = pixelsService.createImage(sizeX, sizeY, sizeZ, sizeT, channelList, pixelsType, imageName, description)
imageId = iId.getValue()
image = containerService.getImages("Image", [imageId], None)[0]
# upload plane data
pixelsId = image.getPrimaryPixels().getId().getValue()
rawPixelStore.setPixelsId(pixelsId, True)
for theZ, plane2D in enumerate(plane2Dlist):
minValue = min(minValue, plane2D.min())
maxValue = max(maxValue, plane2D.max())
if plane2D.size > 1000000:
uploadPlaneByRow(rawPixelStore, plane2D, theZ, theC, theT)
else:
uploadPlane(rawPixelStore, plane2D, theZ, theC, theT)
pixelsService.setChannelGlobalMinMax(pixelsId, theC, float(minValue), float(maxValue))
resetRenderingSettings(renderingEngine, pixelsId, theC, minValue, maxValue)
# put the image in dataset, if specified.
if dataset:
link = omero.model.DatasetImageLinkI()
link.parent = omero.model.DatasetI(dataset.id.val, False)
link.child = omero.model.ImageI(image.id.val, False)
session.getUpdateService().saveObject(link)
renderingEngine.close()
rawPixelStore.close()
return image
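# Illustrative usage sketch (not part of the original module). 'session' is assumed to
# be a connected OMERO service factory; the image name and description are hypothetical.
#
#   import numpy as np
#   planes = [np.zeros((512, 512), dtype=np.int16) for _ in range(5)]
#   image = createNewImage(session, planes, "blank stack", "5 empty Z-sections")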
def parseInputs(client, session, processFn=IdentityFn):
"""
    Parse the inputs from the client object and map them to another form; values may be transformed by processFn.
@param client The client object
@param session The current session.
@param processFn A function to transform data to some other form.
@return Parsed inputs as defined by ProcessFn.
"""
inputKeys = client.getInputKeys();
commandArgs = {};
for key in inputKeys:
commandArgs[key]=client.getInput(key).getValue();
return processFn(commandArgs);
def getROIFromImage(iROIService, imageId, namespace=None):
"""
Get the ROI from the server for the image with the namespace
@param iROIService The iROIService object
@param imageId The imageId to retreive ROI from.
@param namespace The namespace of the ROI.
@return See above.
"""
roiOpts = omero.api.RoiOptions()
if(namespace!=None):
roiOpts.namespace = namespace;
return iROIService.findByImage(imageId, roiOpts);
def toCSV(list):
"""
Convert a list to a Comma Separated Value string.
@param list The list to convert.
@return See above.
"""
lenList = len(list);
cnt = 0;
str = "";
for item in list:
str = str + item;
if(cnt < lenList-1):
str = str + ",";
cnt = cnt +1;
return str;
def toList(csvString):
"""
Convert a csv string to a list of strings
@param csvString The CSV string to convert.
@return See above.
"""
list = csvString.split(',');
for index in range(len(list)):
list[index] = list[index].strip();
return list;
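# Illustrative examples (not part of the original module):
#   toCSV(["a", "b", "c"]) == "a,b,c"
#   toList("a, b, c")      == ["a", "b", "c"]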
def registerNamespace(iQuery, iUpdate, namespace, keywords):
"""
Register a workflow with the server, if the workflow does not exist create it and returns it,
otherwise it returns the already created workflow.
@param iQuery The query service.
@param iUpdate The update service.
@param namespace The namespace of the workflow.
@param keywords The keywords associated with the workflow.
@return see above.
"""
from omero.util.OmeroPopo import WorkflowData as WorkflowData
workflow = iQuery.findByQuery("from Namespace as n where n.name = '" + namespace.val+"'", None);
workflowData = WorkflowData();
if(workflow!=None):
workflowData = WorkflowData(workflow);
else:
workflowData.setNamespace(namespace.val);
splitKeywords = keywords.val.split(',');
SU_LOG.debug(workflowData.asIObject())
for keyword in splitKeywords:
workflowData.addKeyword(keyword);
SU_LOG.debug(workflowData.asIObject())
workflow = iUpdate.saveAndReturnObject(workflowData.asIObject());
return WorkflowData(workflow);
def findROIByImage(roiService, image, namespace):
"""
Finds the ROI with the given namespace linked to the image. Returns a collection of ROIs.
@param roiService The ROI service.
@param image The image the ROIs are linked to .
@param namespace The namespace of the ROI.
@return see above.
"""
from omero.util.OmeroPopo import ROIData as ROIData
roiOptions = omero.api.RoiOptions();
roiOptions.namespace = omero.rtypes.rstring(namespace);
results = roiService.findByImage(image, roiOptions);
roiList = [];
for roi in results.rois:
roiList.append(ROIData(roi));
return roiList; | hflynn/openmicroscopy | components/tools/OmeroPy/src/omero/util/script_utils.py | Python | gpl-2.0 | 44,494 |
from app.controller.weather import Weather
from json import dumps
api = 'v3/location/search'
params = {'query' : 'Atlanta', 'locationType' : 'city', 'countryCode' : 'US', 'adminDistrictCode' : 'GA'}
weather = Weather(username='ad6fbabc-d710-4126-8764-3a12d5a85096', password='onPrMLvFGk', params=params, api_url=api)
print(dumps(weather.getWeather(), indent=2))
api = 'v1/geocode/45.42/75.69/forecast/daily/10day.json'
params = {'units' : 'm'}
weather = Weather(username='ad6fbabc-d710-4126-8764-3a12d5a85096', password='onPrMLvFGk', params=params, api_url=api)
print(dumps(weather.getWeather(), indent=2)) | weizy1981/WatsonRobot | run/runWeather.py | Python | apache-2.0 | 607 |
import json
import threading
import os
import re
import tornado.autoreload
import tornado.ioloop
import tornado.web
from api_python_library.tools import config, api
class ItemsHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def get(self, *args, **kwargs):
# items GET /items(.:format) items#index
        items = {'items': [{1: {'title': 'first item'}, 2: {'title': 'second item'}}]}
self.write(json.dumps(items))
self.finish()
def post(self, *args, **kwargs):
# POST /items(.:format) items#create
result = {'result': 'ok'}
self.write(json.dumps(result))
self.finish()
class ItemHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def get(self, *args, **kwargs):
# new_item GET /items/new(.:format) items#new
# item GET /items/:id(.:format) items#show
# edit_item GET /items/:id/edit(.:format) items#edit
if re.match(r'/items/new', self.request.path):
self.write('..nothing to print..')
elif re.match(r'/items/[0-9]+(/edit)?', self.request.path):
item = {'title': 'first item'}
self.write(json.dumps(item))
self.finish()
def patch(self, *args, **kwargs):
# PATCH or PUT /items/:id(.:format) items#update
result = {'result': 'ok'}
self.write(json.dumps(result))
self.finish()
def put(self, *args, **kwargs):
# PATCH or PUT /items/:id(.:format) items#update
result = {'result': 'ok'}
self.write(json.dumps(result))
self.finish()
def delete(self, *args, **kwargs):
# DELETE /items/:id(.:format) items#destroy
result = {'result': 'ok'}
self.write(json.dumps(result))
self.finish()
pwd = os.path.dirname(os.path.abspath(__file__))
config.add_config_ini('%s/main.ini' % pwd)
api.add_api_config('%s/api.ini' % pwd)
application = tornado.web.Application(
handlers=[
(r"/items", ItemsHandler),
(r"/items/(.*)", ItemHandler),
])
if __name__ == '__main__':
application.listen(config.port, config.host)
tornado_ioloop = tornado.ioloop.IOLoop.instance()
threading.Thread(target=tornado_ioloop.start).start()
authorization_header = api.get_basic_authorization_header(config.user, config.password)
host = '%s:%s' % (config.host, config.port)
try:
print api.index_items(host, headers=authorization_header)
print api.create_item(host, headers=authorization_header)
print api.new_item(host, headers=authorization_header)
print api.edit_item(host, arguments={'id': 1}, headers=authorization_header)
print api.show_item(host, arguments={'id': 1}, headers=authorization_header)
print api.update_item(host, arguments={'id': 1}, headers=authorization_header)
print api.destroy_item(host, arguments={'id': 1}, headers=authorization_header)
finally:
tornado_ioloop.stop()
| SuderPawel/api-python-library | src/api_python_library/examples/main.py | Python | mit | 3,029 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-07 23:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_tasker', '0004_auto_20170102_1510'),
]
operations = [
migrations.AlterIndexTogether(
name='taskinfo',
index_together=set([('id', 'target', 'status', 'eta'), ('target', 'eta'), ('id', 'eta', 'status'), ('id', 'target')]),
),
]
| wooyek/django-tasker | django_tasker/migrations/0005_auto_20170107_2302.py | Python | mit | 506 |
from setuptools import setup
description = 'Ask not what $ORG can do for you, but what you can do for $ORG'
setup(
name='asknot-ng',
version='1.0',
description=description,
license='GPLv3+',
author='Ralph Bean',
author_email='rbean@redhat.com',
url='https://github.com/fedora-infra/asknot-ng',
install_requires=[
'mako',
'PyYAML',
],
extras_require={
'tests': ['nose2'],
},
packages=[],
py_modules=['asknot_lib'],
# This declares our special-case extractor to 'babel', a python l18n tool.
entry_points="""
[babel.extractors]
asknot = asknot_lib:extract
""",
# This further declares that babel should use our extractor on yaml files
# in the questions/ directory.
message_extractors={
"questions": [
('**.yml', 'asknot', None),
]
}
)
| fedora-infra/asknot-ng | setup.py | Python | gpl-3.0 | 885 |
import json
import time
import settings
from datetime import datetime
from shared import common
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
handler = None
send_interval = 60 # in seconds
doorpi_update_topic = '$aws/things/DoorPi/shadow/update/accepted'
def init():
global handler
if settings.is_fake():
from statuspi import statushandler_faker
handler = statushandler_faker
else:
from statuspi import statushandler
handler = statushandler
return
def customShadowCallback_Update(payload, responseStatus, token):
if responseStatus == "timeout":
print("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Update request with token: " + token + " accepted!")
reported = payloadDict["state"]["reported"]
if "temperature_from_pressure" in reported:
print("temperature_from_pressure: " + str(payloadDict["state"]["reported"]["temperature_from_pressure"]))
if "temperature_from_humidity" in reported:
print("temperature_from_humidity: " + str(payloadDict["state"]["reported"]["temperature_from_humidity"]))
if "humidity" in reported:
print("humidity: " + str(payloadDict["state"]["reported"]["humidity"]))
if "pressure" in reported:
print("pressure: " + str(payloadDict["state"]["reported"]["pressure"]))
if "connected" in reported:
print("connected: " + str(payloadDict["state"]["reported"]["connected"]))
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Update request " + token + " rejected!")
def customCallback(client, userdata, message):
if(message.topic == doorpi_update_topic):
payloadDict = json.loads(message.payload.decode('utf8'))
#print(json.dumps(payloadDict, indent=4, sort_keys=True))
reported = payloadDict["state"]["reported"]
if "connected" in reported:
if(reported["connected"] == "false"):
set_status('false', "closed", "closed")
return
if "MaleDoor" in reported and "FemaleDoor" in reported:
m = reported["MaleDoor"]
f = reported["FemaleDoor"]
set_status('true', m, f)
def customDoorPiShadowCallback_Get(payload, responseStatus, token):
print(responseStatus)
payloadDict = json.loads(payload)
print("++++++++FULL++++++++++")
door1 = payloadDict["state"]["reported"]["FemaleDoor"]
door2 = payloadDict["state"]["reported"]["MaleDoor"]
connected = payloadDict["state"]["reported"]["connected"]
print("Female: " + door1)
print("Male: " + door2)
print("Connected: " + connected)
print("+++++++++++++++++++++++\n\n")
set_status(connected, door1, door2)
def handle_command(client, message):
payload = message.payload.decode('utf-8')
print("Command received:")
print(payload)
def set_status(connected, door1, door2):
if connected == 'false':
handler.display_unknown()
else:
if door1 == 'open' and door2 == 'open':
handler.display_go()
else:
if door1 == 'open' or door2 == 'open':
handler.display_maybe()
else:
handler.display_stop()
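# Summary of the mapping implemented by set_status (descriptive comment added for clarity):
#   connected == 'false'        -> display_unknown()
#   both doors 'open'           -> display_go()
#   exactly one door 'open'     -> display_maybe()
#   neither door 'open'         -> display_stop()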
def handle_notification(message):
print("Notification received: " + str(message.payload))
def send_sensor_data(client):
# Prepare our sensor data in JSON format.
payload = json.dumps({
"state": {
"reported": {
"humidity": handler.get_humidity(),
"temperature_from_humidity": handler.get_temperature_from_humidity(),
"temperature_from_pressure": handler.get_temperature_from_pressure(),
"pressure": handler.get_pressure(),
"timestamp": str(datetime.now())
}
}
})
client.shadowUpdate(payload, customShadowCallback_Update, 5)
def on_message(client, userdata, msg):
if msg.topic == settings.topic_statuspi_command:
handle_command(client, msg)
return
if msg.topic == settings.topic_statuspi_notify:
handle_notification(msg)
return
if msg.topic == settings.topic_doorpi_event:
handle_status_update(msg)
return
print("Spam received: " + str(msg.payload))
def start():
handler.init()
handler.display_unknown()
time.sleep(20)
shadow, client = common.setup_aws_shadow_client(settings.aws_endpoint,
settings.aws_root_certificate,
settings.aws_private_key,
settings.aws_certificate,
settings.device_name)
JSONPayload = '{"state":{"reported":{"connected":"true"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
door_shadow = shadow.createShadowHandlerWithName("DoorPi", True)
door_shadow.shadowGet(customDoorPiShadowCallback_Get, 5)
shadow.getMQTTConnection().subscribe(doorpi_update_topic, 1, customCallback)
try:
while True:
send_sensor_data(client)
time.sleep(send_interval)
except KeyboardInterrupt:
JSONPayload = '{"state":{"reported":{"connected":"false"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
shadow.disconnect()
print('stopped')
def stop():
handler.display_blank()
| royveshovda/pifog | source/piclient/statuspi/status_runner.py | Python | apache-2.0 | 5,635 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import jsonify, session
from marshmallow import INCLUDE, fields
from marshmallow_enum import EnumField
from indico.modules.categories.controllers.base import RHDisplayCategoryBase
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.search.base import SearchOptions, SearchTarget, get_search_provider
from indico.modules.search.internal import InternalSearch
from indico.modules.search.result_schemas import ResultSchema
from indico.modules.search.views import WPCategorySearch, WPEventSearch, WPSearch
from indico.util.marshmallow import validate_with_message
from indico.web.args import use_kwargs
from indico.web.rh import RH
class RHSearchDisplay(RH):
def _process(self):
return WPSearch.render_template('search.html')
class RHCategorySearchDisplay(RHDisplayCategoryBase):
def _process(self):
return WPCategorySearch.render_template('category_search.html', self.category)
class RHEventSearchDisplay(RHDisplayEventBase):
def _process(self):
return WPEventSearch.render_template('event_search.html', self.event)
class RHAPISearch(RH):
"""API for searching across all records with the current search provider.
Besides pagination, filters or placeholders may be passed as query parameters.
Since `type` may be a list, the results from the search provider are not mixed with
    those from the InternalSearch.
"""
@use_kwargs({
'page': fields.Int(missing=None),
'q': fields.String(required=True),
'type': fields.List(EnumField(SearchTarget), missing=None),
'admin_override_enabled': fields.Bool(
missing=False,
validate=validate_with_message(lambda value: session.user and session.user.is_admin,
'Restricted to admins')
),
}, location='query', unknown=INCLUDE)
def _process(self, page, q, type, **params):
search_provider = get_search_provider()
if type == [SearchTarget.category]:
search_provider = InternalSearch
result = search_provider().search(q, session.user, page, type, **params)
return ResultSchema().dump(result)
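# Illustrative request for the API above (parameter names come from the schema;
# the URL path and the non-category target names are assumptions):
#   GET /api/search?q=quantum&page=2&type=event&type=contribution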
class RHAPISearchOptions(RH):
def _process(self):
search_provider = get_search_provider()()
placeholders = search_provider.get_placeholders()
sort_options = search_provider.get_sort_options()
return jsonify(SearchOptions(placeholders, sort_options).dump())
| pferreir/indico | indico/modules/search/controllers.py | Python | mit | 2,694 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all campaigns with AWQL.
To add a campaign, run add_campaign.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignService.query
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
import time
from googleads import adwords
PAGE_SIZE = 100
def main(client):
# Initialize appropriate service.
campaign_service = client.GetService('CampaignService', version='v201502')
# Construct query and get all campaigns.
offset = 0
query = 'SELECT Id, Name, Status ORDER BY Name'
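  # AWQL also supports filtering; a narrowed query could look like this
  # (illustrative only, assuming the Status field accepts the predicate):
  #   SELECT Id, Name, Status WHERE Status = 'ENABLED' ORDER BY Name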
more_pages = True
while more_pages:
page = campaign_service.query(query + ' LIMIT %s, %s' % (offset, PAGE_SIZE))
# Display results.
if 'entries' in page:
for campaign in page['entries']:
print ('Campaign with id \'%s\', name \'%s\', and status \'%s\' was '
'found.' % (campaign['id'], campaign['name'],
campaign['status']))
else:
      print('No campaigns were found.')
offset += PAGE_SIZE
more_pages = offset < int(page['totalNumEntries'])
time.sleep(1)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
| coxmediagroup/googleads-python-lib | examples/adwords/v201502/basic_operations/get_campaigns_with_awql.py | Python | apache-2.0 | 2,056 |
# coding: utf-8
from pww.crypto import Cipher
def test_plaintext_should_equal_decrypted_text():
    cipher = Cipher(b"password", b"saltsalt")
plain = "sample text サンプル テキスト".encode()
encrypted = cipher.encrypt(plain)
decrypted = cipher.decrypt(encrypted)
assert plain == decrypted
def test_different_passwords_should_fail_decryption():
cipher1 = Cipher(b"password", b"saltsalt")
cipher2 = Cipher(b"bad password", b"saltsalt")
plain = "sample text サンプル テキスト".encode()
encrypted = cipher1.encrypt(plain)
decrypted = cipher2.decrypt(encrypted)
assert plain != decrypted
def test_different_salts_should_fail_decryption():
cipher1 = Cipher(b"password", b"saltsalt")
cipher2 = Cipher(b"password", b"bad salt")
plain = "sample text サンプル テキスト".encode()
encrypted = cipher1.encrypt(plain)
decrypted = cipher2.decrypt(encrypted)
assert plain != decrypted
| meganehouser/pww | test/test_crypto.py | Python | mit | 970 |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import, print_function, division)
from copy import deepcopy
from itertools import combinations
class Cell:
def __init__(self):
self.value = 0
self.row = set()
self.col = set()
self.sq = set()
self.rm_values = set()
def isSet(self):
return self.value > 0
@property
def values(self):
if self.value:
return set()
else:
return set(range(1, 10)) - self.row - self.col - self.sq - self.rm_values
def set(self, val):
if val > 0:
if val not in self.row and val not in self.col and val not in self.sq:
self.value = val
self.row.add(val)
self.col.add(val)
self.sq.add(val)
else:
raise ValueError
def rm_value(self, val):
if isinstance(val, int):
self.rm_values.add(val)
elif isinstance(val, set):
self.rm_values |= val
def __repr__(self):
if self.value == 0:
return ' '
else:
return repr(self.value)
def carre(i,j):
    # Index (0-8) of the 3x3 box containing cell (i, j).
    return i//3+3*(j//3)
def are_neigh(i,j,k,l):
    # Number of units (column, row, box) shared by cells (i, j) and (k, l).
    return (i==k) + (j==l) + (carre(i,j)==carre(k,l))
def coord(dim, i, k):
    # Coordinates of the k-th cell of unit i, where dim selects the unit type:
    # 0 = column, 1 = row, 2 = 3x3 box.
    if dim==0:
        return i, k
    elif dim==1:
        return k, i
    elif dim==2:
        return 3*(i%3)+k%3,3*(i//3)+k//3
class Sudoku:
def __init__(self, start=None): #(((0,)*9, )*9):
self.grid = { }
self.turns = 0
# Cells initialisation
for i in range(9):
# self.grid[i] = { }
for j in range(9):
self.grid[i,j] = Cell()
# Rows initialisation
for j in range(9):
row = set()
for i in range(9):
self.grid[i,j].row = row
# Columns initialisation
for i in range(9):
col = set()
for j in range(9):
self.grid[i,j].col = col
# Squares initialisation
for c in range(9):
sq = set()
for i in range(3):
for j in range(3):
self.grid[i+3*(c%3),j+3*(c//3)].sq = sq
if start:
for j, c in enumerate(start):
for i, v in enumerate(c):
try:
self.set(i, j, v)
except:
print('###', i, j, v)
raise
def __repr__(self):
result = '-'*25 + "\n"
for j in range(8, -1, -1):
line = ''
for i in range(0, 9, 3):
line += "| %r %r %r " % (tuple( self.grid[k,j] for k in range(i, i+3) ))
result += "%s|\n" % line
if not j%3:
result += '-'*25 + "\n"
return result.rstrip()
@property
def solved(self):
return all( [ self.grid[i,j].isSet() for i in range(9) for j in range(9) ] )
def set(self, i, j, val):
self.grid[i,j].set(val)
def rm_value(self, i, j, val):
self.grid[i,j].rm_value(val)
def neigh_values(self, x, y, coord=False):
row_result = set()
for i in range(9):
if i != x:
if coord:
row_result.add((i,y))
else:
row_result |= self.grid[i,y].values
col_result = set()
for j in range(9):
if j != y:
if coord:
col_result.add((x,j))
else:
col_result |= self.grid[x,j].values
sq_result = set()
for i in range(3):
for j in range(3):
if i != x%3 or j != y%3:
if coord:
sq_result.add((i+3*(x//3),j+3*(y//3)))
else:
sq_result |= self.grid[i+3*(x//3),j+3*(y//3)].values
if coord:
return row_result | col_result | sq_result
else:
return (row_result, col_result, sq_result)
def rech_solitaire_nu(self):
chgt = False
        # Naked single: a cell with exactly one candidate value left.
for i in range(9):
for j in range(9):
l = self.grid[i,j].values
if len(l) == 1:
v = l.pop()
print("%d,%d -> %d |" % (i, j, v), end=' ')
self.set(i, j, v)
chgt = True
self.turns += 1
return chgt
def rech_solitaire_camoufle(self):
chgt = False
        # Hidden single: a value that fits in only one cell of a unit.
for i in range(9):
for j in range(9):
l = self.grid[i,j].values
for a in ( l - x for x in self.neigh_values(i, j) ):
if len(a) == 1:
v = a.pop()
print("%d,%d => %d |" % (i, j, v), end=' ')
self.set(i, j, v)
chgt = True
self.turns += 1
break
return chgt
def rech_gpes_dominants(self):
chgt = False
for v in range(1, 10):
candidates = [ (i,j) for i in range(9) for j in range(9) if v in self.grid[i,j].values ]
for candidat in candidates:
                for dim in (0, 1): # column/row
copains = [ a for a in candidates if a[dim]==candidat[dim] and are_neigh(*candidat,*a) >= 2 ]
candid_mince = [ a for a in candidates if a[dim]==candidat[dim] and a not in copains ]
candid_sq = [ a for a in candidates if carre(*a)==carre(*candidat) and a not in copains ]
if not candid_mince:
for cell in candid_sq:
print("%d,%d -> -%d |" % (*cell, v), end=' ')
self.rm_value(*cell, v)
chgt = True
self.turns += 1
elif not candid_sq:
for cell in candid_mince:
print("%d,%d -> -%d |" % (*cell, v), end=' ')
self.rm_value(*cell, v)
chgt = True
self.turns += 1
return chgt
def rech_gpes_nus(self):
chgt = False
candidates = [ (i,j,self.grid[i,j].values) for i in range(9) for j in range(9) if self.grid[i,j].values ]
for (i,j,v) in candidates:
current_gpe = [(i,j)]
for (k,l,m) in candidates:
if all([ 1 <= are_neigh(*g,k,l) <= 2 for g in current_gpe ]) and m <= v:
current_gpe.append((k,l))
if len(current_gpe) == len(v):
for (k,l,m) in candidates:
intersect = m&v
if all([ 1 <= are_neigh(*g,k,l) <= 2 for g in current_gpe ]) and intersect:
print("%d,%d => -%s |" % (k,l,intersect), end=' ')
self.rm_value(k,l,intersect)
chgt = True
self.turns += 1
return chgt
def rech_gpes_camoufles(self):
chgt = False
candidates = [ (i,j,self.grid[i,j].values) for i in range(9) for j in range(9) ]
        values_count = ( # column, row, square
{ i: {j: set() for j in range(1, 10)} for i in range(9)},
{ i: {j: set() for j in range(1, 10)} for i in range(9)},
{ i: {j: set() for j in range(1, 10)} for i in range(9)},
)
for (i, j, values) in candidates:
for v in values:
values_count[0][i][v].add((i,j))
values_count[1][j][v].add((i,j))
values_count[2][carre(i,j)][v].add((i,j))
        for dim in (0, 1, 2): # column/row/square
for k in range(9):
count_values = [ {'vals': set((v, )), 'cells': c} for (v,c) in values_count[dim][k].items() if len(c) > 1 ]
                # len(c) == 0 corresponds to already-fixed values, and 1 to a naked single...
all_combinations = []
                for n in range(1,5): # Limit to quadruples (if a quintuple exists, a complementary quadruple also exists: 5+4=9 cells)
all_combinations += combinations(count_values, n)
all_count_values = []
for glop in all_combinations:
tmp = {'vals': set(), 'cells': set() }
for plop in glop:
tmp['vals'] |= plop['vals']
tmp['cells'] |= plop['cells']
all_count_values.append(tmp)
for result in all_count_values:
if result['vals'] and len(result['cells'])==len(result['vals']):
for cell in result['cells']:
diff = self.grid[cell].values - result['vals']
if diff:
print("%d,%d ~> -%s |" % (*cell, diff), end=' ')
self.rm_value(*cell, diff)
chgt = True
self.turns += 1
return chgt
def rech_reseaux(self):
chgt = False
for v in range(1, 10):
candidates = [ (i,j) for i in range(9) for j in range(9) if v in self.grid[i,j].values ]
            for dim in (0, 1): # column/row
other_dim = int(not dim)
current_dims = { i: set() for i in range(9) }
for a in candidates:
current_dims[a[dim]].add(a[other_dim])
all_combinations = []
                for n in range(1,5): # Limit to quadruples (if a quintuple exists, a complementary quadruple also exists: 5+4=9 cells)
all_combinations += combinations([ ({i}, current_dims[i]) for i in current_dims if current_dims[i] ], n)
for combin in all_combinations:
current_dim = set()
current_other_dim = set()
for c in combin:
current_dim |= c[0]
current_other_dim |= c[1]
if len(current_dim) == len(current_other_dim):
for a in [ a for a in candidates if a[dim] not in current_dim and a[other_dim] in current_other_dim ]:
print("%d,%d *> -%d |" % (*a, v), end=' ')
self.grid[a].rm_value(v)
chgt = True
self.turns += 1
return chgt
def solve(self):
# https://fr.wikibooks.org/wiki/Résolution_de_casse-têtes/Résolution_du_sudoku
chgt = (True, )
while not self.solved and any(chgt):
chgt = (
self.rech_solitaire_nu(),
self.rech_solitaire_camoufle(),
)
if not any(chgt):
chgt = (
self.rech_gpes_dominants(),
self.rech_gpes_nus(),
self.rech_gpes_camoufles(),
self.rech_reseaux(),
)
#print("\n%r" % self)
#raw_input("Press Enter to continue...")
        print("\n%r\n###### Solved: %s in %d moves #######" % (self, self.solved, self.turns))
# if not self.solved:
# print([ (i,j,self.grid[i,j].values) for i in range(9) for j in range(9) ])
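# Illustrative usage of the solver above (puzzle values are hypothetical; 0 marks
# an empty cell and `start` is an iterable of nine 9-value rows, as in __init__):
#   grid = (
#       (0, 0, 3, 0, 2, 0, 6, 0, 0),
#       # ... eight more 9-tuples ...
#   )
#   Sudoku(grid).solve()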
| albatros69/Divers | sudoku.py | Python | gpl-3.0 | 11,551 |
from __future__ import absolute_import
from typing import Any, Dict, List, Optional, Text
import logging
import re
from email.header import decode_header
import email.message as message
from django.conf import settings
from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \
internal_send_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_image
from zerver.lib.utils import generate_random_token
from zerver.lib.str_utils import force_text
from zerver.models import Stream, Recipient, get_user_profile_by_email, \
get_user_profile_by_id, get_display_recipient, get_recipient, \
Message, Realm, UserProfile
from six import binary_type
import six
import talon
from talon import quotations
talon.init()
logger = logging.getLogger(__name__)
def redact_stream(error_message):
# type: (Text) -> Text
domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
stream_match = re.search(u'\\b(.*?)@' + domain, error_message)
if stream_match:
stream_name = stream_match.groups()[0]
return error_message.replace(stream_name, "X" * len(stream_name))
return error_message
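# Illustrative effect of the redaction above (gateway domain and stream name are
# hypothetical): "denied for secret-stream@streams.example.com" becomes
# "denied for XXXXXXXXXXXXX@streams.example.com".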
def report_to_zulip(error_message):
# type: (Text) -> None
if settings.ERROR_BOT is None:
return
error_bot = get_user_profile_by_email(settings.ERROR_BOT)
error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
send_zulip(settings.ERROR_BOT, error_stream, u"email mirror error",
u"""~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message, error_message, debug_info):
# type: (message.Message, Text, Dict[str, Any]) -> None
scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"),
redact_stream(error_message))
if "to" in debug_info:
scrubbed_error = u"Stream: %s\n%s" % (redact_stream(debug_info["to"]),
scrubbed_error)
if "stream" in debug_info:
scrubbed_error = u"Realm: %s\n%s" % (debug_info["stream"].realm.domain,
scrubbed_error)
logger.error(scrubbed_error)
report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token):
# type: (Text) -> Text
return 'missed_message:' + token
def is_missed_message_address(address):
# type: (Text) -> bool
msg_string = get_email_gateway_message_string_from_address(address)
return is_mm_32_format(msg_string)
def is_mm_32_format(msg_string):
# type: (Text) -> bool
'''
Missed message strings are formatted with a little "mm" prefix
followed by a randomly generated 32-character string.
'''
return msg_string.startswith('mm') and len(msg_string) == 34
def get_missed_message_token_from_address(address):
# type: (Text) -> Text
msg_string = get_email_gateway_message_string_from_address(address)
if msg_string is None:
raise ZulipEmailForwardError('Address not recognized by gateway.')
if not is_mm_32_format(msg_string):
raise ZulipEmailForwardError('Could not parse missed message address')
# strip off the 'mm' before returning the redis key
return msg_string[2:]
def create_missed_message_address(user_profile, message):
# type: (UserProfile, Message) -> Text
if settings.EMAIL_GATEWAY_PATTERN == '':
logging.warning("EMAIL_GATEWAY_PATTERN is an empty string, using "
"NOREPLY_EMAIL_ADDRESS in the 'from' field.")
return settings.NOREPLY_EMAIL_ADDRESS
if message.recipient.type == Recipient.PERSONAL:
# We need to reply to the sender so look up their personal recipient_id
recipient_id = get_recipient(Recipient.PERSONAL, message.sender_id).id
else:
recipient_id = message.recipient_id
data = {
'user_profile_id': user_profile.id,
'recipient_id': recipient_id,
'subject': message.subject,
}
while True:
token = generate_random_token(32)
key = missed_message_redis_key(token)
if redis_client.hsetnx(key, 'uses_left', 1):
break
with redis_client.pipeline() as pipeline:
pipeline.hmset(key, data)
pipeline.expire(key, 60 * 60 * 24 * 5)
pipeline.execute()
address = u'mm' + token
return settings.EMAIL_GATEWAY_PATTERN % (address,)
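# The address returned above is EMAIL_GATEWAY_PATTERN filled with the 'mm' token,
# e.g. (with a hypothetical pattern of '%s@streams.example.com'):
#   mm0123456789abcdef0123456789abcdef@streams.example.com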
def mark_missed_message_address_as_used(address):
# type: (Text) -> None
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
with redis_client.pipeline() as pipeline:
pipeline.hincrby(key, 'uses_left', -1)
pipeline.expire(key, 60 * 60 * 24 * 5)
new_value = pipeline.execute()[0]
if new_value < 0:
redis_client.delete(key)
raise ZulipEmailForwardError('Missed message address has already been used')
def send_to_missed_message_address(address, message):
# type: (Text, message.Message) -> None
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
if not all(val is not None for val in result):
raise ZulipEmailForwardError('Missing missed message address data')
user_profile_id, recipient_id, subject = result
user_profile = get_user_profile_by_id(user_profile_id)
recipient = Recipient.objects.get(id=recipient_id)
display_recipient = get_display_recipient(recipient)
    # Test against six.string_types so we don't depend on the list return type
    # from get_display_recipient
if not isinstance(display_recipient, six.string_types):
recipient_str = ','.join([user['email'] for user in display_recipient])
else:
recipient_str = display_recipient
body = filter_footer(extract_body(message))
body += extract_and_upload_attachments(message, user_profile.realm)
if not body:
body = '(No email body)'
if recipient.type == Recipient.STREAM:
recipient_type_name = 'stream'
else:
recipient_type_name = 'private'
internal_send_message(user_profile.realm, user_profile.email,
recipient_type_name, recipient_str, subject, body)
logging.info("Successfully processed email from %s to %s" % (
user_profile.email, recipient_str))
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
pass
def send_zulip(sender, stream, topic, content):
# type: (Text, Stream, Text, Text) -> None
internal_send_message(
stream.realm,
sender,
"stream",
stream.name,
topic[:60],
content[:2000])
def valid_stream(stream_name, token):
# type: (Text, Text) -> bool
try:
stream = Stream.objects.get(email_token=token)
return stream.name.lower() == stream_name.lower()
except Stream.DoesNotExist:
return False
def get_message_part_by_type(message, content_type):
# type: (message.Message, Text) -> Text
charsets = message.get_charsets()
for idx, part in enumerate(message.walk()):
if part.get_content_type() == content_type:
content = part.get_payload(decode=True)
assert isinstance(content, binary_type)
if charsets[idx]:
text = content.decode(charsets[idx], errors="ignore")
return text
def extract_body(message):
# type: (message.Message) -> Text
# If the message contains a plaintext version of the body, use
# that.
plaintext_content = get_message_part_by_type(message, "text/plain")
if plaintext_content:
return quotations.extract_from_plain(plaintext_content)
# If we only have an HTML version, try to make that look nice.
html_content = get_message_part_by_type(message, "text/html")
if html_content:
html_content = quotations.extract_from_html(html_content)
return convert_html_to_markdown(html_content)
raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text):
# type: (Text) -> Text
# Try to filter out obvious footers.
possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")]
if len(possible_footers) != 1:
# Be conservative and don't try to scrub content if there
# isn't a trivial footer structure.
return text
return text.partition("--")[0].strip()
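# Illustrative behaviour of the footer filter above: a body like
#   "See you then!\n-- \nJohn Doe\nExample Corp"
# is trimmed to "See you then!", while bodies with zero or several '--' lines are
# returned unchanged.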
def extract_and_upload_attachments(message, realm):
# type: (message.Message, Realm) -> Text
user_profile = get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT)
attachment_links = []
payload = message.get_payload()
if not isinstance(payload, list):
# This is not a multipart message, so it can't contain attachments.
return ""
for part in payload:
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
attachment = part.get_payload(decode=True)
if isinstance(attachment, binary_type):
s3_url = upload_message_image(filename, content_type,
attachment,
user_profile,
target_realm=realm)
formatted_link = u"[%s](%s)" % (filename, s3_url)
attachment_links.append(formatted_link)
else:
logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
(filename, message.get("From")))
return u"\n".join(attachment_links)
def extract_and_validate(email):
# type: (Text) -> Stream
try:
stream_name, token = decode_email_address(email)
except (TypeError, ValueError):
raise ZulipEmailForwardError("Malformed email recipient " + email)
if not valid_stream(stream_name, token):
raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message):
# type: (message.Message) -> Text
# We can't use Delivered-To; if there is a X-Gm-Original-To
# it is more accurate, so try to find the most-accurate
# recipient list in descending priority order
recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
recipients = [] # type: List[Text]
for recipient_header in recipient_headers:
r = message.get_all(recipient_header, None)
if r:
recipients = r
break
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
match_email_re = re.compile(".*?".join(pattern_parts))
for recipient_email in recipients:
if match_email_re.match(recipient_email):
return recipient_email
raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to, subject, message, debug_info):
# type: (Text, Text, message.Message, Dict[str, Any]) -> None
stream = extract_and_validate(to)
body = filter_footer(extract_body(message))
body += extract_and_upload_attachments(message, stream.realm)
debug_info["stream"] = stream
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)
logging.info("Successfully processed email to %s (%s)" % (
stream.name, stream.realm.domain))
def process_missed_message(to, message, pre_checked):
# type: (Text, message.Message, bool) -> None
if not pre_checked:
mark_missed_message_address_as_used(to)
send_to_missed_message_address(to, message)
def process_message(message, rcpt_to=None, pre_checked=False):
# type: (message.Message, Optional[Text], bool) -> None
subject_header = message.get("Subject", "(no subject)")
encoded_subject, encoding = decode_header(subject_header)[0]
if encoding is None:
subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
else:
try:
subject = encoded_subject.decode(encoding)
except (UnicodeDecodeError, LookupError):
subject = u"(unreadable subject)"
debug_info = {}
try:
if rcpt_to is not None:
to = rcpt_to
else:
to = find_emailgateway_recipient(message)
debug_info["to"] = to
if is_missed_message_address(to):
process_missed_message(to, message, pre_checked)
else:
process_stream_message(to, subject, message, debug_info)
except ZulipEmailForwardError as e:
# TODO: notify sender of error, retry if appropriate.
log_and_report(message, str(e), debug_info)
| sonali0901/zulip | zerver/lib/email_mirror.py | Python | apache-2.0 | 12,924 |
#!/usr/bin/env python
from flask import Flask
from nose.tools import eq_, raises
from kit.util import *
def test_uncamelcase():
eq_(uncamelcase('CalvinAndHobbes'), 'calvin_and_hobbes')
class Test_Cacheable(object):
def setup(self):
class Example(Cacheable):
@Cacheable.cached_property
def number(self):
return 10
@Cacheable.cached_property
def another(self):
return 48
self.ex = Example()
def test_set_cache(self):
eq_(self.ex.number, 10)
self.ex.number = 2
eq_(self.ex.number, 2)
self.ex.number = 10
def test_del_cache(self):
eq_(self.ex.get_cache_ages()['number'], None)
eq_(self.ex.number, 10)
del self.ex.number
eq_(self.ex.get_cache_ages()['number'], None)
def test_refresh_cache(self):
self.ex.number = 3
self.ex.refresh_cache(['another'])
eq_(self.ex.number, 3)
self.ex.refresh_cache()
eq_(self.ex.number, 10)
def test_refresh_cache_expiration(self):
self.ex.number = 3
self.ex.refresh_cache(expiration=100)
eq_(self.ex.number, 3)
self.ex.refresh_cache()
eq_(self.ex.number, 10)
@raises(AttributeError)
def test_refresh_cache_error(self):
self.ex.refresh_cache(['number2'])
def test_get_cache_ages(self):
self.ex.refresh_cache()
eq_(set(self.ex.get_cache_ages().keys()), set(['number', 'another']))
def test_to_json():
class Foo(Jsonifiable):
def __init__(self, n, nested=0):
self.n = n
if nested > 0:
self.d = {str(n): Foo(n + 1, nested - 1)}
self.f = Foo(n + 1, nested - 1)
self.l = [Foo(n + 1, nested - 1)]
self._p = 33
foo = Foo(0, 2)
j = foo.to_json()
eq_(j, {'n': 0, 'd': {'0': {}}, 'f': {}, 'l': [{}]})
j['n'] = 1
j['d'] = {'1': {}}
eq_(foo.to_json(depth=2), {'n': 0, 'd': {'0': j}, 'f': j, 'l': [j]})
Foo.__json__ = ['n']
eq_(foo.to_json(depth=2), {'n': 0})
class Test_View(object):
def setup(self):
self.app = Flask('test_app')
self.View = make_view(self.app)
self.client = self.app.test_client()
class MyView(self.View):
rules = {
'/index/': ['GET'],
'/index/<page>': ['GET', 'PUT'],
}
def get(self, **kwargs):
if not kwargs:
return 'no page'
else:
page = kwargs['page']
return 'get page %s' % (page, )
def put(self, **kwargs):
page = kwargs['page']
return 'put page %s' % (page, )
self.MyView = MyView
@raises(ValueError)
def test_unbound(self):
class UnboundView(View):
rules = {
'/unbound/': ['GET']
}
def test_ok_basic(self):
eq_(self.client.get('/index/').data, 'no page')
eq_(self.client.get('/index/1').data, 'get page 1')
eq_(self.client.put('/index/1').data, 'put page 1')
def test_not_allowed_basic(self):
eq_(self.client.post('/index/').status_code, 405) # method not allowed
eq_(self.client.put('/index/').status_code, 405) # method not allowed
def test_not_allowed(self):
class OtherView(self.MyView):
methods = ['GET']
rules = {
'/other/': ['GET'],
'/other/<page>': ['GET', 'PUT'],
}
eq_(self.client.get('/other/').data, 'no page')
eq_(self.client.put('/other/1').status_code, 405)
def test_not_implemented(self):
class AnotherView(self.View):
rules = {
'/another/': ['GET'],
}
eq_(self.client.get('/another/').status_code, 404) # page not found
@raises(ValueError)
def test_no_rules(self):
class YetAnotherView(self.View):
def get():
return 'hi'
YetAnotherView.register_view()
@raises(ValueError)
def test_invalid_method(self):
class YouGuessedItView(self.View):
rules = {
'/guess/': ['GET', 'A_WRONG_METHOD']
}
def get():
return 'hi'
| mtth/kit | kit/test/test_util.py | Python | mit | 3,867 |
from horizon.test import helpers as test
class BandwidthTests(test.TestCase):
# Unit tests for bandwidth.
def test_me(self):
self.assertTrue(1 + 1 == 2)
| jorik041/shmoocon_2014_talk | caravan/caravan/dashboards/infrastructure/bandwidth/tests.py | Python | bsd-2-clause | 171 |
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the org applications."""
from google.appengine.ext import ndb
from django import forms as django_forms
from django.utils.translation import ugettext
from melange.request import exception
from soc.logic import validate
from soc.models import org_app_record
from soc.views import forms
from soc.views import survey
from soc.views.helper import access_checker
from soc.views.helper import lists
from soc.logic import cleaning
from soc.models.org_app_record import OrgAppRecord
from soc.models.org_app_survey import OrgAppSurvey
from soc.views.readonly_template import SurveyRecordReadOnlyTemplate
DEF_BACKUP_ADMIN_NO_PROFILE = ugettext(
'Backup admin does not have an org admin profile for the program. Please '
'ask your backup admin to register a profile for %s at %s')
PROCESS_ORG_APPS_FORM_BUTTON_VALUE = \
'Finalize decisions and send acceptance/rejection emails'
NEW_ORG_CHOICES = [('Veteran', 'Veteran'), ('New', 'New')]
class OrgAppEditForm(forms.SurveyEditForm):
"""Form to create/edit organization application survey.
"""
class Meta:
model = OrgAppSurvey
css_prefix = 'org-app-edit'
exclude = ['scope', 'author', 'program', 'created_by', 'modified_by']
class OrgAppTakeForm(forms.SurveyTakeForm):
"""Form for would-be organization admins to apply for the program.
"""
backup_admin_id = django_forms.CharField(
label=ugettext('Backup Admin'), required=True,
help_text=ugettext('The username of the user who will serve as the '
'backup admin for this organization.'))
  # We render this field as a select field instead of a checkbox because of
  # its visibility on the form. Due to its location, the checkbox field is not
  # clearly visible to the person filling in the form, which could cause
  # problems later. As a precaution, we display this field as a choice field
  # and then convert the data back to a boolean value in the corresponding
  # field cleaner.
new_org = forms.ChoiceField(choices=NEW_ORG_CHOICES)
def __init__(self, bound_class_field, request_data=None, **kwargs):
self.request_data = request_data
# Workaround for Django's limitation of not being able to set initial value
# for ChoiceField after calling super.
if 'instance' in kwargs:
kwargs['initial'] = {
'new_org': 'New' if kwargs['instance'].new_org else 'Veteran'
}
super(OrgAppTakeForm, self).__init__(
bound_class_field, survey=self.request_data.org_app, **kwargs)
if self.instance:
backup_admin_key = ndb.Key.from_old_key(
org_app_record.OrgAppRecord.backup_admin
.get_value_for_datastore(self.instance))
self.fields['backup_admin_id'].initial = backup_admin_key.get().user_id
# not marked required by data model for backwards compatibility
self.fields['org_id'].required = True
class Meta:
model = OrgAppRecord
css_prefix = 'org-app-record'
exclude = ['main_admin', 'backup_admin', 'status', 'user', 'survey',
'created', 'modified', 'program', 'agreed_to_admin_agreement']
widgets = forms.choiceWidgets(model,
['license'])
def validateBackupAdminProfile(self, backup_admin_user, profile_model):
"""Validates if backup admin has a profile for the current program.
Args:
backup_admin_user: User entity for the backup admin.
profile_model: Model class from which the profile must be fetched.
Raises:
django_forms.ValidationError if the backup admin does not have a profile.
"""
if not validate.hasNonStudentProfileForProgram(
backup_admin_user.key, self.request_data.program.key(),
models=self.request_data.models):
redirector = self.request_data.redirect.createProfile('org_admin')
raise django_forms.ValidationError(
DEF_BACKUP_ADMIN_NO_PROFILE % (
self.request_data.program.name,
self._getCreateProfileURL(redirector)))
def clean_org_id(self):
org_id = cleaning.clean_link_id('org_id')(self)
if not org_id:
# manual required check, see Issue 1291
raise django_forms.ValidationError('This field is required.')
q = OrgAppRecord.all()
q.filter('survey', self.survey)
q.filter('org_id', org_id)
org_app = q.get()
if org_app:
# If we are creating a new org app it is a duplicate, if we are editing
# an org app we must check if the one we found has a different key.
if (not self.instance) or (org_app.key() != self.instance.key()):
raise django_forms.ValidationError('This ID has already been taken.')
return org_id
def clean_backup_admin_id(self):
backup_admin = cleaning.clean_existing_user('backup_admin_id')(self)
if not self.instance:
cleaning.clean_users_not_same('backup_admin_id')(self)
else:
main_admin_key = ndb.Key.from_old_key(
org_app_record.OrgAppRecord.main_admin
.get_value_for_datastore(self.instance))
if main_admin_key == backup_admin.key:
raise django_forms.ValidationError(
'You cannot enter the person who created the application here.')
self.cleaned_data['backup_admin'] = backup_admin.key.to_old_key()
return backup_admin
def clean_new_org(self):
"""Converts the select widget value of the new_org field from the form to
the boolean value required by the backing data model.
"""
return self.cleaned_data['new_org'] == 'New'
def clean(self):
cleaned_data = self.cleaned_data
# pop our custom id field if exists
if 'backup_admin_id' in cleaned_data:
cleaned_data.pop('backup_admin_id')
return cleaned_data
def _getCreateProfileURL(self, redirector):
"""Returns the full secure URL of the create profile page."""
raise NotImplementedError
class OrgAppRecordsList(object):
"""View for listing all records of a Organization Applications."""
def __init__(self, read_only_view):
"""Initializes the OrgAppRecordsList.
Args:
read_only_view: Name of the url pattern for the read only view of a
record.
"""
self.read_only_view = read_only_view
def checkAccess(self, data, check, mutator):
"""Defines access checks for this list, all hosts should be able to see it.
"""
if not data.org_app:
raise exception.NotFound(
message=access_checker.DEF_NO_ORG_APP % data.program.name)
check.isHost()
def context(self, data, check, mutator):
"""Returns the context of the page to render."""
record_list = self._createOrgAppsList(data)
page_name = ugettext('Records - %s' % (data.org_app.title))
context = {
'page_name': page_name,
'record_list': record_list,
}
return context
def jsonContext(self, data, check, mutator):
"""Handler for JSON requests."""
idx = lists.getListIndex(data.request)
if idx == 0:
record_list = self._createOrgAppsList(data)
return record_list.listContentResponse(data.request).content()
else:
# TODO(nathaniel): This needs to be a return statement, right?
super(OrgAppRecordsList, self).jsonContext(data, check, mutator)
def _createOrgAppsList(self, data):
"""Creates a SurveyRecordList for the requested survey."""
record_list = survey.SurveyRecordList(
data, data.org_app, OrgAppRecord, idx=0)
record_list.list_config.addSimpleColumn('name', 'Name')
record_list.list_config.addSimpleColumn('org_id', 'Organization ID')
record_list.list_config.addPlainTextColumn(
'new_org', 'New/Veteran',
lambda ent, *args: 'New' if ent.new_org else 'Veteran')
# TODO(ljvderijk): Poke Mario during all-hands to see if we can separate
# "search options" and in-line selection options.
options = [
('', 'All'),
('(needs review)', 'needs review'),
('(pre-accepted)', 'pre-accepted'),
#('(accepted)', 'accepted'),
('(pre-rejected)', 'pre-rejected'),
#('(rejected)', 'rejected'),
('(ignored)', 'ignored'),
]
record_list.list_config.addSimpleColumn('status', 'Status', options=options)
record_list.list_config.setColumnEditable('status', True, 'select')
record_list.list_config.addPostEditButton('save', 'Save')
record_list.list_config.setRowAction(
lambda e, *args: data.redirect.id(e.key().id_or_name()).
urlOf(self.read_only_view))
return record_list
def templatePath(self):
return 'soc/org_app/records.html'
class OrgAppReadOnlyTemplate(SurveyRecordReadOnlyTemplate):
"""Template to construct readonly organization application record.
"""
class Meta:
model = OrgAppRecord
css_prefix = 'org-app-show'
fields = ['org_id', 'name', 'description', 'home_page', 'license',
'new_org']
renderers = {
'new_org': lambda instance: 'New' if instance.new_org else 'Veteran'
}
survey_name = 'Organization Application'
| rhyolight/nupic.son | app/soc/views/org_app.py | Python | apache-2.0 | 9,576 |
"""Defines methods that call the unversioned Mesos HTTP endpoints"""
from __future__ import unicode_literals
from node.resources.node_resources import NodeResources
from node.resources.resource import ScalarResource
from util.dcos import make_dcos_request
def get_agent_resources(master, agent_ids):
"""Returns the total resources for each of the given agents
:param master: The address for the Mesos master
:type master: `util.host.HostAddress`
:param agent_ids: The set of agent IDs
:type agent_ids: set
:returns: The total resources for each agent stored by agent ID
:rtype: dict
"""
results = {}
resp = make_dcos_request(master, '/slaves')
for agent_dict in resp.json()['slaves']:
agent_id = agent_dict['id']
if agent_id in agent_ids:
resource_list = []
resource_dict = agent_dict['resources']
for name in resource_dict:
value = resource_dict[name]
if isinstance(value, float):
resource_list.append(ScalarResource(name, value))
resources = NodeResources(resource_list)
results[agent_id] = resources
return results
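# The fields read above map onto entries of the Mesos '/slaves' response shaped
# roughly like this (agent ID and resource values are illustrative only):
#   {"slaves": [{"id": "agent-1", "resources": {"cpus": 4.0, "mem": 16384.0, "disk": 10240.0}}]}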
| ngageoint/scale | scale/mesos_api/unversioned/agent.py | Python | apache-2.0 | 1,202 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 20:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='LineItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
('amount', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='lineitem',
name='quote',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quotes.Quote'),
),
]
| moniquehw/quoterizer | quotes/migrations/0001_initial.py | Python | gpl-3.0 | 1,134 |
import numpy as np
class AxisOrder:
"""Define the order of spatial axes. Default: x first"""
def __init__(self):
self.is_x_first = False
self.x = 0
self.y = 0
self.z = 0
self.x_first()
def x_first(self):
self.is_x_first = True
self.x, self.y, self.z = 0, 1, 2
def x_last(self):
self.is_x_first = False
self.x, self.y, self.z = -1, -2, -3
def up_vector(self, rank):
if self.is_x_first:
return np.array([0] * (rank - 1) + [1])
else:
return np.array([1] + [0] * (rank - 1))
def axis_name(self, index, spatial_rank):
if self.is_x_first:
return ['x', 'y', 'z', 'w'][:spatial_rank][index]
else:
return ['x', 'y', 'z', 'w'][:spatial_rank][::-1][index]
GLOBAL_AXIS_ORDER = AxisOrder()
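# Sketch of how the global order behaves (follows directly from the code above):
#   GLOBAL_AXIS_ORDER.axis_name(0, 3)  # 'x' with the default x-first ordering
#   GLOBAL_AXIS_ORDER.up_vector(3)     # array([0, 0, 1]) while x is first
#   GLOBAL_AXIS_ORDER.x_last()         # afterwards axis_name(0, 3) == 'z'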
| tum-pbs/PhiFlow | phi/math/_config.py | Python | mit | 862 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
        yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
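    # Illustrative behaviour (not a doctest): a session returned by newSession()
    # shares the SparkContext of the original but keeps its own SQLConf and
    # temporary views, e.g. spark.newSession().sparkContext is spark.sparkContext.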
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
        :return: corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
        :return: list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if not copied and s is not pdf[field.name]:
# Copy once if the series is modified to prevent the original Pandas
# DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(pdf[column], timezone)
if not copied and s is not pdf[column]:
# Copy once if the series is modified to prevent the original Pandas
# DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
        data types will be used to coerce the data in the Pandas-to-Arrow conversion.
"""
from pyspark.serializers import ArrowSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
# Create the Spark DataFrame directly from the Arrow data and schema
jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
jrdd, schema.json(), self._wrapped._jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        and each record will also be wrapped into a tuple, which can be converted to a row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is ``None``. The data type string format equals
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if self.conf.get("spark.sql.execution.pandas.respectSessionTimeZone").lower() \
== "true":
timezone = self.conf.get("spark.sql.session.timeZone")
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in data.columns]
if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
warnings.warn("Arrow will not be used in createDataFrame: %s" % str(e))
# Fallback to create DataFrame without arrow if raise some exception
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` instances active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| saltstar/spark | python/pyspark/sql/session.py | Python | apache-2.0 | 32,651 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This sample "listen to directory". move the new file and print it,
using docker-containers.
The following operators are being used: DockerOperator,
BashOperator & ShortCircuitOperator.
TODO: Review the workflow, change it according to
your environment & enable the code.
"""
from datetime import timedelta
from docker.types import Mount
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import ShortCircuitOperator
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.utils.dates import days_ago
dag = DAG(
"docker_sample_copy_data",
default_args={
"owner": "airflow",
"depends_on_past": False,
"email": ["airflow@example.com"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
},
schedule_interval=timedelta(minutes=10),
start_date=days_ago(2),
)
locate_file_cmd = """
sleep 10
find {{params.source_location}} -type f -printf "%f\n" | head -1
"""
t_view = BashOperator(
task_id="view_file",
bash_command=locate_file_cmd,
do_xcom_push=True,
params={"source_location": "/your/input_dir/path"},
dag=dag,
)
t_is_data_available = ShortCircuitOperator(
task_id="check_if_data_available",
python_callable=lambda task_output: not task_output == "",
op_kwargs=dict(task_output=t_view.output),
dag=dag,
)
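# Note: `t_view.output` is an XComArg; passing it through `op_kwargs` pulls the
# BashOperator's pushed XCom at runtime and implicitly wires t_view >> t_is_data_available
# (see the comment at the end of this file).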
t_move = DockerOperator(
api_version="1.19",
docker_url="tcp://localhost:2375", # replace it with swarm/docker endpoint
image="centos:latest",
network_mode="bridge",
mounts=[
Mount(source="/your/host/input_dir/path", target="/your/input_dir/path", type="bind"),
Mount(source="/your/host/output_dir/path", target="/your/output_dir/path", type="bind"),
],
command=[
"/bin/bash",
"-c",
"/bin/sleep 30; "
"/bin/mv {{ params.source_location }}/" + f"{t_view.output}" + " {{ params.target_location }};"
"/bin/echo '{{ params.target_location }}/" + f"{t_view.output}';",
],
task_id="move_data",
do_xcom_push=True,
params={"source_location": "/your/input_dir/path", "target_location": "/your/output_dir/path"},
dag=dag,
)
t_print = DockerOperator(
api_version="1.19",
docker_url="tcp://localhost:2375",
image="centos:latest",
mounts=[Mount(source="/your/host/output_dir/path", target="/your/output_dir/path", type="bind")],
command=f"cat {t_move.output}",
task_id="print",
dag=dag,
)
t_is_data_available.set_downstream(t_move)
t_move.set_downstream(t_print)
# Task dependencies created via `XComArgs`:
# t_view >> t_is_data_available
| dhuang/incubator-airflow | airflow/providers/docker/example_dags/example_docker_copy_data.py | Python | apache-2.0 | 3,524 |
# Volume
import SMESH_mechanic_tetra
import SMESH
smesh = SMESH_mechanic_tetra.smesh
mesh = SMESH_mechanic_tetra.mesh
salome = SMESH_mechanic_tetra.salome
# Criterion : VOLUME < 7.
volume_margin = 7.
aFilter = smesh.GetFilter(SMESH.VOLUME, SMESH.FT_Volume3D, SMESH.FT_LessThan, volume_margin)
anIds = mesh.GetIdsFromFilter(aFilter)
# print the result
print ""
print "Criterion: Volume < ", volume_margin, " Nb = ", len(anIds)
j = 1
for i in range(len(anIds)):
if j > 20: j = 1; print ""
print anIds[i],
j = j + 1
pass
print ""
# create a group
aGroup = mesh.CreateEmptyGroup(SMESH.VOLUME, "Volume < " + `volume_margin`)
aGroup.Add(anIds)
salome.sg.updateObjBrowser(1)
| FedoraScientific/salome-smesh | doc/salome/examples/quality_controls_ex21.py | Python | lgpl-2.1 | 690 |
import database as d
import numpy as np
import random
from transitions import Machine
#Conversations are markov chains. Works as follows: each column j of a matrix corresponds to the
#CURRENT state and each row i to the TARGET state.
#Each entry i,j = the probability of moving to state i from state j.
#target state D = end of conversation. We start in state D when initializing conversation.
#Columns sum to 1; each internal list below is one row of the matrix.
#Conversation is a singleton. DO NOT CREATE NEW CONVERSATION OBJECTS.
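#Illustrative sketch (not used by the game): one step of the chain weights the current
#one-hot state vector by the transition matrix and samples the target state, which is
#what Conversation.talk() does internally. The helper name below is hypothetical.
def _example_markov_step(matrix, state_vector):
    """Return the next one-hot state vector sampled from np.dot(matrix, state_vector)."""
    probs = np.dot(matrix, state_vector).tolist()
    draw = random.random()
    next_vector = [0] * len(probs)
    for i, p in enumerate(probs):
        if draw < p:
            next_vector[i] = 1
            break
        draw -= p
    else:
        next_vector[-1] = 1  # guard against floating-point rounding
    return next_vector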
class Conversation(object):
#a. stores, b.manufacturers, c.friends, d. myself, e.end conversation
topicMatrix = [
[0.00,0.20,0.15,0.15,0.25],
[0.20,0.00,0.15,0.15,0.25],
[0.15,0.15,0.00,0.20,0.25],
[0.15,0.15,0.20,0.00,0.25],
[0.50,0.50,0.50,0.50,0.00]
]
#a. different store, b. new topic, c. end convo, d. prices
storeMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different manufacturer, b. new topic, c. end convo, d. prices
manuMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different friend, b. new topic, c. end convo, d. family, e. job, /f. skills
friendMatrix = [
[0.0,0.0,0.2,0.1,0.1],
[0.0,0.0,0.2,0.2,0.2],
[0.0,0.0,0.2,0.5,0.5],
[0.5,0.5,0.2,0.0,0.2],
[0.5,0.5,0.2,0.2,0.0]
]
# friendMatrix = [
# [0.00,0.00,0.15,0.1,0.1,0.1],
# [0.00,0.00,0.15,0.2,0.2,0.2],
# [0.00,0.00,0.15,0.5,0.5,0.5],
# [0.34,0.34,0.15,0.0,0.1,0.1],
# [0.33,0.33,0.15,0.1,0.0,0.1],
# [0.33,0.33,0.25,0.1,0.1,0.0]
# ]
#a. introduction, b. new topic, c. end convo, d. myfamily, e. myjob, /f. myskills
myselfMatrix = [
[0.00,1,0.2,0.0,0.0],
[0.25,0,0.2,0.2,0.2],
[0.25,0,0.2,0.5,0.5],
[0.25,0,0.2,0.0,0.3],
[0.25,0,0.2,0.3,0.0]
]
# myselfMatrix = [
# [0.0,1,0.15,0.00,0.00,0.00],
# [0.2,0,0.15,0.20,0.20,0.20],
# [0.2,0,0.15,0.50,0.50,0.50],
# [0.2,0,0.15,0.00,0.15,0.15],
# [0.2,0,0.15,0.15,0.00,0.15],
# [0.2,0,0.15,0.15,0.15,0.00]
# ]
states = ['topic','store','manu','friend', 'myself', 'exit']
transitions = [
{'trigger' : 'toTopic', 'source' : '*', 'dest' : 'topic'},
{'trigger' : 'toStore', 'source' : 'topic', 'dest' : 'store'},
{'trigger' : 'toManu' , 'source' : 'topic', 'dest' : 'manu' },
{'trigger' : 'toFriend', 'source' : 'topic', 'dest' : 'friend' },
{'trigger' : 'toMyself', 'source' : 'topic', 'dest' : 'myself'},
{'trigger' : 'toExit', 'source' : '*', 'dest' : 'exit'}
]
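    #The transitions library attaches each trigger above to the model as a method
    #(toTopic(), toStore(), ...); menuDict in __init__ maps dialogue choices onto them.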
def __init__(self):
self.isPlayer = False
self.firstPerson = None
self.secondPerson = None
self.target = None
self.machine = Machine(model=self, states=Conversation.states, transitions=Conversation.transitions, initial='exit')
self.menuDict = {
'topic' : [self.toStore, self.toManu, self.toFriend, self.toMyself, self.toExit],
'store' : [self.different, self.toTopic, self.toExit, self.prices],
'manu' : [self.different, self.toTopic, self.toExit, self.prices],
'friend' : [self.different, self.toTopic, self.toExit, self.family, self.job],
'myself' : [self.introduction, self.toTopic, self.toExit, self.myfamily, self.myjob]
}
self.machine.on_enter_topic('topicHandler')
self.machine.on_enter_store('storeHandler')
self.machine.on_enter_manu('manuHandler')
self.machine.on_enter_friend('friendHandler')
self.machine.on_enter_myself('myselfHandler')
self.machine.on_enter_exit('exitHandler')
def beginConversation(self, firstPerson, secondPerson, isPlayer=False):
self.isPlayer = isPlayer
self.firstPerson = firstPerson
self.secondPerson = secondPerson
self.introduction()
self.toTopic()
def introduction(self):
p2 = self.firstPerson.peopleManager(self.secondPerson)
p1 = self.secondPerson.peopleManager(self.firstPerson)
p2.name = self.secondPerson.name
p1.name = self.firstPerson.name
p2.updateOpinion(1)
p1.updateOpinion(1)
def different(self):
if self.state == 'friend':
testTarget = self.firstPerson.randomPerson(self.target)
if testTarget is not None:
self.target = testTarget.person
else:
self.target = None
elif self.state == 'manu':
testTarget = self.firstPerson.randomManu(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
elif self.state == 'store':
testTarget = self.firstPerson.randomStore(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
def prices(self):
if self.target is not None:
firstProfile = self.firstPerson.unitManager(self.target, self.secondPerson)
secondProfile = self.secondPerson.unitManager(self.target, self.firstPerson)
firstPrices = firstProfile.getPricesWithDayNum()
secondPrices = secondProfile.getPricesWithDayNum()
firstDayNum = firstPrices[1]
secondDayNum = secondPrices[1]
if firstDayNum > secondDayNum:
prices = firstPrices[0]
secondProfile.updatePrices(prices, firstDayNum)
#thoughts
self.firstPerson.think("I told " + self.secondPerson.name + " about the prices at " + self.target.name + ".")
self.secondPerson.think(self.firstPerson.name + " told me about the prices at " + self.target.name + ".")
elif secondDayNum > firstDayNum:
prices = secondPrices[0]
firstProfile.updatePrices(prices, secondDayNum)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about the prices at " + self.target.name + ".")
self.secondPerson.think("I told " + self.firstPerson.name + " about the prices at " + self.target.name + ".")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s prices.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s prices.")
else:
if self.state == 'store':
self.firstPerson.think(self.secondPerson.name + " listened to me gripe about how I can't find anywhere to shop.")
self.secondPerson.think(self.firstPerson.name + " told me that they can't find anywhere to shop.")
elif self.state == 'manu':
self.firstPerson.think("I mentioned to " + self.secondPerson.name + " that I don't know anything about the local industry.")
self.secondPerson.think(self.firstPerson.name + " told me that they don't know much about the local industry.")
else:
self.firstPerson.think("There is a bug in conversation.prices. (not manu or store)")
self.secondPerson.think("There is a bug in conversation.prices. (not manu or store)")
def family(self):
if self.target is not None:
#info: family, people
#profiles
p1 = self.firstPerson.peopleManager(self.target)
p2 = self.secondPerson.peopleManager(self.target)
#variables
f1 = p1.getFamily()
f2 = p2.getFamily()
ff = []
#update profiles
for a, b in zip(f1, f2):
if a[-1] >= b[-1]:
ff.append(a)
else:
ff.append(b)
p1.updateFamily(*ff)
p2.updateFamily(*ff)
#thoughts
self.firstPerson.think(self.secondPerson.name + " and I gossipped about " + self.target.name + "'s family.")
self.secondPerson.think(self.firstPerson.name + " and I gossipped about " + self.target.name + "'s family.")
else:
self.firstPerson.think("I don't really know anything about my friends' families.")
self.secondPerson.think("I don't really know anything about my friends' families.")
def job(self):
if self.target is not None:
#profiles
firstProfile = self.firstPerson.peopleManager(self.target)
secondProfile = self.secondPerson.peopleManager(self.target)
#variables
firstJob = firstProfile.getJob()
secondJob = secondProfile.getJob()
#update profiles
if firstJob[1] > secondJob[1]:
secondProfile.updateJob(*firstJob)
self.firstPerson.think("I told " + self.secondPerson.name + " what " + self.target.name + " does for a living.")
self.secondPerson.think(self.firstPerson.name + " told me what " + self.target.name + " does for a living.")
elif secondJob[1] > firstJob[1]:
firstProfile.updateJob(*secondJob)
self.firstPerson.think(self.secondPerson.name + " told me what " + self.target.name + " does for a living.")
self.secondPerson.think("I told " + self.firstPerson.name + " about " + self.target.name + " does for a living.")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s job.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s job.")
else:
self.firstPerson.think("I don't know what any of my friends do for a living!")
self.secondPerson.think("I don't know what any of my friends do for a living!")
# def skills(self):
# #info: skills
# if self.target is not None:
# #profiles
# firstProfile = self.firstPerson.peopleManager(self.target)
# secondProfile = self.secondPerson.peopleManager(self.target)
# #variables
# firstSkills = firstProfile.getSkills()
# secondSkills = secondProfile.getSkills()
# #update profiles
# if firstSkills[1] > secondSkills[1]:
# secondProfile.updateSkills(*firstSkills)
# self.firstPerson.think("I told " + self.secondPerson.name + " about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# elif secondSkills[1] > firstSkills[1]:
# firstProfile.updateSkills(*secondSkills)
# self.firstPerson.think(self.secondPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think("I told " + self.firstPerson.name + " about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think(self.secondPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think("I should spend more time doing things with my friends.")
# self.secondPerson.think("I should spend more time doing things with my friends.")
def myfamily(self):
#info: family, people
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
firstOwn = self.firstPerson.peopleManager(self.firstPerson)
secondOwn = self.secondPerson.peopleManager(self.secondPerson)
#update profiles
firstProfile.updateFamily(firstOwn.getFather(), firstOwn.getMother(), firstOwn.getSpouse(), firstOwn.getSiblings(), firstOwn.getChildren())
secondProfile.updateFamily(secondOwn.getFather(), secondOwn.getMother(), secondOwn.getSpouse(), secondOwn.getSiblings(), secondOwn.getChildren())
#thoughts
self.firstPerson.think(self.secondPerson.name + " caught me up on their family life.")
self.secondPerson.think(self.firstPerson.name + " caught me up on their family life.")
def myjob(self):
#info: jobs, jobUnits, *salaries
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
#variables
firstJob = self.firstPerson.getJob()
secondJob = self.secondPerson.getJob()
dayNum = self.firstPerson.model.getDayNum()
try:
firstJobType = firstJob.getJobType()
firstJobUnit = firstJob.getUnit()
firstJobLoc = firstJobUnit.getName()
firstSalary = firstJob.getSalary()
except:
firstJobType = "Jobhunter"
firstJobUnit = None
firstJobLoc = "home"
firstSalary = 0
try:
secondJobType = secondJob.getJobType()
secondJobUnit = secondJob.getUnit()
secondJobLoc = secondJobUnit.getName()
secondSalary = secondJob.getSalary()
except:
secondJobType = "Jobhunter"
secondJobUnit = None
secondJobLoc = "home"
secondSalary = 0
#update profiles
if dayNum > firstProfile.getJob()[1]:
firstProfile.updateJob(firstJob, dayNum)
if dayNum > firstProfile.getSalary()[1]:
firstProfile.updateSalary(firstSalary, dayNum)
if dayNum > secondProfile.getJob()[1]:
secondProfile.updateJob(secondJob, dayNum)
if dayNum > secondProfile.getSalary()[1]:
            secondProfile.updateSalary(secondSalary, dayNum)
if firstJobUnit is not None:
self.secondPerson.unitManager(firstJobUnit, self.firstPerson)
if secondJobUnit is not None:
self.firstPerson.unitManager(secondJobUnit, self.secondPerson)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about their job as a " + secondJobType + " at " + secondJobLoc + ".")
self.secondPerson.think(self.firstPerson.name + " told me about their job as a " + firstJobType + " at " + firstJobLoc + ".")
# def myskills(self):
# #info skills
# #profiles
# firstProfile = self.secondPerson.peopleManager(self.firstPerson)
# secondProfile = self.firstPerson.peopleManager(self.secondPerson)
# #variables
# firstSkills = self.firstPerson.getSkills()
# secondSkills = self.secondPerson.getSkills()
# dayNum = self.firstPerson.model.getDayNum()
# #update profiles
# if dayNum > firstProfile.getSkills()[1]:
# firstProfile.updateSkills(firstSkills, dayNum)
# if dayNum > secondProfile.getSkills()[1]:
# secondProfile.updateSkills(secondSkills, dayNum)
# #thoughts
# self.firstPerson.think(self.secondPerson.name + " and I talked shop for a while.")
# self.secondPerson.think(self.firstPerson.name + " and I talked shop for a while.")
#dialogues are chosen here, but the actual method call is in the handler (eg prices)
def talk(self, matrix, stateVector):
if self.isPlayer:
# stateVector = playerChoice
pass
else:
#get dialogue probabilities given last dialogue
probArray = np.dot(matrix, stateVector)
prob = probArray.tolist()
#choose dialogue
choice = random.random()
stateVector = [0 for i in range(len(prob))]
for i in range(len(prob)):
outcome = prob[i]
if outcome >= choice:
stateVector[i] = 1
return stateVector
                else:
                    choice = choice - outcome
            # Floating-point rounding can leave the probabilities summing to slightly less
            # than 1; fall back to the last state rather than returning None.
            stateVector[-1] = 1
            return stateVector
def topicHandler(self):
matrix = Conversation.topicMatrix
stateVector = [0,0,0,0,1]
# self.firstPerson.think("topicHandler")
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def storeHandler(self):
matrix = Conversation.storeMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("storeHandler")
self.different()
while self.state == 'store':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def manuHandler(self):
matrix = Conversation.manuMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("manuHandler")
self.different()
while self.state == 'manu':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def friendHandler(self):
matrix = Conversation.friendMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("friendHandler")
self.different()
while self.state == 'friend':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def myselfHandler(self):
matrix = Conversation.myselfMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("myselfHandler")
while self.state == 'myself':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def exitHandler(self):
self.isPlayer = False
Convo = Conversation()
| markemus/economy | conversation.py | Python | mit | 18,547
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Frescobaldi Main Window.
"""
from __future__ import division
import itertools
import os
import sys
import weakref
from PyQt5.QtCore import (pyqtSignal, QByteArray, QDir, QMimeData, QSettings,
QSize, Qt, QUrl)
from PyQt5.QtGui import (QKeySequence, QTextCursor, QTextDocument)
from PyQt5.QtPrintSupport import (QAbstractPrintDialog, QPrintDialog, QPrinter)
from PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QMainWindow,
QMenu, QMessageBox, QPlainTextEdit, QVBoxLayout,
QWhatsThis, QWidget)
import app
import backup
import appinfo
import icons
import actioncollection
import actioncollectionmanager
import menu
import tabbar
import document
import view
import viewmanager
import highlighter
import historymanager
import recentfiles
import sessions.manager
import util
import qutil
import helpers
import panelmanager
import engrave
import scorewiz
import externalchanges
import browseriface
import vcs
import file_import
class MainWindow(QMainWindow):
# emitted when the MainWindow will close
aboutToClose = pyqtSignal()
# only emitted when this is the last MainWindow to close
aboutToCloseLast = pyqtSignal()
# emitted when all editor documents have been closed
allDocumentsClosed = pyqtSignal()
# both signals emit (current, previous)
currentDocumentChanged = pyqtSignal(document.Document, document.Document)
currentViewChanged = pyqtSignal(view.View, view.View)
# emitted when whether there is a selection changes
selectionStateChanged = pyqtSignal(bool)
def __init__(self, other=None):
"""Creates a new MainWindow.
It adds itself to app.windows to keep a reference.
It shares the documents list with all other MainWindows. It copies
some info (like the currently active document) from the 'other' window,
if given.
"""
QMainWindow.__init__(self)
self.setAttribute(Qt.WA_DeleteOnClose)
# this could be made configurable
self.setCorner(Qt.TopLeftCorner, Qt.LeftDockWidgetArea)
self.setCorner(Qt.BottomLeftCorner, Qt.LeftDockWidgetArea)
self.setCorner(Qt.TopRightCorner, Qt.RightDockWidgetArea)
self.setCorner(Qt.BottomRightCorner, Qt.RightDockWidgetArea)
self._currentDocument = None
self._currentView = lambda: None
self._selectedState = None
# find an unused objectName
names = set(win.objectName() for win in app.windows)
for num in itertools.count(1):
name = "MainWindow{0}".format(num)
if name not in names:
self.setObjectName(name)
break
app.windows.append(self)
mainwidget = QWidget()
self.setCentralWidget(mainwidget)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
mainwidget.setLayout(layout)
self.tabBar = tabbar.TabBar(self)
self.viewManager = viewmanager.ViewManager(self)
layout.addWidget(self.tabBar)
layout.addWidget(self.viewManager)
self.createActions()
self.createMenus()
self.createToolBars()
app.translateUI(self)
app.sessionChanged.connect(self.updateWindowTitle)
self.readSettings()
self.historyManager = historymanager.HistoryManager(self, other.historyManager if other else None)
self.viewManager.viewChanged.connect(self.slotViewChanged)
self.tabBar.currentDocumentChanged.connect(self.setCurrentDocument)
self.setAcceptDrops(True)
# keep track of all ActionCollections for the keyboard settings dialog
actioncollectionmanager.manager(self).addActionCollection(self.actionCollection)
actioncollectionmanager.manager(self).addActionCollection(self.viewManager.actionCollection)
if other:
self.setCurrentDocument(other.currentDocument())
self.updateWindowTitle()
app.mainwindowCreated(self)
def documents(self):
"""Returns the list of documents in the order of the TabBar."""
return self.tabBar.documents()
def currentView(self):
"""Returns the current View or None."""
return self._currentView()
def currentDocument(self):
"""Returns the current Document or None."""
return self._currentDocument
def setCurrentDocument(self, doc, findOpenView=None):
"""Set the current document.
The findOpenView argument makes sense when the user has split the
editor view in more than one. If findOpenView == True and one of the
views has the document, that view is focused. If findOpenView == False,
the currently focused view is changed to the document. If findOpenView
        is None, the user's setting is read.
"""
if findOpenView is None:
findOpenView = QSettings().value("mainwindow/find_open_view", True, bool)
self.viewManager.setCurrentDocument(doc, findOpenView)
def hasSelection(self):
"""Returns whether there is a selection."""
return self.textCursor().hasSelection() if self.currentView() else False
def textCursor(self):
"""Returns the QTextCursor of the current View.
Raises an error if there is not yet a view.
"""
return self.currentView().textCursor()
def setTextCursor(self, cursor, findOpenView=None):
"""Switch to the cursor's document() and set that cursor on its View.
For the findOpenView argument, see setCurrentDocument().
This method also respects the preferred number of surrounding lines
that are at least to be shown (by using the gotoTextCursor() method of
the View (see view.py)).
"""
self.setCurrentDocument(cursor.document(), findOpenView)
self.currentView().gotoTextCursor(cursor)
def slotViewChanged(self, view):
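        """Called when the current View changes: rewires selection/copy signals to the
        new view, moves document-level connections if the document changed, refreshes
        the window title, actions and selection, and emits the changed signals.
        """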
curv = self._currentView()
if curv:
if curv is view:
return
curv.copyAvailable.disconnect(self.updateSelection)
curv.selectionChanged.disconnect(self.updateSelection)
view.copyAvailable.connect(self.updateSelection)
view.selectionChanged.connect(self.updateSelection)
self._currentView = weakref.ref(view)
doc = view.document()
curd, self._currentDocument = self._currentDocument, doc
if curd is not doc:
if curd:
curd.undoAvailable.disconnect(self.updateDocActions)
curd.redoAvailable.disconnect(self.updateDocActions)
curd.modificationChanged.disconnect(self.updateWindowTitle)
curd.urlChanged.disconnect(self.updateWindowTitle)
curd.loaded.disconnect(self.updateDocActions)
doc.undoAvailable.connect(self.updateDocActions)
doc.redoAvailable.connect(self.updateDocActions)
doc.modificationChanged.connect(self.updateWindowTitle)
doc.urlChanged.connect(self.updateWindowTitle)
doc.loaded.connect(self.updateDocActions)
self.updateDocActions()
self.updateWindowTitle()
self.updateSelection()
self.updateActions()
self.currentViewChanged.emit(view, curv)
if curd is not doc:
self.currentDocumentChanged.emit(doc, curd)
def updateSelection(self):
selection = self.textCursor().hasSelection()
if selection != self._selectedState:
self._selectedState = selection
self.selectionStateChanged.emit(selection)
ac = self.actionCollection
ac.edit_copy.setEnabled(selection)
ac.edit_copy_colored_html.setEnabled(selection)
ac.edit_cut.setEnabled(selection)
ac.edit_select_none.setEnabled(selection)
def updateActions(self):
view = self.currentView()
action = self.actionCollection.view_wrap_lines
action.setChecked(view.lineWrapMode() == QPlainTextEdit.WidgetWidth)
def updateDocActions(self):
doc = self.currentDocument()
ac = self.actionCollection
ac.edit_undo.setEnabled(doc.isUndoAvailable())
ac.edit_redo.setEnabled(doc.isRedoAvailable())
def updateWindowTitle(self):
doc = self.currentDocument()
name = []
if sessions.currentSession():
name.append(sessions.currentSession() + ':')
if doc:
if doc.url().isEmpty():
name.append(doc.documentName())
elif doc.url().toLocalFile():
name.append(util.homify(doc.url().toLocalFile()))
else:
name.append(doc.url().toString())
if doc.isModified():
# L10N: state of document in window titlebar
name.append(_("[modified]"))
window_title = app.caption(" ".join(name))
if vcs.app_is_git_controlled():
window_title += " " + vcs.app_active_branch_window_title()
self.setWindowTitle(window_title)
def dropEvent(self, ev):
if not ev.source() and ev.mimeData().hasUrls():
ev.accept()
lyurls = []
impurls = []
for url in ev.mimeData().urls():
imp = file_import.FileImport.instance(self)
if imp.isImportable(url.toLocalFile()):
impurls.append(QDir.toNativeSeparators(url.toLocalFile()))
else:
lyurls.append(url)
docs = self.openUrls(lyurls)
if docs:
self.setCurrentDocument(docs[-1])
for i in impurls:
imp.openDialog(i)
def dragEnterEvent(self, ev):
if not ev.source() and ev.mimeData().hasUrls():
ev.accept()
def closeEvent(self, ev):
lastWindow = len(app.windows) == 1
if not lastWindow or self.queryClose():
self.aboutToClose.emit()
if lastWindow:
self.writeSettings()
self.aboutToCloseLast.emit()
app.windows.remove(self)
app.mainwindowClosed(self)
ev.accept()
else:
ev.ignore()
def queryClose(self):
"""Tries to close all documents, returns True if succeeded."""
for doc in self.historyManager.documents():
if not self.queryCloseDocument(doc):
return False
sessions.manager.get(self).saveCurrentSessionIfDesired()
for doc in self.historyManager.documents()[::-1]:
doc.close()
return True
def queryCloseDocument(self, doc):
"""Returns whether a document can be closed.
If modified, asks the user. The document is not closed.
"""
if not doc.isModified():
allow_close = True
else:
self.setCurrentDocument(doc, findOpenView=True)
res = QMessageBox.warning(self, _("dialog title", "Close Document"),
_("The document \"{name}\" has been modified.\n"
"Do you want to save your changes or discard them?").format(name=doc.documentName()),
QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
if res == QMessageBox.Save:
allow_close = self.saveDocument(doc)
else:
allow_close = res == QMessageBox.Discard
return allow_close and engrave.engraver(self).queryCloseDocument(doc)
def createPopupMenu(self):
""" Adds an entry to the popup menu to show/hide the tab bar. """
menu = QMainWindow.createPopupMenu(self)
menu.addSeparator()
a = menu.addAction(_("Tab Bar"))
a.setCheckable(True)
a.setChecked(self.tabBar.isVisible())
a.toggled.connect(self.tabBar.setVisible)
return menu
def readSettings(self):
""" Read a few settings from the application global config. """
settings = QSettings()
settings.beginGroup('mainwindow')
defaultSize = QApplication.desktop().screen().size() * 2 / 3
self.resize(settings.value("size", defaultSize, QSize))
self.restoreState(settings.value('state', QByteArray(), QByteArray))
self.tabBar.setVisible(settings.value('tabbar', True, bool))
if os.name != "posix" and settings.value('maximized', False, bool):
self.showMaximized()
def writeSettings(self):
""" Write a few settings to the application global config. """
settings = QSettings()
settings.beginGroup('mainwindow')
if not self.isFullScreen():
settings.setValue("size", self.size())
settings.setValue('state', self.saveState())
settings.setValue('tabbar', self.tabBar.isVisible())
settings.setValue('maximized', self.isMaximized())
def readSessionSettings(self, settings):
"""Restore ourselves from session manager settings.
These methods store much more information than the readSettings and
writeSettings methods. This method tries to restore window size and
position. Also the objectName() is set, so that the window manager can
preserve stacking order, etc.
"""
name = settings.value('name', '', str)
if name:
self.setObjectName(name)
self.restoreGeometry(settings.value('geometry', QByteArray(), QByteArray))
self.restoreState(settings.value('state', QByteArray(), QByteArray))
def writeSessionSettings(self, settings):
"""Write our state to the session manager settings.
See readSessionSettings().
"""
settings.setValue('name', self.objectName())
settings.setValue('geometry', self.saveGeometry())
settings.setValue('state', self.saveState())
def openUrl(self, url, encoding=None, ignore_errors=False):
"""Calls openUrls() with one url. See openUrls()."""
for d in self.openUrls([url], encoding, ignore_errors):
return d
def openUrls(self, urls, encoding=None, ignore_errors=False):
"""Open a list of urls, using encoding if specified.
Returns the list of documents that were successfully loaded.
If encoding is None, the encoding is read from the document, defaulting
to UTF-8.
If ignore_errors is False (the default), an error message is given
showing the url or urls that failed to load. If ignore_errors is True,
load errors are silently ignored.
If an url fails to load, a document is not created. To create an
empty document with an url, use the document.Document constructor.
Successfully loaded urls are added to the recent files.
"""
docs = []
failures = []
for url in urls:
try:
doc = app.openUrl(url, encoding)
except IOError as e:
failures.append((url, e))
else:
docs.append(doc)
recentfiles.add(url)
if failures and not ignore_errors:
if len(failures) == 1:
url, e = failures[0]
filename = url.toLocalFile()
msg = _("{message}\n\n{strerror} ({errno})").format(
message = _("Could not read from: {url}").format(url=filename),
strerror = e.strerror,
errno = e.errno)
else:
msg = _("Could not read:") + "\n\n" + "\n".join(
"{url}: {strerror} ({errno})".format(
url = url.toLocalFile(),
strerror = e.strerror,
errno = e.errno) for url, e in failures)
QMessageBox.critical(self, app.caption(_("Error")), msg)
return docs
def currentDirectory(self):
"""Returns the current directory of the current document.
If the document has no filename yet, returns the configured default
directory, or the user's home directory.
        If that is not set as well, returns the current directory
of the application.
"""
import resultfiles
curdir = (resultfiles.results(self.currentDocument()).currentDirectory()
or app.basedir() or QDir.homePath())
if curdir:
return curdir
try:
return os.getcwdu()
except AttributeError:
return os.getcwd()
def cleanStart(self):
"""Called when the previous action left no document open.
Currently simply calls newDocument().
"""
self.newDocument()
##
# Implementations of menu actions
##
def newDocument(self):
""" Creates a new, empty document. """
d = document.Document()
self.setCurrentDocument(d)
s = QSettings()
ndoc = s.value("new_document", "empty", str)
template = s.value("new_document_template", "", str)
if ndoc == "template" and template:
from snippet import snippets, insert
if snippets.text(template):
insert.insert(template, self.currentView())
d.setUndoRedoEnabled(False)
d.setUndoRedoEnabled(True) # d.clearUndoRedoStacks() only in Qt >= 4.7
d.setModified(False)
elif ndoc == "version":
import lilypondinfo
d.setPlainText('\\version "{0}"\n\n'.format(lilypondinfo.preferred().versionString()))
d.setModified(False)
def openDocument(self):
""" Displays an open dialog to open one or more documents. """
if app.documents:
ext = os.path.splitext(self.currentDocument().url().path())[1]
directory = os.path.dirname(self.currentDocument().url().toLocalFile()) or app.basedir()
else:
ext = ".ly"
directory = app.basedir()
filetypes = app.filetypes(ext)
caption = app.caption(_("dialog title", "Open File"))
files = QFileDialog.getOpenFileNames(self, caption, directory, filetypes)[0]
urls = [QUrl.fromLocalFile(filename) for filename in files]
docs = self.openUrls(urls)
if docs:
self.setCurrentDocument(docs[-1])
def saveDocument(self, doc, save_as=False):
""" Saves the document, asking for a name if necessary.
If save_as is True, a name is always asked.
Returns True if saving succeeded.
"""
if save_as or doc.url().isEmpty():
filename = doc.url().toLocalFile()
if filename:
filetypes = app.filetypes(os.path.splitext(filename)[1])
else:
directory = app.basedir() # default directory to save to
import documentinfo
import ly.lex
filename = os.path.join(directory, documentinfo.defaultfilename(doc))
filetypes = app.filetypes(ly.lex.extensions[documentinfo.mode(doc)])
caption = app.caption(_("dialog title", "Save File"))
filename = QFileDialog.getSaveFileName(self, caption, filename, filetypes)[0]
if not filename:
return False # cancelled
url = QUrl.fromLocalFile(filename)
else:
url = doc.url()
if QSettings().value("strip_trailing_whitespace", False, bool):
import reformat
reformat.remove_trailing_whitespace(QTextCursor(doc))
# we only support local files for now
filename = url.toLocalFile()
b = backup.backup(filename)
try:
doc.save(url)
except IOError as e:
msg = _("{message}\n\n{strerror} ({errno})").format(
message = _("Could not write to: {url}").format(url=filename),
strerror = e.strerror,
errno = e.errno)
QMessageBox.critical(self, app.caption(_("Error")), msg)
return False
else:
if b:
backup.removeBackup(filename)
recentfiles.add(doc.url())
return True
def saveDocumentAs(self, doc):
""" Saves the document, always asking for a name.
Returns True if saving succeeded, False if it failed or was cancelled.
"""
return self.saveDocument(doc, True)
def closeDocument(self, doc):
""" Closes the document, asking for saving if modified.
Returns True if closing succeeded.
"""
close = self.queryCloseDocument(doc)
if close:
doc.close()
# keep one document
if not app.documents:
self.cleanStart()
return close
def saveCurrentDocument(self):
return self.saveDocument(self.currentDocument())
def saveCurrentDocumentAs(self):
return self.saveDocumentAs(self.currentDocument())
def saveCopyAs(self):
import ly.lex
doc = self.currentDocument()
if not self.currentView().textCursor().hasSelection():
import documentinfo
mode = documentinfo.mode(doc)
data = doc.encodedText()
caption = app.caption(_("dialog title", "Save Copy"))
else:
import fileinfo
text = self.currentView().textCursor().selection().toPlainText()
mode = fileinfo.textmode(text)
data = util.encode(util.platform_newlines(text))
caption = app.caption(_("dialog title", "Save Selection"))
filetypes = app.filetypes(ly.lex.extensions[mode])
dirname = os.path.dirname(doc.url().toLocalFile()) or app.basedir()
filename = QFileDialog.getSaveFileName(self, caption, dirname, filetypes)[0]
if not filename:
return # cancelled
try:
with open(filename, "wb") as f:
f.write(data)
except IOError as e:
msg = _("{message}\n\n{strerror} ({errno})").format(
message = _("Could not write to: {url}").format(url=filename),
strerror = e.strerror,
errno = e.errno)
QMessageBox.critical(self, app.caption(_("Error")), msg)
def closeCurrentDocument(self):
return self.closeDocument(self.currentDocument())
def reloadCurrentDocument(self):
"""Reload the current document again from disk.
This action can be undone.
"""
d = self.currentDocument()
try:
d.load(keepUndo=True)
except IOError as e:
filename = d.url().toLocalFile()
msg = _("{message}\n\n{strerror} ({errno})").format(
message = _("Could not read from: {url}").format(url=filename),
strerror = e.strerror,
errno = e.errno)
QMessageBox.critical(self, app.caption(_("Error")), msg)
def reloadAllDocuments(self):
"""Reloads all documents."""
failures = []
for d in self.historyManager.documents():
try:
d.load(keepUndo=True)
except IOError as e:
failures.append((d, e))
if failures:
msg = _("Could not reload:") + "\n\n" + "\n".join(
"{url}: {strerror} ({errno})".format(
url = d.url().toLocalFile(),
strerror = e.strerror,
errno = e.errno) for d, e in failures)
QMessageBox.critical(self, app.caption(_("Error")), msg)
def saveAllDocuments(self):
""" Saves all documents.
Returns True if all documents were saved.
If one document failed or was cancelled the whole operation is cancelled
and this function returns False.
"""
cur = self.currentDocument()
for doc in self.historyManager.documents():
if doc.isModified():
if doc.url().isEmpty():
self.setCurrentDocument(doc, findOpenView=True)
res = self.saveDocumentAs(doc)
else:
res = self.saveDocument(doc)
if not res:
return False
self.setCurrentDocument(cur, findOpenView=True)
return True
def closeOtherDocuments(self):
""" Closes all documents that are not the current document.
Returns True if all documents were closed.
"""
cur = self.currentDocument()
docs = self.historyManager.documents()[1:]
for doc in docs:
if not self.queryCloseDocument(doc):
self.setCurrentDocument(cur, findOpenView=True)
return False
for doc in docs:
doc.close()
return True
def closeAllDocuments(self):
"""Closes all documents and keep one new, empty document."""
sessions.manager.get(self).saveCurrentSessionIfDesired()
if self.queryClose():
sessions.setCurrentSession(None)
self.allDocumentsClosed.emit()
self.cleanStart()
def quit(self):
"""Closes all MainWindows."""
for window in app.windows[:]: # copy
if window is not self:
window.close()
self.close()
if not app.windows:
app.qApp.quit()
def restart(self):
"""Closes all MainWindows and restart Frescobaldi."""
self.quit()
if not app.windows:
app.restart()
def insertFromFile(self):
ext = os.path.splitext(self.currentDocument().url().path())[1]
filetypes = app.filetypes(ext)
caption = app.caption(_("dialog title", "Insert From File"))
directory = os.path.dirname(self.currentDocument().url().toLocalFile()) or app.basedir()
filename = QFileDialog.getOpenFileName(self, caption, directory, filetypes)[0]
if filename:
try:
with open(filename, 'rb') as f:
data = f.read()
except IOError as e:
msg = _("{message}\n\n{strerror} ({errno})").format(
message = _("Could not read from: {url}").format(url=filename),
strerror = e.strerror,
errno = e.errno)
QMessageBox.critical(self, app.caption(_("Error")), msg)
else:
text = util.universal_newlines(util.decode(data))
self.currentView().textCursor().insertText(text)
def openCurrentDirectory(self):
helpers.openUrl(QUrl.fromLocalFile(self.currentDirectory()), "directory")
def openCommandPrompt(self):
helpers.openUrl(QUrl.fromLocalFile(self.currentDirectory()), "shell")
def printSource(self):
cursor = self.textCursor()
try:
printer = self._sourcePrinter
except AttributeError:
printer = self._sourcePrinter = QPrinter()
else:
printer.setCopyCount(1)
dlg = QPrintDialog(printer, self)
dlg.setWindowTitle(app.caption(_("dialog title", "Print Source")))
options = QAbstractPrintDialog.PrintToFile | QAbstractPrintDialog.PrintShowPageSize
if cursor.hasSelection():
options |= QAbstractPrintDialog.PrintSelection
dlg.setOptions(options)
if dlg.exec_():
if dlg.printRange() != QAbstractPrintDialog.Selection:
cursor.clearSelection()
number_lines = QSettings().value("source_export/number_lines", False, bool)
doc = highlighter.html_copy(cursor, 'printer', number_lines)
doc.setMetaInformation(QTextDocument.DocumentTitle, self.currentDocument().url().toString())
font = doc.defaultFont()
font.setPointSizeF(font.pointSizeF() * 0.8)
doc.setDefaultFont(font)
doc.print_(printer)
def exportColoredHtml(self):
doc = self.currentDocument()
name, ext = os.path.splitext(os.path.basename(doc.url().path()))
if name:
if ext.lower() == ".html":
name += "_html"
name += ".html"
dir = os.path.dirname(doc.url().toLocalFile())
if dir:
name = os.path.join(dir, name)
filename = QFileDialog.getSaveFileName(self, app.caption(_("Export as HTML")),
name, "{0} (*.html)".format("HTML Files"))[0]
if not filename:
return #cancelled
s = QSettings()
s.beginGroup("source_export")
number_lines = s.value("number_lines", False, bool)
inline_style = s.value("inline_export", False, bool)
wrap_tag = s.value("wrap_tag", "pre", str)
wrap_attrib = s.value("wrap_attrib", "id", str)
wrap_attrib_name = s.value("wrap_attrib_name", "document", str)
import highlight2html
html = highlight2html.html_document(doc, inline=inline_style, number_lines=number_lines,
wrap_tag=wrap_tag, wrap_attrib=wrap_attrib, wrap_attrib_name=wrap_attrib_name)
try:
with open(filename, "wb") as f:
f.write(html.encode('utf-8'))
except IOError as e:
msg = _("{message}\n\n{strerror} ({errno})").format(
message = _("Could not write to: {url}").format(url=filename),
strerror = e.strerror,
errno = e.errno)
QMessageBox.critical(self, app.caption(_("Error")), msg)
def undo(self):
self.currentView().undo()
def redo(self):
self.currentView().redo()
def cut(self):
self.currentView().cut()
def copy(self):
self.currentView().copy()
def paste(self):
self.currentView().paste()
def copyColoredHtml(self):
cursor = self.textCursor()
if not cursor.hasSelection():
return
s = QSettings()
s.beginGroup("source_export")
number_lines = s.value("number_lines", False, bool)
inline_style = s.value("inline_copy", True, bool)
as_plain_text = s.value("copy_html_as_plain_text", False, bool)
wrap_tag = s.value("wrap_tag", "pre", str)
wrap_attrib = s.value("wrap_attrib", "id", str)
wrap_attrib_name = s.value("wrap_attrib_name", "document", str)
document_body_only = s.value("copy_document_body_only", False, bool)
import highlight2html
html = highlight2html.html_inline(cursor, inline=inline_style, number_lines=number_lines,
full_html=not document_body_only, wrap_tag=wrap_tag, wrap_attrib=wrap_attrib,
wrap_attrib_name=wrap_attrib_name)
data = QMimeData()
data.setText(html) if as_plain_text else data.setHtml(html)
QApplication.clipboard().setMimeData(data)
def selectNone(self):
cursor = self.currentView().textCursor()
cursor.clearSelection()
self.currentView().setTextCursor(cursor)
def selectAll(self):
self.currentView().selectAll()
def selectBlock(self):
import lydocument
import ly.cursortools
cursor = lydocument.cursor(self.textCursor())
if ly.cursortools.select_block(cursor):
self.currentView().setTextCursor(cursor.cursor())
def find(self):
import search
search.Search.instance(self).find()
def replace(self):
import search
search.Search.instance(self).replace()
def showPreferences(self):
import preferences
dlg = preferences.PreferencesDialog(self)
dlg.exec_()
dlg.deleteLater()
def toggleFullScreen(self, enabled):
if enabled:
self._maximized = self.isMaximized()
self.showFullScreen()
else:
self.showNormal()
if self._maximized:
self.showMaximized()
def newWindow(self):
"""Opens a new MainWindow."""
self.writeSettings()
w = MainWindow(self)
w.show()
w.activateWindow()
def toggleWrapLines(self, enable):
"""Called when the user toggles View->Line Wrap"""
wrap = QPlainTextEdit.WidgetWidth if enable else QPlainTextEdit.NoWrap
self.currentView().setLineWrapMode(wrap)
def scrollUp(self):
"""Scroll up without moving the cursor"""
sb = self.currentView().verticalScrollBar()
sb.setValue(sb.value() - 1 if sb.value() else 0)
def scrollDown(self):
"""Scroll down without moving the cursor"""
sb = self.currentView().verticalScrollBar()
sb.setValue(sb.value() + 1)
def selectFullLinesUp(self):
"""Select lines upwards, selecting full lines."""
self.selectFullLines(QTextCursor.Up)
def selectFullLinesDown(self):
"""Select lines downwards, selecting full lines."""
self.selectFullLines(QTextCursor.Down)
def selectFullLines(self, direction):
"""Select full lines in the direction QTextCursor.Up or Down."""
view = self.currentView()
cur = view.textCursor()
position = cur.position()
cur.setPosition(cur.anchor())
cur.movePosition(QTextCursor.StartOfLine)
cur.setPosition(position, QTextCursor.KeepAnchor)
cur.movePosition(direction, QTextCursor.KeepAnchor)
cur.movePosition(QTextCursor.StartOfLine, QTextCursor.KeepAnchor)
view.setTextCursor(cur)
def showManual(self):
"""Shows the user guide, called when user presses F1."""
import userguide
userguide.show()
def showAbout(self):
"""Shows about dialog."""
import about
about.AboutDialog(self).exec_()
def reportBug(self):
"""Opens e-mail composer to send a bug or feature report."""
import bugreport
bugreport.email('', _(
"Please describe the issue or feature request.\n"
"Provide as much information as possible.\n\n\n"))
def createActions(self):
self.actionCollection = ac = ActionCollection()
# recent files
self.menu_recent_files = m = QMenu()
ac.file_open_recent.setMenu(m)
m.aboutToShow.connect(self.populateRecentFilesMenu)
m.triggered.connect(self.slotRecentFilesAction)
# connections
ac.file_quit.triggered.connect(self.quit, Qt.QueuedConnection)
ac.file_restart.triggered.connect(self.restart, Qt.QueuedConnection)
ac.file_new.triggered.connect(self.newDocument)
ac.file_open.triggered.connect(self.openDocument)
ac.file_insert_file.triggered.connect(self.insertFromFile)
ac.file_open_current_directory.triggered.connect(self.openCurrentDirectory)
ac.file_open_command_prompt.triggered.connect(self.openCommandPrompt)
ac.file_save.triggered.connect(self.saveCurrentDocument)
ac.file_save_as.triggered.connect(self.saveCurrentDocumentAs)
ac.file_save_copy_as.triggered.connect(self.saveCopyAs)
ac.file_save_all.triggered.connect(self.saveAllDocuments)
ac.file_reload.triggered.connect(self.reloadCurrentDocument)
ac.file_reload_all.triggered.connect(self.reloadAllDocuments)
ac.file_external_changes.triggered.connect(externalchanges.displayChangedDocuments)
ac.file_print_source.triggered.connect(self.printSource)
ac.file_close.triggered.connect(self.closeCurrentDocument)
ac.file_close_other.triggered.connect(self.closeOtherDocuments)
ac.file_close_all.triggered.connect(self.closeAllDocuments)
ac.export_colored_html.triggered.connect(self.exportColoredHtml)
ac.edit_undo.triggered.connect(self.undo)
ac.edit_redo.triggered.connect(self.redo)
ac.edit_cut.triggered.connect(self.cut)
ac.edit_copy.triggered.connect(self.copy)
ac.edit_paste.triggered.connect(self.paste)
ac.edit_copy_colored_html.triggered.connect(self.copyColoredHtml)
ac.edit_select_all.triggered.connect(self.selectAll)
ac.edit_select_none.triggered.connect(self.selectNone)
ac.edit_select_current_toplevel.triggered.connect(self.selectBlock)
ac.edit_select_full_lines_up.triggered.connect(self.selectFullLinesUp)
ac.edit_select_full_lines_down.triggered.connect(self.selectFullLinesDown)
ac.edit_find.triggered.connect(self.find)
ac.edit_replace.triggered.connect(self.replace)
ac.edit_preferences.triggered.connect(self.showPreferences)
ac.view_next_document.triggered.connect(self.tabBar.nextDocument)
ac.view_previous_document.triggered.connect(self.tabBar.previousDocument)
ac.view_wrap_lines.triggered.connect(self.toggleWrapLines)
ac.view_scroll_up.triggered.connect(self.scrollUp)
ac.view_scroll_down.triggered.connect(self.scrollDown)
ac.window_new.triggered.connect(self.newWindow)
ac.window_fullscreen.toggled.connect(self.toggleFullScreen)
ac.help_manual.triggered.connect(self.showManual)
ac.help_about.triggered.connect(self.showAbout)
ac.help_bugreport.triggered.connect(self.reportBug)
def populateRecentFilesMenu(self):
self.menu_recent_files.clear()
for url in recentfiles.urls():
f = url.toLocalFile()
dirname, basename = os.path.split(f)
text = "{0} ({1})".format(basename, util.homify(dirname))
self.menu_recent_files.addAction(text).url = url
qutil.addAccelerators(self.menu_recent_files.actions())
def slotRecentFilesAction(self, action):
"""Called when a recent files menu action is triggered."""
d = self.openUrl(action.url)
if d:
self.setCurrentDocument(d)
def createMenus(self):
menu.createMenus(self)
# actions that are not in menus
ac = self.actionCollection
self.addAction(ac.view_scroll_up)
self.addAction(ac.view_scroll_down)
self.addAction(ac.edit_select_full_lines_up)
self.addAction(ac.edit_select_full_lines_down)
def createToolBars(self):
ac = self.actionCollection
self.toolbar_main = t = self.addToolBar('')
t.setObjectName('toolbar_main')
t.addAction(ac.file_new)
t.addAction(ac.file_open)
t.addAction(ac.file_save)
t.widgetForAction(ac.file_save).addAction(ac.file_save_as)
t.widgetForAction(ac.file_save).addAction(ac.file_save_all)
t.addAction(ac.file_close)
t.addSeparator()
t.addAction(browseriface.get(self).actionCollection.go_back)
t.addAction(browseriface.get(self).actionCollection.go_forward)
t.addSeparator()
t.addAction(ac.edit_undo)
t.addAction(ac.edit_redo)
t.addSeparator()
t.addAction(scorewiz.ScoreWizard.instance(self).actionCollection.scorewiz)
t.addAction(engrave.engraver(self).actionCollection.engrave_runner)
w = t.widgetForAction(engrave.engraver(self).actionCollection.engrave_runner)
w.addAction(engrave.engraver(self).actionCollection.engrave_publish)
w.addAction(engrave.engraver(self).actionCollection.engrave_custom)
self.toolbar_music = t = self.addToolBar('')
t.setObjectName('toolbar_music')
ma = panelmanager.manager(self).musicview.actionCollection
t.addAction(ma.music_document_select)
t.addAction(ma.music_print)
t.addSeparator()
t.addAction(ma.music_zoom_in)
t.addAction(ma.music_zoom_combo)
t.addAction(ma.music_zoom_out)
t.addSeparator()
t.addAction(ma.music_prev_page)
t.addAction(ma.music_pager)
t.addAction(ma.music_next_page)
def translateUI(self):
self.toolbar_main.setWindowTitle(_("Main Toolbar"))
self.toolbar_music.setWindowTitle(_("Music View Toolbar"))
class ActionCollection(actioncollection.ActionCollection):
name = "main"
def createActions(self, parent=None):
self.file_new = QAction(parent)
self.file_open = QAction(parent)
self.file_open_recent = QAction(parent)
self.file_insert_file = QAction(parent)
self.file_open_current_directory = QAction(parent)
self.file_open_command_prompt = QAction(parent)
self.file_save = QAction(parent)
self.file_save_as = QAction(parent)
self.file_save_copy_as = QAction(parent)
self.file_save_all = QAction(parent)
self.file_reload = QAction(parent)
self.file_reload_all = QAction(parent)
self.file_external_changes = QAction(parent)
self.file_print_source = QAction(parent)
self.file_close = QAction(parent)
self.file_close_other = QAction(parent)
self.file_close_all = QAction(parent)
self.file_quit = QAction(parent)
self.file_restart = QAction(parent)
self.export_colored_html = QAction(parent)
self.edit_undo = QAction(parent)
self.edit_redo = QAction(parent)
self.edit_cut = QAction(parent)
self.edit_copy = QAction(parent)
self.edit_copy_colored_html = QAction(parent)
self.edit_paste = QAction(parent)
self.edit_select_all = QAction(parent)
self.edit_select_current_toplevel = QAction(parent)
self.edit_select_none = QAction(parent)
self.edit_select_full_lines_up = QAction(parent)
self.edit_select_full_lines_down = QAction(parent)
self.edit_find = QAction(parent)
self.edit_find_next = QAction(parent)
self.edit_find_previous = QAction(parent)
self.edit_replace = QAction(parent)
self.edit_preferences = QAction(parent)
self.view_next_document = QAction(parent)
self.view_previous_document = QAction(parent)
self.view_wrap_lines = QAction(parent, checkable=True)
self.view_scroll_up = QAction(parent)
self.view_scroll_down = QAction(parent)
self.window_new = QAction(parent)
self.window_fullscreen = QAction(parent)
self.window_fullscreen.setCheckable(True)
self.help_manual = QAction(parent)
self.help_whatsthis = QWhatsThis.createAction(parent)
self.help_about = QAction(parent)
self.help_bugreport = QAction(parent)
# icons
self.file_new.setIcon(icons.get('document-new'))
self.file_open.setIcon(icons.get('document-open'))
self.file_open_recent.setIcon(icons.get('document-open-recent'))
self.file_open_current_directory.setIcon(icons.get('folder-open'))
self.file_open_command_prompt.setIcon(icons.get('utilities-terminal'))
self.file_save.setIcon(icons.get('document-save'))
self.file_save_as.setIcon(icons.get('document-save-as'))
self.file_save_copy_as.setIcon(icons.get('document-save-as'))
self.file_save_all.setIcon(icons.get('document-save-all'))
self.file_reload.setIcon(icons.get('reload'))
self.file_reload_all.setIcon(icons.get('reload-all'))
self.file_print_source.setIcon(icons.get('document-print'))
self.file_close.setIcon(icons.get('document-close'))
self.file_quit.setIcon(icons.get('application-exit'))
self.edit_undo.setIcon(icons.get('edit-undo'))
self.edit_redo.setIcon(icons.get('edit-redo'))
self.edit_cut.setIcon(icons.get('edit-cut'))
self.edit_copy.setIcon(icons.get('edit-copy'))
self.edit_paste.setIcon(icons.get('edit-paste'))
self.edit_select_all.setIcon(icons.get('edit-select-all'))
self.edit_select_current_toplevel.setIcon(icons.get('edit-select'))
self.edit_find.setIcon(icons.get('edit-find'))
self.edit_find_next.setIcon(icons.get('go-down-search'))
self.edit_find_previous.setIcon(icons.get('go-up-search'))
self.edit_replace.setIcon(icons.get('edit-find-replace'))
self.edit_preferences.setIcon(icons.get('preferences-system'))
self.view_next_document.setIcon(icons.get('go-next'))
self.view_previous_document.setIcon(icons.get('go-previous'))
self.window_new.setIcon(icons.get('window-new'))
self.window_fullscreen.setIcon(icons.get('view-fullscreen'))
self.help_manual.setIcon(icons.get('help-contents'))
self.help_whatsthis.setIcon(icons.get('help-contextual'))
self.help_bugreport.setIcon(icons.get('tools-report-bug'))
self.help_about.setIcon(icons.get('help-about'))
# shortcuts
self.file_new.setShortcuts(QKeySequence.New)
self.file_open.setShortcuts(QKeySequence.Open)
self.file_save.setShortcuts(QKeySequence.Save)
self.file_save_as.setShortcuts(QKeySequence.SaveAs)
self.file_close.setShortcuts(QKeySequence.Close)
self.file_quit.setShortcuts(QKeySequence.Quit)
self.edit_undo.setShortcuts(QKeySequence.Undo)
self.edit_redo.setShortcuts(QKeySequence.Redo)
self.edit_cut.setShortcuts(QKeySequence.Cut)
self.edit_copy.setShortcuts(QKeySequence.Copy)
self.edit_paste.setShortcuts(QKeySequence.Paste)
self.edit_select_all.setShortcuts(QKeySequence.SelectAll)
self.edit_select_current_toplevel.setShortcut(QKeySequence(Qt.SHIFT+Qt.CTRL+Qt.Key_B))
self.edit_select_none.setShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_A))
self.edit_select_full_lines_up.setShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_Up))
self.edit_select_full_lines_down.setShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_Down))
self.edit_find.setShortcuts(QKeySequence.Find)
self.edit_find_next.setShortcuts(QKeySequence.FindNext)
self.edit_find_previous.setShortcuts(QKeySequence.FindPrevious)
self.edit_replace.setShortcuts(QKeySequence.Replace)
self.edit_preferences.setShortcuts(QKeySequence.Preferences)
self.view_next_document.setShortcuts(QKeySequence.Forward)
self.view_previous_document.setShortcuts(QKeySequence.Back)
self.view_scroll_up.setShortcut(Qt.CTRL + Qt.Key_Up)
self.view_scroll_down.setShortcut(Qt.CTRL + Qt.Key_Down)
self.window_fullscreen.setShortcuts([QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_F), QKeySequence(Qt.Key_F11)])
self.help_manual.setShortcuts(QKeySequence.HelpContents)
# Mac OS X-specific roles?
if sys.platform.startswith('darwin'):
import macosx
if macosx.use_osx_menu_roles():
self.file_quit.setMenuRole(QAction.QuitRole)
self.edit_preferences.setMenuRole(QAction.PreferencesRole)
self.help_about.setMenuRole(QAction.AboutRole)
else:
self.file_quit.setMenuRole(QAction.NoRole)
self.edit_preferences.setMenuRole(QAction.NoRole)
self.help_about.setMenuRole(QAction.NoRole)
def translateUI(self):
self.file_new.setText(_("action: new document", "&New"))
self.file_open.setText(_("&Open..."))
self.file_open_recent.setText(_("Open &Recent"))
self.file_insert_file.setText(_("Insert from &File..."))
self.file_open_current_directory.setText(_("Open Current Directory"))
self.file_open_command_prompt.setText(_("Open Command Prompt"))
self.file_save.setText(_("&Save"))
self.file_save_as.setText(_("Save &As..."))
self.file_save_copy_as.setText(_("Save Copy or Selection As..."))
self.file_save_all.setText(_("Save All"))
self.file_reload.setText(_("Re&load"))
self.file_reload_all.setText(_("Reload All"))
self.file_external_changes.setText(_("Check for External Changes..."))
self.file_external_changes.setToolTip(_(
"Opens a window to check whether open documents were changed or "
"deleted by other programs."))
self.file_print_source.setText(_("Print Source..."))
self.file_close.setText(_("&Close"))
self.file_close_other.setText(_("Close Other Documents"))
self.file_close_all.setText(_("Close All Documents and Session"))
self.file_close_all.setToolTip(_("Closes all documents and leaves the current session."))
self.file_quit.setText(_("&Quit"))
self.file_restart.setText(_("Restart {appname}").format(appname=appinfo.appname))
self.export_colored_html.setText(_("Export Source as Colored &HTML..."))
self.edit_undo.setText(_("&Undo"))
self.edit_redo.setText(_("Re&do"))
self.edit_cut.setText(_("Cu&t"))
self.edit_copy.setText(_("&Copy"))
self.edit_copy_colored_html.setText(_("Copy as Colored &HTML"))
self.edit_paste.setText(_("&Paste"))
self.edit_select_all.setText(_("Select &All"))
self.edit_select_current_toplevel.setText(_("Select &Block"))
self.edit_select_none.setText(_("Select &None"))
self.edit_select_full_lines_up.setText(_("Select Whole Lines Up"))
self.edit_select_full_lines_down.setText(_("Select Whole Lines Down"))
self.edit_find.setText(_("&Find..."))
self.edit_find_next.setText(_("Find Ne&xt"))
self.edit_find_previous.setText(_("Find Pre&vious"))
self.edit_replace.setText(_("&Replace..."))
self.edit_preferences.setText(_("Pr&eferences..."))
self.view_next_document.setText(_("&Next Document"))
self.view_previous_document.setText(_("&Previous Document"))
self.view_wrap_lines.setText(_("Wrap &Lines"))
self.view_scroll_up.setText(_("Scroll Up"))
self.view_scroll_down.setText(_("Scroll Down"))
self.window_new.setText(_("New &Window"))
self.window_fullscreen.setText(_("&Fullscreen"))
self.help_manual.setText(_("&User Guide"))
self.help_whatsthis.setText(_("&What's This?"))
self.help_bugreport.setText(_("Report a &Bug..."))
self.help_about.setText(_("&About {appname}...").format(appname=appinfo.appname))
| anthonyfok/frescobaldi | frescobaldi_app/mainwindow.py | Python | gpl-2.0 | 50,862 |
"""
A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open('fwgen/version.py') as f:
exec(f.read())
setup(
name='fwgen',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description='A simple management framework for ip(6)tables based firewalls',
long_description=long_description,
# The project's main homepage.
url='https://github.com/hkbakke/fwgen',
# Author details
author='Hans-Kristian Bakke',
#author_email='pypa-dev@googlegroups.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: System Administrators',
'Topic :: System :: Networking :: Firewalls',
'Operating System :: POSIX :: Linux',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
# What does your project relate to?
keywords='linux firewall iptables ip6tables ipset',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pyyaml'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'test': ['pytest', 'pytest-cov', 'pylint'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'fwgen': [
'etc/defaults.yml',
'doc/examples/config.yml',
'doc/examples/fwgen.service',
],
},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[
#],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'fwgen=fwgen.bin.fwgen:main',
],
},
)
| hkbakke/fwgen | setup.py | Python | mit | 4,104 |
#!/usr/bin/env python3
"""Find similar words to query term."""
import hashlib
import os
import unicodedata
from philologic.Query import get_expanded_query
from Levenshtein import ratio
def get_all_words(db, request):
"""Expand query to all search terms."""
words = request["q"].replace('"', "")
hits = db.query(words)
hits.finish()
expanded_terms = get_expanded_query(hits)
word_groups = []
for word_group in expanded_terms:
normalized_group = []
for word in word_group:
word = "".join([i for i in unicodedata.normalize("NFKD", word) if not unicodedata.combining(i)])
normalized_group.append(word)
word_groups.append(normalized_group)
return word_groups
def find_similar_words(db, config, request):
"""Edit distance function."""
# Check if lookup is cached
hashed_query = hashlib.sha256()
hashed_query.update(request["q"].encode("utf8"))
hashed_query.update(str(request.approximate_ratio).encode("utf8"))
approximate_filename = os.path.join(config.db_path, "data/hitlists/%s.approximate_terms" % hashed_query.hexdigest())
if os.path.isfile(approximate_filename):
with open(approximate_filename, encoding="utf8") as fh:
approximate_terms = fh.read().strip()
return approximate_terms
query_groups = get_all_words(db, request)
file_path = os.path.join(config.db_path, "data/frequencies/normalized_word_frequencies")
new_query_groups = [set([]) for i in query_groups]
with open(file_path, encoding="utf8") as fh:
for line in fh:
line = line.strip()
try:
normalized_word, regular_word = line.split("\t")
for pos, query_group in enumerate(query_groups):
for query_word in query_group:
if ratio(query_word, normalized_word) >= float(request.approximate_ratio):
new_query_groups[pos].add(regular_word)
except ValueError:
pass
new_query_groups = " ".join([" | ".join(group) for group in new_query_groups])
    with open(approximate_filename, "w", encoding="utf8") as cached_file:
        cached_file.write(new_query_groups)
return new_query_groups
| ARTFL-Project/PhiloLogic5 | python/philologic/runtime/find_similar_words.py | Python | gpl-3.0 | 2,263 |
import numpy as np
from numba import cuda
@cuda.jit(debug=True)
def histogram(x, xmin, xmax, histogram_out):
nbins = histogram_out.shape[0]
bin_width = (xmax - xmin) / nbins
start = cuda.grid(1)
stride = cuda.gridsize(1)
for i in range(start, x.shape[0], stride):
        bin_number = np.int32((x[i] - xmin)/bin_width)
        if bin_number >= 0 and bin_number < histogram_out.shape[0]:
cuda.atomic.add(histogram_out, bin_number, 1)
x = np.random.normal(size=50, loc=0, scale=1).astype(np.float32)
xmin = np.float32(-4.0)
xmax = np.float32(4.0)
histogram_out = np.zeros(shape=10, dtype=np.int32)
histogram[64, 64](x, xmin, xmax, histogram_out)
print('input count:', x.shape[0])
print('histogram:', histogram_out)
print('count:', histogram_out.sum())
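# Note (added sketch): values falling outside [xmin, xmax) are dropped by the
# bounds check in the kernel above, and since draws from N(0, 1) essentially
# never leave [-4, 4), the printed count should match the input count of 50.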
| fluxcapacitor/source.ml | jupyterhub.ml/src/main/python/numba/histogram.py | Python | apache-2.0 | 789 |
import website_sale_product_tags_models
| Endika/website-addons | website_sale_product_tags/__init__.py | Python | lgpl-3.0 | 40 |
#!/usr/bin/python
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Filters out trips which are not on the default routes and
sets their trip_type attribute accordingly.
For usage information run unusual_trip_filter.py --help
"""
__author__ = 'Jiri Semecky <jiri.semecky@gmail.com>'
import codecs
import os
import os.path
import sys
import time
import transitfeed
from transitfeed import util
class UnusualTripFilter(object):
"""Class filtering trips going on unusual paths.
Those are usually trips going to/from depot or changing to another route
in the middle. Sets the 'trip_type' attribute of the trips.txt dataset
so that non-standard trips are marked as special (value 1)
instead of regular (default value 0).
"""
def __init__ (self, threshold=0.1, force=False, quiet=False, route_type=None):
self._threshold = threshold
self._quiet = quiet
self._force = force
if route_type in transitfeed.Route._ROUTE_TYPE_NAMES:
self._route_type = transitfeed.Route._ROUTE_TYPE_NAMES[route_type]
elif route_type is None:
self._route_type = None
else:
self._route_type = int(route_type)
def filter_line(self, route):
"""Mark unusual trips for the given route."""
if self._route_type is not None and self._route_type != route.route_type:
self.info('Skipping route %s due to different route_type value (%s)' %
(route['route_id'], route['route_type']))
return
self.info('Filtering infrequent trips for route %s.' % route.route_id)
trip_count = len(route.trips)
for pattern_id, pattern in route.GetPatternIdTripDict().items():
ratio = float(1.0 * len(pattern) / trip_count)
if not self._force:
if (ratio < self._threshold):
self.info("\t%d trips on route %s with headsign '%s' recognized "
"as unusual (ratio %f)" %
(len(pattern),
route['route_short_name'],
pattern[0]['trip_headsign'],
ratio))
for trip in pattern:
trip.trip_type = 1 # special
self.info("\t\tsetting trip_type of trip %s as special" %
trip.trip_id)
else:
self.info("\t%d trips on route %s with headsign '%s' recognized "
"as %s (ratio %f)" %
(len(pattern),
route['route_short_name'],
pattern[0]['trip_headsign'],
('regular', 'unusual')[ratio < self._threshold],
ratio))
for trip in pattern:
trip.trip_type = ('0','1')[ratio < self._threshold]
self.info("\t\tsetting trip_type of trip %s as %s" %
(trip.trip_id,
('regular', 'unusual')[ratio < self._threshold]))
def filter(self, dataset):
"""Mark unusual trips for all the routes in the dataset."""
self.info('Going to filter infrequent routes in the dataset')
for route in dataset.routes.values():
self.filter_line(route)
def info(self, text):
if not self._quiet:
print text.encode("utf-8")
def main():
usage = \
'''%prog [options] <GTFS.zip>
Sets the trip_type for trips that have an unusual pattern for a route.
<GTFS.zip> is overwritten with the modified GTFS file unless the --output
option is used.
For more information see
http://code.google.com/p/googletransitdatafeed/wiki/UnusualTripFilter
'''
parser = util.OptionParserLongError(
usage=usage, version='%prog '+transitfeed.__version__)
parser.add_option('-o', '--output', dest='output', metavar='FILE',
help='Name of the output GTFS file (writing to input feed if omitted).')
parser.add_option('-m', '--memory_db', dest='memory_db', action='store_true',
help='Force use of in-memory sqlite db.')
parser.add_option('-t', '--threshold', default=0.1,
dest='threshold', type='float',
help='Frequency threshold for considering pattern as non-regular.')
parser.add_option('-r', '--route_type', default=None,
dest='route_type', type='string',
                    help='Filter only selected route type (specified by number '
'or one of the following names: ' + \
', '.join(transitfeed.Route._ROUTE_TYPE_NAMES) + ').')
parser.add_option('-f', '--override_trip_type', default=False,
dest='override_trip_type', action='store_true',
help='Forces overwrite of current trip_type values.')
parser.add_option('-q', '--quiet', dest='quiet',
default=False, action='store_true',
help='Suppress information output.')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error('You must provide the path of a single feed.')
filter = UnusualTripFilter(float(options.threshold),
force=options.override_trip_type,
quiet=options.quiet,
route_type=options.route_type)
feed_name = args[0]
feed_name = feed_name.strip()
filter.info('Loading %s' % feed_name)
loader = transitfeed.Loader(feed_name, extra_validation=True,
memory_db=options.memory_db)
data = loader.Load()
filter.filter(data)
print 'Saving data'
# Write the result
if options.output is None:
data.WriteGoogleTransitFeed(feed_name)
else:
data.WriteGoogleTransitFeed(options.output)
if __name__ == '__main__':
util.RunWithCrashHandler(main)
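# Example invocation sketch (feed and output names are hypothetical; the flags
# map to the options defined in main() above):
#
#   python unusual_trip_filter.py -t 0.05 -r 3 -o filtered.zip my_feed.zip
#
# This marks trips whose stop pattern covers less than 5% of a bus route's
# (route_type 3) trips as special (trip_type 1) and writes filtered.zip.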
| jonpetersen/transitfeed-1.2.12 | build/scripts-2.7/unusual_trip_filter.py | Python | apache-2.0 | 5,999 |
# MIT licensed
# Copyright (c) 2020-2021 DDoSolitary <DDoSolitary@gmail.com>, et al.
from nvchecker.api import GetVersionError
from pyalpm import Handle
async def open_db(info):
dbpath, repo = info
handle = Handle('/', dbpath)
db = handle.register_syncdb(repo, 0)
return handle, db
async def get_version(name, conf, *, cache, **kwargs):
pkgname = conf.get('alpm', name)
dbpath = conf.get('dbpath', '/var/lib/pacman')
strip_release = conf.get('strip_release', False)
provided = conf.get('provided')
repo = conf.get('repo')
if repo is None:
repos = ['core', 'extra', 'community', 'multilib']
else:
repos = [repo]
for repo in repos:
db = (await cache.get((dbpath, repo), open_db))[1]
pkg = db.get_pkg(pkgname)
if pkg is not None:
break
if pkg is None:
raise GetVersionError('package not found in the ALPM database')
if provided is None:
version = pkg.version
else:
provides = dict(x.split('=', 1) for x in pkg.provides if '=' in x)
version = provides.get(provided)
if version is None:
raise GetVersionError('provides element not found')
if strip_release:
version = version.split('-', 1)[0]
return version
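# Configuration sketch (the entry name "linux" is hypothetical; the key names
# mirror the conf.get() calls above and the file layout follows nvchecker's
# usual TOML format):
#
#   [linux]
#   source = "alpm"
#   repo = "core"
#   strip_release = true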
| lilydjwg/nvchecker | nvchecker_source/alpm.py | Python | mit | 1,200 |
# Copyright 2022 ForgeFlow - Joan Mateu
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_sale_contract
| OCA/contract | contract_sale/tests/__init__.py | Python | agpl-3.0 | 143 |
#!/usr/bin/env python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
from . import _util
def simple_random(verbose=False):
display_func = _util.display_func(verbose)
display_func(af.randu(3, 3, 1, 2))
display_func(af.randu(3, 3, 1, 2, af.Dtype.b8))
display_func(af.randu(3, 3, dtype=af.Dtype.c32))
display_func(af.randn(3, 3, 1, 2))
display_func(af.randn(3, 3, dtype=af.Dtype.c32))
af.set_seed(1024)
assert(af.get_seed() == 1024)
engine = af.Random_Engine(af.RANDOM_ENGINE.MERSENNE_GP11213, 100)
display_func(af.randu(3, 3, 1, 2, engine=engine))
display_func(af.randu(3, 3, 1, 2, af.Dtype.s32, engine=engine))
display_func(af.randu(3, 3, dtype=af.Dtype.c32, engine=engine))
display_func(af.randn(3, 3, engine=engine))
engine.set_seed(100)
assert(engine.get_seed() == 100)
_util.tests["random"] = simple_random
| arrayfire/arrayfire-python | tests/simple/random.py | Python | bsd-3-clause | 1,182 |
#!/usr/bin/python
#
# \file config.py
# \brief Process configuration
# \date 2010-08-27 17:02GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Process configuration.
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# *** PROCESS CONFIGURATION ***
# *** PROCESS CONFIG ***
ProcessToComplete = [ ]
ProcessToComplete += [ "properties" ]
ProcessToComplete += [ "shape" ]
ProcessToComplete += [ "map" ]
ProcessToComplete += [ "ig" ]
ProcessToComplete += [ "ig_light" ]
# *** COMMON NAMES AND PATHS ***
EcosystemName = "construction"
EcosystemPath = "common/" + EcosystemName
ContinentName = EcosystemName
ContinentPath = EcosystemPath
CommonName = ContinentName
CommonPath = ContinentPath
# *** SHAPE EXPORT OPTIONS ***
# Compute lightmaps ?
ShapeExportOptExportLighting = "true"
# Cast shadow in lightmap ?
ShapeExportOptShadow = "true"
# Lighting limits. 0 : normal, 1 : soft shadows
ShapeExportOptLightingLimit = 0
# Lightmap lumel size
ShapeExportOptLumelSize = "0.25"
# Oversampling value. Can be 1, 2, 4 or 8
ShapeExportOptOversampling = 1
# Should the lightmap be generated in 8-bit format?
ShapeExportOpt8BitsLightmap = "false"
# Should the lightmap export generate logs?
ShapeExportOptLightmapLog = "true"
# Coarse mesh texture mul size
TextureMulSizeValue = "1.5"
DoBuildShadowSkin = 0
ClodConfigFile = ""
# *** COARSE MESH TEXTURE NAME ***
CoarseMeshTextureNames = [ ]
# *** POSTFIX USED BY THE MULTIPLE TILES SYSTEM ***
MultipleTilesPostfix = [ ]
MultipleTilesPostfix += [ "_sp" ]
MultipleTilesPostfix += [ "_su" ]
MultipleTilesPostfix += [ "_au" ]
MultipleTilesPostfix += [ "_wi" ]
# Name of the tilebank to use
BankTileBankName = ""
# *** LANDSCAPE NAME ***
LandscapeName = ""
# *** LIGO OPTIONS ***
LigoExportLand = ""
LigoExportOnePass = 0
LigoExportColormap = "colormap_invalid.png"
LigoExportHeightmap1 = "big_invalid.png"
LigoExportZFactor1 = "1.0"
LigoExportHeightmap2 = "noise_invalid.png"
LigoExportZFactor2 = "0.5"
LigoTileBankFile = ""
# *** MAPS OPTIONS ***
ReduceBitmapFactor = 0
# list all panoply files
MapPanoplyFileList = None
# name of the .hlsbank to build.
MapHlsBankFileName = None
# *** ANIMATIONS OPTIONS ***
DoOptimizeAnimations = 0
| osgcc/ryzom | ryzom/tools/build_gamedata/workspace/common/construction/process.py | Python | agpl-3.0 | 2,956 |
import os
import fnmatch
from pipa import PipelineItem
class FindInPath(PipelineItem):
def __init__(self, *args, **kwargs):
super(FindInPath, self).__init__(*args, **kwargs)
self.name='find_in_remote_path'
self.tuple_name = 'find_in_remote_path'
self.tuple_fields = 'connection, filename'
def generator(self, paramiko_conns, path='', filemask='', **kwargs):
for conn in paramiko_conns:
sftp_client = conn.connection.open_sftp()
file_list = sftp_client.listdir(path=path)
for name in fnmatch.filter(file_list, filemask):
yield self.make_tuple(connection=conn.connection, filename=os.path.join(path,name))
            sftp_client.close()
| pokerone/pipa | pipa/system/remote/find_in_path.py | Python | bsd-3-clause | 758
# Copyright (c) 2014 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.scheduler import filters
from nova.scheduler import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('weight_setting',
'nova.scheduler.weights.metrics',
group='metrics')
class MetricsFilter(filters.BaseHostFilter):
"""Metrics Filter
This filter is used to filter out those hosts which don't have the
corresponding metrics so these the metrics weigher won't fail due to
these hosts.
"""
def __init__(self):
super(MetricsFilter, self).__init__()
opts = utils.parse_options(CONF.metrics.weight_setting,
sep='=',
converter=float,
name="metrics.weight_setting")
self.keys = set([x[0] for x in opts])
def host_passes(self, host_state, spec_obj):
metrics_on_host = set(m.name for m in host_state.metrics)
if not self.keys.issubset(metrics_on_host):
            unavail = self.keys - metrics_on_host
LOG.debug("%(host_state)s does not have the following "
"metrics: %(metrics)s",
{'host_state': host_state,
'metrics': ', '.join(unavail)})
return False
return True
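# Illustrative nova.conf snippet this filter keys off of (the metric names are
# made-up examples; the option itself is the [metrics]/weight_setting imported
# above):
#
#   [metrics]
#   weight_setting = cpu.user.percent=1.0, cpu.kernel.percent=-1.0
#
# With that setting only hosts reporting both metrics pass host_passes(), so
# the metrics weigher never sees a host with missing data.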
| dims/nova | nova/scheduler/filters/metrics_filter.py | Python | apache-2.0 | 1,998 |
import django
from django.contrib.admin.checks import InlineModelAdminChecks as BaseInlineModelAdminChecks
def get_empty_value_display(model_admin):
if django.VERSION >= (1, 9):
return model_admin.get_empty_value_display()
else:
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
return EMPTY_CHANGELIST_VALUE
if django.VERSION >= (1, 9):
class InlineModelAdminChecks(BaseInlineModelAdminChecks):
def check(self, inline_obj, **kwargs):
errors = super(InlineModelAdminChecks, self).check(inline_obj)
errors.extend(self._check_lazy_model(inline_obj))
return errors
else:
class InlineModelAdminChecks(BaseInlineModelAdminChecks):
def check(self, cls, parent_model, **kwargs):
errors = super(InlineModelAdminChecks, self).check(cls, parent_model, **kwargs)
errors.extend(self._check_lazy_model(cls))
return errors
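# Usage sketch (hypothetical admin class) showing where get_empty_value_display
# fits; it returns the placeholder the changelist would show for empty values
# on either Django version:
#
#   class BookAdmin(admin.ModelAdmin):
#       def subtitle_display(self, obj):
#           return obj.subtitle or get_empty_value_display(self)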
| jamieconnolly/django-lazy-choices | src/lazychoices/compat.py | Python | mit | 958 |
# Copyright 2014-2015 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import os
import platform
from . import linker
from . import config
from . import patcher
import subprocess
import pygccxml.utils
try: # select the faster xml parser
from .etree_scanner import etree_scanner_t as scanner_t
except ImportError:
from .scanner import scanner_t
from . import declarations_cache
from pygccxml import utils
from pygccxml import declarations
def bind_aliases(decls):
"""
    This function binds between a class and its typedefs.
    :param decls: list of all declarations
    :type decls: list of :class:`declarations.declaration_t` items
:rtype: None
"""
visited = set()
typedefs = [
decl for decl in decls if isinstance(decl, declarations.typedef_t)]
for decl in typedefs:
type_ = declarations.remove_alias(decl.type)
if not isinstance(type_, declarations.declarated_t):
continue
cls_inst = type_.declaration
if not isinstance(cls_inst, declarations.class_types):
continue
if id(cls_inst) not in visited:
visited.add(id(cls_inst))
del cls_inst.aliases[:]
cls_inst.aliases.append(decl)
class source_reader_t(object):
"""
This class reads C++ source code and returns the declarations tree.
This class is the only class that works directly with GCC-XML or CastXML.
    It has only one responsibility: it calls GCC-XML or CastXML with a source
    file specified by the user and creates a declarations tree. The
    implementation of this class is split into two classes:
    1. `scanner_t` - this class scans the "XML" file, generated by GCC-XML
    or CastXML, and creates :mod:`pygccxml` declarations and types classes.
    After the XML file has been processed, declarations and type class
    instances keep references to each other using GCC-XML or CastXML
    generated ids.
2. `linker_t` - this class contains logic for replacing GCC-XML or CastXML
generated ids with references to declarations or type class instances.
"""
def __init__(self, config, cache=None, decl_factory=None, join_decls=True):
"""
:param config: Instance of :class:`xml_generator_configuration_t`
class, that contains GCC-XML or CastXML configuration.
:param cache: Reference to cache object, that will be updated after a
file has been parsed.
:type cache: Instance of :class:`cache_base_t` class
:param decl_factory: Declarations factory, if not given default
declarations factory( :class:`decl_factory_t` )
will be used.
        :param join_decls: Whether to join the declarations for the file.
                           Set this to False to skip joining here and do it
                           once later, for example in the project_reader when
                           there are multiple files. Defaults to True.
        :type join_decls: bool
"""
self.logger = utils.loggers.cxx_parser
self.__join_decls = join_decls
self.__search_directories = []
self.__config = config
self.__search_directories.append(config.working_directory)
self.__search_directories.extend(config.include_paths)
if not cache:
cache = declarations_cache.dummy_cache_t()
self.__dcache = cache
self.__config.raise_on_wrong_settings()
self.__decl_factory = decl_factory
if not decl_factory:
self.__decl_factory = declarations.decl_factory_t()
def __create_command_line(self, source_file, xml_file):
"""
Generate the command line used to build xml files.
Depending on the chosen xml_generator a different command line
is built. The gccxml option may be removed once gccxml
support is dropped (this was the original c++ xml_generator,
castxml is replacing it now).
"""
if self.__config.xml_generator == "gccxml":
return self.__create_command_line_gccxml(source_file, xml_file)
elif self.__config.xml_generator == "castxml":
return self.__create_command_line_castxml(source_file, xml_file)
def __create_command_line_castxml(self, source_file, xmlfile):
assert isinstance(self.__config, config.xml_generator_configuration_t)
cmd = []
# first is gccxml executable
if platform.system() == 'Windows':
cmd.append('"%s"' % os.path.normpath(
self.__config.xml_generator_path))
else:
cmd.append('%s' % os.path.normpath(
self.__config.xml_generator_path))
# Add all cflags passed
if self.__config.cflags != "":
cmd.append(" %s " % self.__config.cflags)
# Add additional includes directories
dirs = self.__search_directories
cmd.append(''.join([' -I%s' % search_dir for search_dir in dirs]))
# Clang option: -c Only run preprocess, compile, and assemble steps
cmd.append("-c")
# Clang option: make sure clang knows we want to parse c++
cmd.append("-x c++")
# Platform specific options
if platform.system() == 'Windows':
if "mingw" in self.__config.compiler_path.lower():
# Look at the compiler path. This is a bad way
# to find out if we are using mingw; but it
# should probably work in most of the cases
cmd.append('--castxml-cc-gnu ' + self.__config.compiler_path)
else:
# We are using msvc
cmd.append('--castxml-cc-msvc cl')
if 'msvc9' == self.__config.compiler:
cmd.append('-D"_HAS_TR1=0"')
else:
# On mac or linux, use gcc or clang (the flag is the same)
cmd.append('--castxml-cc-gnu ')
# Check for -std=xx flags passed to the compiler.
# A regex could be used but this is a moving target.
# See c++1z for example. It is preferable to have a defined
# list of what is allowed. http://clang.llvm.org/cxx_status.html
#
            # Versions 98 and 03 are only listed in case somebody passes
            # these flags; doing so is equivalent to not passing them at all.
standards = [
"-std=c++98",
"-std=c++03",
"-std=c++11",
"-std=c++14",
"-std=c++1z"]
std_flag = ""
for standard in standards:
if standard in self.__config.cflags:
std_flag = " " + standard + " "
# A -std= flag was passed, but is not in the list
if "-std=" in self.__config.cflags and std_flag == "":
raise(RuntimeError("Unknown -std=c++xx flag used !"))
if std_flag != "":
cmd.append(
'"(" ' + self.__config.compiler_path + std_flag + '")"')
else:
cmd.append(self.__config.compiler_path)
# Tell castxml to output xml compatible files with gccxml
# so that we can parse them with pygccxml
cmd.append('--castxml-gccxml')
# Add symbols
cmd = self.__add_symbols(cmd)
# The destination file
cmd.append('-o %s' % xmlfile)
# The source file
cmd.append('%s' % source_file)
# Where to start the parsing
if self.__config.start_with_declarations:
cmd.append(
'--castxml-start "%s"' %
','.join(self.__config.start_with_declarations))
cmd_line = ' '.join(cmd)
self.logger.debug('castxml cmd: %s' % cmd_line)
return cmd_line
def __create_command_line_gccxml(self, source_file, xmlfile):
assert isinstance(self.__config, config.xml_generator_configuration_t)
# returns
cmd = []
# first is gccxml executable
if 'nt' == os.name:
cmd.append('"%s"' % os.path.normpath(
self.__config.xml_generator_path))
else:
cmd.append('%s' % os.path.normpath(
self.__config.xml_generator_path))
# Add all cflags passed
if self.__config.cflags != "":
cmd.append(" %s " % self.__config.cflags)
# second all additional includes directories
dirs = self.__search_directories
cmd.append(''.join([' -I"%s"' % search_dir for search_dir in dirs]))
# Add symbols
cmd = self.__add_symbols(cmd)
# fourth source file
cmd.append('"%s"' % source_file)
# five destination file
cmd.append('-fxml="%s"' % xmlfile)
if self.__config.start_with_declarations:
cmd.append(
'-fxml-start="%s"' %
','.join(
self.__config.start_with_declarations))
# Specify compiler if asked to
if self.__config.compiler:
cmd.append(" --gccxml-compiler %s" % self.__config.compiler)
cmd_line = ' '.join(cmd)
self.logger.debug('gccxml cmd: %s' % cmd_line)
return cmd_line
def __add_symbols(self, cmd):
"""
Add all additional defined and undefined symbols.
"""
if self.__config.define_symbols:
symbols = self.__config.define_symbols
cmd.append(''.join(
[' -D"%s"' % defined_symbol for defined_symbol in symbols]))
if self.__config.undefine_symbols:
un_symbols = self.__config.undefine_symbols
cmd.append(
''.join([' -U"%s"' % undefined_symbol for
undefined_symbol in un_symbols]))
return cmd
def create_xml_file(self, source_file, destination=None):
"""
        This method will generate an xml file using an external tool.
The external tool can be either gccxml or castxml. The method will
return the file path of the generated xml file.
:param source_file: path to the source file that should be parsed.
:type source_file: str
:param destination: if given, will be used as target file path for
GCC-XML or CastXML.
:type destination: str
:rtype: path to xml file.
"""
xml_file = destination
# If file specified, remove it to start else create new file name
if xml_file:
pygccxml.utils.remove_file_no_raise(xml_file, self.__config)
else:
xml_file = pygccxml.utils.create_temp_file_name(suffix='.xml')
try:
ffname = source_file
if not os.path.isabs(ffname):
ffname = self.__file_full_name(source_file)
command_line = self.__create_command_line(ffname, xml_file)
process = subprocess.Popen(
args=command_line,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
process.stdin.close()
gccxml_reports = []
while process.poll() is None:
line = process.stdout.readline()
if line.strip():
gccxml_reports.append(line.rstrip())
for line in process.stdout.readlines():
if line.strip():
gccxml_reports.append(line.rstrip())
exit_status = process.returncode
gccxml_msg = os.linesep.join([str(s) for s in gccxml_reports])
if self.__config.ignore_gccxml_output:
if not os.path.isfile(xml_file):
raise RuntimeError(
"Error occured while running " +
self.__config.xml_generator.upper() +
": %s status:%s" %
(gccxml_msg, exit_status))
else:
if gccxml_msg or exit_status or not \
os.path.isfile(xml_file):
raise RuntimeError(
"Error occured while running " +
self.__config.xml_generator.upper() + ": %s" %
gccxml_msg)
except Exception:
pygccxml.utils.remove_file_no_raise(xml_file, self.__config)
raise
return xml_file
def create_xml_file_from_string(self, content, destination=None):
"""
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for GCC-XML generated file
:type destination: str
:rtype: returns file name of GCC-XML generated file
"""
header_file = pygccxml.utils.create_temp_file_name(suffix='.h')
try:
header_file_obj = open(header_file, 'w+')
header_file_obj.write(content)
header_file_obj.close()
xml_file = self.create_xml_file(header_file, destination)
finally:
pygccxml.utils.remove_file_no_raise(header_file, self.__config)
return xml_file
def read_file(self, source_file):
return self.read_cpp_source_file(source_file)
def read_cpp_source_file(self, source_file):
"""
Reads C++ source file and returns declarations tree
:param source_file: path to C++ source file
:type source_file: str
"""
xml_file = ''
try:
ffname = self.__file_full_name(source_file)
self.logger.debug("Reading source file: [%s]." % ffname)
declarations = self.__dcache.cached_value(ffname, self.__config)
if not declarations:
self.logger.debug(
"File has not been found in cache, parsing...")
xml_file = self.create_xml_file(ffname)
declarations, files = self.__parse_xml_file(xml_file)
self.__dcache.update(
ffname, self.__config, declarations, files)
else:
self.logger.debug(
("File has not been changed, reading declarations " +
"from cache."))
except Exception:
if xml_file:
pygccxml.utils.remove_file_no_raise(xml_file, self.__config)
raise
if xml_file:
pygccxml.utils.remove_file_no_raise(xml_file, self.__config)
return declarations
def read_xml_file(self, xml_file):
"""
Read generated XML file.
:param xml_file: path to xml file
:type xml_file: str
:rtype: declarations tree
"""
assert(self.__config is not None)
ffname = self.__file_full_name(xml_file)
self.logger.debug("Reading xml file: [%s]" % xml_file)
declarations = self.__dcache.cached_value(ffname, self.__config)
if not declarations:
self.logger.debug("File has not been found in cache, parsing...")
declarations, files = self.__parse_xml_file(ffname)
self.__dcache.update(ffname, self.__config, declarations, [])
else:
self.logger.debug(
"File has not been changed, reading declarations from cache.")
return declarations
def read_string(self, content):
"""
Reads a Python string that contains C++ code, and return
the declarations tree.
"""
header_file = pygccxml.utils.create_temp_file_name(suffix='.h')
with open(header_file, "w+") as f:
f.write(content)
try:
declarations = self.read_file(header_file)
except Exception:
pygccxml.utils.remove_file_no_raise(header_file, self.__config)
raise
pygccxml.utils.remove_file_no_raise(header_file, self.__config)
return declarations
def __file_full_name(self, file):
if os.path.isfile(file):
return file
for path in self.__search_directories:
file_path = os.path.join(path, file)
if os.path.isfile(file_path):
return file_path
raise RuntimeError("pygccxml error: file '%s' does not exist" % file)
def __produce_full_file(self, file_path):
if os.name in ['nt', 'posix']:
file_path = file_path.replace(r'\/', os.path.sep)
if os.path.isabs(file_path):
return file_path
try:
abs_file_path = os.path.realpath(
os.path.join(
self.__config.working_directory,
file_path))
if os.path.exists(abs_file_path):
return os.path.normpath(abs_file_path)
return file_path
except Exception:
return file_path
def __parse_xml_file(self, xml_file):
scanner_ = scanner_t(xml_file, self.__decl_factory, self.__config)
scanner_.read()
decls = scanner_.declarations()
types = scanner_.types()
files = {}
for file_id, file_path in scanner_.files().items():
files[file_id] = self.__produce_full_file(file_path)
linker_ = linker.linker_t(
decls=decls,
types=types,
access=scanner_.access(),
membership=scanner_.members(),
files=files)
for type_ in list(types.values()):
            # I need this copy because internally the linker changes the types collection
linker_.instance = type_
declarations.apply_visitor(linker_, type_)
for decl in decls.values():
linker_.instance = decl
declarations.apply_visitor(linker_, decl)
bind_aliases(iter(decls.values()))
# Join declarations
if self.__join_decls:
for ns in iter(decls.values()):
if isinstance(ns, pygccxml.declarations.namespace_t):
self.join_declarations(ns)
        # sometimes gccxml reports typedefs defined in no namespace
        # it happens for example in the following situation:
        # template< typename X>
        # void ddd(){ typedef typename X::Y YY;}
        # if this bug shows up again, the right way to fix it may be
        # different
patcher.fix_calldef_decls(scanner_.calldefs(), scanner_.enums())
decls = [
inst for inst in iter(
decls.values()) if isinstance(
inst,
declarations.namespace_t) and not inst.parent]
return decls, list(files.values())
def join_declarations(self, declref):
self._join_namespaces(declref)
for ns in declref.declarations:
if isinstance(ns, pygccxml.declarations.namespace_t):
self.join_declarations(ns)
@staticmethod
def _join_namespaces(nsref):
assert isinstance(nsref, pygccxml.declarations.namespace_t)
ddhash = {}
decls = []
for decl in nsref.declarations:
if decl.__class__ not in ddhash:
ddhash[decl.__class__] = {decl._name: [decl]}
decls.append(decl)
else:
joined_decls = ddhash[decl.__class__]
if decl._name not in joined_decls:
decls.append(decl)
joined_decls[decl._name] = [decl]
else:
if isinstance(decl, pygccxml.declarations.calldef_t):
if decl not in joined_decls[decl._name]:
# functions has overloading
decls.append(decl)
joined_decls[decl._name].append(decl)
elif isinstance(decl, pygccxml.declarations.enumeration_t):
# unnamed enums
if not decl.name and decl not in \
joined_decls[decl._name]:
decls.append(decl)
joined_decls[decl._name].append(decl)
elif isinstance(decl, pygccxml.declarations.class_t):
# unnamed classes
if not decl.name and decl not in \
joined_decls[decl._name]:
decls.append(decl)
joined_decls[decl._name].append(decl)
else:
assert 1 == len(joined_decls[decl._name])
if isinstance(decl, pygccxml.declarations.namespace_t):
joined_decls[decl._name][0].take_parenting(decl)
class_t = pygccxml.declarations.class_t
class_declaration_t = pygccxml.declarations.class_declaration_t
if class_t in ddhash and class_declaration_t in ddhash:
# if there is a class and its forward declaration - get rid of the
# second one.
class_names = set()
for name, same_name_classes in ddhash[class_t].items():
if not name:
continue
if "GCC" in utils.xml_generator:
class_names.add(same_name_classes[0].mangled)
elif "CastXML" in utils.xml_generator:
class_names.add(same_name_classes[0].name)
class_declarations = ddhash[class_declaration_t]
for name, same_name_class_declarations in \
class_declarations.items():
if not name:
continue
for class_declaration in same_name_class_declarations:
if "GCC" in utils.xml_generator:
if class_declaration.mangled and \
class_declaration.mangled in class_names:
decls.remove(class_declaration)
elif "CastXML" in utils.xml_generator:
if class_declaration.name and \
class_declaration.name in class_names:
decls.remove(class_declaration)
nsref.declarations = decls
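# Minimal usage sketch (generator path is hypothetical; the configuration class
# comes from the config module imported at the top of this file):
#
#   cfg = config.xml_generator_configuration_t(
#       xml_generator='castxml',
#       xml_generator_path='/usr/bin/castxml')
#   reader = source_reader_t(cfg)
#   decls = reader.read_string('namespace demo { int add(int a, int b); }')
#
# read_string() writes the snippet to a temporary header, runs the external XML
# generator and returns the linked declarations tree described above.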
| CIBC-Internal/itk | Modules/ThirdParty/pygccxml/src/pygccxml/parser/source_reader.py | Python | apache-2.0 | 22,365 |
# Generated by Django 2.0.6 on 2018-06-21 14:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("papers", "0020_auto_20180621_1426")]
operations = [
migrations.AddField(
model_name="paper",
name="source_file",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="papers.SourceFile",
),
)
]
| arxiv-vanity/arxiv-vanity | arxiv_vanity/papers/migrations/0021_paper_source_file.py | Python | apache-2.0 | 562 |
#File: default.py
"""
Provides a default style for bib4txt.py
Produces a list of citations that to be included in a reStructuredText document.
(In very simple documents, can also provide citation reference formatting
by substituting in the document text for the citation references.)
A style includes:
- citation template
- CitationManager class
- sortkey for make_text_output
(often based on a field list)
:note: you will often want to override these
:note: shared.py holds definitions common to most styles
:note: see the examples (e.g., example_numbered.py) of different styles
:author: Alan G Isaac
:contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm
:copyright: 2006 by Alan G Isaac
:license: MIT (see `license.txt`_)
:date: 2006-08-01
.. _license.txt: ./license.txt
"""
__docformat__ = "restructuredtext en"
__author__ = "Alan G. Isaac"
__version__ = "0.6"
__needs__ = '2.4'
################### IMPORTS ##########################
# from standard library
import logging
style_logger = logging.getLogger('bibstuff_logger')
#shared_logger = logging.getLogger('bibstuff_logger')
# imports from bibstuff
#TODO: change to relative imports (requires Python 2.5)
# :IMPORTANT: every style must import shared!
import shared
# most styles will start with the default templates:
# in default_templates.py
import default_templates
########################################################
##########################################################################
################### CITEREF FORMATTING #################################
##########################################################################
CITEREF_TEMPLATE = default_templates.DEFAULT_CITEREF_TEMPLATE.copy()
##########################################################################
################## CITATION FORMATTING #################################
##########################################################################
"""
Every style must have a CITATION_TEMPLATE, a CitationManager, and a ref_list_sort_key.
Crucial formatting decisions are made in the CITATION_TEMPLATE.
The CITATION_TEMPLATE provides default reference formatting (may also be used by BibStyle)
:TODO:
- provide graceful handling of missing fields
- allow different formatting of first and other names
- allow different initial line and subsequent line indenting
"""
# here we simply use the default citation template
CITATION_TEMPLATE = shared.CitationManager.default_citation_template
class CitationManager(shared.CitationManager):
################### CITEREF FORMATTING #########################
#we set the 'format_inline_cite' method equal to the below 'format_inline_cite' function
def format_inline_cite(self, cite_key_list):
"""
Usually you will need to write a 'format_inline_cite' function
that the CiteRefProcessor will use to substitute inline for citation references.
"""
style_logger.debug('default: enter CitationManager.format_inline_cite')
#:note: need entry to be None if cite_key not found, so discard=False
entry_list = self.find_entries(cite_key_list,discard=False)
"""
for entry in entry_list:
print entry
"""
return format_inline_cite(entry_list, self)
################### CITATION FORMATTING ########################
def get_citation_label(self,entry,citation_template=None):
return '.. [' + entry.citekey + ']\n'
#sort_key for sorting list of references
# (choice of field_list is a formatting decision)
def sortkey(self,bibentry):
return self.make_sort_key(bibentry,['Author','Year'])
def format_inline_cite(entry_list, citation_manager):
"""Return string, formatted in-text citation (allows *multiple* citations).
`entry_list` : list
entries to be formatted
`citation_manager` : CitationManager instance
handles name formatting
    :note: need the entry formatter because it determines the field the names are taken from for the cite
:note: much of the following functionality was in the old Bibstyle's formatCitation() method
:TODO: rewrite
:TODO: ? entries shd be more featureful ? (conflicts with core goal of BibEntry class)
"""
style_logger.debug("default.py: Entering format_inline_cite.")
name_date_sep = ' '
formatted_list = []
for entry in entry_list:
if not entry: #None replaces missing entries
formatted_list.append('?')
else:
year = entry['year']
entry_formatter = citation_manager.entry_formatter
last_names = entry.get_names(entry_formatter).get_last_names() #:note: ignores "von" part
if len(last_names) < 3:
last_names = ' and '.join(last_names)
else:
last_names = last_names[0] + ' et al.'
formatted_list.append( ('%s' + name_date_sep + '%s')%(last_names, year) )
#to cite by number can use this instead:
#formatted_list.append('%d'%entry.citation_rank)
style_logger.debug("Exiting format_inline_cite.")
return '(' + CITEREF_TEMPLATE['citeref_sep'].join(formatted_list)+')'
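# Illustrative output sketch (names and years are made up): for entries by
# "Smith" (1999) and by "Jones, Lee and Kim" (2005), format_inline_cite() above
# would produce something like "(Smith 1999; Jones et al. 2005)", with the
# actual separator taken from CITEREF_TEMPLATE['citeref_sep'].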
| matthew-brett/bibstuff | bibstuff/bibstyles/default.py | Python | mit | 4,900 |
import functools
import json
import logging
import random
import re
import string
import fnmatch
import unicodedata
import urllib
from textwrap import dedent
from external_auth.models import ExternalAuthMap
from external_auth.djangostore import DjangoOpenIDStore
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, authenticate, login
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
if settings.FEATURES.get('AUTH_USE_CAS'):
from django_cas.views import login as django_cas_login
from student.helpers import get_next_url_for_login_page
from student.models import UserProfile
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.utils.http import urlquote, is_safe_url
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from edxmako.shortcuts import render_to_response, render_to_string
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from django.views.decorators.csrf import ensure_csrf_cookie
import django_openid_auth.views as openid_views
from django_openid_auth import auth as openid_auth
from openid.consumer.consumer import SUCCESS
from openid.server.server import Server, ProtocolError, UntrustedReturnURL
from openid.server.trustroot import TrustRoot
from openid.extensions import ax, sreg
from ratelimitbackend.exceptions import RateLimitException
import student.views
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger("edx.external_auth")
AUDIT_LOG = logging.getLogger("audit")
SHIBBOLETH_DOMAIN_PREFIX = settings.SHIBBOLETH_DOMAIN_PREFIX
OPENID_DOMAIN_PREFIX = settings.OPENID_DOMAIN_PREFIX
# -----------------------------------------------------------------------------
# OpenID Common
# -----------------------------------------------------------------------------
@csrf_exempt
def default_render_failure(request,
message,
status=403,
template_name='extauth_failure.html',
exception=None):
"""Render an Openid error page to the user"""
log.debug("In openid_failure " + message)
data = render_to_string(template_name,
dict(message=message, exception=exception))
return HttpResponse(data, status=status)
# -----------------------------------------------------------------------------
# OpenID Authentication
# -----------------------------------------------------------------------------
def generate_password(length=12, chars=string.letters + string.digits):
"""Generate internal password for externally authenticated user"""
choice = random.SystemRandom().choice
return ''.join([choice(chars) for _i in range(length)])
@csrf_exempt
def openid_login_complete(request,
redirect_field_name=REDIRECT_FIELD_NAME,
render_failure=None):
"""Complete the openid login process"""
render_failure = (render_failure or default_render_failure)
openid_response = openid_views.parse_openid_response(request)
if not openid_response:
return render_failure(request,
'This is an OpenID relying party endpoint.')
if openid_response.status == SUCCESS:
external_id = openid_response.identity_url
oid_backend = openid_auth.OpenIDBackend()
details = oid_backend._extract_user_details(openid_response)
log.debug('openid success, details=%s', details)
url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
external_domain = "{0}{1}".format(OPENID_DOMAIN_PREFIX, url)
fullname = '%s %s' % (details.get('first_name', ''),
details.get('last_name', ''))
return _external_login_or_signup(
request,
external_id,
external_domain,
details,
details.get('email', ''),
fullname,
retfun=functools.partial(redirect, get_next_url_for_login_page(request)),
)
return render_failure(request, 'Openid failure')
def _external_login_or_signup(request,
external_id,
external_domain,
credentials,
email,
fullname,
retfun=None):
"""Generic external auth login or signup"""
# see if we have a map from this external_id to an edX username
try:
eamap = ExternalAuthMap.objects.get(external_id=external_id,
external_domain=external_domain)
log.debug(u'Found eamap=%s', eamap)
except ExternalAuthMap.DoesNotExist:
# go render form for creating edX user
eamap = ExternalAuthMap(external_id=external_id,
external_domain=external_domain,
external_credentials=json.dumps(credentials))
eamap.external_email = email
eamap.external_name = fullname
eamap.internal_password = generate_password()
log.debug(u'Created eamap=%s', eamap)
eamap.save()
log.info(u"External_Auth login_or_signup for %s : %s : %s : %s", external_domain, external_id, email, fullname)
uses_shibboleth = settings.FEATURES.get('AUTH_USE_SHIB') and external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)
uses_certs = settings.FEATURES.get('AUTH_USE_CERTIFICATES')
internal_user = eamap.user
if internal_user is None:
if uses_shibboleth:
# If we are using shib, try to link accounts
# For Stanford shib, the email the idp returns is actually under the control of the user.
            # Since the id the idps return is not user-editable, and is of the form "username@stanford.edu",
# use the id to link accounts instead.
try:
link_user = User.objects.get(email=eamap.external_id)
if not ExternalAuthMap.objects.filter(user=link_user).exists():
# if there's no pre-existing linked eamap, we link the user
eamap.user = link_user
eamap.save()
internal_user = link_user
log.info(u'SHIB: Linking existing account for %s', eamap.external_id)
# now pass through to log in
else:
# otherwise, there must have been an error, b/c we've already linked a user with these external
# creds
failure_msg = _(
"You have already created an account using "
"an external login like WebAuth or Shibboleth. "
"Please contact {tech_support_email} for support."
).format(
tech_support_email=settings.TECH_SUPPORT_EMAIL,
)
return default_render_failure(request, failure_msg)
except User.DoesNotExist:
log.info(u'SHIB: No user for %s yet, doing signup', eamap.external_email)
return _signup(request, eamap, retfun)
else:
            log.info(u'No user for %s yet, doing signup', eamap.external_email)
return _signup(request, eamap, retfun)
# We trust shib's authentication, so no need to authenticate using the password again
uname = internal_user.username
if uses_shibboleth:
user = internal_user
# Assuming this 'AUTHENTICATION_BACKENDS' is set in settings, which I think is safe
if settings.AUTHENTICATION_BACKENDS:
auth_backend = settings.AUTHENTICATION_BACKENDS[0]
else:
auth_backend = 'ratelimitbackend.backends.RateLimitModelBackend'
user.backend = auth_backend
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u'Linked user.id: {0} logged in via Shibboleth'.format(user.id))
else:
AUDIT_LOG.info(u'Linked user "{0}" logged in via Shibboleth'.format(user.email))
elif uses_certs:
# Certificates are trusted, so just link the user and log the action
user = internal_user
user.backend = 'ratelimitbackend.backends.RateLimitModelBackend'
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u'Linked user_id {0} logged in via SSL certificate'.format(user.id))
else:
AUDIT_LOG.info(u'Linked user "{0}" logged in via SSL certificate'.format(user.email))
else:
user = authenticate(username=uname, password=eamap.internal_password, request=request)
if user is None:
# we want to log the failure, but don't want to log the password attempted:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u'External Auth Login failed')
else:
AUDIT_LOG.warning(u'External Auth Login failed for "{0}"'.format(uname))
return _signup(request, eamap, retfun)
if not user.is_active:
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
# if BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH, we trust external auth and activate any users
# that aren't already active
user.is_active = True
user.save()
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u'Activating user {0} due to external auth'.format(user.id))
else:
AUDIT_LOG.info(u'Activating user "{0}" due to external auth'.format(uname))
else:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u'User {0} is not active after external login'.format(user.id))
else:
AUDIT_LOG.warning(u'User "{0}" is not active after external login'.format(uname))
# TODO: improve error page
msg = 'Account not yet activated: please look for link in your email'
return default_render_failure(request, msg)
login(request, user)
request.session.set_expiry(0)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u"Login success - user.id: {0}".format(user.id))
else:
AUDIT_LOG.info(u"Login success - {0} ({1})".format(user.username, user.email))
if retfun is None:
return redirect('/')
return retfun()
def _flatten_to_ascii(txt):
"""
Flattens possibly unicode txt to ascii (django username limitation)
    @param txt: the (possibly unicode) text to flatten
@return: the flattened txt (in the same type as was originally passed in)
"""
if isinstance(txt, str):
txt = txt.decode('utf-8')
return unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore')
else:
return unicode(unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore'))
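# Illustrative example (added for clarity, not part of the original module),
# assuming the Python 2 semantics used above:
#     _flatten_to_ascii(u'\xc5ngstr\xf6m')  ->  u'Angstrom'   (unicode in, unicode out)
#     _flatten_to_ascii('plain ascii')      ->  'plain ascii'  (str in, str out)
# NFKD decomposition splits off the combining accents, which the ASCII encode
# with 'ignore' then drops, satisfying Django's ASCII-only username limit.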
@ensure_csrf_cookie
def _signup(request, eamap, retfun=None):
"""
Present form to complete for signup via external authentication.
Even though the user has external credentials, he/she still needs
to create an account on the edX system, and fill in the user
registration form.
eamap is an ExternalAuthMap object, specifying the external user
for which to complete the signup.
retfun is a function to execute for the return value, if immediate
signup is used. That allows @ssl_login_shortcut() to work.
"""
# save this for use by student.views.create_account
request.session['ExternalAuthMap'] = eamap
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP', ''):
# do signin immediately, by calling create_account, instead of asking
# student to fill in form. MIT students already have information filed.
username = eamap.external_email.split('@', 1)[0]
username = username.replace('.', '_')
post_vars = dict(username=username,
honor_code=u'true',
terms_of_service=u'true')
log.info(u'doing immediate signup for %s, params=%s', username, post_vars)
student.views.create_account(request, post_vars)
        # should check return content for successful completion before proceeding
if retfun is not None:
return retfun()
else:
return redirect('/')
    # default username: conjoin the name with no spaces, flattened to ascii b/c django can't handle unicode usernames, sadly
# but this only affects username, not fullname
username = re.sub(r'\s', '', _flatten_to_ascii(eamap.external_name), flags=re.UNICODE)
context = {'has_extauth_info': True,
'show_signup_immediately': True,
'extauth_domain': eamap.external_domain,
'extauth_id': eamap.external_id,
'extauth_email': eamap.external_email,
'extauth_username': username,
'extauth_name': eamap.external_name,
'ask_for_tos': True,
}
    # Some openEdX instances can't have terms of service for shib users,
    # e.g. per Stanford's Office of General Counsel
uses_shibboleth = (settings.FEATURES.get('AUTH_USE_SHIB') and
eamap.external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX))
if uses_shibboleth and settings.FEATURES.get('SHIB_DISABLE_TOS'):
context['ask_for_tos'] = False
# detect if full name is blank and ask for it from user
context['ask_for_fullname'] = eamap.external_name.strip() == ''
# validate provided mail and if it's not valid ask the user
try:
validate_email(eamap.external_email)
context['ask_for_email'] = False
except ValidationError:
context['ask_for_email'] = True
log.info(u'EXTAUTH: Doing signup for %s', eamap.external_id)
return student.views.register_user(request, extra_context=context)
# -----------------------------------------------------------------------------
# MIT SSL
# -----------------------------------------------------------------------------
def _ssl_dn_extract_info(dn_string):
"""
Extract username, email address (may be anyuser@anydomain.com) and
full name from the SSL DN string. Return (user,email,fullname) if
successful, and None otherwise.
"""
ss = re.search('/emailAddress=(.*)@([^/]+)', dn_string)
if ss:
user = ss.group(1)
email = "%s@%s" % (user, ss.group(2))
else:
return None
ss = re.search('/CN=([^/]+)/', dn_string)
if ss:
fullname = ss.group(1)
else:
return None
return (user, email, fullname)
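# Illustrative sketch (not part of the original module); the DN below is a
# hypothetical example of the format this parser expects:
#     dn = '/C=US/ST=Massachusetts/O=MIT/CN=Jane Doe/emailAddress=jdoe@mit.edu'
#     _ssl_dn_extract_info(dn)  ->  ('jdoe', 'jdoe@mit.edu', 'Jane Doe')
# A DN missing either the emailAddress or the CN component yields None.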
def ssl_get_cert_from_request(request):
"""
    Extract the client SSL certificate DN string from the request, if it exists,
    and return it. Return '' if no certificate information is found.
"""
certkey = "SSL_CLIENT_S_DN" # specify the request.META field to use
cert = request.META.get(certkey, '')
if not cert:
cert = request.META.get('HTTP_' + certkey, '')
if not cert:
try:
# try the direct apache2 SSL key
cert = request._req.subprocess_env.get(certkey, '')
except Exception:
return ''
return cert
def ssl_login_shortcut(fn):
"""
Python function decorator for login procedures, to allow direct login
based on existing ExternalAuth record and MIT ssl certificate.
"""
def wrapped(*args, **kwargs):
"""
This manages the function wrapping, by determining whether to inject
the _external signup or just continuing to the internal function
call.
"""
if not settings.FEATURES['AUTH_USE_CERTIFICATES']:
return fn(*args, **kwargs)
request = args[0]
if request.user and request.user.is_authenticated(): # don't re-authenticate
return fn(*args, **kwargs)
cert = ssl_get_cert_from_request(request)
if not cert: # no certificate information - show normal login window
return fn(*args, **kwargs)
def retfun():
"""Wrap function again for call by _external_login_or_signup"""
return fn(*args, **kwargs)
(_user, email, fullname) = _ssl_dn_extract_info(cert)
return _external_login_or_signup(
request,
external_id=email,
external_domain="ssl:MIT",
credentials=cert,
email=email,
fullname=fullname,
retfun=retfun
)
return wrapped
@csrf_exempt
def ssl_login(request):
"""
This is called by branding.views.index when
FEATURES['AUTH_USE_CERTIFICATES'] = True
Used for MIT user authentication. This presumes the web server
(nginx) has been configured to require specific client
certificates.
If the incoming protocol is HTTPS (SSL) then authenticate via
client certificate. The certificate provides user email and
fullname; this populates the ExternalAuthMap. The user is
nevertheless still asked to complete the edX signup.
Else continues on with student.views.index, and no authentication.
"""
# Just to make sure we're calling this only at MIT:
if not settings.FEATURES['AUTH_USE_CERTIFICATES']:
return HttpResponseForbidden()
cert = ssl_get_cert_from_request(request)
if not cert:
# no certificate information - go onward to main index
return student.views.index(request)
(_user, email, fullname) = _ssl_dn_extract_info(cert)
redirect_to = get_next_url_for_login_page(request)
retfun = functools.partial(redirect, redirect_to)
return _external_login_or_signup(
request,
external_id=email,
external_domain="ssl:MIT",
credentials=cert,
email=email,
fullname=fullname,
retfun=retfun
)
# -----------------------------------------------------------------------------
# CAS (Central Authentication Service)
# -----------------------------------------------------------------------------
def cas_login(request, next_page=None, required=False):
"""
Uses django_cas for authentication.
    CAS is a common authentication method pioneered by Yale.
See http://en.wikipedia.org/wiki/Central_Authentication_Service
Does normal CAS login then generates user_profile if nonexistent,
and if login was successful. We assume that user details are
maintained by the central service, and thus an empty user profile
is appropriate.
"""
ret = django_cas_login(request, next_page, required)
if request.user.is_authenticated():
user = request.user
if not UserProfile.objects.filter(user=user):
user_profile = UserProfile(name=user.username, user=user)
user_profile.save()
return ret
# -----------------------------------------------------------------------------
# Shibboleth (Stanford and others. Uses *Apache* environment variables)
# -----------------------------------------------------------------------------
def shib_login(request):
"""
Uses Apache's REMOTE_USER environment variable as the external id.
This in turn typically uses EduPersonPrincipalName
http://www.incommonfederation.org/attributesummary.html#eduPersonPrincipal
but the configuration is in the shibboleth software.
"""
shib_error_msg = _(dedent(
"""
Your university identity server did not return your ID information to us.
Please try logging in again. (You may need to restart your browser.)
"""))
if not request.META.get('REMOTE_USER'):
log.error(u"SHIB: no REMOTE_USER found in request.META")
return default_render_failure(request, shib_error_msg)
elif not request.META.get('Shib-Identity-Provider'):
log.error(u"SHIB: no Shib-Identity-Provider in request.META")
return default_render_failure(request, shib_error_msg)
else:
# If we get here, the user has authenticated properly
shib = {attr: request.META.get(attr, '').decode('utf-8')
for attr in ['REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider', 'displayName']}
# Clean up first name, last name, and email address
# TODO: Make this less hardcoded re: format, but split will work
# even if ";" is not present, since we are accessing 1st element
shib['sn'] = shib['sn'].split(";")[0].strip().capitalize()
shib['givenName'] = shib['givenName'].split(";")[0].strip().capitalize()
# TODO: should we be logging creds here, at info level?
log.info(u"SHIB creds returned: %r", shib)
fullname = shib['displayName'] if shib['displayName'] else u'%s %s' % (shib['givenName'], shib['sn'])
redirect_to = get_next_url_for_login_page(request)
retfun = functools.partial(_safe_postlogin_redirect, redirect_to, request.get_host())
return _external_login_or_signup(
request,
external_id=shib['REMOTE_USER'],
external_domain=SHIBBOLETH_DOMAIN_PREFIX + shib['Shib-Identity-Provider'],
credentials=shib,
email=shib['mail'],
fullname=fullname,
retfun=retfun
)
def _safe_postlogin_redirect(redirect_to, safehost, default_redirect='/'):
"""
If redirect_to param is safe (not off this host), then perform the redirect.
Otherwise just redirect to '/'.
Basically copied from django.contrib.auth.views.login
@param redirect_to: user-supplied redirect url
@param safehost: which host is safe to redirect to
@return: an HttpResponseRedirect
"""
if is_safe_url(url=redirect_to, host=safehost):
return redirect(redirect_to)
return redirect(default_redirect)
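# Illustrative sketch (not part of the original module); host names are
# hypothetical:
#     _safe_postlogin_redirect('/dashboard', 'courses.example.edu')
#         -> redirect to '/dashboard' (same-host/relative URLs are safe)
#     _safe_postlogin_redirect('http://evil.example.com/x', 'courses.example.edu')
#         -> redirect to '/' (off-host URL is rejected)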
def course_specific_login(request, course_id):
"""
Dispatcher function for selecting the specific login method
required by the course
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = modulestore().get_course(course_key)
if not course:
# couldn't find the course, will just return vanilla signin page
return redirect_with_get('signin_user', request.GET)
# now the dispatching conditionals. Only shib for now
if (
settings.FEATURES.get('AUTH_USE_SHIB') and
course.enrollment_domain and
course.enrollment_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)
):
return redirect_with_get('shib-login', request.GET)
# Default fallthrough to normal signin page
return redirect_with_get('signin_user', request.GET)
def course_specific_register(request, course_id):
"""
Dispatcher function for selecting the specific registration method
required by the course
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = modulestore().get_course(course_key)
if not course:
# couldn't find the course, will just return vanilla registration page
return redirect_with_get('register_user', request.GET)
# now the dispatching conditionals. Only shib for now
if (
settings.FEATURES.get('AUTH_USE_SHIB') and
course.enrollment_domain and
course.enrollment_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)
):
# shib-login takes care of both registration and login flows
return redirect_with_get('shib-login', request.GET)
# Default fallthrough to normal registration page
return redirect_with_get('register_user', request.GET)
def redirect_with_get(view_name, get_querydict, do_reverse=True):
"""
Helper function to carry over get parameters across redirects
Using urlencode(safe='/') because the @login_required decorator generates 'next' queryparams with '/' unencoded
"""
if do_reverse:
url = reverse(view_name)
else:
url = view_name
if get_querydict:
return redirect("%s?%s" % (url, get_querydict.urlencode(safe='/')))
return redirect(view_name)
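# Illustrative sketch (not part of the original module); the query values are
# hypothetical:
#     redirect_with_get('signin_user', QueryDict('next=/dashboard'))
#         -> redirect to reverse('signin_user') + '?next=/dashboard'
#     redirect_with_get('signin_user', QueryDict(''))
#         -> plain redirect to the 'signin_user' view
# Passing do_reverse=False treats the first argument as a literal URL instead
# of a view name.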
# -----------------------------------------------------------------------------
# OpenID Provider
# -----------------------------------------------------------------------------
def get_xrds_url(resource, request):
"""
Return the XRDS url for a resource
"""
host = request.get_host()
location = host + '/openid/provider/' + resource + '/'
if request.is_secure():
return 'https://' + location
else:
return 'http://' + location
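# Illustrative sketch (not part of the original module); the host name is
# hypothetical:
#     get_xrds_url('login', request)  ->  'https://openid.example.edu/openid/provider/login/'
#     get_xrds_url('xrds', request)   ->  'https://openid.example.edu/openid/provider/xrds/'
# (the 'http://' variants are returned when the request is not secure).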
def add_openid_simple_registration(request, response, data):
    """Add any OpenID Simple Registration (sreg) fields requested by the consumer to the response."""
sreg_data = {}
sreg_request = sreg.SRegRequest.fromOpenIDRequest(request)
sreg_fields = sreg_request.allRequestedFields()
# if consumer requested simple registration fields, add them
if sreg_fields:
for field in sreg_fields:
if field == 'email' and 'email' in data:
sreg_data['email'] = data['email']
elif field == 'fullname' and 'fullname' in data:
sreg_data['fullname'] = data['fullname']
elif field == 'nickname' and 'nickname' in data:
sreg_data['nickname'] = data['nickname']
# construct sreg response
sreg_response = sreg.SRegResponse.extractResponse(sreg_request,
sreg_data)
sreg_response.toMessage(response.fields)
def add_openid_attribute_exchange(request, response, data):
    """Add any OpenID Attribute Exchange (ax) fields requested by the consumer to the response."""
try:
ax_request = ax.FetchRequest.fromOpenIDRequest(request)
except ax.AXError:
# not using OpenID attribute exchange extension
pass
else:
ax_response = ax.FetchResponse()
# if consumer requested attribute exchange fields, add them
if ax_request and ax_request.requested_attributes:
for type_uri in ax_request.requested_attributes.iterkeys():
email_schema = 'http://axschema.org/contact/email'
name_schema = 'http://axschema.org/namePerson'
if type_uri == email_schema and 'email' in data:
ax_response.addValue(email_schema, data['email'])
elif type_uri == name_schema and 'fullname' in data:
ax_response.addValue(name_schema, data['fullname'])
# construct ax response
ax_response.toMessage(response.fields)
def provider_respond(server, request, response, data):
"""
Respond to an OpenID request
"""
# get and add extensions
add_openid_simple_registration(request, response, data)
add_openid_attribute_exchange(request, response, data)
# create http response from OpenID response
webresponse = server.encodeResponse(response)
http_response = HttpResponse(webresponse.body)
http_response.status_code = webresponse.code
# add OpenID headers to response
for k, v in webresponse.headers.iteritems():
http_response[k] = v
return http_response
def validate_trust_root(openid_request):
"""
Only allow OpenID requests from valid trust roots
"""
trusted_roots = getattr(settings, 'OPENID_PROVIDER_TRUSTED_ROOT', None)
if not trusted_roots:
# not using trusted roots
return True
# don't allow empty trust roots
if (not hasattr(openid_request, 'trust_root') or
not openid_request.trust_root):
log.error('no trust_root')
return False
# ensure trust root parses cleanly (one wildcard, of form *.foo.com, etc.)
trust_root = TrustRoot.parse(openid_request.trust_root)
if not trust_root:
log.error('invalid trust_root')
return False
# don't allow empty return tos
if (not hasattr(openid_request, 'return_to') or
not openid_request.return_to):
log.error('empty return_to')
return False
# ensure return to is within trust root
if not trust_root.validateURL(openid_request.return_to):
log.error('invalid return_to')
return False
# check that the root matches the ones we trust
if not any(r for r in trusted_roots if fnmatch.fnmatch(trust_root, r)):
log.error('non-trusted root')
return False
return True
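# Illustrative note (not part of the original module): OPENID_PROVIDER_TRUSTED_ROOT
# is expected to hold fnmatch-style patterns, e.g. the hypothetical value
#     OPENID_PROVIDER_TRUSTED_ROOT = ['http://*.example.com/']
# When the setting is absent or empty, validate_trust_root accepts every request
# without performing the checks above.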
@csrf_exempt
def provider_login(request):
"""
OpenID login endpoint
"""
# make and validate endpoint
endpoint = get_xrds_url('login', request)
if not endpoint:
return default_render_failure(request, "Invalid OpenID request")
# initialize store and server
store = DjangoOpenIDStore()
server = Server(store, endpoint)
# first check to see if the request is an OpenID request.
# If so, the client will have specified an 'openid.mode' as part
# of the request.
querydict = dict(request.REQUEST.items())
error = False
if 'openid.mode' in request.GET or 'openid.mode' in request.POST:
# decode request
try:
openid_request = server.decodeRequest(querydict)
except (UntrustedReturnURL, ProtocolError):
openid_request = None
if not openid_request:
return default_render_failure(request, "Invalid OpenID request")
# don't allow invalid and non-trusted trust roots
if not validate_trust_root(openid_request):
return default_render_failure(request, "Invalid OpenID trust root")
# checkid_immediate not supported, require user interaction
if openid_request.mode == 'checkid_immediate':
return provider_respond(server, openid_request,
openid_request.answer(False), {})
# checkid_setup, so display login page
# (by falling through to the provider_login at the
# bottom of this method).
elif openid_request.mode == 'checkid_setup':
if openid_request.idSelect():
# remember request and original path
request.session['openid_setup'] = {
'request': openid_request,
'url': request.get_full_path(),
'post_params': request.POST,
}
# user failed login on previous attempt
if 'openid_error' in request.session:
error = True
del request.session['openid_error']
# OpenID response
else:
return provider_respond(server, openid_request,
server.handleRequest(openid_request), {})
# handle login redirection: these are also sent to this view function,
# but are distinguished by lacking the openid mode. We also know that
# they are posts, because they come from the popup
elif request.method == 'POST' and 'openid_setup' in request.session:
# get OpenID request from session
openid_setup = request.session['openid_setup']
openid_request = openid_setup['request']
openid_request_url = openid_setup['url']
post_params = openid_setup['post_params']
# We need to preserve the parameters, and the easiest way to do this is
# through the URL
url_post_params = {
param: post_params[param] for param in post_params if param.startswith('openid')
}
encoded_params = urllib.urlencode(url_post_params)
if '?' not in openid_request_url:
openid_request_url = openid_request_url + '?' + encoded_params
else:
openid_request_url = openid_request_url + '&' + encoded_params
del request.session['openid_setup']
# don't allow invalid trust roots
if not validate_trust_root(openid_request):
return default_render_failure(request, "Invalid OpenID trust root")
# check if user with given email exists
# Failure is redirected to this method (by using the original URL),
# which will bring up the login dialog.
email = request.POST.get('email', None)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
request.session['openid_error'] = True
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"OpenID login failed - Unknown user email")
else:
msg = u"OpenID login failed - Unknown user email: {0}".format(email)
AUDIT_LOG.warning(msg)
return HttpResponseRedirect(openid_request_url)
# attempt to authenticate user (but not actually log them in...)
# Failure is again redirected to the login dialog.
username = user.username
password = request.POST.get('password', None)
try:
user = authenticate(username=username, password=password, request=request)
except RateLimitException:
AUDIT_LOG.warning(u'OpenID - Too many failed login attempts.')
return HttpResponseRedirect(openid_request_url)
if user is None:
request.session['openid_error'] = True
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"OpenID login failed - invalid password")
else:
msg = u"OpenID login failed - password for {0} is invalid".format(email)
AUDIT_LOG.warning(msg)
return HttpResponseRedirect(openid_request_url)
# authentication succeeded, so fetch user information
# that was requested
if user is not None and user.is_active:
# remove error from session since login succeeded
if 'openid_error' in request.session:
del request.session['openid_error']
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u"OpenID login success - user.id: {0}".format(user.id))
else:
AUDIT_LOG.info(u"OpenID login success - {0} ({1})".format(
user.username, user.email))
# redirect user to return_to location
url = endpoint + urlquote(user.username)
response = openid_request.answer(True, None, url)
# Note too that this is hardcoded, and not really responding to
# the extensions that were registered in the first place.
results = {
'nickname': user.username,
'email': user.email,
'fullname': user.profile.name,
}
# the request succeeded:
return provider_respond(server, openid_request, response, results)
# the account is not active, so redirect back to the login page:
request.session['openid_error'] = True
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id {0}".format(user.id))
else:
msg = u"Login failed - Account not active for user {0}".format(username)
AUDIT_LOG.warning(msg)
return HttpResponseRedirect(openid_request_url)
# determine consumer domain if applicable
return_to = ''
if 'openid.return_to' in request.REQUEST:
return_to = request.REQUEST['openid.return_to']
matches = re.match(r'\w+:\/\/([\w\.-]+)', return_to)
return_to = matches.group(1)
# display login page
response = render_to_response('provider_login.html', {
'error': error,
'return_to': return_to
})
# add custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('xrds', request)
return response
def provider_identity(request):
"""
XRDS for identity discovery
"""
response = render_to_response('identity.xml',
{'url': get_xrds_url('login', request)},
content_type='text/xml')
# custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('identity', request)
return response
def provider_xrds(request):
"""
XRDS for endpoint discovery
"""
response = render_to_response('xrds.xml',
{'url': get_xrds_url('login', request)},
content_type='text/xml')
# custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('xrds', request)
return response
| jbzdak/edx-platform | common/djangoapps/external_auth/views.py | Python | agpl-3.0 | 36,622 |
from sympy import integrate, Rational, sqrt, Symbol
from sympy.physics.units import (au, amu, charge, day, find_unit,
foot, km, m, meter, minute, s,
speed_of_light, grams, quart, inch)
def test_units():
assert (5*m/s * day) / km == 432
assert foot / meter == Rational('0.3048')
# amu is a pure mass so mass/mass gives a number, not an amount (mol)
assert str(grams/(amu).n(2)) == '6.0e+23'
# Light from the sun needs about 8.3 minutes to reach earth
t = (1*au / speed_of_light).evalf() / minute
assert abs(t - 8.31) < 0.1
assert sqrt(m**2) == m
assert (sqrt(m))**2 == m
t = Symbol('t')
assert integrate(t*m/s, (t, 1*s, 5*s)) == 12*m*s
assert (t * m/s).integrate((t, 1*s, 5*s)) == 12*m*s
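# Worked arithmetic for the assertions above (added for clarity, not part of the
# original test): 5*m/s sustained for one day is 5 * 86400 = 432000 m = 432 km,
# so dividing by km leaves the plain number 432; and the integral of t*m/s dt
# from 1 s to 5 s is (5**2 - 1**2)/2 = 12 with units of m*s, matching 12*m*s.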
def test_issue_quart():
assert 4*quart/inch**3 == 231
def test_issue_2466():
assert (m < s).is_Relational
def test_find_unit():
assert find_unit('charge') == ['charge']
assert find_unit(charge) == ['C', 'charge', 'coulomb', 'coulombs']
| lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/physics/tests/test_units.py | Python | gpl-3.0 | 1,062 |
from __future__ import absolute_import
from .selector import *
| boooka/GeoPowerOff | venv/lib/python2.7/site-packages/grab/selector/__init__.py | Python | apache-2.0 | 63 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import traceback
from google.appengine.api import users
import webapp2
from shared import utils
from shared.config import AUTO_TAGGED_FIELDS, CQ_BOT_PASSWORD_KEY
from shared.parsing import (
parse_fields,
parse_record_key,
parse_request,
parse_strings,
)
from model.password import Password
from model.record import Record
def update_record(key=None, tags=None, fields=None): # pragma: no cover
tags = tags or []
fields = fields or {}
if not key and len(tags) == 0 and len(fields) == 0:
raise ValueError('Empty record entries disallowed')
  if 'project' not in fields:
raise ValueError('"Project" field missing')
for item in fields:
if item in AUTO_TAGGED_FIELDS:
tags.append('%s=%s' % (item, fields[item]))
record = Record(id=key)
record.tags = list(set(tags))
record.fields = fields
record.put()
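# Illustrative sketch (not part of the original handler); field names and values
# are hypothetical:
#     update_record(key=None,
#                   tags=['manual'],
#                   fields={'project': 'chromium', 'issue': 123})
# stores a Record tagged with 'manual' plus a 'name=value' tag for every field
# that appears in AUTO_TAGGED_FIELDS; duplicate tags are collapsed by the set()
# call above, and omitting the 'project' field raises ValueError.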
class Post(webapp2.RequestHandler): # pragma: no cover
def get(self):
if not utils.is_valid_user():
self.redirect(users.create_login_url('/'))
return
try:
update_record(**parse_request(self.request, {
'key': parse_record_key,
'tags': parse_strings,
'fields': parse_fields,
}))
except ValueError as e:
logging.warning(traceback.format_exc())
self.response.write(e)
def post(self):
if not utils.is_valid_user() and not self._is_cq_bot():
self.response.set_status(403)
return
try:
packets = map(json.loads, self.request.get_all('p'))
for packet in packets:
if not isinstance(packet, dict):
raise ValueError('JSON dictionary expected.')
except ValueError as e:
logging.warning(traceback.format_exc())
self.response.write('Invalid packet: %s' % e)
return
try:
for packet in packets:
update_record(**utils.filter_dict(packet, ('key', 'tags', 'fields')))
except ValueError as e:
logging.warning(traceback.format_exc())
self.response.write(e)
def _is_cq_bot(self):
password = self.request.get('password')
if not password:
return False
sha1 = utils.password_sha1(password)
return sha1 == Password.get_by_id(CQ_BOT_PASSWORD_KEY).sha1
| nicko96/Chrome-Infra | appengine/chromium_cq_status/handlers/post.py | Python | bsd-3-clause | 2,369 |
from gevent import monkey
monkey.patch_all()
from gevent.server import StreamServer
from multiprocessing import Process, current_process
from hashlib import sha256
import socket
import pickle
import pyshark
from sys import argv
from flask import Flask, render_template
from flask_socketio import SocketIO
app = Flask(__name__)
socketio = SocketIO(app)
@app.route('/')
def main():
return render_template("index.html")
def http_capturer(portnum):
interface_name = argv[1]
capture = pyshark.LiveCapture(interface=interface_name, display_filter="http.request.method")
connection = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
connection.connect(('127.0.0.1', portnum))
connection.send(sha256(current_process().authkey).digest())
connfile = connection.makefile(mode='w')
pickler = pickle.Pickler(connfile, protocol=pickle.HIGHEST_PROTOCOL)
for packet in capture.sniff_continuously():
data = {
"time": str(packet.sniff_time),
"method": str(packet.http.request_method),
"url": str(packet.http.request_full_uri),
}
if hasattr(packet, "ip"):
data.update({"source": str(packet.ip.src)})
else:
data.update({"source": str(packet.ipv6.src)})
if hasattr(packet.http, "user_agent"):
data.update({"user-agent": str(packet.http.user_agent)})
else:
data.update({"user-agent": None})
if hasattr(packet, 'urlencoded-form'):
data.update({"params": str(packet['urlencoded-form'])})
else:
data.update({"params": None})
pickler.dump(data)
connfile.flush()
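# Wire-protocol note (added for clarity, not part of the original module): the
# capturer first sends the 32-byte SHA-256 digest of the shared multiprocessing
# authkey as a handshake, then streams one pickled dict per sniffed HTTP request
# over the same connection; http_receiver below checks that digest before it
# unpickles anything.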
def http_receiver(connection, address):
(ip_addr, port) = address
if ip_addr != '127.0.0.1':
connection.close()
return
key = connection.recv(256)
lkey = sha256(current_process().authkey).digest()
if key != lkey:
connection.close()
print "wrong key from client: " + ip_addr + ":" + str(port)
return
unpickler = pickle.Unpickler(connection.makefile(mode='r'))
try:
while True:
data = unpickler.load()
socketio.emit('packet', data, namespace="/packets")
finally:
connection.close()
if __name__ == '__main__':
StreamServer('127.0.0.1:5005', handle=http_receiver).start()
capturer = Process(target=http_capturer, args=(5005,))
capturer.start()
socketio.run(app)
| shieldwed/package-viewer | packageviewer/app.py | Python | mit | 2,475 |
"""
Test for RFlink sensor components.
Test setup of rflink sensor component/platform. Verify manual and
automatic sensor creation.
"""
from datetime import timedelta
from homeassistant.components.rflink import CONF_RECONNECT_INTERVAL
from homeassistant.const import (
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
from tests.components.rflink.test_init import mock_rflink
DOMAIN = "binary_sensor"
CONFIG = {
"rflink": {
"port": "/dev/ttyABC0",
"ignore_devices": ["ignore_wildcard_*", "ignore_sensor"],
},
DOMAIN: {
"platform": "rflink",
"devices": {
"test": {"name": "test", "device_class": "door"},
"test2": {
"name": "test2",
"device_class": "motion",
"off_delay": 30,
"force_update": True,
},
},
},
}
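# Note added for clarity (not part of the original test module): "test" and
# "test2" above are RFLink device ids. The off_delay of 30 on "test2" means the
# binary sensor falls back to STATE_OFF 30 seconds after the last "on" event,
# which is the timeline exercised in test_off_delay below.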
async def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the rflink sensor component."""
# setup mocking rflink module
event_callback, create, _, _ = await mock_rflink(hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]["ignore"]
# test default state of sensor loaded from config
config_sensor = hass.states.get("binary_sensor.test")
assert config_sensor
assert config_sensor.state == STATE_OFF
assert config_sensor.attributes["device_class"] == "door"
# test on event for config sensor
event_callback({"id": "test", "command": "on"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_ON
# test off event for config sensor
event_callback({"id": "test", "command": "off"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_OFF
# test allon event for config sensor
event_callback({"id": "test", "command": "allon"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_ON
# test alloff event for config sensor
event_callback({"id": "test", "command": "alloff"})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_OFF
async def test_entity_availability(hass, monkeypatch):
"""If Rflink device is disconnected, entities should become unavailable."""
    # Make sure Rflink mock does not 'recover' too quickly from the
# disconnect or else the unavailability cannot be measured
config = CONFIG
failures = [True, True]
config[CONF_RECONNECT_INTERVAL] = 60
# Create platform and entities
_, _, _, disconnect_callback = await mock_rflink(
hass, config, DOMAIN, monkeypatch, failures=failures
)
# Entities are available by default
assert hass.states.get("binary_sensor.test").state == STATE_OFF
# Mock a disconnect of the Rflink device
disconnect_callback()
# Wait for dispatch events to propagate
await hass.async_block_till_done()
# Entity should be unavailable
assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE
# Reconnect the Rflink device
disconnect_callback()
# Wait for dispatch events to propagate
await hass.async_block_till_done()
# Entities should be available again
assert hass.states.get("binary_sensor.test").state == STATE_OFF
async def test_off_delay(hass, legacy_patchable_time, monkeypatch):
"""Test off_delay option."""
# setup mocking rflink module
event_callback, create, _, _ = await mock_rflink(hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]["ignore"]
events = []
on_event = {"id": "test2", "command": "on"}
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
now = dt_util.utcnow()
# fake time and turn on sensor
future = now + timedelta(seconds=0)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
event_callback(on_event)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_ON
assert len(events) == 1
# fake time and turn on sensor again
future = now + timedelta(seconds=15)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
event_callback(on_event)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_ON
assert len(events) == 2
# fake time and verify sensor still on (de-bounce)
future = now + timedelta(seconds=35)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_ON
assert len(events) == 2
# fake time and verify sensor is off
future = now + timedelta(seconds=45)
with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test2")
assert state.state == STATE_OFF
assert len(events) == 3
| tchellomello/home-assistant | tests/components/rflink/test_binary_sensor.py | Python | apache-2.0 | 5,889 |
# -*- coding: utf-8 -*-
# Copyright 2019 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models
class HrPromotionTransition(models.Model):
_name = "hr.promotion_transition"
_inherit = ["hr.promotion_transition", "hr.career_transition"]
| open-synergy/opnsynid-hr | hr_promotion_transition_timesheet_computation/models/hr_promotion_transition.py | Python | agpl-3.0 | 310 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
""" Defines the main State object."""
# pylint: disable=too-few-public-methods
from __future__ import (absolute_import, division, print_function)
import json
import pickle
import inspect
import copy
from sans.common.enums import SANSFacility
from sans.state.state_base import (StateBase, TypedParameter,
rename_descriptor_names, validator_sub_state)
from sans.state.data import StateData
from sans.state.move import StateMove
from sans.state.reduction_mode import StateReductionMode
from sans.state.slice_event import StateSliceEvent
from sans.state.mask import StateMask
from sans.state.wavelength import StateWavelength
from sans.state.save import StateSave
from sans.state.adjustment import StateAdjustment
from sans.state.scale import StateScale
from sans.state.convert_to_q import StateConvertToQ
from sans.state.automatic_setters import (automatic_setters)
# Note that the compatibility state is not part of the new reduction chain, but allows us to accurately compare
# results obtained via the old and new reduction chain
from sans.state.compatibility import (StateCompatibility, get_compatibility_builder)
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
@rename_descriptor_names
class State(StateBase):
data = TypedParameter(StateData, validator_sub_state)
move = TypedParameter(StateMove, validator_sub_state)
reduction = TypedParameter(StateReductionMode, validator_sub_state)
slice = TypedParameter(StateSliceEvent, validator_sub_state)
mask = TypedParameter(StateMask, validator_sub_state)
wavelength = TypedParameter(StateWavelength, validator_sub_state)
save = TypedParameter(StateSave, validator_sub_state)
scale = TypedParameter(StateScale, validator_sub_state)
adjustment = TypedParameter(StateAdjustment, validator_sub_state)
convert_to_q = TypedParameter(StateConvertToQ, validator_sub_state)
compatibility = TypedParameter(StateCompatibility, validator_sub_state)
def __init__(self):
super(State, self).__init__()
def validate(self):
is_invalid = dict()
# Make sure that the substates are contained
        if not self.data:
            is_invalid.update({"data": "State: The state object needs to include a StateData object."})
        if not self.move:
            is_invalid.update({"move": "State: The state object needs to include a StateMove object."})
        if not self.reduction:
            is_invalid.update({"reduction": "State: The state object needs to include a StateReduction object."})
        if not self.slice:
            is_invalid.update({"slice": "State: The state object needs to include a StateSliceEvent object."})
        if not self.mask:
            is_invalid.update({"mask": "State: The state object needs to include a StateMask object."})
        if not self.wavelength:
            is_invalid.update({"wavelength": "State: The state object needs to include a StateWavelength object."})
        if not self.save:
            is_invalid.update({"save": "State: The state object needs to include a StateSave object."})
        if not self.scale:
            is_invalid.update({"scale": "State: The state object needs to include a StateScale object."})
        if not self.adjustment:
            is_invalid.update({"adjustment": "State: The state object needs to include a StateAdjustment object."})
        if not self.convert_to_q:
            is_invalid.update({"convert_to_q": "State: The state object needs to include a StateConvertToQ object."})
# We don't enforce a compatibility mode, we just create one if it does not exist
if not self.compatibility:
if self.data:
self.compatibility = get_compatibility_builder(self.data).build()
if is_invalid:
raise ValueError("State: There is an issue with your in put. See: {0}".format(json.dumps(is_invalid)))
# Check the attributes themselves
is_invalid = {}
for descriptor_name, descriptor_object in inspect.getmembers(type(self)):
if inspect.isdatadescriptor(descriptor_object) and isinstance(descriptor_object, TypedParameter):
try:
attr = getattr(self, descriptor_name)
attr.validate()
except ValueError as err:
is_invalid.update({descriptor_name: pickle.dumps(str(err))})
if is_invalid:
raise ValueError("State: There is an issue with your in put. See: {0}".format(json.dumps(is_invalid)))
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
class StateBuilder(object):
@automatic_setters(State)
def __init__(self):
super(StateBuilder, self).__init__()
self.state = State()
def build(self):
# Make sure that the product is in a valid state, ie not incomplete
self.state.validate()
return copy.copy(self.state)
# ------------------------------------------
# Factory method for SANStateDataBuilder
# ------------------------------------------
def get_state_builder(data_info):
facility = data_info.facility
if facility is SANSFacility.ISIS:
return StateBuilder()
else:
raise NotImplementedError("SANSStateBuilder: Could not find any valid state builder for the "
"specified SANSStateData object {0}".format(str(data_info)))
| mganeva/mantid | scripts/SANS/sans/state/state.py | Python | gpl-3.0 | 5,932 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2017 Digi International Inc. All Rights Reserved.
from digi.xbee.models.mode import OperatingMode
from digi.xbee.models.address import XBee16BitAddress, XBee64BitAddress
from digi.xbee.models.status import ATCommandStatus, DiscoveryStatus, TransmitStatus, ModemStatus
from digi.xbee.packets.aft import ApiFrameType
from digi.xbee.packets.base import XBeeAPIPacket, DictKeys
from digi.xbee.util import utils
from digi.xbee.exception import InvalidOperatingModeException, InvalidPacketException
from digi.xbee.io import IOSample, IOLine
class ATCommPacket(XBeeAPIPacket):
"""
This class represents an AT command packet.
Used to query or set module parameters on the local device. This API
    command applies changes after executing the command. (Changes made to
    module parameters take effect once changes are applied.) The command
    response is received as an :class:`.ATCommResponsePacket`.
.. seealso::
| :class:`.ATCommResponsePacket`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 6
def __init__(self, frame_id, command, parameter=None):
"""
Class constructor. Instantiates a new :class:`.ATCommPacket` object with the provided parameters.
Args:
frame_id (Integer): the frame ID of the packet.
command (String): the AT command of the packet. Must be a string.
parameter (Bytearray, optional): the AT command parameter. Optional.
Raises:
ValueError: if ``frame_id`` is less than 0 or greater than 255.
ValueError: if length of ``command`` is different than 2.
.. seealso::
| :class:`.XBeeAPIPacket`
"""
if len(command) != 2:
raise ValueError("Invalid command " + command)
if frame_id < 0 or frame_id > 255:
raise ValueError("Frame id must be between 0 and 255.")
super().__init__(ApiFrameType.AT_COMMAND)
self.__command = command
self.__parameter = parameter
self._frame_id = frame_id
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.ATCommPacket`
Raises:
InvalidPacketException: if the bytearray length is less than 6. (start delim. + length (2 bytes) + frame
type + frame id + checksum = 6 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is different than :attr:`.ApiFrameType.AT_COMMAND`
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=ATCommPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.AT_COMMAND.code:
raise InvalidPacketException("This packet is not an AT command packet.")
        # decode() the two command bytes; str() on a bytearray would yield its repr under Python 3
        return ATCommPacket(raw[4], raw[5:7].decode(), raw[7:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
if self.__parameter is not None:
return bytearray(self.__command, 'utf8') + self.__parameter
return bytearray(self.__command, 'utf8')
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.COMMAND: self.__command,
DictKeys.PARAMETER: list(self.__parameter) if self.__parameter is not None else None}
def __get_command(self):
"""
Returns the AT command of the packet.
Returns:
String: the AT command of the packet.
"""
return self.__command
def __set_command(self, command):
"""
Sets the AT command of the packet.
Args:
command (String): the new AT command of the packet. Must have length = 2.
Raises:
ValueError: if length of ``command`` is different than 2.
"""
if len(command) != 2:
raise ValueError("Invalid command " + command)
self.__command = command
def __get_parameter(self):
"""
Returns the parameter of the packet.
Returns:
Bytearray: the parameter of the packet.
"""
return self.__parameter
def __set_parameter(self, param):
"""
Sets the parameter of the packet.
Args:
param (Bytearray): the new parameter of the packet.
"""
self.__parameter = param
command = property(__get_command, __set_command)
"""String. AT command."""
parameter = property(__get_parameter, __set_parameter)
"""Bytearray. AT command parameter."""
class ATCommResponsePacket(XBeeAPIPacket):
"""
This class represents an AT command response packet.
In response to an AT command message, the module will send an AT command
response message. Some commands will send back multiple frames (for example,
the ``ND`` - Node Discover command).
This packet is received in response of an :class:`.ATCommPacket`.
Response also includes an :class:`.ATCommandStatus` object with the status
of the AT command.
.. seealso::
| :class:`.ATCommPacket`
| :class:`.ATCommandStatus`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 9
def __init__(self, frame_id, command, response_status=ATCommandStatus.OK, comm_value=None):
"""
Class constructor. Instantiates a new :class:`.ATCommResponsePacket` object with the provided parameters.
Args:
frame_id (Integer): the frame ID of the packet. Must be between 0 and 255.
command (String): the AT command of the packet. Must be a string.
response_status (:class:`.ATCommandStatus`): the status of the AT command.
comm_value (Bytearray, optional): the AT command response value. Optional.
Raises:
ValueError: if ``frame_id`` is less than 0 or greater than 255.
ValueError: if length of ``command`` is different than 2.
.. seealso::
| :class:`.ATCommandStatus`
| :class:`.XBeeAPIPacket`
"""
if frame_id < 0 or frame_id > 255:
raise ValueError("Frame id must be between 0 and 255.")
if len(command) != 2:
raise ValueError("Invalid command " + command)
super().__init__(ApiFrameType.AT_COMMAND_RESPONSE)
self._frame_id = frame_id
self.__command = command
self.__response_status = response_status
self.__comm_value = comm_value
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.ATCommResponsePacket`
Raises:
InvalidPacketException: if the bytearray length is less than 9. (start delim. + length (2 bytes) +
frame type + frame id + at command (2 bytes) + command status + checksum = 9 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is different than :attr:`.ApiFrameType.AT_COMMAND_RESPONSE`.
InvalidPacketException: if the command status field is not a valid value. See :class:`.ATCommandStatus`
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=ATCommResponsePacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.AT_COMMAND_RESPONSE.code:
raise InvalidPacketException("This packet is not an AT command response packet.")
if ATCommandStatus.get(raw[7]) is None:
raise InvalidPacketException("Invalid command status.")
return ATCommResponsePacket(raw[4], raw[5:7].decode(), ATCommandStatus.get(raw[7]), raw[8:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = bytearray(self.__command)
ret.append(self.__response_status.code)
if self.__comm_value is not None:
ret += self.__comm_value
return ret
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.COMMAND: self.__command,
DictKeys.AT_CMD_STATUS: self.__response_status,
DictKeys.RF_DATA: list(self.__comm_value) if self.__comm_value is not None else None}
def __get_command(self):
"""
Returns the AT command of the packet.
Returns:
String: the AT command of the packet.
"""
return self.__command
def __set_command(self, command):
"""
Sets the AT command of the packet.
Args:
command (String): the new AT command of the packet. Must have length = 2.
Raises:
ValueError: if length of ``command`` is different than 2.
"""
if len(command) != 2:
raise ValueError("Invalid command " + command)
self.__command = command
def __get_value(self):
"""
Returns the AT command response value.
Returns:
Bytearray: the AT command response value.
"""
return self.__comm_value
def __set_value(self, __comm_value):
"""
Sets the AT command response value.
Args:
__comm_value (Bytearray): the new AT command response value.
"""
self.__comm_value = __comm_value
def __get_response_status(self):
"""
Returns the AT command response status of the packet.
Returns:
:class:`.ATCommandStatus`: the AT command response status of the packet.
.. seealso::
| :class:`.ATCommandStatus`
"""
return self.__response_status
def __set_response_status(self, response_status):
"""
Sets the AT command response status of the packet
Args:
response_status (:class:`.ATCommandStatus`) : the new AT command response status of the packet.
.. seealso::
| :class:`.ATCommandStatus`
"""
self.__response_status = response_status
command = property(__get_command, __set_command)
"""String. AT command."""
command_value = property(__get_value, __set_value)
"""Bytearray. AT command value."""
status = property(__get_response_status, __set_response_status)
""":class:`.ATCommandStatus`. AT command response status."""
class ReceivePacket(XBeeAPIPacket):
"""
This class represents a receive packet. Packet is built using the parameters
of the constructor or providing a valid byte array.
When the module receives an RF packet, it is sent out the UART using this
message type.
This packet is received when external devices send transmit request
packets to this module.
Among received data, some options can also be received indicating
transmission parameters.
.. seealso::
| :class:`.TransmitPacket`
| :class:`.ReceiveOptions`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 16
def __init__(self, x64bit_addr, x16bit_addr, receive_options, rf_data=None):
"""
Class constructor. Instantiates a new :class:`.ReceivePacket` object with the provided parameters.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit source address.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit source address.
receive_options (Integer): bitfield indicating the receive options.
rf_data (Bytearray, optional): received RF data. Optional.
.. seealso::
| :class:`.ReceiveOptions`
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.XBeeAPIPacket`
"""
super().__init__(ApiFrameType.RECEIVE_PACKET)
self.__x64bit_addr = x64bit_addr
self.__x16bit_addr = x16bit_addr
self.__receive_options = receive_options
self.__rf_data = rf_data
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
            :class:`.ReceivePacket`
Raises:
            InvalidPacketException: if the bytearray length is less than 16. (start delim. + length (2 bytes) + frame
                type + 64bit addr. + 16bit addr. + Receive options + checksum = 16 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.RECEIVE_PACKET`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=ReceivePacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.RECEIVE_PACKET.code:
raise InvalidPacketException("This packet is not a receive packet.")
return ReceivePacket(XBee64BitAddress(raw[4:12]),
XBee16BitAddress(raw[12:14]),
raw[14],
raw[15:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return False
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = self.__x64bit_addr.address
ret += self.__x16bit_addr.address
ret.append(self.__receive_options)
if self.__rf_data is not None:
return ret + self.__rf_data
return ret
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.RECEIVE_OPTIONS: self.__receive_options,
DictKeys.RF_DATA: list(self.__rf_data) if self.__rf_data is not None else None}
def __get_64bit_addr(self):
"""
Returns the 64-bit source address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit source address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit source address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit source address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit source address.
Returns:
:class:`.XBee16BitAddress`: the 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit source address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
def __get_options(self):
"""
Returns the receive options bitfield.
Returns:
Integer: the receive options bitfield.
.. seealso::
| :class:`.ReceiveOptions`
"""
return self.__receive_options
def __set_options(self, receive_options):
"""
Sets the receive options bitfield.
Args:
receive_options (Integer): the new receive options bitfield.
.. seealso::
| :class:`.ReceiveOptions`
"""
self.__receive_options = receive_options
def __get_rf_data(self):
"""
Returns the received RF data.
Returns:
Bytearray: the received RF data.
"""
if self.__rf_data is None:
return None
return self.__rf_data.copy()
def __set_rf_data(self, rf_data):
"""
Sets the received RF data.
Args:
rf_data (Bytearray): the new received RF data.
"""
if rf_data is None:
self.__rf_data = None
else:
self.__rf_data = rf_data.copy()
x64bit_source_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit source address."""
x16bit_source_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit source address."""
receive_options = property(__get_options, __set_options)
"""Integer. Receive options bitfield."""
rf_data = property(__get_rf_data, __set_rf_data)
"""Bytearray. Received RF data."""
class RemoteATCommandPacket(XBeeAPIPacket):
"""
This class represents a Remote AT command Request packet. Packet is built
using the parameters of the constructor or providing a valid byte array.
Used to query or set module parameters on a remote device. For parameter
changes on the remote device to take effect, changes must be applied, either
by setting the apply changes options bit, or by sending an ``AC`` command
to the remote node.
Remote command options are set as a bitfield.
If configured, command response is received as a :class:`.RemoteATCommandResponsePacket`.
.. seealso::
| :class:`.RemoteATCommandResponsePacket`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 19
def __init__(self, frame_id, x64bit_addr, x16bit_addr, transmit_options, command, parameter=None):
"""
Class constructor. Instantiates a new :class:`.RemoteATCommandPacket` object with the provided parameters.
Args:
            frame_id (Integer): the frame ID of the packet.
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit destination address.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit destination address.
transmit_options (Integer): bitfield of supported transmission options.
command (String): AT command to send.
parameter (Bytearray, optional): AT command parameter. Optional.
Raises:
ValueError: if ``frame_id`` is less than 0 or greater than 255.
ValueError: if length of ``command`` is different than 2.
.. seealso::
| :class:`.RemoteATCmdOptions`
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.XBeeAPIPacket`
"""
if frame_id < 0 or frame_id > 255:
raise ValueError("Frame id must be between 0 and 255.")
if len(command) != 2:
raise ValueError("Invalid command " + command)
super().__init__(ApiFrameType.REMOTE_AT_COMMAND_REQUEST)
self._frame_id = frame_id
self.__x64bit_addr = x64bit_addr
self.__x16bit_addr = x16bit_addr
self.__transmit_options = transmit_options
self.__command = command
self.__parameter = parameter
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.RemoteATCommandPacket`
Raises:
InvalidPacketException: if the Bytearray length is less than 19. (start delim. + length (2 bytes) + frame
type + frame id + 64bit addr. + 16bit addr. + transmit options + command (2 bytes) + checksum =
19 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.REMOTE_AT_COMMAND_REQUEST`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=RemoteATCommandPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.REMOTE_AT_COMMAND_REQUEST.code:
raise InvalidPacketException("This packet is not a remote AT command request packet.")
        return RemoteATCommandPacket(
            raw[4],
            XBee64BitAddress(raw[5:13]),
            XBee16BitAddress(raw[13:15]),
            raw[15],
            raw[16:18].decode("utf8"),
            raw[18:-1]
        )
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = self.__x64bit_addr.address
ret += self.__x16bit_addr.address
ret.append(self.__transmit_options)
ret += bytearray(self.__command, 'utf8')
return ret if self.__parameter is None else ret + self.__parameter
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.TRANSMIT_OPTIONS: self.__transmit_options,
DictKeys.COMMAND: self.__command,
DictKeys.PARAMETER: list(self.__parameter) if self.__parameter is not None else None}
def __get_64bit_addr(self):
"""
Returns the 64-bit destination address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit destination address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit destination address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit destination address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit destination address.
Returns:
:class:`.XBee16BitAddress`: the 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit destination address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
def __get_transmit_options(self):
"""
Returns the transmit options bitfield.
Returns:
Integer: the transmit options bitfield.
.. seealso::
| :class:`.RemoteATCmdOptions`
"""
return self.__transmit_options
def __set_transmit_options(self, transmit_options):
"""
Sets the transmit options bitfield.
Args:
transmit_options (Integer): the new transmit options bitfield.
.. seealso::
| :class:`.RemoteATCmdOptions`
"""
self.__transmit_options = transmit_options
def __get_parameter(self):
"""
Returns the AT command parameter.
Returns:
Bytearray: the AT command parameter.
"""
return self.__parameter
def __set_parameter(self, parameter):
"""
Sets the AT command parameter.
Args:
parameter (Bytearray): the new AT command parameter.
"""
self.__parameter = parameter
def __get_command(self):
"""
Returns the AT command.
Returns:
String: the AT command.
"""
return self.__command
def __set_command(self, command):
"""
Sets the AT command.
Args:
command (String): the new AT command.
"""
self.__command = command
x64bit_dest_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit destination address."""
x16bit_dest_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit destination address."""
transmit_options = property(__get_transmit_options, __set_transmit_options)
"""Integer. Transmit options bitfield."""
command = property(__get_command, __set_command)
"""String. AT command."""
parameter = property(__get_parameter, __set_parameter)
"""Bytearray. AT command parameter."""
class RemoteATCommandResponsePacket(XBeeAPIPacket):
"""
This class represents a remote AT command response packet. Packet is built
using the parameters of the constructor or providing a valid byte array.
If a module receives a remote command response RF data frame in response
to a remote AT command request, the module will send a remote AT command
response message out the UART. Some commands may send back multiple frames,
for example, Node Discover (``ND``) command.
This packet is received in response of a :class:`.RemoteATCommandPacket`.
Response also includes an object with the status of the AT command.
.. seealso::
| :class:`.RemoteATCommandPacket`
| :class:`.ATCommandStatus`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 19
def __init__(self, frame_id, x64bit_addr, x16bit_addr, command, response_status, comm_value=None):
"""
Class constructor. Instantiates a new :class:`.RemoteATCommandResponsePacket` object with the provided
parameters.
Args:
frame_id (Integer): the frame ID of the packet.
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit source address
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit source address.
command (String): the AT command of the packet. Must be a string.
response_status (:class:`.ATCommandStatus`): the status of the AT command.
comm_value (Bytearray, optional): the AT command response value. Optional.
Raises:
ValueError: if ``frame_id`` is less than 0 or greater than 255.
ValueError: if length of ``command`` is different than 2.
.. seealso::
| :class:`.ATCommandStatus`
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.XBeeAPIPacket`
"""
if frame_id > 255 or frame_id < 0:
raise ValueError("frame_id must be between 0 and 255.")
if len(command) != 2:
raise ValueError("Invalid command " + command)
super().__init__(ApiFrameType.REMOTE_AT_COMMAND_RESPONSE)
self._frame_id = frame_id
self.__x64bit_addr = x64bit_addr
self.__x16bit_addr = x16bit_addr
self.__command = command
self.__response_status = response_status
self.__comm_value = comm_value
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.RemoteATCommandResponsePacket`.
Raises:
InvalidPacketException: if the bytearray length is less than 19. (start delim. + length (2 bytes) + frame
                type + frame id + 64bit addr. + 16bit addr. + command (2 bytes) + status + checksum =
19 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.REMOTE_AT_COMMAND_RESPONSE`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=RemoteATCommandResponsePacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.REMOTE_AT_COMMAND_RESPONSE.code:
raise InvalidPacketException("This packet is not a remote AT command response packet.")
return RemoteATCommandResponsePacket(raw[4], XBee64BitAddress(raw[5:13]),
XBee16BitAddress(raw[13:15]), str(raw[15:17].decode()),
ATCommandStatus.get(raw[17]), raw[18:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = self.__x64bit_addr.address
ret += self.__x16bit_addr.address
        ret += bytearray(self.__command, "utf8")
ret.append(self.__response_status.code)
if self.__comm_value is not None:
ret += self.__comm_value
return ret
    def _get_api_packet_spec_data_dict(self):
        """
        Override method.
        .. seealso::
           | :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
        """
        return {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
                DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
                DictKeys.COMMAND: self.__command,
                DictKeys.AT_CMD_STATUS: self.__response_status,
                DictKeys.RF_DATA: list(self.__comm_value) if self.__comm_value is not None else None}
def __get_command(self):
"""
Returns the AT command of the packet.
Returns:
String: the AT command of the packet.
"""
return self.__command
def __set_command(self, command):
"""
Sets the AT command of the packet.
Args:
command (String): the new AT command of the packet. Must have length = 2.
Raises:
ValueError: if length of ``command`` is different than 2.
"""
if len(command) != 2:
raise ValueError("Invalid command " + command)
self.__command = command
def __get_value(self):
"""
Returns the AT command response value.
Returns:
Bytearray: the AT command response value.
"""
return self.__comm_value
def __set_value(self, comm_value):
"""
Sets the AT command response value.
Args:
comm_value (Bytearray): the new AT command response value.
"""
self.__comm_value = comm_value
def __get_response_status(self):
"""
Returns the AT command response status of the packet.
Returns:
:class:`.ATCommandStatus`: the AT command response status of the packet.
.. seealso::
| :class:`.ATCommandStatus`
"""
return self.__response_status
def __set_response_status(self, response_status):
"""
Sets the AT command response status of the packet
Args:
response_status (:class:`.ATCommandStatus`) : the new AT command response status of the packet.
.. seealso::
| :class:`.ATCommandStatus`
"""
self.__response_status = response_status
def __get_64bit_addr(self):
"""
Returns the 64-bit source address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit source address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit source address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit source address
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit source address.
Returns:
:class:`.XBee16BitAddress`: the 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit source address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
x64bit_source_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit source address."""
x16bit_source_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit source address."""
command = property(__get_command, __set_command)
"""String. AT command."""
command_value = property(__get_value, __set_value)
"""Bytearray. AT command value."""
status = property(__get_response_status, __set_response_status)
""":class:`.ATCommandStatus`. AT command response status."""
class TransmitPacket(XBeeAPIPacket):
"""
This class represents a transmit request packet. Packet is built using the parameters
of the constructor or providing a valid API byte array.
A transmit request API frame causes the module to send data as an RF
packet to the specified destination.
The 64-bit destination address should be set to ``0x000000000000FFFF``
for a broadcast transmission (to all devices).
The coordinator can be addressed by either setting the 64-bit address to
    all ``0x00`` and the 16-bit address to ``0xFFFE``, OR by setting the
64-bit address to the coordinator's 64-bit address and the 16-bit address to
``0x0000``.
For all other transmissions, setting the 16-bit address to the correct
16-bit address can help improve performance when transmitting to multiple
destinations.
If a 16-bit address is not known, this field should be set to
``0xFFFE`` (unknown).
The transmit status frame ( :attr:`.ApiFrameType.TRANSMIT_STATUS`) will
indicate the discovered 16-bit address, if successful (see :class:`.TransmitStatusPacket`).
The broadcast radius can be set from ``0`` up to ``NH``. If set
to ``0``, the value of ``NH`` specifies the broadcast radius
(recommended). This parameter is only used for broadcast transmissions.
The maximum number of payload bytes can be read with the ``NP``
command.
Several transmit options can be set using the transmit options bitfield.
.. seealso::
| :class:`.TransmitOptions`
| :attr:`.XBee16BitAddress.COORDINATOR_ADDRESS`
| :attr:`.XBee16BitAddress.UNKNOWN_ADDRESS`
| :attr:`.XBee64BitAddress.BROADCAST_ADDRESS`
| :attr:`.XBee64BitAddress.COORDINATOR_ADDRESS`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 18
def __init__(self, frame_id, x64bit_addr, x16bit_addr, broadcast_radius, transmit_options, rf_data=None):
"""
Class constructor. Instantiates a new :class:`.TransmitPacket` object with the provided parameters.
Args:
            frame_id (Integer): the frame ID of the packet.
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit destination address.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit destination address.
broadcast_radius (Integer): maximum number of hops a broadcast transmission can occur.
transmit_options (Integer): bitfield of supported transmission options.
rf_data (Bytearray, optional): RF data that is sent to the destination device. Optional.
.. seealso::
| :class:`.TransmitOptions`
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.XBeeAPIPacket`
Raises:
ValueError: if ``frame_id`` is less than 0 or greater than 255.
"""
if frame_id > 255 or frame_id < 0:
raise ValueError("frame_id must be between 0 and 255.")
super().__init__(ApiFrameType.TRANSMIT_REQUEST)
self._frame_id = frame_id
self.__x64bit_addr = x64bit_addr
self.__x16bit_addr = x16bit_addr
self.__broadcast_radius = broadcast_radius
self.__transmit_options = transmit_options
self.__rf_data = rf_data
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.TransmitPacket`.
Raises:
            InvalidPacketException: if the bytearray length is less than 18. (start delim. + length (2 bytes) + frame
                type + frame id + 64bit addr. + 16bit addr. + broadcast radius + transmit options + checksum = 18 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.TRANSMIT_REQUEST`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=TransmitPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.TRANSMIT_REQUEST.code:
raise InvalidPacketException("This packet is not a transmit request packet.")
return TransmitPacket(raw[4], XBee64BitAddress(raw[5:13]),
XBee16BitAddress(raw[13:15]), raw[15],
raw[16], raw[17:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = self.__x64bit_addr.address
ret += self.__x16bit_addr.address
ret.append(self.__broadcast_radius)
ret.append(self.__transmit_options)
if self.__rf_data is not None:
return ret + self.__rf_data
return ret
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
        return {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
                DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.BROADCAST_RADIUS: self.__broadcast_radius,
DictKeys.TRANSMIT_OPTIONS: self.__transmit_options,
DictKeys.RF_DATA: list(self.__rf_data) if self.__rf_data is not None else None}
def __get_rf_data(self):
"""
Returns the RF data to send.
Returns:
Bytearray: the RF data to send.
"""
if self.__rf_data is None:
return None
return self.__rf_data.copy()
def __set_rf_data(self, rf_data):
"""
Sets the RF data to send.
Args:
rf_data (Bytearray): the new RF data to send.
"""
if rf_data is None:
self.__rf_data = None
else:
self.__rf_data = rf_data.copy()
def __get_transmit_options(self):
"""
Returns the transmit options bitfield.
Returns:
Integer: the transmit options bitfield.
.. seealso::
| :class:`.TransmitOptions`
"""
return self.__transmit_options
def __set_transmit_options(self, transmit_options):
"""
Sets the transmit options bitfield.
Args:
transmit_options (Integer): the new transmit options bitfield.
.. seealso::
| :class:`.TransmitOptions`
"""
self.__transmit_options = transmit_options
def __get_broadcast_radius(self):
"""
        Returns the broadcast radius. The broadcast radius is the maximum number of hops a broadcast transmission can traverse.
Returns:
Integer: the broadcast radius.
"""
return self.__broadcast_radius
def __set_broadcast_radius(self, br_radius):
"""
        Sets the broadcast radius. The broadcast radius is the maximum number of hops a broadcast transmission can traverse.
Args:
br_radius (Integer): the new broadcast radius.
"""
self.__broadcast_radius = br_radius
def __get_64bit_addr(self):
"""
Returns the 64-bit destination address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit destination address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit destination address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit destination address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit destination address.
Returns:
:class:`XBee16BitAddress`: the 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit destination address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
x64bit_dest_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit destination address."""
x16bit_dest_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit destination address."""
transmit_options = property(__get_transmit_options, __set_transmit_options)
"""Integer. Transmit options bitfield."""
broadcast_radius = property(__get_broadcast_radius, __set_broadcast_radius)
"""Integer. Broadcast radius."""
rf_data = property(__get_rf_data, __set_rf_data)
"""Bytearray. RF data to send."""
class TransmitStatusPacket(XBeeAPIPacket):
"""
This class represents a transmit status packet. Packet is built using the
parameters of the constructor or providing a valid raw byte array.
When a Transmit Request is completed, the module sends a transmit status
message. This message will indicate if the packet was transmitted
successfully or if there was a failure.
This packet is the response to standard and explicit transmit requests.
.. seealso::
| :class:`.TransmitPacket`
"""
__MIN_PACKET_LENGTH = 11
def __init__(self, frame_id, x16bit_addr, transmit_retry_count, transmit_status=TransmitStatus.SUCCESS,
discovery_status=DiscoveryStatus.NO_DISCOVERY_OVERHEAD):
"""
Class constructor. Instantiates a new :class:`.TransmitStatusPacket` object with the provided parameters.
Args:
frame_id (Integer): the frame ID of the packet.
x16bit_addr (:class:`.XBee16BitAddress`): 16-bit network address the packet was delivered to.
transmit_retry_count (Integer): the number of application transmission retries that took place.
transmit_status (:class:`.TransmitStatus`, optional): transmit status. Default: SUCCESS. Optional.
            discovery_status (:class:`.DiscoveryStatus`, optional): discovery status. Default: NO_DISCOVERY_OVERHEAD.
Optional.
Raises:
ValueError: if ``frame_id`` is less than 0 or greater than 255.
.. seealso::
| :class:`.DiscoveryStatus`
| :class:`.TransmitStatus`
| :class:`.XBee16BitAddress`
| :class:`.XBeeAPIPacket`
"""
if frame_id < 0 or frame_id > 255:
raise ValueError("Frame id must be between 0 and 255.")
super().__init__(ApiFrameType.TRANSMIT_STATUS)
self._frame_id = frame_id
self.__x16bit_addr = x16bit_addr
self.__transmit_retry_count = transmit_retry_count
self.__transmit_status = transmit_status
self.__discovery_status = discovery_status
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.TransmitStatusPacket`
Raises:
InvalidPacketException: if the bytearray length is less than 11. (start delim. + length (2 bytes) + frame
type + frame id + 16bit addr. + transmit retry count + delivery status + discovery status + checksum =
11 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.TRANSMIT_STATUS`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=TransmitStatusPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.TRANSMIT_STATUS.code:
raise InvalidPacketException("This packet is not a transmit status packet.")
return TransmitStatusPacket(raw[4], XBee16BitAddress(raw[5:7]), raw[7],
TransmitStatus.get(raw[8]), DiscoveryStatus.get(raw[9]))
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = self.__x16bit_addr.address
ret.append(self.__transmit_retry_count)
ret.append(self.__transmit_status.code)
ret.append(self.__discovery_status.code)
return ret
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.TRANS_R_COUNT: self.__transmit_retry_count,
DictKeys.TS_STATUS: self.__transmit_status,
DictKeys.DS_STATUS: self.__discovery_status}
def __get_16bit_addr(self):
"""
Returns the 16-bit destination address.
Returns:
:class:`.XBee16BitAddress`: the 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit destination address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
def __get_transmit_status(self):
"""
Returns the transmit status.
Returns:
:class:`.TransmitStatus`: the transmit status.
.. seealso::
| :class:`.TransmitStatus`
"""
return self.__transmit_status
def __set_transmit_status(self, transmit_status):
"""
Sets the transmit status.
Args:
transmit_status (:class:`.TransmitStatus`): the new transmit status to set.
.. seealso::
| :class:`.TransmitStatus`
"""
self.__transmit_status = transmit_status
def __get_transmit_retry_count(self):
"""
Returns the transmit retry count.
Returns:
Integer: the transmit retry count.
"""
return self.__transmit_retry_count
def __set_transmit_retry_count(self, transmit_retry_count):
"""
Sets the transmit retry count.
Args:
transmit_retry_count (Integer): the new transmit retry count.
"""
self.__transmit_retry_count = transmit_retry_count
def __get_discovery_status(self):
"""
Returns the discovery status.
Returns:
:class:`.DiscoveryStatus`: the discovery status.
.. seealso::
| :class:`.DiscoveryStatus`
"""
return self.__discovery_status
def __set_discovery_status(self, discovery_status):
"""
Sets the discovery status.
Args:
discovery_status (:class:`.DiscoveryStatus`): the new discovery status to set.
.. seealso::
| :class:`.DiscoveryStatus`
"""
self.__discovery_status = discovery_status
x16bit_dest_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit destination address."""
transmit_retry_count = property(__get_transmit_retry_count, __set_transmit_retry_count)
"""Integer. Transmit retry count value."""
transmit_status = property(__get_transmit_status, __set_transmit_status)
""":class:`.TransmitStatus`. Transmit status."""
discovery_status = property(__get_discovery_status, __set_discovery_status)
""":class:`.DiscoveryStatus`. Discovery status."""
class ModemStatusPacket(XBeeAPIPacket):
"""
This class represents a modem status packet. Packet is built using the
parameters of the constructor or providing a valid API raw byte array.
RF module status messages are sent from the module in response to specific
conditions and indicates the state of the modem in that moment.
.. seealso::
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 6
def __init__(self, modem_status):
"""
Class constructor. Instantiates a new :class:`.ModemStatusPacket` object with the provided parameters.
Args:
modem_status (:class:`.ModemStatus`): the modem status event.
.. seealso::
| :class:`.ModemStatus`
| :class:`.XBeeAPIPacket`
"""
super().__init__(ApiFrameType.MODEM_STATUS)
self.__modem_status = modem_status
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.ModemStatusPacket`.
Raises:
InvalidPacketException: if the bytearray length is less than 6. (start delim. + length (2 bytes) + frame
type + modem status + checksum = 6 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.MODEM_STATUS`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=ModemStatusPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.MODEM_STATUS.code:
raise InvalidPacketException("This packet is not a modem status packet.")
return ModemStatusPacket(ModemStatus.get(raw[4]))
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return False
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
           | :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
return bytearray([self.__modem_status.code])
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.MODEM_STATUS: self.__modem_status}
def __get_modem_status(self):
"""
Returns the modem status event.
Returns:
:class:`.ModemStatus`: The modem status event.
.. seealso::
| :class:`.ModemStatus`
"""
return self.__modem_status
def __set_modem_status(self, modem_status):
"""
Sets the modem status event.
Args:
modem_status (:class:`.ModemStatus`): the new modem status event to set.
.. seealso::
| :class:`.ModemStatus`
"""
self.__modem_status = modem_status
modem_status = property(__get_modem_status, __set_modem_status)
""":class:`.ModemStatus`. Modem status event."""
class IODataSampleRxIndicatorPacket(XBeeAPIPacket):
"""
This class represents an IO data sample RX indicator packet. Packet is built
using the parameters of the constructor or providing a valid API byte array.
When the module receives an IO sample frame from a remote device, it
sends the sample out the UART using this frame type (when ``AO=0``). Only modules
running API firmware will send IO samples out the UART.
Among received data, some options can also be received indicating
transmission parameters.
.. seealso::
| :class:`.XBeeAPIPacket`
| :class:`.ReceiveOptions`
"""
__MIN_PACKET_LENGTH = 20
def __init__(self, x64bit_addr, x16bit_addr, receive_options, rf_data=None):
"""
Class constructor. Instantiates a new :class:`.IODataSampleRxIndicatorPacket` object with the provided
parameters.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit source address.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit source address.
receive_options (Integer): bitfield indicating the receive options.
rf_data (Bytearray, optional): received RF data. Optional.
Raises:
            ValueError: if ``rf_data`` is not ``None`` and it is not valid to create an :class:`.IOSample`.
.. seealso::
| :class:`.IOSample`
| :class:`.ReceiveOptions`
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.XBeeAPIPacket`
"""
super().__init__(ApiFrameType.IO_DATA_SAMPLE_RX_INDICATOR)
self.__x64bit_addr = x64bit_addr
self.__x16bit_addr = x16bit_addr
self.__receive_options = receive_options
self.__rf_data = rf_data
self.__io_sample = IOSample(rf_data) if rf_data is not None and len(rf_data) >= 5 else None
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.IODataSampleRxIndicatorPacket`.
Raises:
InvalidPacketException: if the bytearray length is less than 20. (start delim. + length (2 bytes) + frame
type + 64bit addr. + 16bit addr. + rf data (5 bytes) + checksum = 20 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.IO_DATA_SAMPLE_RX_INDICATOR`.
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=IODataSampleRxIndicatorPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.IO_DATA_SAMPLE_RX_INDICATOR.code:
raise InvalidPacketException("This packet is not an IO data sample RX indicator packet.")
return IODataSampleRxIndicatorPacket(XBee64BitAddress(raw[4:12]), XBee16BitAddress(raw[12:14]),
raw[14], raw[15:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return False
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
ret = self.__x64bit_addr.address
ret += self.__x16bit_addr.address
        ret.append(self.__receive_options)
if self.__rf_data is not None:
ret += self.__rf_data
return ret
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
base = {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.RECEIVE_OPTIONS: self.__receive_options}
if self.__io_sample is not None:
base[DictKeys.NUM_SAMPLES] = 1
base[DictKeys.DIGITAL_MASK] = self.__io_sample.digital_mask
base[DictKeys.ANALOG_MASK] = self.__io_sample.analog_mask
# Digital values
for i in range(16):
if self.__io_sample.has_digital_value(IOLine.get(i)):
base[IOLine.get(i).description + "digital value"] = \
utils.hex_to_string(self.__io_sample.get_digital_value(IOLine.get(i)))
# Analog values
for i in range(6):
if self.__io_sample.has_analog_value(IOLine.get(i)):
base[IOLine.get(i).description + "analog value"] = \
utils.hex_to_string(self.__io_sample.get_analog_value(IOLine.get(i)))
# Power supply
if self.__io_sample.has_power_supply_value():
base["Power supply value "] = "%02X" % self.__io_sample.power_supply_value
elif self.__rf_data is not None:
base[DictKeys.RF_DATA] = utils.hex_to_string(self.__rf_data)
return base
def is_broadcast(self):
"""
Override method.
.. seealso::
| :meth:`XBeeAPIPacket.is_broadcast`
"""
return utils.is_bit_enabled(self.__receive_options, 1)
def __get_64bit_addr(self):
"""
Returns the 64-bit source address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit source address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit source address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit source address
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit source address.
Returns:
:class:`.XBee16BitAddress`: the 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit source address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
def __get_options(self):
"""
Returns the receive options bitfield.
Returns:
Integer: the receive options bitfield.
.. seealso::
| :class:`.ReceiveOptions`
"""
return self.__receive_options
def __set_options(self, receive_options):
"""
Sets the receive options bitfield.
Args:
receive_options (Integer): the new receive options bitfield.
.. seealso::
| :class:`.ReceiveOptions`
"""
self.__receive_options = receive_options
def __get_rf_data(self):
"""
Returns the received RF data.
Returns:
Bytearray: the received RF data.
"""
if self.__rf_data is None:
return None
return self.__rf_data.copy()
def __set_rf_data(self, rf_data):
"""
Sets the received RF data.
Args:
rf_data (Bytearray): the new received RF data.
"""
if rf_data is None:
self.__rf_data = None
else:
self.__rf_data = rf_data.copy()
# Modify the ioSample accordingly
if rf_data is not None and len(rf_data) >= 5:
self.__io_sample = IOSample(self.__rf_data)
else:
self.__io_sample = None
def __get_io_sample(self):
"""
Returns the IO sample corresponding to the data contained in the packet.
Returns:
:class:`.IOSample`: the IO sample of the packet, ``None`` if the packet has not any data or if the
sample could not be generated correctly.
.. seealso::
| :class:`.IOSample`
"""
return self.__io_sample
def __set_io_sample(self, io_sample):
"""
Sets the IO sample of the packet.
Args:
io_sample (:class:`.IOSample`): the new IO sample to set.
.. seealso::
| :class:`.IOSample`
"""
self.__io_sample = io_sample
x64bit_source_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit source address."""
x16bit_source_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit source address."""
receive_options = property(__get_options, __set_options)
"""Integer. Receive options bitfield."""
rf_data = property(__get_rf_data, __set_rf_data)
"""Bytearray. Received RF data."""
io_sample = property(__get_io_sample, __set_io_sample)
""":class:`.IOSample`: IO sample corresponding to the data contained in the packet."""
class ExplicitAddressingPacket(XBeeAPIPacket):
"""
This class represents an explicit addressing command packet. Packet is
built using the parameters of the constructor or providing a valid API
payload.
Allows application layer fields (endpoint and cluster ID) to be
specified for a data transmission. Similar to the transmit request, but
also requires application layer addressing fields to be specified
(endpoints, cluster ID, profile ID). An explicit addressing request API
frame causes the module to send data as an RF packet to the specified
destination, using the specified source and destination endpoints, cluster
ID, and profile ID.
The 64-bit destination address should be set to ``0x000000000000FFFF`` for
a broadcast transmission (to all devices).
The coordinator can be addressed by either setting the 64-bit address to all
``0x00`` and the 16-bit address to ``0xFFFE``, OR by setting the 64-bit
address to the coordinator's 64-bit address and the 16-bit address to ``0x0000``.
For all other transmissions, setting the 16-bit address to the correct
16-bit address can help improve performance when transmitting to
multiple destinations.
If a 16-bit address is not known, this field should be set to
``0xFFFE`` (unknown).
The transmit status frame ( :attr:`.ApiFrameType.TRANSMIT_STATUS`) will
    indicate the discovered 16-bit address, if successful (see :class:`.TransmitStatusPacket`).
The broadcast radius can be set from ``0`` up to ``NH``. If set
to ``0``, the value of ``NH`` specifies the broadcast radius
(recommended). This parameter is only used for broadcast transmissions.
The maximum number of payload bytes can be read with the ``NP``
command. Note: if source routing is used, the RF payload will be reduced
by two bytes per intermediate hop in the source route.
Several transmit options can be set using the transmit options bitfield.
.. seealso::
| :class:`.TransmitOptions`
| :attr:`.XBee16BitAddress.COORDINATOR_ADDRESS`
| :attr:`.XBee16BitAddress.UNKNOWN_ADDRESS`
| :attr:`.XBee64BitAddress.BROADCAST_ADDRESS`
| :attr:`.XBee64BitAddress.COORDINATOR_ADDRESS`
| :class:`.ExplicitRXIndicatorPacket`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 24
def __init__(self, frame_id, x64bit_addr, x16bit_addr, source_endpoint, dest_endpoint, cluster_id,
profile_id, broadcast_radius=0x00, transmit_options=0x00, rf_data=None):
"""
        Class constructor. Instantiates a new :class:`.ExplicitAddressingPacket` object with the provided parameters.
Args:
frame_id (Integer): the frame ID of the packet.
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit address.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit address.
source_endpoint (Integer): source endpoint. 1 byte.
dest_endpoint (Integer): destination endpoint. 1 byte.
cluster_id (Integer): cluster id. Must be between 0 and 0xFFFF.
profile_id (Integer): profile id. Must be between 0 and 0xFFFF.
broadcast_radius (Integer): maximum number of hops a broadcast transmission can occur.
transmit_options (Integer): bitfield of supported transmission options.
rf_data (Bytearray, optional): RF data that is sent to the destination device. Optional.
Raises:
            ValueError: if ``frame_id``, ``source_endpoint`` or ``dest_endpoint`` are less than 0 or greater than 255.
            ValueError: if ``cluster_id`` or ``profile_id`` are less than 0 or greater than 0xFFFF.
.. seealso::
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.TransmitOptions`
| :class:`.XBeeAPIPacket`
"""
if frame_id < 0 or frame_id > 255:
raise ValueError("Frame id must be between 0 and 255.")
if source_endpoint < 0 or source_endpoint > 255:
raise ValueError("Source endpoint must be between 0 and 255.")
if dest_endpoint < 0 or dest_endpoint > 255:
raise ValueError("Destination endpoint must be between 0 and 255.")
if cluster_id < 0 or cluster_id > 0xFFFF:
raise ValueError("Cluster id must be between 0 and 0xFFFF.")
if profile_id < 0 or profile_id > 0xFFFF:
raise ValueError("Profile id must be between 0 and 0xFFFF.")
super().__init__(ApiFrameType.EXPLICIT_ADDRESSING)
self._frame_id = frame_id
        self.__x64bit_addr = x64bit_addr
        self.__x16bit_addr = x16bit_addr
self.__source_endpoint = source_endpoint
self.__dest_endpoint = dest_endpoint
self.__cluster_id = cluster_id
self.__profile_id = profile_id
self.__broadcast_radius = broadcast_radius
self.__transmit_options = transmit_options
self.__rf_data = rf_data
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.ExplicitAddressingPacket`.
Raises:
InvalidPacketException: if the bytearray length is less than 24. (start delim. + length (2 bytes) + frame
type + frame ID + 64bit addr. + 16bit addr. + source endpoint + dest. endpoint + cluster ID (2 bytes) +
profile ID (2 bytes) + broadcast radius + transmit options + checksum = 24 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is different than :attr:`.ApiFrameType.EXPLICIT_ADDRESSING`
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=ExplicitAddressingPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.EXPLICIT_ADDRESSING.code:
raise InvalidPacketException("This packet is not an explicit addressing packet")
return ExplicitAddressingPacket(raw[4], XBee64BitAddress(raw[5:13]), XBee16BitAddress(raw[13:15]),
raw[15], raw[16], utils.bytes_to_int(raw[17:19]),
utils.bytes_to_int(raw[19:21]), raw[21], raw[22], raw[23:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return True
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
        raw = self.__x64bit_addr.address
        raw += self.__x16bit_addr.address
raw.append(self.__source_endpoint)
raw.append(self.__dest_endpoint)
raw += utils.int_to_bytes(self.__cluster_id, 2)
raw += utils.int_to_bytes(self.__profile_id, 2)
raw.append(self.__broadcast_radius)
raw.append(self.__transmit_options)
if self.__rf_data is not None:
raw += self.__rf_data
return raw
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
        return {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
                DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.SOURCE_ENDPOINT: self.__source_endpoint,
DictKeys.DEST_ENDPOINT: self.__dest_endpoint,
DictKeys.CLUSTER_ID: self.__cluster_id,
DictKeys.PROFILE_ID: self.__profile_id,
DictKeys.BROADCAST_RADIUS: self.__broadcast_radius,
DictKeys.TRANSMIT_OPTIONS: self.__transmit_options,
DictKeys.RF_DATA: self.__rf_data}
def __get_source_endpoint(self):
"""
Returns the source endpoint of the transmission.
Returns:
Integer: the source endpoint of the transmission.
"""
        return self.__source_endpoint
def __set_source_endpoint(self, source_endpoint):
"""
Sets the source endpoint of the transmission.
Args:
source_endpoint (Integer): the new source endpoint of the transmission.
"""
self.__source_endpoint = source_endpoint
def __get_dest_endpoint(self):
"""
Returns the destination endpoint of the transmission.
Returns:
Integer: the destination endpoint of the transmission.
"""
return self.__dest_endpoint
def __set_dest_endpoint(self, dest_endpoint):
"""
Sets the destination endpoint of the transmission.
Args:
dest_endpoint (Integer): the new destination endpoint of the transmission.
"""
self.__dest_endpoint = dest_endpoint
def __get_cluster_id(self):
"""
Returns the cluster ID of the transmission.
Returns:
Integer: the cluster ID of the transmission.
"""
return self.__cluster_id
def __set_cluster_id(self, cluster_id):
"""
Sets the cluster ID of the transmission.
Args:
cluster_id (Integer): the new cluster ID of the transmission.
"""
self.__cluster_id = cluster_id
def __get_profile_id(self):
"""
Returns the profile ID of the transmission.
        Returns:
Integer: the profile ID of the transmission.
"""
return self.__profile_id
def __set_profile_id(self, profile_id):
"""
Sets the profile ID of the transmission.
        Args:
profile_id (Integer): the new profile ID of the transmission.
"""
self.__profile_id = profile_id
def __get_rf_data(self):
"""
Returns the RF data to send.
Returns:
Bytearray: the RF data to send.
"""
if self.__rf_data is None:
return None
return self.__rf_data.copy()
def __set_rf_data(self, rf_data):
"""
Sets the RF data to send.
Args:
rf_data (Bytearray): the new RF data to send.
"""
if rf_data is None:
self.__rf_data = None
else:
self.__rf_data = rf_data.copy()
def __get_transmit_options(self):
"""
Returns the transmit options bitfield.
Returns:
Integer: the transmit options bitfield.
.. seealso::
| :class:`.TransmitOptions`
"""
return self.__transmit_options
def __set_transmit_options(self, transmit_options):
"""
Sets the transmit options bitfield.
Args:
transmit_options (Integer): the new transmit options bitfield.
.. seealso::
| :class:`.TransmitOptions`
"""
self.__transmit_options = transmit_options
def __get_broadcast_radius(self):
"""
        Returns the broadcast radius. The broadcast radius is the maximum number of hops a broadcast transmission can traverse.
Returns:
Integer: the broadcast radius.
"""
return self.__broadcast_radius
def __set_broadcast_radius(self, br_radius):
"""
        Sets the broadcast radius. The broadcast radius is the maximum number of hops a broadcast transmission can traverse.
Args:
br_radius (Integer): the new broadcast radius.
"""
self.__broadcast_radius = br_radius
def __get_64bit_addr(self):
"""
Returns the 64-bit destination address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit destination address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit destination address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit destination address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit destination address.
Returns:
:class:`XBee16BitAddress`: the 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit destination address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
x64bit_dest_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit destination address."""
x16bit_dest_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit destination address."""
transmit_options = property(__get_transmit_options, __set_transmit_options)
"""Integer. Transmit options bitfield."""
broadcast_radius = property(__get_broadcast_radius, __set_broadcast_radius)
"""Integer. Broadcast radius."""
source_endpoint = property(__get_source_endpoint, __set_source_endpoint)
"""Integer. Source endpoint of the transmission."""
dest_endpoint = property(__get_dest_endpoint, __set_dest_endpoint)
"""Integer. Destination endpoint of the transmission."""
cluster_id = property(__get_cluster_id, __set_cluster_id)
"""Integer. Cluster ID of the transmission."""
profile_id = property(__get_profile_id, __set_profile_id)
"""Integer. Profile ID of the transmission."""
rf_data = property(__get_rf_data, __set_rf_data)
"""Bytearray. RF data to send."""
class ExplicitRXIndicatorPacket(XBeeAPIPacket):
"""
This class represents an explicit RX indicator packet. Packet is
built using the parameters of the constructor or providing a valid API
payload.
When the modem receives an RF packet it is sent out the UART using this
message type (when ``AO=1``).
This packet is received when external devices send explicit addressing
packets to this module.
Among received data, some options can also be received indicating
transmission parameters.
.. seealso::
| :class:`.XBeeReceiveOptions`
| :class:`.ExplicitAddressingPacket`
| :class:`.XBeeAPIPacket`
"""
__MIN_PACKET_LENGTH = 22
def __init__(self, x64bit_addr, x16bit_addr, source_endpoint,
dest_endpoint, cluster_id, profile_id, receive_options, rf_data=None):
"""
Class constructor. Instantiates a new :class:`.ExplicitRXIndicatorPacket` object with the provided parameters.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit source address.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit source address.
source_endpoint (Integer): source endpoint. 1 byte.
dest_endpoint (Integer): destination endpoint. 1 byte.
cluster_id (Integer): cluster ID. Must be between 0 and 0xFFFF.
profile_id (Integer): profile ID. Must be between 0 and 0xFFFF.
receive_options (Integer): bitfield indicating the receive options.
rf_data (Bytearray, optional): received RF data. Optional.
Raises:
            ValueError: if ``source_endpoint`` or ``dest_endpoint`` are less than 0 or greater than 255.
            ValueError: if ``cluster_id`` or ``profile_id`` are less than 0 or greater than 0xFFFF.
.. seealso::
| :class:`.XBee16BitAddress`
| :class:`.XBee64BitAddress`
| :class:`.XBeeReceiveOptions`
| :class:`.XBeeAPIPacket`
"""
if source_endpoint < 0 or source_endpoint > 255:
raise ValueError("Source endpoint must be between 0 and 255.")
if dest_endpoint < 0 or dest_endpoint > 255:
raise ValueError("Destination endpoint must be between 0 and 255.")
if cluster_id < 0 or cluster_id > 0xFFFF:
raise ValueError("Cluster id must be between 0 and 0xFFFF.")
if profile_id < 0 or profile_id > 0xFFFF:
raise ValueError("Profile id must be between 0 and 0xFFFF.")
super().__init__(ApiFrameType.EXPLICIT_RX_INDICATOR)
self.__x64bit_addr = x64bit_addr
self.__x16bit_addr = x16bit_addr
self.__source_endpoint = source_endpoint
self.__dest_endpoint = dest_endpoint
self.__cluster_id = cluster_id
self.__profile_id = profile_id
self.__receive_options = receive_options
self.__rf_data = rf_data
@staticmethod
def create_packet(raw, operating_mode):
"""
Override method.
Returns:
:class:`.ExplicitRXIndicatorPacket`.
Raises:
InvalidPacketException: if the bytearray length is less than 22. (start delim. + length (2 bytes) + frame
type + 64bit addr. + 16bit addr. + source endpoint + dest. endpoint + cluster ID (2 bytes) +
profile ID (2 bytes) + receive options + checksum = 22 bytes).
InvalidPacketException: if the length field of 'raw' is different than its real length. (length field: bytes
2 and 3)
InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`.
InvalidPacketException: if the calculated checksum is different than the checksum field value (last byte).
InvalidPacketException: if operating_mode mode is not supported.
InvalidPacketException: if the frame type is different than :attr:`.ApiFrameType.EXPLICIT_RX_INDICATOR`
.. seealso::
| :meth:`.XBeePacket.create_packet`
| :meth:`.XBeeAPIPacket._check_api_packet`
"""
if operating_mode != OperatingMode.ESCAPED_API_MODE and operating_mode != OperatingMode.API_MODE:
raise InvalidOperatingModeException(operating_mode.name + " is not supported.")
raw = XBeeAPIPacket._unescape_data(raw) if operating_mode == OperatingMode.ESCAPED_API_MODE else raw
XBeeAPIPacket._check_api_packet(raw, min_length=ExplicitRXIndicatorPacket.__MIN_PACKET_LENGTH)
if raw[3] != ApiFrameType.EXPLICIT_RX_INDICATOR.code:
raise InvalidPacketException("This packet is not an explicit RX indicator packet.")
return ExplicitRXIndicatorPacket(XBee64BitAddress(raw[4:12]), XBee16BitAddress(raw[12:14]), raw[14], raw[15],
utils.bytes_to_int(raw[16:18]), utils.bytes_to_int(raw[18:20]),
raw[20], raw[21:-1])
def needs_id(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket.needs_id`
"""
return False
def _get_api_packet_spec_data(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data`
"""
raw = self.__x64bit_addr.address
raw += self.__x16bit_addr.address
raw.append(self.__source_endpoint)
raw.append(self.__dest_endpoint)
raw += utils.int_to_bytes(self.__cluster_id)
raw += utils.int_to_bytes(self.__profile_id)
raw.append(self.__receive_options)
if self.__rf_data is not None:
raw += self.__rf_data
return raw
def _get_api_packet_spec_data_dict(self):
"""
Override method.
.. seealso::
| :meth:`.XBeeAPIPacket._get_api_packet_spec_data_dict`
"""
return {DictKeys.X64BIT_ADDR: self.__x64bit_addr.address,
DictKeys.X16BIT_ADDR: self.__x16bit_addr.address,
DictKeys.SOURCE_ENDPOINT: self.__source_endpoint,
DictKeys.DEST_ENDPOINT: self.__dest_endpoint,
DictKeys.CLUSTER_ID: self.__cluster_id,
DictKeys.PROFILE_ID: self.__profile_id,
DictKeys.RECEIVE_OPTIONS: self.__receive_options,
DictKeys.RF_DATA: self.__rf_data}
def __get_64bit_addr(self):
"""
Returns the 64-bit source address.
Returns:
:class:`.XBee64BitAddress`: the 64-bit source address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
return self.__x64bit_addr
def __set_64bit_addr(self, x64bit_addr):
"""
Sets the 64-bit source address.
Args:
x64bit_addr (:class:`.XBee64BitAddress`): the new 64-bit source address.
.. seealso::
| :class:`.XBee64BitAddress`
"""
self.__x64bit_addr = x64bit_addr
def __get_16bit_addr(self):
"""
Returns the 16-bit source address.
Returns:
:class:`.XBee16BitAddress`: the 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
return self.__x16bit_addr
def __set_16bit_addr(self, x16bit_addr):
"""
Sets the 16-bit source address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit source address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr
def __get_source_endpoint(self):
"""
Returns the source endpoint of the transmission.
Returns:
Integer: the source endpoint of the transmission.
"""
        return self.__source_endpoint
def __set_source_endpoint(self, source_endpoint):
"""
Sets the source endpoint of the transmission.
Args:
source_endpoint (Integer): the new source endpoint of the transmission.
"""
self.__source_endpoint = source_endpoint
def __get_dest_endpoint(self):
"""
Returns the destination endpoint of the transmission.
Returns:
Integer: the destination endpoint of the transmission.
"""
return self.__dest_endpoint
def __set_dest_endpoint(self, dest_endpoint):
"""
Sets the destination endpoint of the transmission.
Args:
dest_endpoint (Integer): the new destination endpoint of the transmission.
"""
self.__dest_endpoint = dest_endpoint
def __get_cluster_id(self):
"""
Returns the cluster ID of the transmission.
Returns:
Integer: the cluster ID of the transmission.
"""
return self.__cluster_id
def __set_cluster_id(self, cluster_id):
"""
Sets the cluster ID of the transmission.
Args:
cluster_id (Integer): the new cluster ID of the transmission.
"""
self.__cluster_id = cluster_id
def __get_profile_id(self):
"""
Returns the profile ID of the transmission.
        Returns:
Integer: the profile ID of the transmission.
"""
return self.__profile_id
def __set_profile_id(self, profile_id):
"""
Sets the profile ID of the transmission.
        Args:
profile_id (Integer): the new profile ID of the transmission.
"""
self.__profile_id = profile_id
def __get_options(self):
"""
Returns the receive options bitfield.
Returns:
Integer: the receive options bitfield.
.. seealso::
| :class:`.XBeeReceiveOptions`
"""
return self.__receive_options
def __set_options(self, receive_options):
"""
Sets the receive options bitfield.
Args:
receive_options (Integer): the new receive options bitfield.
.. seealso::
| :class:`.XBeeReceiveOptions`
"""
self.__receive_options = receive_options
def __get_rf_data(self):
"""
Returns the received RF data.
Returns:
Bytearray: the received RF data.
"""
if self.__rf_data is None:
return None
return self.__rf_data.copy()
def __set_rf_data(self, rf_data):
"""
Sets the received RF data.
Args:
rf_data (Bytearray): the new received RF data.
"""
if rf_data is None:
self.__rf_data = None
else:
self.__rf_data = rf_data.copy()
x64bit_source_addr = property(__get_64bit_addr, __set_64bit_addr)
""":class:`.XBee64BitAddress`. 64-bit source address."""
x16bit_source_addr = property(__get_16bit_addr, __set_16bit_addr)
""":class:`.XBee16BitAddress`. 16-bit source address."""
receive_options = property(__get_options, __set_options)
"""Integer. Receive options bitfield."""
source_endpoint = property(__get_source_endpoint, __set_source_endpoint)
"""Integer. Source endpoint of the transmission."""
dest_endpoint = property(__get_dest_endpoint, __set_dest_endpoint)
"""Integer. Destination endpoint of the transmission."""
cluster_id = property(__get_cluster_id, __set_cluster_id)
"""Integer. Cluster ID of the transmission."""
profile_id = property(__get_profile_id, __set_profile_id)
"""Integer. Profile ID of the transmission."""
rf_data = property(__get_rf_data, __set_rf_data)
"""Bytearray. Received RF data."""
| diescalo/rst_test | digi/xbee/packets/common.py | Python | mpl-2.0 | 96,238 |
from django.contrib.auth.decorators import permission_required
from django.contrib.auth import logout
from django.conf import settings
from django.http import HttpResponse, JsonResponse, Http404
from django.shortcuts import render, reverse, get_object_or_404
import yaml
from mittab.apps.tab.archive import ArchiveExporter
from mittab.apps.tab.forms import SchoolForm, RoomForm, UploadDataForm, ScratchForm, \
SettingsForm
from mittab.apps.tab.helpers import redirect_and_flash_error, \
redirect_and_flash_success
from mittab.apps.tab.models import *
from mittab.libs import cache_logic
from mittab.libs.tab_logic import TabFlags
from mittab.libs.data_import import import_judges, import_rooms, import_teams, \
import_scratches
def index(request):
number_teams = Team.objects.count()
number_judges = Judge.objects.count()
number_schools = School.objects.count()
number_debaters = Debater.objects.count()
number_rooms = Room.objects.count()
school_list = [(school.pk, school.name) for school in School.objects.all()]
judge_list = [(judge.pk, judge.name) for judge in Judge.objects.all()]
team_list = [(team.pk, team.display_backend) for team in Team.objects.all()]
debater_list = [(debater.pk, debater.display)
for debater in Debater.objects.all()]
room_list = [(room.pk, room.name) for room in Room.objects.all()]
return render(request, "common/index.html", locals())
def tab_logout(request, *args):
logout(request)
return redirect_and_flash_success(request,
"Successfully logged out",
path="/")
def render_403(request, *args, **kwargs):
response = render(request, "common/403.html")
response.status_code = 403
return response
def render_404(request, *args, **kwargs):
response = render(request, "common/404.html")
response.status_code = 404
return response
def render_500(request, *args, **kwargs):
response = render(request, "common/500.html")
response.status_code = 500
return response
# View for manually adding scratches
def add_scratch(request):
if request.method == "POST":
form = ScratchForm(request.POST)
if form.is_valid():
form.save()
return redirect_and_flash_success(request,
"Scratch created successfully")
else:
form = ScratchForm(initial={"scratch_type": 0})
return render(request, "common/data_entry.html", {
"title": "Adding Scratch",
"form": form
})
#### BEGIN SCHOOL ###
#Three views for entering, viewing, and editing schools
def view_schools(request):
    # Build a list of (id, school_name, flags, flag_symbols) tuples; schools carry no flags
c_schools = [(s.pk, s.name, 0, "") for s in School.objects.all()]
return render(
request, "common/list_data.html", {
"item_type": "school",
"title": "Viewing All Schools",
"item_list": c_schools
})
def view_school(request, school_id):
school_id = int(school_id)
try:
school = School.objects.get(pk=school_id)
except School.DoesNotExist:
return redirect_and_flash_error(request, "School not found")
if request.method == "POST":
form = SchoolForm(request.POST, instance=school)
if form.is_valid():
try:
form.save()
except ValueError:
return redirect_and_flash_error(
request,
"School name cannot be validated, most likely a non-existent school"
)
return redirect_and_flash_success(
request, "School {} updated successfully".format(
form.cleaned_data["name"]))
else:
form = SchoolForm(instance=school)
links = [("/school/" + str(school_id) + "/delete/", "Delete")]
return render(
request, "common/data_entry.html", {
"form": form,
"links": links,
"title": "Viewing School: %s" % (school.name)
})
def enter_school(request):
if request.method == "POST":
form = SchoolForm(request.POST)
if form.is_valid():
try:
form.save()
except ValueError:
return redirect_and_flash_error(
request,
"School name cannot be validated, most likely a duplicate school"
)
return redirect_and_flash_success(
request,
"School {} created successfully".format(
form.cleaned_data["name"]),
path="/")
else:
form = SchoolForm()
return render(request, "common/data_entry.html", {
"form": form,
"title": "Create School"
})
@permission_required("tab.school.can_delete", login_url="/403/")
def delete_school(request, school_id):
error_msg = None
try:
school_id = int(school_id)
school = School.objects.get(pk=school_id)
school.delete()
except School.DoesNotExist:
error_msg = "That school does not exist"
except Exception as e:
error_msg = str(e)
if error_msg:
return redirect_and_flash_error(request, error_msg)
return redirect_and_flash_success(request,
"School deleted successfully",
path="/")
#### END SCHOOL ###
#### BEGIN ROOM ###
def view_rooms(request):
def flags(room):
result = 0
if room.rank == 0:
result |= TabFlags.ROOM_ZERO_RANK
else:
result |= TabFlags.ROOM_NON_ZERO_RANK
return result
all_flags = [[TabFlags.ROOM_ZERO_RANK, TabFlags.ROOM_NON_ZERO_RANK]]
all_rooms = [(room.pk, room.name, flags(room),
TabFlags.flags_to_symbols(flags(room)))
for room in Room.objects.all()]
filters, symbol_text = TabFlags.get_filters_and_symbols(all_flags)
return render(
request, "common/list_data.html", {
"item_type": "room",
"title": "Viewing All Rooms",
"item_list": all_rooms,
"symbol_text": symbol_text,
"filters": filters
})
def view_room(request, room_id):
room_id = int(room_id)
try:
room = Room.objects.get(pk=room_id)
except Room.DoesNotExist:
return redirect_and_flash_error(request, "Room not found")
if request.method == "POST":
form = RoomForm(request.POST, instance=room)
if form.is_valid():
try:
form.save()
except ValueError:
return redirect_and_flash_error(
request,
"Room name cannot be validated, most likely a non-existent room"
)
return redirect_and_flash_success(
request, "School {} updated successfully".format(
form.cleaned_data["name"]))
else:
form = RoomForm(instance=room)
return render(request, "common/data_entry.html", {
"form": form,
"links": [],
"title": "Viewing Room: %s" % (room.name)
})
def enter_room(request):
if request.method == "POST":
form = RoomForm(request.POST)
if form.is_valid():
try:
form.save()
except ValueError:
return redirect_and_flash_error(
request,
"Room name cannot be validated, most likely a duplicate room"
)
return redirect_and_flash_success(
request,
"Room {} created successfully".format(
form.cleaned_data["name"]),
path="/")
else:
form = RoomForm()
return render(request, "common/data_entry.html", {
"form": form,
"title": "Create Room"
})
def batch_checkin(request):
rooms_and_checkins = []
round_numbers = list([i + 1 for i in range(TabSettings.get("tot_rounds"))])
for room in Room.objects.all():
checkins = []
for round_number in [0] + round_numbers: # 0 is for outrounds
checkins.append(room.is_checked_in_for_round(round_number))
rooms_and_checkins.append((room, checkins))
return render(request, "tab/room_batch_checkin.html", {
"rooms_and_checkins": rooms_and_checkins,
"round_numbers": round_numbers
})
@permission_required("tab.tab_settings.can_change", login_url="/403")
def room_check_in(request, room_id, round_number):
room_id, round_number = int(room_id), int(round_number)
if round_number < 0 or round_number > TabSettings.get("tot_rounds"):
# 0 is so that outrounds don't throw an error
raise Http404("Round does not exist")
room = get_object_or_404(Room, pk=room_id)
if request.method == "POST":
if not room.is_checked_in_for_round(round_number):
check_in = RoomCheckIn(room=room, round_number=round_number)
check_in.save()
elif request.method == "DELETE":
if room.is_checked_in_for_round(round_number):
check_ins = RoomCheckIn.objects.filter(room=room,
round_number=round_number)
check_ins.delete()
else:
raise Http404("Must be POST or DELETE")
return JsonResponse({"success": True})
@permission_required("tab.scratch.can_delete", login_url="/403/")
def delete_scratch(request, item_id, scratch_id):
try:
scratch_id = int(scratch_id)
scratch = Scratch.objects.get(pk=scratch_id)
scratch.delete()
except Scratch.DoesNotExist:
return redirect_and_flash_error(
request,
"This scratch does not exist, please try again with a valid id.")
return redirect_and_flash_success(request,
"Scratch deleted successfully",
path="/")
def view_scratches(request):
    # Build a list of (team_id, scratch_description, flags, symbols) tuples
c_scratches = [(s.team.pk, str(s), 0, "") for s in Scratch.objects.all()]
return render(
request, "common/list_data.html", {
"item_type": "team",
"title": "Viewing All Scratches for Teams",
"item_list": c_scratches
})
def get_settings_from_yaml():
default_settings = []
with open(settings.SETTING_YAML_PATH, "r") as stream:
default_settings = yaml.safe_load(stream)
to_return = []
for setting in default_settings:
tab_setting = TabSettings.objects.filter(key=setting["name"]).first()
if tab_setting:
if "type" in setting and setting["type"] == "boolean":
setting["value"] = tab_setting.value == 1
else:
setting["value"] = tab_setting.value
to_return.append(setting)
return to_return
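# Sketch of the YAML document get_settings_from_yaml() expects at
# settings.SETTING_YAML_PATH. The keys ("name", "type", "value") are inferred
# from the lookups above; the entries themselves are illustrative, not the
# real defaults:
#
#   - name: tot_rounds
#     value: 5
#   - name: some_boolean_setting
#     type: boolean
#     value: false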
### SETTINGS VIEWS ###
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def settings_form(request):
yaml_settings = get_settings_from_yaml()
if request.method == "POST":
_settings_form = SettingsForm(request.POST, settings=yaml_settings)
if _settings_form.is_valid():
_settings_form.save()
return redirect_and_flash_success(
request,
"Tab settings updated!",
path=reverse("settings_form")
)
return render( # Allows for proper validation checking
request, "tab/settings_form.html", {
"form": settings_form,
})
_settings_form = SettingsForm(settings=yaml_settings)
return render(
request, "tab/settings_form.html", {
"form": _settings_form,
})
def upload_data(request):
team_info = {"errors": [], "uploaded": False}
judge_info = {"errors": [], "uploaded": False}
room_info = {"errors": [], "uploaded": False}
scratch_info = {"errors": [], "uploaded": False}
if request.method == "POST":
form = UploadDataForm(request.POST, request.FILES)
if form.is_valid():
if "team_file" in request.FILES:
team_info["errors"] = import_teams.import_teams(
request.FILES["team_file"])
team_info["uploaded"] = True
if "judge_file" in request.FILES:
judge_info["errors"] = import_judges.import_judges(
request.FILES["judge_file"])
judge_info["uploaded"] = True
if "room_file" in request.FILES:
room_info["errors"] = import_rooms.import_rooms(
request.FILES["room_file"])
room_info["uploaded"] = True
if "scratch_file" in request.FILES:
scratch_info["errors"] = import_scratches.import_scratches(
request.FILES["scratch_file"])
scratch_info["uploaded"] = True
if not team_info["errors"] + judge_info["errors"] + \
room_info["errors"] + scratch_info["errors"]:
return redirect_and_flash_success(request,
"Data imported successfully")
else:
form = UploadDataForm()
return render(
request, "common/data_upload.html", {
"form": form,
"title": "Upload Input Files",
"team_info": team_info,
"judge_info": judge_info,
"room_info": room_info,
"scratch_info": scratch_info
})
def force_cache_refresh(request):
key = request.GET.get("key", "")
cache_logic.invalidate_cache(key)
redirect_to = request.GET.get("next", "/")
return redirect_and_flash_success(request,
"Refreshed!",
path=redirect_to)
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def generate_archive(request):
tournament_name = request.META["SERVER_NAME"].split(".")[0]
filename = tournament_name + ".xml"
xml = ArchiveExporter(tournament_name).export_tournament()
response = HttpResponse(xml, content_type="text/xml; charset=utf-8")
response["Content-Length"] = len(xml)
response["Content-Disposition"] = "attachment; filename=%s" % filename
return response
| jolynch/mit-tab | mittab/apps/tab/views.py | Python | mit | 14,458 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from dbconf import *
import database as db
import os
import re
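# Emit one lighttpd virtual-host block per enabled website found in the
# database: create the per-site log directory, fix its ownership and
# permissions, then print the $HTTP["host"] stanza to stdout (presumably
# redirected into a lighttpd include file by the caller).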
confs = db.query("""SELECT domains.name, websites.config, users.login
FROM domains, websites, users
WHERE domains.id = websites.id_domains
AND users.id = websites.id_users
AND websites.enabled = 'yes'""")
for conf in confs:
path = '/home/%s/%s' % (conf.login, conf.name)
log_path = '/home/%s/logs/%s' % (conf.login, conf.name)
os.system('mkdir -p "%s"' % log_path)
os.system('chown -R %s:www-data "/home/%s/logs/"' % (conf.login, conf.login))
os.system('chmod -R ug+rwx,o-rwx "/home/%s/logs/"' % conf.login)
print """
$HTTP["host"] == "%s" {
server.document-root = "%s/"
accesslog.filename = "%s/access.log"
%s
}""" % (conf.name, path, log_path, re.sub("\n", "\n\t", conf.config))
| sr/beadmin | lighttpd-mysql-config.py | Python | gpl-3.0 | 815 |