text
stringlengths 0
1.05M
| meta
dict |
---|---|
# $Id$
"""Internet Control Message Protocol for IPv6."""
import dpkt, ip6
# ICMPv6 message type numbers (RFC 4443 / RFC 2710 / RFC 2292).
# Error messages occupy types < 128.
ICMP6_DST_UNREACH = 1 # dest unreachable, codes:
ICMP6_PACKET_TOO_BIG = 2 # packet too big
ICMP6_TIME_EXCEEDED = 3 # time exceeded, code:
ICMP6_PARAM_PROB = 4 # ip6 header bad
# Informational messages occupy types >= 128.
ICMP6_ECHO_REQUEST = 128 # echo service
ICMP6_ECHO_REPLY = 129 # echo reply
MLD_LISTENER_QUERY = 130 # multicast listener query
MLD_LISTENER_REPORT = 131 # multicast listener report
MLD_LISTENER_DONE = 132 # multicast listener done
# RFC2292 decls
ICMP6_MEMBERSHIP_QUERY = 130 # group membership query
ICMP6_MEMBERSHIP_REPORT = 131 # group membership report
ICMP6_MEMBERSHIP_REDUCTION = 132 # group membership termination
# Neighbor Discovery (RFC 4861).
ND_ROUTER_SOLICIT = 133 # router solicitation
ND_ROUTER_ADVERT = 134 # router advertisment
ND_NEIGHBOR_SOLICIT = 135 # neighbor solicitation
ND_NEIGHBOR_ADVERT = 136 # neighbor advertisment
ND_REDIRECT = 137 # redirect
ICMP6_ROUTER_RENUMBERING = 138 # router renumbering
# NOTE: types 139/140 are shared by several (draft) protocols below.
ICMP6_WRUREQUEST = 139 # who are you request
ICMP6_WRUREPLY = 140 # who are you reply
ICMP6_FQDN_QUERY = 139 # FQDN query
ICMP6_FQDN_REPLY = 140 # FQDN reply
ICMP6_NI_QUERY = 139 # node information request
ICMP6_NI_REPLY = 140 # node information reply
ICMP6_MAXTYPE = 201
class ICMP6(dpkt.Packet):
    """Internet Control Message Protocol for IPv6 (RFC 4443).

    Fixed header: 8-bit type, 8-bit code, 16-bit checksum.  The payload
    is decoded into one of the nested per-type classes when the type is
    known; otherwise the raw bytes are kept.
    """
    __hdr__ = (
        ('type', 'B', 0),
        ('code', 'B', 0),
        ('sum', 'H', 0)
        )

    class Error(dpkt.Packet):
        """Common base for ICMPv6 error messages: 32 bits of padding
        followed by the (possibly truncated) offending IPv6 packet."""
        __hdr__ = (('pad', 'I', 0), )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            # Error payloads embed the packet that triggered the error.
            self.ip6 = ip6.IP6(self.data)
            self.data = self.ip6

    class Unreach(Error):
        pass
    class TooBig(Error):
        __hdr__ = (('mtu', 'I', 1232), )
    class TimeExceed(Error):
        pass
    class ParamProb(Error):
        __hdr__ = (('ptr', 'I', 0), )
    class Echo(dpkt.Packet):
        __hdr__ = (('id', 'H', 0), ('seq', 'H', 0))

    # Map ICMPv6 type number -> payload decoder class.
    _typesw = {
        1: Unreach,
        2: TooBig,
        3: TimeExceed,
        4: ParamProb,
        128: Echo,
        129: Echo,
    }

    def unpack(self, buf):
        """Unpack the fixed header, then decode the payload by type."""
        dpkt.Packet.unpack(self, buf)
        try:
            decoded = self._typesw[self.type](self.data)
        except (KeyError, dpkt.UnpackError):
            # Unknown type or malformed payload: keep the raw buffer.
            self.data = buf
        else:
            self.data = decoded
            # Expose the payload under a friendly attribute name,
            # e.g. pkt.echo or pkt.toobig.
            setattr(self, decoded.__class__.__name__.lower(), decoded)
| {
"repo_name": "djhenderson/dpkt",
"path": "dpkt/icmp6.py",
"copies": "17",
"size": "2604",
"license": "bsd-3-clause",
"hash": -4893693534180753000,
"line_mean": 35.1666666667,
"line_max": 74,
"alpha_frac": 0.5399385561,
"autogenerated": false,
"ratio": 3.3994778067885116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018036024233648723,
"num_lines": 72
} |
# $Id$
"""Internet Protocol, version 6."""
import dpkt
class IP6(dpkt.Packet):
    """Internet Protocol, version 6.

    Unpacks the fixed 40-byte header, walks the extension-header chain
    (stored in self.extension_hdrs) and hands the remaining payload to
    the protocol class registered for the final next-header value.
    NOTE: Python 2 source (long integer literals below).
    """
    __hdr__ = (
        ('v_fc_flow', 'I', 0x60000000L),  # version(4) | traffic class(8) | flow label(20)
        ('plen', 'H', 0), # payload length (not including header)
        ('nxt', 'B', 0), # next header protocol
        ('hlim', 'B', 0), # hop limit
        ('src', '16s', ''),  # source address, 16 raw bytes
        ('dst', '16s', '')   # destination address, 16 raw bytes
        )
    # XXX - to be shared with IP. We cannot refer to the ip module
    # right now because ip.__load_protos() expects the IP6 class to be
    # defined.
    _protosw = None
    def _get_v(self):
        # Version: top 4 bits of v_fc_flow.
        return self.v_fc_flow >> 28
    def _set_v(self, v):
        self.v_fc_flow = (self.v_fc_flow & ~0xf0000000L) | (v << 28)
    v = property(_get_v, _set_v)
    def _get_fc(self):
        # Traffic class: bits 20-27.
        return (self.v_fc_flow >> 20) & 0xff
    def _set_fc(self, v):
        self.v_fc_flow = (self.v_fc_flow & ~0xff00000L) | (v << 20)
    fc = property(_get_fc, _set_fc)
    def _get_flow(self):
        # Flow label: low 20 bits.
        return self.v_fc_flow & 0xfffff
    def _set_flow(self, v):
        self.v_fc_flow = (self.v_fc_flow & ~0xfffff) | (v & 0xfffff)
    flow = property(_get_flow, _set_flow)
    def unpack(self, buf):
        """Parse fixed header, extension-header chain and payload."""
        dpkt.Packet.unpack(self, buf)
        # One slot per known extension-header protocol number; filled in
        # below as headers are encountered.
        self.extension_hdrs = dict(((i, None) for i in ext_hdrs))
        if self.plen:
            buf = self.data[:self.plen]
        else: # due to jumbo payload or TSO
            buf = self.data
        next = self.nxt
        # Walk the chain: each extension header records its own byte
        # length in .length and the following protocol in .nxt.
        while (next in ext_hdrs):
            ext = ext_hdrs_cls[next](buf)
            self.extension_hdrs[next] = ext
            buf = buf[ext.length:]
            next = ext.nxt
        # set the payload protocol id
        setattr(self, 'p', next)
        try:
            self.data = self._protosw[next](buf)
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            # Unknown protocol or malformed payload: keep the raw bytes.
            self.data = buf
    def headers_str(self):
        """
        Output extension headers in order defined in RFC1883 (except dest opts)
        """
        header_str = ""
        for hdr in ext_hdrs:
            if not self.extension_hdrs[hdr] is None:
                header_str += str(self.extension_hdrs[hdr])
        return header_str
    def __str__(self):
        # 6=TCP, 17=UDP, 58=ICMPv6: fill in a zero checksum before
        # serializing, computed over the IPv6 pseudo-header + payload.
        if (self.nxt == 6 or self.nxt == 17 or self.nxt == 58) and \
           not self.data.sum:
            # XXX - set TCP, UDP, and ICMPv6 checksums
            p = str(self.data)
            s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
            s = dpkt.in_cksum_add(0, s)
            s = dpkt.in_cksum_add(s, p)
            try:
                self.data.sum = dpkt.in_cksum_done(s)
            except AttributeError:
                pass
        return self.pack_hdr() + self.headers_str() + str(self.data)
    def set_proto(cls, p, pktclass):
        # Register a decoder class for next-header value p.
        cls._protosw[p] = pktclass
    set_proto = classmethod(set_proto)
    def get_proto(cls, p):
        # Look up the decoder class for next-header value p.
        return cls._protosw[p]
    get_proto = classmethod(get_proto)
import ip

# We are most likely still in the middle of ip.__load_protos() which
# implicitly loads this module through __import__(), so the content of
# ip.IP._protosw is still incomplete at the moment. By sharing the
# same dictionary by reference as opposed to making a copy, when
# ip.__load_protos() finishes, we will also automatically get the most
# up-to-date dictionary.
IP6._protosw = ip.IP._protosw
class IP6ExtensionHeader(dpkt.Packet):
    """Base class for all IPv6 extension headers.

    An extension header behaves like a small sub-packet, so we simply
    inherit all of dpkt.Packet's header packing/unpacking machinery.
    """
    pass
class IP6OptsHeader(IP6ExtensionHeader):
    """Options extension header carrying a list of TLV options
    (shared wire format of Hop-by-Hop and Destination options)."""
    __hdr__ = (
        ('nxt', 'B', 0), # next extension header protocol
        ('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
        )
    def unpack(self, buf):
        """Unpack the fixed fields and parse the TLV option list.

        Sets self.length (total header size in bytes) and self.options
        (list of {'type', 'opt_length', 'data'} dicts; PAD1/PADN padding
        options are consumed but not reported).
        """
        dpkt.Packet.unpack(self, buf)
        setattr(self, 'length', (self.len + 1) * 8)
        parsed = []
        pos = 0
        limit = self.length - 2  # option area excludes the 2 fixed bytes
        while pos < limit:
            kind = ord(self.data[pos])
            if kind == 0:
                # PAD1: a single padding byte with no length field
                pos += 1
                continue
            size = ord(self.data[pos + 1])
            if kind == 1:
                # PADN: size padding bytes follow the 2-byte TLV head
                pos += size + 2
                continue
            parsed.append({'type': kind, 'opt_length': size,
                           'data': self.data[pos + 2:pos + 2 + size]})
            # skip type byte, length byte and the option body
            pos += size + 2
        setattr(self, 'options', parsed)
class IP6HopOptsHeader(IP6OptsHeader):
    """Hop-by-Hop options header; identical wire format to IP6OptsHeader."""
    pass
class IP6DstOptsHeader(IP6OptsHeader):
    """Destination options header; identical wire format to IP6OptsHeader."""
    pass
class IP6RoutingHeader(IP6ExtensionHeader):
    """Type 0 routing extension header (RFC 2460 section 4.4)."""
    __hdr__ = (
        ('nxt', 'B', 0), # next extension header protocol
        ('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
        ('type', 'B', 0), # routing type (currently, only 0 is used)
        ('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
        ('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
        )
    def _get_sl_bits(self):
        # Strict/loose bitmap: low 24 bits of rsvd_sl_bits.
        return self.rsvd_sl_bits & 0xffffff
    def _set_sl_bits(self, v):
        # BUG FIX: the mask used to be 0xfffff (20 bits), inconsistent
        # with the 24-bit getter above -- writing sl_bits corrupted the
        # top 4 bits of the bitmap.  Use the full 24-bit mask.
        self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)
    sl_bits = property(_get_sl_bits, _set_sl_bits)
    def unpack(self, buf):
        """Unpack fixed fields plus the trailing list of 16-byte
        addresses; sets self.addresses and self.length (total bytes)."""
        hdr_size = 8
        addr_size = 16
        dpkt.Packet.unpack(self, buf)
        addresses = []
        # each address occupies two 8-octet units of extension data
        num_addresses = self.len / 2
        buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
        for i in range(num_addresses):
            addresses.append(buf[i * addr_size: i * addr_size + addr_size])
        self.data = buf
        setattr(self, 'addresses', addresses)
        setattr(self, 'length', self.len * 8 + 8)
class IP6FragmentHeader(IP6ExtensionHeader):
    """Fragment extension header (RFC 2460 section 4.5)."""
    __hdr__ = (
        ('nxt', 'B', 0), # next extension header protocol
        ('resv', 'B', 0), # reserved, set to 0
        ('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
        ('id', 'I', 0) # fragments id
        )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Fragment headers are fixed-size; there is no TLV body to parse.
        setattr(self, 'length', self.__hdr_len__)
    def _get_frag_off(self):
        # Fragment offset lives in the top 13 bits.
        return self.frag_off_resv_m >> 3
    def _set_frag_off(self, v):
        self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
    frag_off = property(_get_frag_off, _set_frag_off)
    def _get_m_flag(self):
        # "More fragments" flag is bit 0.
        return self.frag_off_resv_m & 1
    def _set_m_flag(self, v):
        # BUG FIX: the old mask (~0xfffe) *kept* the stale flag bit and
        # wiped the offset/reserved bits instead.  Clear only bit 0 and
        # preserve the other 15 bits.
        self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
    m_flag = property(_get_m_flag, _set_m_flag)
class IP6AHHeader(IP6ExtensionHeader):
    """Authentication extension header."""
    __hdr__ = (
        ('nxt', 'B', 0), # next extension header protocol
        ('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
        ('resv', 'H', 0), # reserved, 2 bytes of 0
        ('spi', 'I', 0), # SPI security parameter index
        ('seq', 'I', 0) # sequence no.
        )
    def unpack(self, buf):
        """Unpack fixed fields.

        Sets self.length (whole header in bytes, (len + 2) * 4) and
        self.auth_data (the ICV bytes trailing the 12 fixed bytes).
        """
        dpkt.Packet.unpack(self, buf)
        total_bytes = (self.len + 2) * 4
        setattr(self, 'length', total_bytes)
        # fixed part is 12 bytes, leaving (len - 1) * 4 bytes of ICV
        setattr(self, 'auth_data', self.data[:(self.len - 1) * 4])
class IP6ESPHeader(IP6ExtensionHeader):
    """Encapsulating Security Payload header -- parsing unsupported."""
    def unpack(self, buf):
        # ESP encrypts everything that follows it, so there is nothing
        # meaningful we can parse here.
        raise NotImplementedError("ESP extension headers are not supported.")
# Extension-header protocol numbers in the order they should appear
# (and be emitted) per RFC 1883/2460.
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP, ip.IP_PROTO_DSTOPTS]
# Protocol number -> parser class for each supported extension header.
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
                ip.IP_PROTO_ROUTING: IP6RoutingHeader,
                ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
                ip.IP_PROTO_ESP: IP6ESPHeader,
                ip.IP_PROTO_AH: IP6AHHeader,
                ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
if __name__ == '__main__':
    import unittest

    class IP6TestCase(unittest.TestCase):
        # Fixtures below are captured packets; they must stay byte-exact.
        def test_IP6(self):
            # Round-trip a TCP-over-IPv6 packet: parse, zero the checksum,
            # re-serialize (which recomputes it) and compare bytes.
            s = '`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xca\x00\x16\x04\x84F\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\t\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x185?\x00\x00\x00\x00'
            ip = IP6(s)
            #print `ip`
            ip.data.sum = 0
            s2 = str(ip)
            ip2 = IP6(s)
            #print `ip2`
            assert(s == s2)
        def test_IP6RoutingHeader(self):
            # Packet with a type-0 routing header carrying two addresses.
            s = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
            ip = IP6(s)
            s2 = str(ip)
            # 43 is Routing header id
            assert(len(ip.extension_hdrs[43].addresses) == 2)
            assert(ip.tcp)
            assert(s == s2)
        def test_IP6FragmentHeader(self):
            # Standalone fragment header: check field/bit extraction.
            s = '\x06\xee\xff\xfb\x00\x00\xff\xff'
            fh = IP6FragmentHeader(s)
            s2 = str(fh)
            assert(fh.nxt == 6)
            assert(fh.id == 65535)
            assert(fh.frag_off == 8191)
            assert(fh.m_flag == 1)
        def test_IP6OptionsHeader(self):
            # TLV list with PAD1/PADN padding interleaved; padding is
            # skipped, so only 3 real options should be reported.
            s = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
            options = IP6OptsHeader(s).options
            assert(len(options) == 3)
        def test_IP6AHHeader(self):
            # Check derived length and ICV extraction.
            s = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
            ah = IP6AHHeader(s)
            assert(ah.length == 24)
            assert(ah.auth_data == 'xxxxxxxx')
            assert(ah.spi == 0x2020202)
            assert(ah.seq == 0x1010101)
        def test_IP6ExtensionHeaders(self):
            # Attach one of each supported extension header and count them.
            p = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
            ip = IP6(p)
            o = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
            options = IP6HopOptsHeader(o)
            ip.extension_hdrs[0] = options
            fh = '\x06\xee\xff\xfb\x00\x00\xff\xff'
            ip.extension_hdrs[44] = IP6FragmentHeader(fh)
            ah = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
            ip.extension_hdrs[51] = IP6AHHeader(ah)
            do = ';\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            ip.extension_hdrs[60] = IP6DstOptsHeader(do)
            assert(len([k for k in ip.extension_hdrs if (not ip.extension_hdrs[k] is None)]) == 5)

    unittest.main()
| {
"repo_name": "afghanistanyn/dpkt",
"path": "dpkt/ip6.py",
"copies": "17",
"size": "12002",
"license": "bsd-3-clause",
"hash": 8089525888408985000,
"line_mean": 38.0944625407,
"line_max": 376,
"alpha_frac": 0.5405765706,
"autogenerated": false,
"ratio": 2.903943866440842,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id$
#
# Locate all standard modules available in this build.
#
# This script is designed to run on Python 1.5.2 and newer.
#
# Written by Fredrik Lundh, January 2005
#
import imp, sys, os, re, time

# e.g. "python-2.5-linux2": identifies this interpreter build.
identifier = "python-%s-%s" % (sys.version[:3], sys.platform)
# UTC timestamp recorded in the generated module list header.
timestamp = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime(time.time()))
# known test packages
TEST_PACKAGES = "test.", "bsddb.test.", "distutils.tests."
try:
    import platform
    platform = platform.platform()
except:
    platform = None # unknown
# (suffix, mode, type) triples for importable files, e.g. ('.py', 'U', 1).
suffixes = imp.get_suffixes()
def get_suffix(file, suffix_list=None):
    """Return the (suffix, mode, type) triple matching *file*'s extension.

    suffix_list defaults to the module-level ``suffixes`` obtained from
    imp.get_suffixes(); it may be passed explicitly to match against a
    custom set (or for testing).  Returns None when nothing matches.
    """
    if suffix_list is None:
        suffix_list = suffixes
    for suffix in suffix_list:
        # endswith is equivalent to the old slice comparison for the
        # non-empty suffixes imp.get_suffixes() produces
        if file.endswith(suffix[0]):
            return suffix
    return None
def main():
    """Collect all importable module names and write the sorted list.

    Usage: listmodules.py [outfile]
      (no arg) or "-"  -- write to stdout
      "-f"             -- write to an auto-named modules-<identifier>.txt
    NOTE: Python 2 source (print statement, list-returning filter).
    """
    path = getpath()
    modules = {}
    # built-in modules have no backing file; record them with None
    for m in sys.builtin_module_names:
        modules[m] = None
    for p in path:
        modules.update(getmodules(p))
    keys = modules.keys()
    keys.sort()
    # filter out known test packages
    def cb(m):
        for d in TEST_PACKAGES:
            if m[:len(d)] == d:
                return 0
        return 1
    keys = filter(cb, keys)
    try:
        outfile = sys.argv[1]
        if outfile == "-":
            outfile = None
        elif outfile == "-f":
            outfile = "modules-" + identifier + ".txt"
    except IndexError:
        outfile = None
    if not outfile:
        out = sys.stdout
    else:
        out = open(outfile, "w")
    # header comments record enough context to reproduce the run
    out.write("# module list (generated by listmodules.py)\n")
    out.write("#\n")
    out.write("# timestamp=%s\n" % repr(timestamp))
    out.write("# sys.version=%s\n" % repr(sys.version))
    out.write("# sys.platform=%s\n" % repr(sys.platform))
    if platform:
        out.write("# platform=%s\n" % repr(platform))
    out.write("#\n")
    for k in keys:
        out.write(k + "\n")
    if out is not sys.stdout:
        out.close()
        print out.name, "ok (%d modules)" % len(modules)
def getmodules(p):
    """Return a {name: filename} mapping of modules found under *p*.

    Plain files are matched against the known import suffixes;
    subpackages (directories containing __init__.py) are recursed into
    and reported as dotted names.  C extensions that cannot actually be
    imported are skipped.
    """
    modules = {}
    # compile the identifier check once, outside the per-file loop
    # (it used to be recompiled for every file)
    valid_name = re.compile(r"(?i)[a-z_]\w*$")
    for f in os.listdir(p):
        f = os.path.join(p, f)
        if os.path.isfile(f):
            m, e = os.path.splitext(f)
            suffix = get_suffix(f)
            if not suffix:
                continue
            m = os.path.basename(m)
            if valid_name.match(m):
                if suffix[2] == imp.C_EXTENSION:
                    # check that this extension can be imported
                    try:
                        __import__(m)
                    except ImportError:
                        continue
                modules[m] = f
        elif os.path.isdir(f):
            m = os.path.basename(f)
            if os.path.isfile(os.path.join(f, "__init__.py")):
                # mf instead of f: avoid shadowing the outer loop variable
                for mm, mf in getmodules(f).items():
                    modules[m + "." + mm] = mf
    return modules
def getpath():
    """Return sys.path normalised to absolute directories, excluding
    site-packages entries and the current working directory."""
    entries = [os.path.normcase(os.path.abspath(p)) for p in sys.path[:]]
    # get rid of site packages
    for entry in entries:
        if entry[-13:] == "site-packages":
            site_root = os.path.abspath(entry)
            entries = [e for e in entries
                       if e[:len(site_root)] != site_root]
            break
    # get rid of non-existent directories and the current directory
    cwd = os.path.normcase(os.getcwd())
    return [e for e in entries if os.path.isdir(e) and e != cwd]
if __name__ == "__main__":
main()
| {
"repo_name": "hr4e/QuestionnaireQx",
"path": "listmodules.py",
"copies": "1",
"size": "3420",
"license": "apache-2.0",
"hash": 5849893026299655000,
"line_mean": 26.1428571429,
"line_max": 70,
"alpha_frac": 0.5347953216,
"autogenerated": false,
"ratio": 3.5849056603773586,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9599252238063702,
"avg_score": 0.004089748782731239,
"num_lines": 126
} |
# $Id$
"""mime - MIME decoding Library"""
import rfc822, re
import cStringIO as StringIO
class Error(Exception):
    """Base exception raised for any MIME parsing problem."""
    pass
class Entity(rfc822.Message):
    """A parsed (possibly multipart) MIME entity.  (Python 2: rfc822.)

    Attributes set during construction:
      mime                 -- true if the message is MIME-formatted
      body                 -- decoded body (non-multipart), or the
                              multipart preamble
      entities             -- child Entity list (multipart only)
      content_type         -- (type/subtype, params dict) or None
      content_disposition  -- (disposition, params dict) or None
    """
    def __init__(self, msg, mime=0, eol=r"\r\n", headers=None):
        # NOTE: eol is used as a *regex fragment* when building the
        # boundary pattern below, not as a literal string.
        if headers is not None:
            fp = StringIO.StringIO(headers + "\n" + msg)
        else:
            fp = StringIO.StringIO(msg)
        del msg
        rfc822.Message.__init__(self, fp)
        del self.fp
        self.mime = mime or ("MIME-Version" in self)
        if not self.mime:
            # plain RFC 822 message: everything after the headers is body
            self.body = fp.read()
            return
        if "Content-Type" in self:
            self.content_type = parse_content_type(self["Content-Type"])
        else:
            self.content_type = None
        if "Content-Disposition" in self:
            self.content_disposition = \
                parse_content_disposition(self["Content-Disposition"])
        else:
            self.content_disposition = None
        self.entities = []
        if self.content_type and self.content_type[0][:10] == "multipart/":
            # boundary line: optional EOL, "--", boundary, optional "--"
            # (final boundary), trailing blanks, optional EOL
            bre = re.compile(r"(" + eol + r")?--" +
                re.escape(self.content_type[1]["boundary"]) + r"(--)?[ \t]*" +
                r"(" + eol + r")?")
            msg = fp.read()
            start = 0
            while 1:
                while 1:
                    end = bre.search(msg, start)
                    if not end:
                        raise Error("End boundary not found in multipart")
                    # accept a boundary preceded by EOL, or one at the
                    # very start of the body
                    # NOTE(review): if this test fails, the loop re-searches
                    # from the same offset -- looks like it could spin; a
                    # start-advancing line may have been lost in this copy.
                    # Verify against upstream.
                    if end.group(1) is not None or end.start() == 0 and start == 0:
                        break
                if start == 0:
                    # text before the first boundary is the preamble
                    self.body = msg[start:end.start()]
                else:
                    self.entities.append(Entity(msg[start:end.start()], mime=1, eol=eol))
                start = end.end()
                if end.group(2) == "--":
                    # "--" suffix marks the final boundary
                    break
        else:
            # single-part entity: decode per its transfer encoding
            encoding = self.get("Content-Transfer-Encoding", "7bit").lower()
            self.body = decode(encoding, fp.read())
# Character classes from RFC 2045 section 5.1: tspecials must be quoted
# in parameter values; a token is any run of _tokenchars.
_tspecials = "()<>@,;:\\\"/[]?="
_whitespace = " \t\r\n"
_tokenchars = "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
              "^_`abcdefghijklmnopqrstuvwxyz{|}~"
def parse_content_disposition(s):
    """Parse a Content-Disposition header value.

    Returns (disposition, params) where params maps lower-cased
    parameter names to values.  Raises Error on malformed input.
    """
    pos = 0
    length = len(s)
    # skip whitespace before disposition
    while pos < length and s[pos] in _whitespace:
        pos += 1
    if pos >= length:
        raise Error("Unexpected end of string before disposition")
    # accumulate the disposition token
    disposition = ""
    while pos < length and s[pos] in _tokenchars:
        disposition += s[pos]
        pos += 1
    # optional whitespace, then end-of-string or ';' + parameters
    while pos < length and s[pos] in _whitespace:
        pos += 1
    if pos >= length:
        return (disposition, {})
    if s[pos] != ';':
        raise Error("Unexpected character %s in disposition" % repr(s[pos]))
    pos += 1
    return (disposition, parse_params(s[pos:]))
def parse_content_type(s):
    """Parse a Content-Type header value.

    Returns (type/subtype, params) where params maps lower-cased
    parameter names to values.  Raises Error on malformed input.
    """
    pos = 0
    length = len(s)
    # skip whitespace before type "/" subtype
    while pos < length and s[pos] in _whitespace:
        pos += 1
    if pos >= length:
        raise Error("Unexpected end of string before type")
    # major type token
    content_type = ""
    while pos < length and s[pos] in _tokenchars:
        content_type += s[pos]
        pos += 1
    if pos >= length:
        raise Error("Unexpected end of string in type")
    # mandatory "/" separator
    if s[pos] != "/":
        raise Error("Unexpected character %s in type" % repr(s[pos]))
    content_type += "/"
    pos += 1
    # subtype token
    while pos < length and s[pos] in _tokenchars:
        content_type += s[pos]
        pos += 1
    # optional whitespace, then end-of-string or ';' + parameters
    while pos < length and s[pos] in _whitespace:
        pos += 1
    if pos >= length:
        return (content_type, {})
    if s[pos] != ';':
        raise Error("Unexpected character %s in subtype" % repr(s[pos]))
    pos += 1
    return (content_type, parse_params(s[pos:]))
def parse_params(s):
    """Parse a MIME parameter list: ';'-separated attribute=value pairs,
    where value is a token or quoted-string (RFC 2045 section 5.1).

    Returns a dict mapping lower-cased attribute names to values.
    Raises Error on malformed input.
    """
    params = {}
    i = 0
    while 1:
        # skip whitespace before an attribute/value pair
        while i < len(s) and s[i] in _whitespace: i+= 1
        if i >= len(s): break
        # fetch the attribute
        attribute = ""
        while i < len(s) and s[i] in _tokenchars:
            attribute += s[i]
            i+= 1
        if i >= len(s):
            raise Error("Unexpected end of string in attribute")
        # now we should have an equals sign
        if s[i] != "=":
            raise Error("Unexpected character %s in attribute" % repr(s[i]))
        i+= 1
        if i >= len(s):
            raise Error("Unexpected end of string after '='")
        # now we should have the value - either a token or a quoted-string
        value = ""
        if s[i] != '"':
            # token
            while i < len(s) and s[i] in _tokenchars:
                value += s[i]
                i+= 1
        else:
            # quoted-string
            i+= 1
            while 1:
                if i >= len(s):
                    raise Error("Unexpected end of string in quoted-string")
                if s[i] == '"':
                    break
                # BUG FIX: this used to read `if i == "\\":`, comparing
                # the integer index against a string, so quoted-pair
                # escapes were never recognised; compare the character.
                if s[i] == "\\":
                    i+= 1
                    if i >= len(s):
                        raise Error("Unexpected end of string in quoted-pair")
                value += s[i]
                i+= 1
            i+= 1
        params[attribute.lower()] = value
        while i < len(s) and s[i] in _whitespace: i+= 1
        if i >= len(s):
            break
        if s[i] != ";":
            raise Error("Unexpected character %s after parameter" % repr(s[i]))
        i+= 1
    return params
def decode(encoding, s):
    """Decode *s* according to a Content-Transfer-Encoding name.

    Identity encodings ('7bit', '8bit', 'binary') return s unchanged;
    'quoted-printable' and 'base64' are decoded.  Raises Error for any
    other encoding name.
    """
    if encoding in ('7bit', '8bit', 'binary'):
        return s
    if encoding == 'quoted-printable':
        import quopri
        src = StringIO.StringIO(s)
        dst = StringIO.StringIO()
        quopri.decode(src, dst)
        return dst.getvalue()
    if encoding == 'base64':
        import base64
        return base64.decodestring(s)
    raise Error("Unknown encoding %s" % repr(encoding))
def decodeword(s):
    """Decode a single RFC 2047 encoded-word ("=?charset?enc?text?=")
    into a unicode string; plain words are returned as unicode as-is.

    Raises Error for an unknown encoding letter or undecodable text.
    NOTE: Python 2 only (uses the `unicode` builtin and str.decode).
    """
    if s[:2] != "=?" or s[-2:] != "?=":
        return unicode(s)
    # BUG FIX: splitting the whole string yields 5 parts for any valid
    # encoded-word ("=", charset, enc, text, "="), so the len != 3 guard
    # below always fired and nothing was ever decoded.  Strip the "=?"
    # and "?=" markers before splitting.
    t = s[2:-2].split("?")
    if len(t) != 3:
        return unicode(s)
    charset = t[0].lower()
    encoding = t[1].lower()
    if encoding == "q":
        s = decode("quoted-printable", t[2])
    elif encoding == "b":
        s = decode("base64", t[2])
    else:
        raise Error("Unknown encoded-word encoding %s" % repr(encoding))
    try:
        s = s.decode(charset)
    except (LookupError, UnicodeError):
        raise Error("Error decoding %s as %s" % (repr(s), repr(encoding)))
    return s
| {
"repo_name": "jribbens/jonpy",
"path": "jon/mime.py",
"copies": "1",
"size": "5681",
"license": "mit",
"hash": -6548297419265174000,
"line_mean": 26.8480392157,
"line_max": 79,
"alpha_frac": 0.5653934167,
"autogenerated": false,
"ratio": 3.3222222222222224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43876156389222226,
"avg_score": null,
"num_lines": null
} |
# $Id:$
'''Minimal Windows COM interface.
Allows pyglet to use COM interfaces on Windows without comtypes. Unlike
comtypes, this module does not provide property interfaces, read typelibs,
nice-ify return values or permit Python implementations of COM interfaces. We
don't need anything that sophisticated to work with DirectX.
All interfaces should derive from IUnknown (defined in this module). The
Python COM interfaces are actually pointers to the implementation (take note
when translating methods that take an interface as argument).
Interfaces can define methods::
class IDirectSound8(com.IUnknown):
_methods_ = [
('CreateSoundBuffer', com.STDMETHOD()),
('GetCaps', com.STDMETHOD(LPDSCAPS)),
...
]
Only use STDMETHOD or METHOD for the method types (not ordinary ctypes
function types). The 'this' pointer is bound automatically... e.g., call::
device = IDirectSound8()
DirectSoundCreate8(None, ctypes.byref(device), None)
caps = DSCAPS()
device.GetCaps(caps)
Because STDMETHODs use HRESULT as the return type, there is no need to check
the return value.
Don't forget to manually manage memory... call Release() when you're done with
an interface.
'''
import ctypes
class GUID(ctypes.Structure):
    '''A Windows GUID/IID: one long, two words and eight bytes (16 bytes).'''
    _fields_ = [
        ('Data1', ctypes.c_ulong),
        ('Data2', ctypes.c_ushort),
        ('Data3', ctypes.c_ushort),
        ('Data4', ctypes.c_ubyte * 8)
    ]

    def __init__(self, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8):
        '''Initialise from the standard 11-part GUID notation.'''
        self.Data1 = l
        self.Data2 = w1
        self.Data3 = w2
        for idx, byte in enumerate((b1, b2, b3, b4, b5, b6, b7, b8)):
            self.Data4[idx] = byte
# Pointer/alias types used in COM method signatures.
LPGUID = ctypes.POINTER(GUID)
IID = GUID   # an interface ID is structurally just a GUID
REFIID = ctypes.POINTER(IID)
class METHOD(object):
    '''COM method descriptor: a return type plus argument types.

    get_field() materialises the ctypes stdcall prototype used when the
    vtable slot is actually invoked (Windows-only at call time).
    '''
    def __init__(self, restype, *args):
        self.restype = restype
        self.argtypes = args

    def get_field(self):
        # WINFUNCTYPE builds a stdcall function prototype
        return ctypes.WINFUNCTYPE(self.restype, *self.argtypes)
class STDMETHOD(METHOD):
    '''COM method returning an HRESULT status code (the common case).'''
    def __init__(self, *args):
        # fix the return type; argument types pass straight through
        super(STDMETHOD, self).__init__(ctypes.HRESULT, *args)
class COMMethodInstance(object):
    '''Descriptor binding a COM vtable slot to an interface pointer.

    Attribute access on an interface *instance* yields a callable that
    builds the method prototype, looks up slot *i* of the vtable and
    invokes it with the interface pointer as implicit first argument.
    '''
    def __init__(self, name, i, method):
        self.name = name
        self.i = i
        self.method = method

    def __get__(self, obj, tp):
        if obj is None:
            # only meaningful on instances, not on the class itself
            raise AttributeError()
        def invoke(*args):
            # prototype is built lazily at call time, as before
            return self.method.get_field()(self.i, self.name)(obj, *args)
        return invoke
class COMInterface(ctypes.Structure):
    '''Placeholder struct: every COM object begins with a vtable pointer,
    so all COM interface pointers share this single layout.'''
    _fields_ = [
        ('lpVtbl', ctypes.c_void_p),
    ]
class InterfaceMetaclass(type(ctypes.POINTER(COMInterface))):
    '''Creates COM interface pointers.

    For every (name, METHOD) pair gathered from the bases and the class
    body, installs a COMMethodInstance descriptor under that name with
    its vtable slot index.
    '''
    def __new__(cls, name, bases, dct):
        methods = []
        # walk bases in reverse so inherited slots keep their original
        # vtable positions ahead of the ones declared on this interface
        for base in bases[::-1]:
            methods.extend(base.__dict__.get('_methods_', ()))
        methods.extend(dct.get('_methods_', ()))
        for i, (n, method) in enumerate(methods):
            # i is the vtable slot index for this method
            dct[n] = COMMethodInstance(n, i, method)
        # pointers produced by this metaclass all point at COMInterface
        dct['_type_'] = COMInterface
        return super(InterfaceMetaclass, cls).__new__(cls, name, bases, dct)
class Interface(ctypes.POINTER(COMInterface)):
    '''Base COM interface pointer.'''
    # Python 2 metaclass hook: subclasses get their _methods_ lists
    # turned into COMMethodInstance descriptors by InterfaceMetaclass.
    __metaclass__ = InterfaceMetaclass
class IUnknown(Interface):
    # The three methods every COM interface inherits, in vtable order
    # (slots 0-2): QueryInterface, AddRef, Release.
    _methods_ = [
        ('QueryInterface', STDMETHOD(REFIID, ctypes.c_void_p)),
        ('AddRef', METHOD(ctypes.c_int)),
        ('Release', METHOD(ctypes.c_int))
    ]
| {
"repo_name": "hazelnusse/sympy-old",
"path": "sympy/thirdparty/pyglet/pyglet/com.py",
"copies": "5",
"size": "3535",
"license": "bsd-3-clause",
"hash": -6399041892013928000,
"line_mean": 29.2136752137,
"line_max": 78,
"alpha_frac": 0.6297029703,
"autogenerated": false,
"ratio": 3.6368312757201644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011094940769846474,
"num_lines": 117
} |
# $Id$
"""Miscellaneous functions that are part of the vtk_kit.
This module is imported by vtk_kit.init() after the rest of the vtk_kit has
been initialised. To use these functions in your module code, do e.g.:
import moduleKits; moduleKits.vtk_kit.misc.flattenProp3D(obj);
@author: Charl P. Botha <http://cpbotha.net/>
"""
# this import does no harm; we go after the rest of vtk_kit has been
# initialised
import vtk
def flattenProp3D(prop3D):
    """Get rid of the UserTransform() of an actor by integrating it with
    the 'main' matrix.

    NOTE(review): assumes the combined matrix decomposes cleanly into
    position / scale / orientation (i.e. no shear) -- confirm for
    sheared user transforms.
    """
    if not prop3D.GetUserTransform():
        # no flattening here, move along
        return
    # get the current "complete" matrix (combining internal and user)
    currentMatrix = vtk.vtkMatrix4x4()
    prop3D.GetMatrix(currentMatrix)
    # apply it to a transform
    currentTransform = vtk.vtkTransform()
    currentTransform.Identity()
    currentTransform.SetMatrix(currentMatrix)
    # zero the prop3D UserTransform
    prop3D.SetUserTransform(None)
    # and set the internal matrix of the prop3D
    prop3D.SetPosition(currentTransform.GetPosition())
    prop3D.SetScale(currentTransform.GetScale())
    prop3D.SetOrientation(currentTransform.GetOrientation())
    # we should now also be able to zero the origin
    #prop3D.SetOrigin(0,0,0)
def planePlaneIntersection(
    planeNormal0, planeOrigin0, planeNormal1, planeOrigin1):
    """Given two plane definitions, determine the intersection line using
    the method on page 233 of Graphics Gems III: 'Plane-to-Plane Intersection'

    Returns tuple with lineOrigin and lineVector.
    Raises ValueError when the planes are (nearly) parallel.
    NOTE: Python 2 raise syntax below.
    """
    # convert planes to Hessian form first:
    # http://mathworld.wolfram.com/HessianNormalForm.html
    # calculate p, orthogonal distance from the plane to the origin
    p0 = - vtk.vtkMath.Dot(planeOrigin0, planeNormal0)
    p1 = - vtk.vtkMath.Dot(planeOrigin1, planeNormal1)
    # we already have n, the planeNormal
    # calculate cross product: the line direction is normal to both
    # plane normals
    L = [0.0, 0.0, 0.0]
    vtk.vtkMath.Cross(planeNormal0, planeNormal1, L)
    # pick the largest-magnitude component w of L for numerical
    # stability when solving the 2x2 system below
    absL = [abs(e) for e in L]
    maxAbsL = max(absL)
    if maxAbsL == 0.0:
        raise ValueError, "Planes are almost parallel."
    w = absL.index(maxAbsL)
    Lw = L[w]
    # we're going to set the maxLidx'th component of our lineOrigin (i.e.
    # any point on the line) to 0
    P = [0.0, 0.0, 0.0]
    # we want either [0, 1], [1, 2] or [2, 0]
    if w == 0:
        u = 1
        v = 2
    elif w == 1:
        u = 2
        v = 0
    else:
        u = 0
        v = 1
    # solve for the two free components of the point on the line
    P[u] = (planeNormal0[v] * p1 - planeNormal1[v] * p0) / float(Lw)
    P[v] = (planeNormal1[u] * p0 - planeNormal0[u] * p1) / float(Lw)
    P[w] = 0 # just for completeness
    vtk.vtkMath.Normalize(L)
    return (P, L)
| {
"repo_name": "codester2/devide",
"path": "module_kits/vtk_kit/misc.py",
"copies": "7",
"size": "2776",
"license": "bsd-3-clause",
"hash": 905408408086246400,
"line_mean": 28.5319148936,
"line_max": 78,
"alpha_frac": 0.6585014409,
"autogenerated": false,
"ratio": 3.3047619047619046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010504038493696117,
"num_lines": 94
} |
# $Id$
"""Multi-threaded Routing Toolkit."""
import dpkt
import bgp
# Multi-threaded Routing Toolkit
# http://www.ietf.org/internet-drafts/draft-ietf-grow-mrt-03.txt
# MRT Types
# MRT record types (draft-ietf-grow-mrt).
NULL = 0
START = 1
DIE = 2
I_AM_DEAD = 3
PEER_DOWN = 4
BGP = 5 # Deprecated by BGP4MP
RIP = 6
IDRP = 7
RIPNG = 8
BGP4PLUS = 9 # Deprecated by BGP4MP
BGP4PLUS_01 = 10 # Deprecated by BGP4MP
OSPF = 11
TABLE_DUMP = 12
BGP4MP = 16
BGP4MP_ET = 17
ISIS = 32
ISIS_ET = 33
OSPF_ET = 64
# BGP4MP Subtypes
BGP4MP_STATE_CHANGE = 0
BGP4MP_MESSAGE = 1
BGP4MP_ENTRY = 2
BGP4MP_SNAPSHOT = 3
BGP4MP_MESSAGE_32BIT_AS = 4
# Address Family Types
AFI_IPv4 = 1
AFI_IPv6 = 2
class MRTHeader(dpkt.Packet):
    # Common header preceding every MRT record: timestamp, record
    # type/subtype, and the byte length of the message that follows.
    __hdr__ = (
        ('ts', 'I', 0),
        ('type', 'H', 0),
        ('subtype', 'H', 0),
        ('len', 'I', 0)
        )
class TableDump(dpkt.Packet):
    """TABLE_DUMP record: one RIB entry plus its BGP path attributes."""
    __hdr__ = (
        ('view', 'H', 0),
        ('seq', 'H', 0),
        ('prefix', 'I', 0),
        ('prefix_len', 'B', 0),
        ('status', 'B', 1),
        ('originated_ts', 'I', 0),
        ('peer_ip', 'I', 0),
        ('peer_as', 'H', 0),
        ('attr_len', 'H', 0)
        )
    def unpack(self, buf):
        """Unpack the fixed fields, then consume attr_len bytes of BGP
        path attributes into self.attributes."""
        dpkt.Packet.unpack(self, buf)
        attrs = []
        remaining = self.attr_len
        while remaining > 0:
            attr = bgp.BGP.Update.Attribute(self.data)
            # advance past the attribute we just parsed
            self.data = self.data[len(attr):]
            remaining -= len(attr)
            attrs.append(attr)
        self.attributes = attrs
class BGP4MPMessage(dpkt.Packet):
    # BGP4MP_MESSAGE payload with 16-bit AS numbers; the raw BGP
    # message follows in self.data.
    __hdr__ = (
        ('src_as', 'H', 0),
        ('dst_as', 'H', 0),
        ('intf', 'H', 0),
        ('family', 'H', AFI_IPv4),
        ('src_ip', 'I', 0),
        ('dst_ip', 'I', 0)
        )
class BGP4MPMessage_32(dpkt.Packet):
    # BGP4MP_MESSAGE_32BIT_AS payload: same layout as BGP4MPMessage but
    # with 32-bit AS numbers.
    __hdr__ = (
        ('src_as', 'I', 0),
        ('dst_as', 'I', 0),
        ('intf', 'H', 0),
        ('family', 'H', AFI_IPv4),
        ('src_ip', 'I', 0),
        ('dst_ip', 'I', 0)
        )
| {
"repo_name": "lzp819739483/dpkt",
"path": "dpkt/mrt.py",
"copies": "17",
"size": "1939",
"license": "bsd-3-clause",
"hash": -1127289880551056800,
"line_mean": 20.0760869565,
"line_max": 64,
"alpha_frac": 0.472408458,
"autogenerated": false,
"ratio": 2.581890812250333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020665170745678824,
"num_lines": 92
} |
# $Id$
#----------------------------------------------------------------------
# Name: Logger.py
# Purpose: Provides a Logger class which can be wrapped around another
# python class to log method calls and attribute changes.
# Requires: Python 2.0 (or higher?)
#
# Author: greg Landrum (Landrum@RationalDiscovery.com)
# License:
#
# Copyright (c) 2001-2006 Greg Landrum and Rational Discovery LLC,
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
#----------------------------------------------------------------------
""" Provides a Logger class which can be wrapped around another
python class to log method calls and attribute changes.
"""
import types
import re

# the concrete type of a compiled regular expression object
reType = type(re.compile('f*'))
# types accepted in loggerIgnore lists (Python 2 type names)
stringTypes = [types.StringType,types.UnicodeType,reType]
def isPresent(what, checkList):
    """ checks to see if any of the entries in a list match a string

    **Arguments**

      - what: the thing to match against

      - checkList: a list of compiled regexps and/or plain strings to
        try the match with

    **Returns**

      1 or 0 depending upon whether or not _what_ was found in checkList

    **Notes**

      - regexp entries are tried with _match_, so they match a *prefix*
        of _what_; string entries must compare equal

    """
    for entry in checkList:
        # duck-type the regexp case: compiled patterns expose .match().
        # (replaces the old `type(entry) == reType` exact-type check,
        # an isinstance/type anti-pattern)
        if hasattr(entry, 'match'):
            if entry.match(what) is not None:
                return 1
        elif what == entry:
            return 1
    return 0
class Callable(object):
    """ Proxy for a single method of a wrapped object.

    Calling the proxy records (methodName, args, kwargs) in the supplied
    log and then forwards the call to the underlying object's method.
    """
    def __init__(self, log, obj, method):
        """ Constructor

        **Arguments:**

          - log: any object supporting append() (normally a list), used
            to record the invocation

          - obj: the wrapped object whose method will be invoked

          - method: the *name* of the method to be invoked
        """
        self._log = log
        self._obj = obj
        self._method = method

    def __call__(self, *args, **kwargs):
        """ records the method name and arguments, then makes the call """
        self._log.append((self._method, args, kwargs))
        target = getattr(self._obj, self._method)
        return target(*args, **kwargs)
class Logger(object):
""" This is the actual wrapper class.
The wrapper is fairly thin; it only has one method of its own:
- _LoggerGetLog()
and then several instance variables:
- _loggerFlushCommand
- _loggerClass
- _loggerObj
- _loggerCommandLog
- _loggerIgnore
These names were chosen to minimize the likelihood of a collision
with the attributes of a wrapped class. Obviously... ;-)
The general idea of using this is that you wrap a class in the logger,
and then use the class as you normally would. Whenever you want to
get the contents of the log (for example after running your program for
a while), you can call _loggerCommandLog. The resulting list can be
played back in another (or the same) object using the replay() function
defined below.
The logger can, optionally, be set to flush its log whenever a method with
a particular name is invoked. For example, you may want to be wrapping
some kind of drawing canvas and want to reset the log whenever the canvas
is cleared because there's no point in storing commands which will have
no effect on the final drawing.
**Note**
because of the way I've worked this, the log will actually be flushed
whenever the client program accesses the flush method, it need not be invoked.
i.e. if the loggerFlushCommand is 'foo', then doing either wrappedObj.foo() or
wrappedObj.foo will reset the log. This is undesirable and will most likely
be fixed in a future version
"""
def __init__(self,klass,*args,**kwargs):
""" Constructor
**Arguments**
The one required argument here is _klass_, which is the class
to be wrapped.
**Optional Keyword Arguments**
- loggerFlushCommand: the name of the attribute which will flush the log
- loggerIgnore: a list of regexps defining methods which should not be
logged
**All other arguments are passed to the constructor for _klass_ **
"""
if kwargs.has_key('loggerFlushCommand'):
self.__dict__['_loggerFlushCommand'] = kwargs['loggerFlushCommand']
del kwargs['loggerFlushCommand']
else:
self.__dict__['_loggerFlushCommand'] = None
if kwargs.has_key('loggerIgnore'):
tempL = kwargs['loggerIgnore']
for entry in tempL:
if type(entry) not in stringTypes:
raise ValueError,'All entries in loggerIgnore must be either strings or regexps'
self.__dict__['_loggerIgnore'] = tempL
del kwargs['loggerIgnore']
else:
self.__dict__['_loggerIgnore'] = []
self.__dict__['_loggerClass'] = klass
self.__dict__['_loggerObj'] = klass(*args,**kwargs)
self.__dict__['_loggerCommandLog'] = []
def _LoggerGetLog(self):
""" Returns the contents of the command log as a python list
"""
return self._loggerCommandLog
def __getattr__(self,which):
""" here's where the logging of method invocations takes place
"""
if hasattr(self._loggerObj,which):
tmpAttr = getattr(self._loggerObj,which)
if type(tmpAttr) == types.MethodType:
loggerFlushCommand = self._loggerFlushCommand
if which == loggerFlushCommand:
self._loggerCommandLog = []
return Callable([],self._loggerObj,which)
elif self._loggerIgnore != [] and isPresent(which,self._loggerIgnore):
return Callable([],self._loggerObj,which)
else:
return Callable(self._loggerCommandLog,self._loggerObj,which)
else:
return tmpAttr
else:
raise AttributeError, '%s instance has no attribute %s'%(repr(self._loggerClass.__name__),repr(which))
def __setattr__(self,which,val):
""" setattr calls (i.e. wrappedObject.foo = 1) are also logged
"""
d = self.__dict__
if d.has_key(which):
d[which] = val
else:
self._loggerCommandLog.append((setattr,which,val))
setattr(self._loggerObj,which,val)
return val
def replay(logItems, obj):
    """ loops through the items in a Logger log list and invokes
    them in obj

    **Arguments**

      - logItems: a list of 3 tuples containing:

          1) method name (or the builtin ``setattr`` for attribute
             assignments)

          2) tuple of positional arguments

          3) dictionary of keyword arguments

      - obj: the object in which the log should be replayed

    **Returns**

      a list with the return values of all the method
      invocations/attribute assignments (``setattr`` entries contribute
      the assigned value)
    """
    if isinstance(logItems, Logger):
        # Accept a Logger wrapper directly and pull out its log.
        logItems = logItems._LoggerGetLog()
    resList = []
    for method, a1, a2 in logItems:
        if callable(method):
            if method == setattr:
                method(obj, a1, a2)
                resList.append(a2)
            else:
                resList.append(method(obj, a1, a2))
        else:
            # 'method' is a name: look it up on obj and invoke it.
            # apply() was removed in Python 3; argument unpacking is
            # the equivalent (and preferred Py2) spelling.
            fn = getattr(obj, method)
            resList.append(fn(*a1, **a2))
    return resList
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Logger/Logger.py",
"copies": "2",
"size": "7290",
"license": "bsd-3-clause",
"hash": 3352629666295961600,
"line_mean": 29.5020920502,
"line_max": 108,
"alpha_frac": 0.6330589849,
"autogenerated": false,
"ratio": 4.204152249134948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5837211234034948,
"avg_score": null,
"num_lines": null
} |
# $Id$
# Tour-page content registry: maps a 1-based blurb number to a 4-tuple of
# (image identifier, short subject label, excerpt paragraph, HTML citation
# with a PubMed link). The excerpt and citation strings are emitted
# verbatim into the rendered page, so their text must not be altered.
blurbs = {}

blurbs[1] = ('blurb_image_1_jpg',
             'beluga whale',
             """
Beluga whales of the St. Lawrence estuary (SLE) have been found to have cancer rates "much higher than those observed in domestic animals and humans, except in sheep in certain parts of the world, where environmental contaminants are believed to be involved in the etiology of this condition. SLE beluga and their environment are contaminated by polycyclic aromatic hydrocarbons (PAHs) produced by the local aluminum smelters. The human population living in proximity of the SLE beluga habitat is affected by rates of cancer higher than those found in people in the rest of Quebec and Canada, and some of these cancers have been epidemiologically related to PAHs."
""",
             """
<font size='+1'><b>Cancer in wildlife, a case study: beluga from the St. Lawrence estuary, Quebec, Canada.</b></font>
<br />
<b>Martineau D, Lemberger K, Dallaire A, Labelle P, Lipscomb TP, Michel P, Mikaelian I.</b>
<br />
Environ Health Perspect. 2002 Mar;110(3):285-92. (<a href="http://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=11882480&dopt=Abstract">Pubmed</a>)""")

blurbs[2] = ('blurb_image_2_jpg',
             'alligator',
             """
"The alligator population at Lake Apopka in central Florida declined dramatically between 1980 and 1987. Endocrine-disrupting chemicals and specifically DDT metabolites have been implicated in the alligators' reproductive failure. The DDT metabolite hypothesis is based largely on the observation of elevated concentrations of p,p-DDE and p,p-DDD in alligator eggs obtained from Lake Apopka in 1984 and 1985."
""",
             """
<font size='+1'><b>Reproductive toxins and alligator abnormalities at Lake Apopka, Florida.</b></font>
<br />
<br>Semenza JC, Tolbert PE, Rubin CH, Guillette LJ Jr, Jackson RJ.</b>
<br />
Environ Health Perspect. 1997 Oct;105(10):1030-2. (<a href="http://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=9349835&dopt=Abstract">Pubmed</a>)""")

blurbs[3] = ('blurb_image_3_jpg',
             'crow',
             """
"West Nile (WN) virus was first recognized as a cause of encephalitis in humans and other animals in the United States in 1999, and dead bird surveillance in the northeastern states provided a valuable window into the temporal and geographic distribution of viral activity."
""",
             """
<font size='+1'><b>Crow deaths as a sentinel surveillance system for West Nile virus in the Northeastern United States, 1999.</b></font>
<br />
<b>Eidson M, Komar N, Sorhage F, Nelson R, Talbot T, Mostashari F, et al.</b>
<br />
Emerg Infect Dis. 2001 Jul-Aug;7(4):615-20. (<a href='http://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=11585521&dopt=Abstract'>Pubmed</a>)""")
| {
"repo_name": "dchud/sentinel",
"path": "canary/ui/tour_blurb.py",
"copies": "1",
"size": "2964",
"license": "mit",
"hash": 7512383045770744000,
"line_mean": 64.8666666667,
"line_max": 676,
"alpha_frac": 0.6892712551,
"autogenerated": false,
"ratio": 2.993939393939394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41832106490393944,
"avg_score": null,
"num_lines": null
} |
# $Id$
class FakeFile(object):
    """File-like stub whose data operations all fail with IOError.

    Subclasses override the pieces they actually support (reading for
    FakeInput, writing for FakeOutput); everything else keeps raising.
    The public attributes mimic those of a classic Python 2 file object.
    """

    def __init__(self):
        self.closed = 0
        self.softspace = 0
        self._buf = ""
        self.chunksize = 4096
        self.newlines = None

    def close(self):
        self.closed = 1

    def _check_open(self):
        # Behave like a real file: any operation after close() raises.
        if self.closed:
            raise IOError("File is closed")

    def flush(self):
        self._check_open()

    def read(self, size=-1):
        raise IOError("cannot read()")

    def readline(self, size=-1):
        raise IOError("cannot readline()")

    def readlines(self, size=-1):
        raise IOError("cannot readlines()")

    def xreadlines(self):
        # Legacy alias: iterating the file yields lines.
        return iter(self)

    def __iter__(self):
        # Yield lines until readline() signals EOF with "".
        while True:
            chunk = self.readline()
            if chunk == "":
                break
            yield chunk

    def seek(self, offset, whence=0):
        raise IOError("cannot seek()")

    def tell(self):
        raise IOError("cannot tell()")

    def truncate(self):
        raise IOError("cannot truncate()")

    def write(self, s):
        raise IOError("cannot write()")

    def writelines(self, seq):
        raise IOError("cannot writelines()")
class FakeInput(FakeFile):
    """Read-only fake file built on an internal string buffer.

    Subclasses supply data by overriding _read(); the base
    implementation always returns "" (i.e. permanent EOF).
    """
    name = "<fake input stream>"
    mode = "rb"

    def _read(self, size):
        # Data-source hook for subclasses; "" signals end-of-stream.
        return ""

    def _read_more(self, size=-1):
        # Pull one chunk from _read() into the buffer; return how many
        # characters were obtained (0 means EOF).
        d = self._read(size)
        self._buf += d
        return len(d)

    def read(self, size=-1):
        # Read up to 'size' characters; a negative size reads to EOF.
        self._check_open()
        if size < 0:
            # Slurp everything, then hand back and clear the buffer.
            while self._read_more():
                pass
            r = self._buf
            self._buf = ""
            return r
        # Fill the buffer chunkwise until we have 'size' chars or hit EOF.
        while len(self._buf) < size and self._read_more(self.chunksize):
            pass
        if len(self._buf) == size:
            r = self._buf
            self._buf = ""
            return r
        r = self._buf[:size]
        self._buf = self._buf[size:]
        return r

    def readline(self, size=-1):
        # Read one line (newline included), optionally capped at 'size'
        # characters.
        self._check_open()
        start = 0
        while 1:
            # Search only the data buffered so far; 'start' skips the
            # region already searched on earlier passes of the loop.
            if size < 0:
                pos = self._buf.find("\n", start)
            else:
                pos = self._buf.find("\n", start, size)
            start = len(self._buf)
            if pos >= 0:
                # Found a newline: return through read() so the buffer
                # is consumed consistently (newline included).
                return self.read(pos + 1)
            if size >= 0 and len(self._buf) >= size:
                # Cap reached before any newline appeared.
                return self.read(size)
            if not self._read_more(self.chunksize):
                # EOF: return whatever remains (possibly "").
                return self.read()

    def readlines(self, size=-1):
        # Read whole lines until EOF, or until roughly 'size' characters
        # have been consumed (the last line may overshoot the cap).
        self._check_open()
        lines = []
        while 1:
            line = self.readline()
            if line == "":
                return lines
            lines.append(line)
            if size >= 0:
                size -= len(line)
                if size <= 0:
                    return lines
class FakeOutput(FakeFile):
    """Write-only fake file that forwards every write to a callable."""

    name = "<fake output stream>"
    mode = "wb"

    def __init__(self, write):
        # 'write' is any callable accepting a single string argument.
        FakeFile.__init__(self)
        self._write = write

    def write(self, s):
        self._write(s)

    def writelines(self, seq):
        # Forward each element individually rather than joining first.
        for item in seq:
            self._write(item)
| {
"repo_name": "jribbens/jonpy",
"path": "jon/fakefile.py",
"copies": "1",
"size": "2622",
"license": "mit",
"hash": -4884186683223600000,
"line_mean": 19.1692307692,
"line_max": 68,
"alpha_frac": 0.5560640732,
"autogenerated": false,
"ratio": 3.5384615384615383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4594525611661538,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Network Basic Input/Output System."""
import struct
import dpkt, dns
def encode_name(name):
    """Return the NetBIOS first-level encoded name.

    The name is NUL-padded to 16 bytes, then each nibble n of every
    byte is expanded to the ASCII character chr(n + 0x41), yielding a
    32-character string.

    Works on both Python 2 (str) and Python 3 (bytes) input; a text
    string is encoded to ASCII bytes first (backward-compatible: on
    Python 2, str already passes the bytes check unchanged).
    """
    if not isinstance(name, bytes):
        name = name.encode('ascii')
    l = []
    # bytearray iteration yields ints under both Python 2 and 3,
    # replacing the Py2-only ord(str-char) idiom.
    for c in bytearray(struct.pack('16s', name)):
        l.append(chr((c >> 4) + 0x41))
        l.append(chr((c & 0xf) + 0x41))
    return ''.join(l)
def decode_name(nbname):
    """Return the NetBIOS first-level decoded nbname.

    Each pair of characters collapses back into one byte (high nibble
    from the first char, low nibble from the second); trailing NUL
    padding is stripped. Anything that is not exactly 32 characters is
    returned unchanged.
    """
    if len(nbname) != 32:
        # Not a first-level encoded name; hand it back untouched.
        return nbname
    decoded = [
        chr(((ord(nbname[i]) - 0x41) << 4) |
            ((ord(nbname[i + 1]) - 0x41) & 0xf))
        for i in range(0, 32, 2)
    ]
    return ''.join(decoded).split('\x00', 1)[0]
# RR types
NS_A		= 0x01	# IP address
NS_NS		= 0x02	# Name Server
NS_NULL		= 0x0A	# NULL
NS_NB		= 0x20	# NetBIOS general Name Service
NS_NBSTAT	= 0x21	# NetBIOS NODE STATUS

# RR classes
NS_IN		= 1

# NBSTAT name flags
NS_NAME_G	= 0x8000	# group name (as opposed to unique)
NS_NAME_DRG	= 0x1000	# deregister
NS_NAME_CNF	= 0x0800	# conflict
NS_NAME_ACT	= 0x0400	# active
NS_NAME_PRM	= 0x0200	# permanent

# NBSTAT service names
nbstat_svcs = {
    # (service, unique): list of ordered (name prefix, service name) tuples
    (0x00, 0):[ ('', 'Domain Name') ],
    (0x00, 1):[ ('IS~', 'IIS'), ('', 'Workstation Service') ],
    (0x01, 0):[ ('__MSBROWSE__', 'Master Browser') ],
    (0x01, 1):[ ('', 'Messenger Service') ],
    (0x03, 1):[ ('', 'Messenger Service') ],
    (0x06, 1):[ ('', 'RAS Server Service') ],
    (0x1B, 1):[ ('', 'Domain Master Browser') ],
    (0x1C, 0):[ ('INet~Services', 'IIS'), ('', 'Domain Controllers') ],
    (0x1D, 1):[ ('', 'Master Browser') ],
    (0x1E, 0):[ ('', 'Browser Service Elections') ],
    (0x1F, 1):[ ('', 'NetDDE Service') ],
    (0x20, 1):[ ('Forte_$ND800ZA', 'DCA IrmaLan Gateway Server Service'),
                ('', 'File Server Service') ],
    (0x21, 1):[ ('', 'RAS Client Service') ],
    (0x22, 1):[ ('', 'Microsoft Exchange Interchange(MSMail Connector)') ],
    (0x23, 1):[ ('', 'Microsoft Exchange Store') ],
    (0x24, 1):[ ('', 'Microsoft Exchange Directory') ],
    (0x2B, 1):[ ('', 'Lotus Notes Server Service') ],
    (0x2F, 0):[ ('IRISMULTICAST', 'Lotus Notes') ],
    (0x30, 1):[ ('', 'Modem Sharing Server Service') ],
    (0x31, 1):[ ('', 'Modem Sharing Client Service') ],
    (0x33, 0):[ ('IRISNAMESERVER', 'Lotus Notes') ],
    (0x43, 1):[ ('', 'SMS Clients Remote Control') ],
    (0x44, 1):[ ('', 'SMS Administrators Remote Control Tool') ],
    (0x45, 1):[ ('', 'SMS Clients Remote Chat') ],
    (0x46, 1):[ ('', 'SMS Clients Remote Transfer') ],
    (0x4C, 1):[ ('', 'DEC Pathworks TCPIP service on Windows NT') ],
    (0x52, 1):[ ('', 'DEC Pathworks TCPIP service on Windows NT') ],
    (0x87, 1):[ ('', 'Microsoft Exchange MTA') ],
    (0x6A, 1):[ ('', 'Microsoft Exchange IMC') ],
    (0xBE, 1):[ ('', 'Network Monitor Agent') ],
    (0xBF, 1):[ ('', 'Network Monitor Application') ]
    }

def node_to_service_name(node):
    """Map an NBSTAT (name, service, flags) node tuple to a service name.

    Accepts a single 3-tuple argument, exactly as before -- only the
    Py2-only tuple-parameter syntax (removed by PEP 3113) was replaced
    by an explicit unpack. Returns '' for unknown service codes.
    """
    name, service, flags = node
    try:
        # Bit NS_NAME_G clear => unique name (1), set => group name (0).
        unique = int(flags & NS_NAME_G == 0)
        for namepfx, svcname in nbstat_svcs[(service, unique)]:
            if name.startswith(namepfx):
                return svcname
    except KeyError:
        pass
    return ''
class NS(dns.DNS):
    """NetBIOS Name Service.

    Subclass of dns.DNS that layers NetBIOS first-level name
    encoding/decoding and NBSTAT record parsing on top of the stock
    DNS message machinery.
    """
    class Q(dns.DNS.Q):
        # NetBIOS questions share the DNS question wire layout.
        pass

    class RR(dns.DNS.RR):
        """NetBIOS resource record."""
        def unpack_rdata(self, buf, off):
            # Interpret self.rdata according to the record type.
            if self.type == NS_A:
                # A record: rdata is the raw IP address bytes.
                self.ip = self.rdata
            elif self.type == NS_NBSTAT:
                # NBSTAT: first rdata byte is the node-name entry count.
                num = ord(self.rdata[0])
                off = 1
                l = []
                for i in range(num):
                    # Each entry: 15-byte padded name, 1 service byte,
                    # then a 2-byte big-endian flags word. The split()
                    # calls trim space and NUL padding from the name.
                    name = self.rdata[off:off+15].split(None, 1)[0].split('\x00', 1)[0]
                    service = ord(self.rdata[off+15])
                    off += 16
                    flags = struct.unpack('>H', self.rdata[off:off+2])[0]
                    off += 2
                    l.append((name, service, flags))
                self.nodenames = l
                # XXX - skip stats

    def pack_name(self, buf, name):
        # First-level-encode the name before standard DNS packing.
        return dns.DNS.pack_name(self, buf, encode_name(name))

    def unpack_name(self, buf, off):
        # Standard DNS name unpacking, then first-level decode.
        name, off = dns.DNS.unpack_name(self, buf, off)
        return decode_name(name), off
class Session(dpkt.Packet):
    """NetBIOS Session Service.

    __hdr__ tuple order defines the wire layout: 1-byte packet type,
    1-byte flags, 2-byte length -- do not reorder.
    """
    __hdr__ = (
        ('type', 'B', 0),
        ('flags', 'B', 0),
        ('len', 'H', 0)
        )

# Session packet type codes (values for Session.type).
SSN_MESSAGE = 0
SSN_REQUEST = 1
SSN_POSITIVE = 2
SSN_NEGATIVE = 3
SSN_RETARGET = 4
SSN_KEEPALIVE = 5
class Datagram(dpkt.Packet):
    """NetBIOS Datagram Service.

    __hdr__ tuple order defines the wire layout: type, flags, 16-bit
    datagram id, 32-bit source address, 16-bit source port, then the
    length and offset words -- do not reorder.
    """
    __hdr__ = (
        ('type', 'B', 0),
        ('flags', 'B', 0),
        ('id', 'H', 0),
        ('src', 'I', 0),
        ('sport', 'H', 0),
        ('len', 'H', 0),
        ('off', 'H', 0)
        )

# Datagram packet type codes (values for Datagram.type).
DGRAM_UNIQUE = 0x10
DGRAM_GROUP = 0x11
DGRAM_BROADCAST = 0x12
DGRAM_ERROR = 0x13
DGRAM_QUERY = 0x14
DGRAM_POSITIVE = 0x15
DGRAM_NEGATIVE = 0x16
| {
"repo_name": "warjiang/dpkt",
"path": "dpkt/netbios.py",
"copies": "17",
"size": "4963",
"license": "bsd-3-clause",
"hash": -7701020746320438000,
"line_mean": 31.2272727273,
"line_max": 87,
"alpha_frac": 0.5218617772,
"autogenerated": false,
"ratio": 2.957687723480334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""
Google App Engine Script that handles administration screens for the
blog.
"""
import cgi
import logging
import xmlrpc
import xmlrpclib
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import urlfetch
from models import *
import request
import defs
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# Real Technorati XML-RPC ping endpoint, used when running on App Engine.
TECHNORATI_PING_RPC_URL = 'http://rpc.technorati.com/rpc/ping'
# Local mock endpoint for development outside GAE (see ping_technorati()).
FAKE_TECHNORATI_PING_RPC_URL = 'http://localhost/~bmc/technorati-mock/'
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class ShowArticlesHandler(request.BlogRequestHandler):
    """
    Handles the main admin page, which lists every article in the blog
    with links to the corresponding edit pages.
    """
    def get(self):
        # Render the full article list straight into the response.
        rendered = self.render_template('admin-main.html',
                                        {'articles': Article.get_all()})
        self.response.out.write(rendered)
class NewArticleHandler(request.BlogRequestHandler):
    """
    Handles requests to create and edit a new article.
    """
    def get(self):
        # Start from a placeholder draft; nothing is persisted until
        # the user submits the edit form.
        placeholder = Article(title='New article',
                              body='Content goes here',
                              draft=True)
        rendered = self.render_template('admin-edit.html',
                                        {'article': placeholder})
        self.response.out.write(rendered)
class SaveArticleHandler(request.BlogRequestHandler):
    """
    Handles form submissions to save an edited article.

    Creates a new Article when no 'id' field is posted, otherwise
    updates the existing one; pings external services when an article
    transitions from draft to published.
    """
    def post(self):
        # All form fields are HTML-escaped before use.
        title = cgi.escape(self.request.get('title'))
        body = cgi.escape(self.request.get('content'))
        s_id = cgi.escape(self.request.get('id'))
        # An empty 'id' means the article is new.
        id = int(s_id) if s_id else None
        tags = cgi.escape(self.request.get('tags'))
        # NOTE(review): published_when is read but never used below --
        # presumably consumed elsewhere or vestigial; confirm.
        published_when = cgi.escape(self.request.get('published_when'))
        draft = cgi.escape(self.request.get('draft'))
        if tags:
            # Comma-separated tag string -> list of stripped tags.
            tags = [t.strip() for t in tags.split(',')]
        else:
            tags = []
        tags = Article.convert_string_tags(tags)
        # The 'draft' checkbox posts 'on' when checked; absent => False.
        if not draft:
            draft = False
        else:
            draft = (draft.lower() == 'on')
        article = Article.get(id) if id else None
        if article:
            # It's an edit of an existing item.
            just_published = article.draft and (not draft)
            article.title = title
            article.body = body
            article.tags = tags
            article.draft = draft
        else:
            # It's new.
            article = Article(title=title,
                              body=body,
                              tags=tags,
                              draft=draft)
            just_published = not draft
        article.save()
        if just_published:
            # Article just went live: notify external services.
            logging.debug('Article %d just went from draft to published. '
                          'Alerting the media.' % article.id)
            alert_the_media()
        # 'edit_again' keeps the user on the edit page after saving.
        edit_again = cgi.escape(self.request.get('edit_again'))
        edit_again = edit_again and (edit_again.lower() == 'true')
        if edit_again:
            self.redirect('/admin/article/edit/?id=%s' % article.id)
        else:
            self.redirect('/admin/')
class EditArticleHandler(request.BlogRequestHandler):
    """
    Handles requests to edit an existing article, identified by the
    'id' query parameter.

    Raises ValueError if no article has that ID.
    """
    def get(self):
        id = int(self.request.get('id'))
        article = Article.get(id)
        if not article:
            # Call-form raise: the Py2-only "raise E, msg" statement
            # syntax is invalid in Python 3.
            raise ValueError('Article with ID %d does not exist.' % id)

        # The edit template expects a comma-separated tag string,
        # not the stored list.
        article.tag_string = ', '.join(article.tags)
        template_vars = {'article' : article}
        self.response.out.write(self.render_template('admin-edit.html',
                                                     template_vars))
class DeleteArticleHandler(request.BlogRequestHandler):
    """
    Handles form submissions to delete an article. A nonexistent ID is
    silently ignored; either way the client is sent back to the admin
    main page.
    """
    def get(self):
        target = Article.get(int(self.request.get('id')))
        if target:
            target.delete()
        self.redirect('/admin/')
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def ping_technorati():
    """Ping Technorati's XML-RPC update endpoint for this blog.

    Uses the real endpoint on App Engine and a local mock otherwise.
    Raises urlfetch.DownloadError if the RPC call fails.
    """
    import sys  # BUG FIX: sys was referenced below but never imported

    if defs.ON_GAE:
        url = TECHNORATI_PING_RPC_URL
    else:
        url = FAKE_TECHNORATI_PING_RPC_URL

    logging.debug('Pinging Technorati at: %s' % url)
    try:
        transport = xmlrpc.GoogleXMLRPCTransport()
        rpc_server = xmlrpclib.ServerProxy(url, transport=transport)
        result = rpc_server.weblogUpdates.ping(defs.BLOG_NAME,
                                               defs.CANONICAL_BLOG_URL)
        if result.get('flerror', False) == True:
            logging.error('Technorati ping error from server: %s' %
                          result.get('message', '(No message in RPC result)'))
        else:
            logging.debug('Technorati ping successful.')
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; call-form raise for Python 3 compatibility.
        raise urlfetch.DownloadError(
            "Can't ping Technorati: %s" % sys.exc_info()[1])
def alert_the_media():
    """Notify external services that new content was published.

    Called when an article transitions from draft to published; see
    SaveArticleHandler.post().
    """
    # Right now, we only alert Technorati
    ping_technorati()
# -----------------------------------------------------------------------------
# Main program
# -----------------------------------------------------------------------------
# Admin URL routing table: maps URL regexps to handler classes.
# The trailing '/?' makes the final slash optional.
application = webapp.WSGIApplication(
    [('/admin/?', ShowArticlesHandler),
     ('/admin/article/new/?', NewArticleHandler),
     ('/admin/article/delete/?', DeleteArticleHandler),
     ('/admin/article/save/?', SaveArticleHandler),
     ('/admin/article/edit/?', EditArticleHandler),
    ],
    debug=True)

def main():
    # Hand the WSGI application to App Engine's CGI adapter.
    util.run_wsgi_app(application)

if __name__ == "__main__":
    main()
| {
"repo_name": "bmc/picoblog",
"path": "admin.py",
"copies": "1",
"size": "6107",
"license": "bsd-3-clause",
"hash": -790064542897241200,
"line_mean": 32.3715846995,
"line_max": 79,
"alpha_frac": 0.5167840183,
"autogenerated": false,
"ratio": 4.4674469641550845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027708527278022406,
"num_lines": 183
} |
# $Id$
"""
Google App Engine Script that handles display of the published
items in the blog.
"""
__docformat__ = 'restructuredtext'
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import logging
import os
import sys
import math
import random
import datetime
# Google AppEngine imports
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from models import *
from rst import rst2html
import defs
import request
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class DateCount(object):
    """
    Convenience class for storing and sorting year/month counts.
    Holds a (date, count) pair; ordering and hashing consider only
    the date.
    """
    def __init__(self, date, count):
        self.date = date
        self.count = count

    def __cmp__(self, other):
        # Py2 ordering hook: compare purely by date, ignore count.
        return cmp(self.date, other.date)

    def __hash__(self):
        # Hash must agree with __cmp__ equality, so hash the date alone.
        return self.date.__hash__()

    def __str__(self):
        return '%s(%d)' % (self.date, self.count)

    def __repr__(self):
        text = str(self)
        return '<%s: %s>' % (self.__class__.__name__, text)
class TagCount(object):
    """
    Convenience class for storing and sorting tags and counts.
    css_class starts empty and is filled in later by
    AbstractPageHandler.get_tag_counts() when building the tag cloud.
    """
    def __init__(self, tag, count):
        self.css_class = ""
        self.count = count
        self.tag = tag
class AbstractPageHandler(request.BlogRequestHandler):
    """
    Abstract base class for all handlers in this module. Basically,
    this class exists to consolidate common logic: tag clouds, archive
    month counts, article augmentation and template rendering.
    """
    def get_tag_counts(self):
        """
        Get tag counts and calculate tag cloud frequencies.

        :rtype: list
        :return: list of ``TagCount`` objects, in random order
        """
        tag_counts = Article.get_all_tags()
        result = []
        if tag_counts:
            maximum = max(tag_counts.values())
            for tag, count in tag_counts.items():
                tc = TagCount(tag, count)

                # Determine the popularity of this term as a percentage.
                percent = math.floor((tc.count * 100) / maximum)

                # Map the percentage to one of five tag-cloud buckets.
                if percent <= 20:
                    tc.css_class = 'tag-cloud-tiny'
                elif 20 < percent <= 40:
                    tc.css_class = 'tag-cloud-small'
                elif 40 < percent <= 60:
                    tc.css_class = 'tag-cloud-medium'
                elif 60 < percent <= 80:
                    tc.css_class = 'tag-cloud-large'
                else:
                    tc.css_class = 'tag-cloud-huge'

                result.append(tc)

        random.shuffle(result)
        return result

    def get_month_counts(self):
        """
        Get date counts, sorted in reverse chronological order.

        :rtype: list
        :return: list of ``DateCount`` objects
        """
        # Renamed the local from 'hash', which shadowed the builtin.
        counts_by_dt = Article.get_all_datetimes()
        date_count = {}
        for dt in counts_by_dt.keys():
            # Bucket every timestamp into the first day of its month.
            just_date = datetime.date(dt.year, dt.month, 1)
            try:
                date_count[just_date] += counts_by_dt[dt]
            except KeyError:
                date_count[just_date] = counts_by_dt[dt]

        # sorted() works on both Python 2 and 3; the old
        # "keys(); sort(); reverse()" dance required a Py2 key list.
        return [DateCount(date, date_count[date])
                for date in sorted(date_count, reverse=True)]

    def augment_articles(self, articles, url_prefix, html=True):
        """
        Augment the ``Article`` objects in a list with the expanded
        HTML, the path to the article, and the full URL of the article.
        The augmented fields are:

        - ``html``: the optionally expanded HTML
        - ``path``: the article's path
        - ``url``: the full URL to the article

        :Parameters:
            articles : list
                list of ``Article`` objects to be augmented
            url_prefix : str
                URL prefix to use when constructing full URL from path
            html : bool
                ``True`` to generate HTML from each article's RST
        """
        for article in articles:
            if html:
                try:
                    article.html = rst2html(article.body)
                except AttributeError:
                    # Article without a usable body renders as empty.
                    article.html = ''
            article.path = '/' + defs.ARTICLE_URL_PATH + '/%s' % article.id
            article.url = url_prefix + article.path

    def render_articles(self,
                        articles,
                        request,
                        recent,
                        template_name='show-articles.html'):
        """
        Render a list of articles.

        :Parameters:
            articles : list
                list of ``Article`` objects to render
            request : HttpRequest
                the GAE HTTP request object
            recent : list
                list of recent ``Article`` objects. May be empty.
            template_name : str
                name of template to use

        :rtype: str
        :return: the rendered articles
        """
        url_prefix = 'http://' + request.environ['SERVER_NAME']
        port = request.environ['SERVER_PORT']
        if port:
            # NOTE(review): the port is appended even when it is the
            # default port 80 -- preserved as-is.
            url_prefix += ':%s' % port

        self.augment_articles(articles, url_prefix)
        self.augment_articles(recent, url_prefix, html=False)

        last_updated = datetime.datetime.now()
        if articles:
            last_updated = articles[0].published_when

        blog_url = url_prefix
        tag_path = '/' + defs.TAG_URL_PATH
        tag_url = url_prefix + tag_path
        date_path = '/' + defs.DATE_URL_PATH
        date_url = url_prefix + date_path
        # (Removed unused media_path/media_url locals; they were never
        # passed to the template.)

        template_variables = {'blog_name'    : defs.BLOG_NAME,
                              'blog_owner'   : defs.BLOG_OWNER,
                              'articles'     : articles,
                              'tag_list'     : self.get_tag_counts(),
                              'date_list'    : self.get_month_counts(),
                              'version'      : '0.3',
                              'last_updated' : last_updated,
                              'blog_path'    : '/',
                              'blog_url'     : blog_url,
                              'archive_path' : '/' + defs.ARCHIVE_URL_PATH,
                              'tag_path'     : tag_path,
                              'tag_url'      : tag_url,
                              'date_path'    : date_path,
                              'date_url'     : date_url,
                              'rss2_path'    : '/' + defs.RSS2_URL_PATH,
                              'recent'       : recent}

        return self.render_template(template_name, template_variables)

    def get_recent(self):
        """
        Get up to ``defs.TOTAL_RECENT`` recent articles.

        :rtype: list
        :return: list of recent ``Article`` objects
        """
        articles = Article.published()
        total_recent = min(len(articles), defs.TOTAL_RECENT)
        if articles:
            recent = articles[0:total_recent]
        else:
            recent = []
        return recent
class FrontPageHandler(AbstractPageHandler):
    """
    Handles requests to display the front (or main) page of the blog,
    capped at defs.MAX_ARTICLES_PER_PAGE entries.
    """
    def get(self):
        published = Article.published()
        if len(published) > defs.MAX_ARTICLES_PER_PAGE:
            published = published[:defs.MAX_ARTICLES_PER_PAGE]
        rendered = self.render_articles(published,
                                        self.request,
                                        self.get_recent())
        self.response.out.write(rendered)
class ArticlesByTagHandler(AbstractPageHandler):
    """
    Handles requests to display the set of articles that carry a
    particular tag (captured from the URL).
    """
    def get(self, tag):
        rendered = self.render_articles(Article.all_for_tag(tag),
                                        self.request,
                                        self.get_recent())
        self.response.out.write(rendered)
class ArticlesForMonthHandler(AbstractPageHandler):
    """
    Handles requests to display the set of articles published in a
    given month (year and month captured from the URL as strings).
    """
    def get(self, year, month):
        monthly = Article.all_for_month(int(year), int(month))
        rendered = self.render_articles(monthly,
                                        self.request,
                                        self.get_recent())
        self.response.out.write(rendered)
class SingleArticleHandler(AbstractPageHandler):
    """
    Handles requests to display a single article, given its unique ID.
    Falls back to the not-found template for nonexistent IDs.
    """
    def get(self, id):
        article = Article.get(int(id))
        if article:
            template = 'show-articles.html'
            articles = [article]
            # (Removed the unused local 'more' that was set here.)
        else:
            template = 'not-found.html'
            articles = []

        self.response.out.write(self.render_articles(articles=articles,
                                                     request=self.request,
                                                     recent=self.get_recent(),
                                                     template_name=template))
class ArchivePageHandler(AbstractPageHandler):
    """
    Handles requests to display the list of all published articles in
    the blog, rendered with the archive template (no recent list).
    """
    def get(self):
        rendered = self.render_articles(Article.published(),
                                        self.request,
                                        [],
                                        'archive.html')
        self.response.out.write(rendered)
class RSSFeedHandler(AbstractPageHandler):
    """
    Handles requests for an RSS2 feed of the blog's contents. Sets the
    XML content type before writing the rendered feed.
    """
    def get(self):
        self.response.headers['Content-Type'] = 'text/xml'
        rendered = self.render_articles(Article.published(),
                                        self.request,
                                        [],
                                        'rss2.xml')
        self.response.out.write(rendered)
class NotFoundPageHandler(AbstractPageHandler):
    """
    Handles pages that aren't found: renders the not-found template
    with an empty article list.
    """
    def get(self):
        rendered = self.render_articles([],
                                        self.request,
                                        [],
                                        'not-found.html')
        self.response.out.write(rendered)
# -----------------------------------------------------------------------------
# Main program
# -----------------------------------------------------------------------------
# Public URL routing table: first matching regexp wins; the catch-all
# at the end routes everything unknown to the not-found handler.
application = webapp.WSGIApplication(
    [('/', FrontPageHandler),
     ('/tag/([^/]+)/*$', ArticlesByTagHandler),
     ('/date/(\d\d\d\d)-(\d\d)/?$', ArticlesForMonthHandler),
     ('/id/(\d+)/?$', SingleArticleHandler),
     ('/archive/?$', ArchivePageHandler),
     ('/rss2/?$', RSSFeedHandler),
     ('/.*$', NotFoundPageHandler),
    ],
    debug=True)

def main():
    # Hand the WSGI application to App Engine's CGI adapter.
    util.run_wsgi_app(application)

if __name__ == '__main__':
    main()
| {
"repo_name": "bmc/picoblog",
"path": "blog.py",
"copies": "1",
"size": "11584",
"license": "bsd-3-clause",
"hash": 631293038713389600,
"line_mean": 32.4797687861,
"line_max": 79,
"alpha_frac": 0.4752244475,
"autogenerated": false,
"ratio": 4.800663075010361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004669833802136995,
"num_lines": 346
} |
# $Id$
"""
Handles pinging Technorati for the blog.
"""
__docformat__ = 'restructuredtext' # for Epydoc
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import sys
import xmlrpclib
import logging
from google.appengine.api import urlfetch
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class GoogleXMLRPCTransport(object):
    """Handles an HTTP transaction to an XML-RPC server.

    Implements the xmlrpclib transport interface on top of App Engine's
    urlfetch service, which replaces the default socket-based transport.
    """

    def __init__(self):
        pass

    def request(self, host, handler, request_body, verbose=0):
        """
        Send a complete request, and parse the response. See xmlrpclib.py.

        :Parameters:
            host : str
                target host
            handler : str
                RPC handler on server (i.e., path to handler)
            request_body : str
                XML-RPC request body
            verbose : bool/int
                debugging flag. Ignored by this implementation

        :rtype: dict
        :return: parsed response, as key/value pairs
        """
        # issue XML-RPC request
        url = 'http://%s%s' % (host, handler)
        try:
            response = urlfetch.fetch(url,
                                      payload=request_body,
                                      method=urlfetch.POST,
                                      headers={'Content-Type': 'text/xml'})
        except:
            msg = 'Failed to fetch %s' % url
            raise xmlrpclib.ProtocolError(host + handler, 500, msg, {})

        if response.status_code != 200:
            logging.error('%s returned status code %s' %
                          (url, response.status_code))
            raise xmlrpclib.ProtocolError(host + handler,
                                          response.status_code,
                                          "",
                                          response.headers)

        return self.__parse_response(response.content)

    def __parse_response(self, response_body):
        # Feed the body through xmlrpclib's stock parser/unmarshaller.
        parser, unmarshaller = xmlrpclib.getparser(use_datetime=0)
        parser.feed(response_body)
        return unmarshaller.close()
| {
"repo_name": "bmc/picoblog",
"path": "xmlrpc.py",
"copies": "1",
"size": "2454",
"license": "bsd-3-clause",
"hash": 5033407257696745000,
"line_mean": 30.4615384615,
"line_max": 79,
"alpha_frac": 0.423797881,
"autogenerated": false,
"ratio": 5.577272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6501070608272728,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""
Introduction
============
``fortune`` is a stripped-down implementation of the classic BSD Unix
``fortune`` command. It combines the capabilities of the ``strfile`` command
(which produces the fortune index file) and the ``fortune`` command (which
displays a random fortune). It reads the traditional ``fortune`` program's
text file format.
Usage
=====
Usage::
fortune [OPTIONS] /path/to/fortunes
OPTIONS
-h, --help Show usage and exit.
-u, --update Update the index file.
-q, --quiet When updating the index file, do so quietly.
-V, --version Show version and exit.
If you omit the path, ``fortune`` looks at the ``FORTUNE_FILE`` environment
variable. If that environment variable isn't set, ``fortune`` aborts.
Fortune Cookie File Format
==========================
A fortune cookie file is a text file full of quotes. The format is simple:
The file consists of paragraphs separated by lines containing a single '%'
character. For example::
A little caution outflanks a large cavalry.
-- Bismarck
%
A little retrospection shows that although many fine, useful software
systems have been designed by committees and built as part of multipart
projects, those software systems that have excited passionate fans are
those that are the products of one or a few designing minds, great
designers. Consider Unix, APL, Pascal, Modula, the Smalltalk interface,
even Fortran; and contrast them with Cobol, PL/I, Algol, MVS/370, and
MS-DOS.
-- Fred Brooks, Jr.
%
A man is not old until regrets take the place of dreams.
-- John Barrymore
The Index File
==============
For efficiency and speed, ``fortune`` uses an index file to store the offsets
and lengths of every fortune in the text fortune file. So, before you can use
``fortune`` to read a random fortune, you have to generate the data file. With
the traditional BSD ``fortune`` program, you used the ``strfile``(8) command
to generate the index. With *this* fortune program, however, you simply
pass a special argument to the ``fortune`` command::
fortune -u /path/to/fortunes
That command will generate a binary ``/path/to/fortunes.dat`` file that
contains the index. You should run ``fortune -u`` whenever you change the text
fortune file.
Generating a Random Fortune
===========================
Once you have an index file, you can generate a random fortune simply by
running the ``fortune`` utility with the path to your text fortunes file::
fortune /path/to/fortunes
Differences
===========
This version of ``fortune`` does not provide some of the more advanced
capabilities of the original BSD program. For instance, it lacks:
- the ability to mark offensive and inoffensive fortunes
- the ability to separate long and short quotes
- the ability to print all fortunes matching a regular expression
It does, however, provide the most important function: The ability to display
a random quote from a set of quotes.
License and Copyright Info
==========================
Copyright (c) 2008 Brian M. Clapper
This is free software, released under the following BSD-like license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- The end-user documentation included with the redistribution, if any,
must include the following acknowlegement:
This product includes software developed by Brian M. Clapper
(bmc@clapper.org, http://www.clapper.org/bmc/). That software is
copyright (c) 2008 Brian M. Clapper.
Alternately, this acknowlegement may appear in the software itself, if
and wherever such third-party acknowlegements normally appear.
THIS SOFTWARE IS PROVIDED **AS IS** AND ANY EXPRESSED OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BRIAN M.
CLAPPER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import random
import os
import sys
import cPickle as pickle
from grizzled.cmdline import CommandLineParser
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# Public API of this module.
__all__ = ['main', 'get_random_fortune', 'make_fortune_data_file']
# Info about the module
__version__ = '1.0'
__author__ = 'Brian M. Clapper'
__email__ = 'bmc@clapper.org'
__url__ = 'http://software.clapper.org/fortune/'
__copyright__ = '2008-2011 Brian M. Clapper'
__license__ = 'BSD-style license'
# ---------------------------------------------------------------------------
# Internal Constants
# ---------------------------------------------------------------------------
# Pickle protocol used when writing the .dat index file. Protocol 2 is
# binary and readable by any Python >= 2.3.
_PICKLE_PROTOCOL = 2
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def random_int(start, end):
    """
    Return a random integer N such that ``start <= N <= end``.

    Prefers ``random.SystemRandom`` (OS entropy source) when available;
    falls back to the default PRNG otherwise.

    :Parameters:
        start : int
            lower bound, inclusive
        end : int
            upper bound, inclusive

    :rtype: int
    :return: the random integer
    """
    try:
        # Use SystemRandom, if it's available, since it's likely to have
        # more entropy. Its methods raise NotImplementedError when the
        # platform provides no os.urandom(); catch only that, instead of
        # the original bare "except:" which swallowed everything
        # (including KeyboardInterrupt).
        return random.SystemRandom().randint(start, end)
    except NotImplementedError:
        return random.randint(start, end)
def get_random_fortune(fortune_file):
    """
    Get a random fortune from the specified file. Barfs if the corresponding
    ``.dat`` file isn't present.

    :Parameters:
        fortune_file : str
            path to file containing fortune cookies

    :rtype: str
    :return: the random fortune

    :raises ValueError: if the ``.dat`` index file does not exist
    """
    fortune_index_file = fortune_file + '.dat'
    if not os.path.exists(fortune_index_file):
        # Py3-compatible raise form (the original used the Py2-only
        # "raise ValueError, msg" statement syntax).
        raise ValueError('Can\'t find file "%s"' % fortune_index_file)
    # "with" guarantees the files are closed even if pickle.load or
    # f.read raises; the original leaked the handles on error.
    with open(fortune_index_file) as fortuneIndex:
        data = pickle.load(fortuneIndex)
    # Pick a random (offset, length) record from the index.
    randomRecord = random_int(0, len(data) - 1)
    (start, length) = data[randomRecord]
    with open(fortune_file, 'rU') as f:
        f.seek(start)
        fortuneCookie = f.read(length)
    return fortuneCookie
def _read_fortunes(fortune_file):
""" Yield fortunes as lists of lines """
result = []
start = None
pos = 0
for line in fortune_file:
if line == "%\n":
if pos == 0: # "%" at top of file. Skip it.
continue
yield (start, pos - start, result)
result = []
start = None
else:
if start == None:
start = pos
result.append(line)
pos += len(line)
if result:
yield (start, pos - start, result)
def make_fortune_data_file(fortune_file, quiet=False):
    """
    Create or update the data file for a fortune cookie file.

    The data file (``fortune_file + '.dat'``) is a pickled list of
    ``(offset, length)`` tuples, one per fortune, as produced by
    ``_read_fortunes()``.

    :Parameters:
        fortune_file : str
            path to file containing fortune cookies
        quiet : bool
            If ``True``, don't display progress messages
    """
    fortune_index_file = fortune_file + '.dat'
    if not quiet:
        print 'Updating "%s" from "%s"...' % (fortune_index_file, fortune_file)
    data = []
    # Track shortest/longest fortune sizes purely for the progress report.
    shortest = sys.maxint
    longest = 0
    for start, length, fortune in _read_fortunes(open(fortune_file, 'rU')):
        data += [(start, length)]
        shortest = min(shortest, length)
        longest = max(longest, length)
    # Binary mode: the pickle protocol (_PICKLE_PROTOCOL = 2) is binary.
    fortuneIndex = open(fortune_index_file, 'wb')
    pickle.dump(data, fortuneIndex, _PICKLE_PROTOCOL)
    fortuneIndex.close()
    if not quiet:
        print 'Processed %d fortunes.\nLongest: %d\nShortest %d' %\
              (len(data), longest, shortest)
def main():
"""
Main program.
"""
usage = 'Usage: %s [OPTIONS] fortune_file' % os.path.basename(sys.argv[0])
arg_parser = CommandLineParser(usage=usage)
arg_parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
help="When updating the index file, don't emit " \
"messages.")
arg_parser.add_option('-u', '--update', action='store_true', dest='update',
help='Update the index file, instead of printing a '
'fortune.')
arg_parser.add_option('-V', '--version', action='store_true',
dest='show_version', help='Show version and exit.')
arg_parser.epilogue = 'If <fortune_file> is omitted, fortune looks at the ' \
'FORTUNE_FILE environment variable for the path.'
options, args = arg_parser.parse_args(sys.argv)
if len(args) == 2:
fortune_file = args[1]
else:
try:
fortune_file = os.environ['FORTUNE_FILE']
except KeyError:
arg_parser.show_usage('Missing fortune file.')
try:
if options.show_version:
print 'fortune, version %s' % __version__
elif options.update:
make_fortune_data_file(fortune_file)
else:
sys.stdout.write(get_random_fortune(fortune_file))
except ValueError, msg:
print >> sys.stderr, msg
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_name": "sean-smith/fortune",
"path": "fortune/__init__.py",
"copies": "1",
"size": "9779",
"license": "bsd-3-clause",
"hash": 3352750646916822000,
"line_mean": 32.9548611111,
"line_max": 81,
"alpha_frac": 0.6141732283,
"autogenerated": false,
"ratio": 3.922583233052547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5036756461352546,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""
RST support.
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import logging
import os
import sys
import StringIO
from docutils import core, nodes, parsers
from docutils.parsers.rst import states, directives
from docutils.core import publish_parts
from pygments import lexers, util, highlight, formatters
from pygments.styles import get_style_by_name
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def code_block(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    """
    The code-block directive provides syntax highlighting for blocks
    of code. It is used with the the following syntax::

        .. code-block:: python

            import sys
            def main():
                sys.stdout.write("Hello world")

    The resulting output is placed in a ``<div>`` block, with CSS class
    "code-block". A suitable CSS rule is something like this::

        div.code-block
        {
            margin-left: 2em ;
            margin-right: 2em ;
            background-color: #eeeeee;
            font-family: "Courier New", Courier, monospace;
            font-size: 10pt;
        }

    Adapted from http://lukeplant.me.uk/blog.php?id=1107301665

    (Signature follows the docutils "old-style" directive-function
    convention; only ``name``, ``arguments``, ``content``, ``lineno`` and
    ``state_machine`` are actually used here.)
    """
    # Renders `code` to HTML via Pygments, using the enclosing function's
    # `lexer` and `formatter` (closure variables set below).
    def _custom_highlighter(code):
        outfile = StringIO.StringIO()
        highlight(code, lexer, formatter, outfile)
        return outfile.getvalue()
    # Pass-through "highlighter" for languages we deliberately don't colorize.
    def _noop_highlighter(code):
        return code
    language = arguments[0]
    highlighter = None
    element = 'div'
    # Get the highlighter
    if language in ['xml', 'html']:
        # Markup languages are emitted verbatim inside a <pre> element.
        lexer = lexers.get_lexer_by_name('text')
        highlighter = _noop_highlighter
        element = 'pre'
    else:
        try:
            lexer = lexers.get_lexer_by_name(language)
            formatter = formatters.get_formatter_by_name('html')
            highlighter = _custom_highlighter
        except util.ClassNotFound:
            # Unknown language: fall back to plain text in a <pre>.
            lexer = lexers.get_lexer_by_name('text')
            highlighter = _noop_highlighter
            element = 'pre'
    # NOTE(review): both branches above always assign a highlighter, so this
    # error path appears unreachable as written — kept for safety.
    if highlighter is None:
        node = nodes.literal_block(block_text, block_text)
        error = state_machine.reporter.error('The "%s" directive does not '
                                            'support language "%s".' %
                                            (name, language), node, line=lineno)
        return [error]
    if not content:
        node = nodes.literal_block(block_text, block_text)
        error = state_machine.reporter.error('The "%s" block is empty; '
                                            'content required.' %
                                            (name), node, line=lineno)
        return [error]
    include_text = highlighter("\n".join(content))
    # Wrap the (already-escaped-by-Pygments or verbatim) text in the chosen
    # element and return it as a raw-HTML docutils node.
    html = '<%s class="code-block %s">\n%s\n</%s>\n' %\
           (element, language, include_text, element)
    raw = nodes.raw('', html, format='html')
    return [raw]
def rst2html(s, pygments_style='colorful', stylesheet=None):
    """
    Convert reStructuredText ``s`` to an HTML fragment (body only).

    ``pygments_style`` is forwarded to the writer via the ``style``
    setting. NOTE(review): ``stylesheet`` is accepted but never used in
    this function — confirm with callers before removing it.
    """
    settings = {'style' : pygments_style, 'config' : None}
    # Necessary, because otherwise docutils attempts to read a config file
    # via the codecs module, which doesn't work with AppEngine.
    os.environ['DOCUTILSCONFIG'] = ""
    parts = publish_parts(source=s,
                          writer_name='html4css1',
                          settings_overrides=settings)
    # Only the document body fragment, not the full HTML page.
    return parts['fragment']
# ---------------------------------------------------------------------------
# Initialization
# ---------------------------------------------------------------------------
# Old-style docutils directive metadata: 1 required argument (the language
# name), 0 optional arguments, no whitespace allowed in the final argument.
code_block.arguments = (1, 0, 0)
# NOTE(review): the option is registered under the key 'languages' —
# looks like it may have been meant to be 'language'; confirm against users
# of this directive before changing.
code_block.options = {'languages' : parsers.rst.directives.unchanged}
# The directive accepts body content (the code itself).
code_block.content = 1
# Register with docutils
directives.register_directive('code-block', code_block)
| {
"repo_name": "bmc/picoblog",
"path": "rst.py",
"copies": "1",
"size": "4041",
"license": "bsd-3-clause",
"hash": -8632255579918680000,
"line_mean": 30.5703125,
"line_max": 81,
"alpha_frac": 0.5251175452,
"autogenerated": false,
"ratio": 4.465193370165746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015203563468693658,
"num_lines": 128
} |
# $Id$
"""
The stpyfits module is an extension to the `astropy.io.fits` module which offers
additional features specific to STScI. These features include the handling
of Constant Data Value Arrays.
"""
import functools
import numpy as np
import astropy
from astropy.io import fits
# A few imports for backward compatibility; in the earlier stpyfits these were
# overridden, but with fits's new extension system it's not necessary
from astropy.io.fits.util import _is_int
from astropy.utils import lazyproperty
from distutils.version import LooseVersion
ASTROPY_VER_GE32 = LooseVersion(astropy.__version__) >= LooseVersion('3.2')
STPYFITS_ENABLED = False # Not threadsafe TODO: (should it be?)
# Register the extension classes; simply importing stpyfits does not
# automatically enable it. Instead, it can be enabled/disabled using these
# functions.
def enable_stpyfits():
    """Register the ConstantValue HDU classes with astropy.io.fits.

    Idempotent: does nothing if stpyfits is already enabled.
    """
    global STPYFITS_ENABLED
    if STPYFITS_ENABLED:
        return
    fits.register_hdu(ConstantValuePrimaryHDU)
    fits.register_hdu(ConstantValueImageHDU)
    STPYFITS_ENABLED = True
def disable_stpyfits():
    """Unregister the ConstantValue HDU classes from astropy.io.fits.

    Idempotent: does nothing if stpyfits is not currently enabled.
    """
    global STPYFITS_ENABLED
    if not STPYFITS_ENABLED:
        return
    fits.unregister_hdu(ConstantValuePrimaryHDU)
    fits.unregister_hdu(ConstantValueImageHDU)
    STPYFITS_ENABLED = False
def with_stpyfits(func):
    """
    Decorator that runs ``func`` with the stpyfits HDU classes registered.

    Temporarily enables stpyfits, calls ``func``, then restores the prior
    enabled/disabled state. On astropy >= 3.2 it also monkey-patches
    ``_BasicHeader.fromfile`` to raise, forcing astropy to fall back to the
    full Header parser (the fast _BasicHeader path would bypass the
    ConstantValue handling).
    """
    @functools.wraps(func)
    def wrapped_with_stpyfits(*args, **kwargs):
        global STPYFITS_ENABLED
        # Remember whether stpyfits was already on, so we only turn it
        # off afterwards if *we* turned it on.
        was_enabled = STPYFITS_ENABLED
        enable_stpyfits()
        if ASTROPY_VER_GE32:
            from astropy.io.fits.header import _BasicHeader
            fromfile_orig = _BasicHeader.fromfile
            # Any exception here makes astropy retry with the full Header
            # class instead of the fast basic parser.
            def fromfile_patch(*args):
                raise Exception
            _BasicHeader.fromfile = fromfile_patch
        try:
            # BUG: Forcefully disable lazy loading.
            # Lazy loading breaks ability to initialize ConstantValueHDUs
            # TODO: Investigate the cause upstream (astropy.io.fits)
            if 'write' not in func.__name__:
                kwargs['lazy_load_hdus'] = False
            retval = func(*args, **kwargs)
        finally:
            # Only disable stpyfits if it wasn't already enabled
            if not was_enabled:
                disable_stpyfits()
            # Always restore the original fast-header parser.
            if ASTROPY_VER_GE32:
                _BasicHeader.fromfile = fromfile_orig
        return retval
    return wrapped_with_stpyfits
class _ConstantValueImageBaseHDU(fits.hdu.image._ImageBaseHDU):
    """
    A class that extends the `astropy.io.fits.hdu.base._BaseHDU` class to extend its
    behavior to implement STScI specific extensions to `astropy.io.fits`.

    A "Constant Value Data Array" HDU stores no pixel data on disk; instead
    the header carries a PIXVALUE keyword (the constant) and NPIXn keywords
    (the axis lengths), and the array is synthesized on access.

    The `astropy.io.fits.hdu.base._BaseHDU class` is:
    """
    __doc__ += fits.hdu.image._ImageBaseHDU.__doc__

    def __init__(self, data=None, header=None, do_not_scale_image_data=False,
                 uint=False, **kwargs):
        # Case 1: header describes a constant-value array (PIXVALUE present,
        # NAXIS == 0): rebuild NAXISn from the NPIXn keywords so the base
        # class sees a normal image header.
        if header and 'PIXVALUE' in header and header['NAXIS'] == 0:
            header = header.copy()
            # Add NAXISn keywords for each NPIXn keyword in the header and
            # remove the NPIXn keywords
            naxis = 0
            for card in reversed(header['NPIX*'].cards):
                try:
                    idx = int(card.keyword[len('NPIX'):])
                except ValueError:
                    # Not an NPIXn keyword (non-numeric suffix); ignore it.
                    continue
                hdrlen = len(header)
                header.set('NAXIS' + str(idx), card.value,
                           card.comment, after='NAXIS')
                del header[card.keyword]
                if len(header) < hdrlen:
                    # A blank card was used when updating the header; add the
                    # blank back in.
                    # TODO: Fix header.set so that it has an option not to
                    # use a blank card--this is a detail that we really
                    # shouldn't have to worry about otherwise
                    header.append()
                # Presumably the NPIX keywords are in order of their axis, but
                # just in case somehow they're not...
                naxis = max(naxis, idx)
            # Update the NAXIS keyword with the correct number of axes
            header['NAXIS'] = naxis
        # Case 2: PIXVALUE present but NAXIS != 0: real data may accompany
        # the header; keep PIXVALUE only if the data really is constant.
        elif header and 'PIXVALUE' in header:
            pixval = header['PIXVALUE']
            # Positive BITPIX means an integer array; coerce the constant.
            if header['BITPIX'] > 0:
                pixval = int(pixval)
            arrayval = self._check_constant_value_data(data)
            if arrayval is not None:
                header = header.copy()
                # Update the PIXVALUE keyword if necessary
                if arrayval != pixval:
                    header['PIXVALUE'] = arrayval
            else:
                header = header.copy()
                # There is a PIXVALUE keyword but NAXIS is not 0 and the data
                # does not match the PIXVALUE.
                # Must remove the PIXVALUE and NPIXn keywords so we recognize
                # that there is non-constant data in the file.
                del header['PIXVALUE']
                for card in header['NPIX*'].cards:
                    try:
                        idx = int(card.keyword[len('NPIX'):])
                    except ValueError:
                        continue
                    del header[card.keyword]
        # Make sure to pass any arguments other than data and header as
        # keyword arguments, because PrimaryHDU and ImageHDU have stupidly
        # different signatures for __init__
        super(_ConstantValueImageBaseHDU, self).__init__(
            data, header, do_not_scale_image_data=do_not_scale_image_data,
            uint=uint)

    @property
    def size(self):
        """
        The HDU's size should always come up as zero so long as there's no
        actual data in it other than the constant value array.
        """
        if 'PIXVALUE' in self._header:
            return 0
        else:
            return super(_ConstantValueImageBaseHDU, self).size

    @lazyproperty
    def data(self):
        # Synthesize the constant array only for the "pure" constant-value
        # form (PIXVALUE present, no NPIXn left, NAXISn restored by
        # __init__); otherwise defer to the normal astropy data loader.
        if ('PIXVALUE' in self._header and 'NPIX1' not in self._header and
                self._header['NAXIS'] > 0):
            bitpix = self._header['BITPIX']
            dims = self.shape
            # Special case where the pixvalue can be present but all the NPIXn
            # keywords are zero.
            if sum(dims) == 0:
                return None
            code = BITPIX2DTYPE[bitpix]
            pixval = self._header['PIXVALUE']
            if code in ['uint8', 'int16', 'int32', 'int64']:
                pixval = int(pixval)
            raw_data = np.zeros(shape=dims, dtype=code) + pixval
            # FITS data is big-endian; byteswap in place if needed.
            if raw_data.dtype.str[0] != '>':
                raw_data = raw_data.byteswap(True)
                raw_data.dtype = raw_data.dtype.newbyteorder('>')
            # Apply BSCALE/BZERO scaling the same way astropy would for
            # on-disk data, promoting integer types to float as required.
            if self._bzero != 0 or self._bscale != 1:
                if bitpix > 16:  # scale integers to Float64
                    data = np.array(raw_data, dtype=np.float64)
                elif bitpix > 0:  # scale integers to Float32
                    data = np.array(raw_data, dtype=np.float32)
                else:  # floating point cases
                    data = raw_data
                if self._bscale != 1:
                    np.multiply(data, self._bscale, data)
                if self._bzero != 0:
                    data += self._bzero
                # delete the keywords BSCALE and BZERO after scaling
                del self._header['BSCALE']
                del self._header['BZERO']
                self._header['BITPIX'] = DTYPE2BITPIX[data.dtype.name]
            else:
                data = raw_data
            return data
        else:
            return super(_ConstantValueImageBaseHDU, self).data

    @data.setter
    def data(self, data):
        # Store the value as given; the coerced ndarray below is used only
        # to derive BITPIX and the axes (mirrors astropy's setter behavior).
        self.__dict__['data'] = data
        self._modified = True
        if self.data is not None and not isinstance(data, np.ndarray):
            # Try to coerce the data into a numpy array--this will work, on
            # some level, for most objects
            try:
                data = np.array(data)
            except:
                raise TypeError('data object %r could not be coerced into an '
                                'ndarray' % data)
        if isinstance(data, np.ndarray):
            self._bitpix = DTYPE2BITPIX[data.dtype.name]
            # FITS axis order is the reverse of the numpy shape.
            self._axes = list(data.shape)
            self._axes.reverse()
        elif self.data is None:
            self._axes = []
        else:
            raise ValueError('not a valid data array')
        self.update_header()

    @classmethod
    def match_header(cls, header):
        """A constant value HDU will only be recognized as such if the header
        contains a valid PIXVALUE and NAXIS == 0.
        """
        pixvalue = header.get('PIXVALUE')
        naxis = header.get('NAXIS', 0)
        return (super(_ConstantValueImageBaseHDU, cls).match_header(header) and
                (isinstance(pixvalue, float) or _is_int(pixvalue)) and
                naxis == 0)

    def update_header(self):
        """Sync the header with the data, converting back to the on-disk
        constant-value form (NAXIS=0 + NPIXn) when the data is constant.
        """
        if (not self._modified and not self._header._modified and
                (self._has_data and self.shape == self.data.shape)):
            # Not likely that anything needs updating
            return
        super(_ConstantValueImageBaseHDU, self).update_header()
        if 'PIXVALUE' in self._header and self._header['NAXIS'] > 0:
            # This is a Constant Value Data Array. Verify that the data
            # actually matches the PIXVALUE
            pixval = self._header['PIXVALUE']
            if self._header['BITPIX'] > 0:
                pixval = int(pixval)
            if self.data is None or self.data.nbytes == 0:
                # Empty data array; just keep the existing PIXVALUE
                arrayval = self._header['PIXVALUE']
            else:
                arrayval = self._check_constant_value_data(self.data)
            if arrayval is not None:
                if arrayval != pixval:
                    self._header['PIXVALUE'] = arrayval
                # Convert NAXISn keywords back to the NPIXn form used on
                # disk for constant-value arrays.
                naxis = self._header['NAXIS']
                self._header['NAXIS'] = 0
                for idx in range(naxis, 0, -1):
                    axisval = self._header['NAXIS%d' % idx]
                    self._header.set('NPIX%d' % idx, axisval,
                                     'length of constant array axis %d' % idx,
                                     after='PIXVALUE')
                    del self._header['NAXIS%d' % idx]
            else:
                # No longer a constant value array; remove any remaining
                # NPIX or PIXVALUE keywords
                try:
                    del self._header['PIXVALUE']
                except KeyError:
                    pass
                try:
                    del self._header['NPIX*']
                except KeyError:
                    pass

    def _summary(self):
        # Strip the "ConstantValue" prefix from the class name shown in
        # HDUList.info() output.
        summ = super(_ConstantValueImageBaseHDU, self)._summary()
        outsumm = ((summ[0], summ[1],
                    summ[2].replace('ConstantValue', '')) + summ[3:])
        return outsumm

    def _writedata_internal(self, fileobj):
        if 'PIXVALUE' in self._header:
            # This is a Constant Value Data Array, so no data is written
            return 0
        else:
            return super(_ConstantValueImageBaseHDU, self)._writedata_internal(fileobj)

    def _check_constant_value_data(self, data):
        """Verify that the HDU's data is a constant value array."""
        # Returns the constant if every element equals the first one,
        # otherwise None.
        arrayval = data.flat[0]
        if np.all(data == arrayval):
            return arrayval
        return None
# Concrete primary-HDU flavor; all behavior lives in the shared base class.
class ConstantValuePrimaryHDU(_ConstantValueImageBaseHDU,
                              fits.hdu.PrimaryHDU):
    """Primary HDUs with constant value arrays."""
# Concrete image-extension flavor; all behavior lives in the shared base class.
class ConstantValueImageHDU(_ConstantValueImageBaseHDU, fits.hdu.ImageHDU):
    """Image extension HDUs with constant value arrays."""
# Import the rest of the astropy.io.fits module
from astropy.io.fits import * # noqa
# For backward-compatibility with older code that thinks PrimaryHDU and
# ImageHDU should support the ConstantValue features
PrimaryHDU = ConstantValuePrimaryHDU
ImageHDU = ConstantValueImageHDU
# Override the other "convenience" functions to use stpyfits.
# NOTE: "open" deliberately shadows the builtin within this module's
# namespace, mirroring astropy.io.fits' own API.
open = fitsopen = with_stpyfits(fits.open)
info = with_stpyfits(fits.info)
append = with_stpyfits(fits.append)
writeto = with_stpyfits(fits.writeto)
update = with_stpyfits(fits.update)
getheader = with_stpyfits(fits.getheader)
getdata = with_stpyfits(fits.getdata)
getval = with_stpyfits(fits.getval)
setval = with_stpyfits(fits.setval)
delval = with_stpyfits(fits.delval)
# Re-export everything astropy.io.fits exports, plus the stpyfits additions.
__all__ = fits.__all__ + ['enable_stpyfits', 'disable_stpyfits',
                          'with_stpyfits', 'ConstantValuePrimaryHDU',
                          'ConstantValueImageHDU']
| {
"repo_name": "spacetelescope/stsci.tools",
"path": "lib/stsci/tools/stpyfits.py",
"copies": "1",
"size": "12764",
"license": "bsd-3-clause",
"hash": -3045296125214106600,
"line_mean": 36.1046511628,
"line_max": 87,
"alpha_frac": 0.5692572861,
"autogenerated": false,
"ratio": 4.163078930202218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5232336216302218,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Point-to-Point Protocol."""
import struct
import dpkt
# XXX - finish later
# http://www.iana.org/assignments/ppp-numbers
PPP_IP = 0x21 # Internet Protocol
PPP_IP6 = 0x57 # Internet Protocol v6
# Protocol field compression
PFC_BIT = 0x01
class PPP(dpkt.Packet):
    """Point-to-Point Protocol frame.

    The header is nominally a single protocol byte; when the protocol
    field is not compressed (PFC bit clear) the protocol is actually a
    2-byte big-endian value, handled specially in unpack()/pack_hdr().
    """
    __hdr__ = (
        ('p', 'B', PPP_IP),
        )
    # Maps protocol number -> payload packet class.
    _protosw = {}

    # Pre-decorator classmethod registration style, kept for consistency
    # with the rest of this codebase.
    def set_p(cls, p, pktclass):
        cls._protosw[p] = pktclass
    set_p = classmethod(set_p)

    def get_p(cls, p):
        return cls._protosw[p]
    get_p = classmethod(get_p)

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        if self.p & PFC_BIT == 0:
            # PFC bit clear: protocol is the full 2-byte field; re-read it
            # and drop the extra byte from the payload.
            self.p = struct.unpack('>H', buf[:2])[0]
            self.data = self.data[1:]
        try:
            # Decode the payload with the registered protocol class, and
            # expose it as an attribute named after that class (e.g. .ip).
            self.data = self._protosw[self.p](self.data)
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, struct.error, dpkt.UnpackError):
            # Unknown protocol or truncated payload: leave raw bytes.
            pass

    def pack_hdr(self):
        try:
            # Protocol values > 0xff need the uncompressed 2-byte form.
            if self.p > 0xff:
                return struct.pack('>H', self.p)
            return dpkt.Packet.pack_hdr(self)
        except struct.error, e:
            raise dpkt.PackError(str(e))
def __load_protos():
    """Auto-register payload classes for every PPP_* constant.

    For each PPP_xxx protocol constant in this module, try to import a
    module named "xxx" (lowercased) and register its class named "xxx"
    as the handler for that protocol number.
    """
    g = globals()
    for k, v in g.iteritems():
        if k.startswith('PPP_'):
            name = k[4:]
            modname = name.lower()
            try:
                mod = __import__(modname, g)
            except ImportError:
                # No such payload module available; skip this protocol.
                continue
            PPP.set_p(v, getattr(mod, name))
# Populate the protocol dispatch table once, on first import.
if not PPP._protosw:
    __load_protos()
| {
"repo_name": "mennis/dpkt",
"path": "dpkt/ppp.py",
"copies": "17",
"size": "1563",
"license": "bsd-3-clause",
"hash": -4180740707023803000,
"line_mean": 23.8095238095,
"line_max": 74,
"alpha_frac": 0.5227127319,
"autogenerated": false,
"ratio": 3.2698744769874475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010034013605442177,
"num_lines": 63
} |
# $Id$
_q_exports = [
'contact_us',
'study_methodologies',
'linkage',
'tour',
'disclaimer',
'questionnaire',
]
import sys
from quixote import get_publisher
from quixote.util import StaticFile
from canary.qx_defs import NotLoggedInError
from canary.qx_utils import MyStaticFile, load_static_exports
from canary.ui.pages import not_found
from canary.ui.about import about_ui
this_module = sys.modules[__name__]
contact_us = about_ui.contact_us
study_methodologies = about_ui.study_methodologies
linkage = about_ui.linkage
tour = about_ui.tour
disclaimer = about_ui.disclaimer
questionnaire = about_ui.questionnaire
config = get_publisher().config
html_files = load_static_exports(config.static_html_dir)
for file, path in html_files:
_q_exports.append(file)
setattr(this_module, file, MyStaticFile(path,
mime_type='text/html', cache_time=30))
def _q_lookup (request, name=''):
    """Serve the two known PDF downloads; anything else is a 404."""
    known_pdfs = ('ecohealth-2005-animals.pdf', 'ecohealth-2004-outfoxing.pdf')
    if name in known_pdfs:
        return StaticFile(config.static_html_dir + '/' + name,
                          mime_type='application/pdf')
    return not_found()
| {
"repo_name": "dchud/sentinel",
"path": "canary/ui/about/__init__.py",
"copies": "1",
"size": "1314",
"license": "mit",
"hash": 760567959031896300,
"line_mean": 24.7647058824,
"line_max": 84,
"alpha_frac": 0.696347032,
"autogenerated": false,
"ratio": 3.204878048780488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9263283880429667,
"avg_score": 0.02758824007016412,
"num_lines": 51
} |
# $Id$
_q_exports = [
'queue',
'record',
'session',
'source',
'source_map',
'sources',
'term',
'user',
'users',
'concept',
'category',
'dv_data',
'auto',
'reload_sources',
]
import string
from quixote.errors import AccessError, PublishError, TraversalError
from canary.ui.admin import admin_ui
from canary.ui.admin import queue
from canary.ui.admin import record
from canary.ui.admin import session
from canary.ui.admin import source
from canary.ui.admin import term
from canary.ui.admin import user
from canary.ui.admin import concept
from canary.ui.admin import category
from canary.ui.admin import dv_data
from canary.ui.pageframe import header, footer
from canary.ui.pages import not_found
from canary.qx_defs import NotLoggedInError
_q_index = admin_ui._q_index
users = admin_ui.users
source_map = admin_ui.source_map
sources = admin_ui.sources
reload_sources = admin_ui.reload_sources
auto = admin_ui.auto
def _q_access (request):
if request.session.user == None:
raise NotLoggedInError('Authorized access only.')
if not (request.session.user.is_admin):
raise AccessError("You don't have access to this page.")
| {
"repo_name": "dchud/sentinel",
"path": "canary/ui/admin/__init__.py",
"copies": "1",
"size": "1209",
"license": "mit",
"hash": 5519176664952375000,
"line_mean": 22.7058823529,
"line_max": 68,
"alpha_frac": 0.7080231596,
"autogenerated": false,
"ratio": 3.3214285714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4529451731028572,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Real-Time Transport Protocol"""
from dpkt import Packet
# version 1100 0000 0000 0000 ! 0xC000 14
# p 0010 0000 0000 0000 ! 0x2000 13
# x 0001 0000 0000 0000 ! 0x1000 12
# cc 0000 1111 0000 0000 ! 0x0F00 8
# m 0000 0000 1000 0000 ! 0x0080 7
# pt 0000 0000 0111 1111 ! 0x007F 0
#
_VERSION_MASK= 0xC000
_P_MASK = 0x2000
_X_MASK = 0x1000
_CC_MASK = 0x0F00
_M_MASK = 0x0080
_PT_MASK = 0x007F
_VERSION_SHIFT=14
_P_SHIFT = 13
_X_SHIFT = 12
_CC_SHIFT = 8
_M_SHIFT = 7
_PT_SHIFT = 0
VERSION = 2
class RTP(Packet):
    """Real-Time Transport Protocol packet.

    The first 16-bit word (_type) packs version/p/x/cc/m/pt as bit
    fields, exposed via the properties below; see the mask/shift table
    at the top of this module. Default _type 0x8000 = version 2, all
    other fields zero.
    """
    __hdr__ = (
        ('_type', 'H', 0x8000),
        ('seq', 'H', 0),
        ('ts', 'I', 0),
        ('ssrc', 'I', 0),
        )
    # CSRC list (cc entries of 4 bytes each), kept as raw bytes.
    csrc = ''

    # Bit-field accessors: each getter masks and shifts _type; each setter
    # writes the field back without disturbing the other bits.
    def _get_version(self): return (self._type&_VERSION_MASK)>>_VERSION_SHIFT
    def _set_version(self, ver):
        self._type = (ver << _VERSION_SHIFT) | (self._type & ~_VERSION_MASK)
    def _get_p(self): return (self._type & _P_MASK) >> _P_SHIFT
    def _set_p(self, p): self._type = (p << _P_SHIFT) | (self._type & ~_P_MASK)
    def _get_x(self): return (self._type & _X_MASK) >> _X_SHIFT
    def _set_x(self, x): self._type = (x << _X_SHIFT) | (self._type & ~_X_MASK)
    def _get_cc(self): return (self._type & _CC_MASK) >> _CC_SHIFT
    def _set_cc(self, cc): self._type = (cc<<_CC_SHIFT)|(self._type&~_CC_MASK)
    def _get_m(self): return (self._type & _M_MASK) >> _M_SHIFT
    def _set_m(self, m): self._type = (m << _M_SHIFT) | (self._type & ~_M_MASK)
    def _get_pt(self): return (self._type & _PT_MASK) >> _PT_SHIFT
    def _set_pt(self, m): self._type = (m << _PT_SHIFT)|(self._type&~_PT_MASK)

    version = property(_get_version, _set_version)  # protocol version (2)
    p = property(_get_p, _set_p)                    # padding flag
    x = property(_get_x, _set_x)                    # extension flag
    cc = property(_get_cc, _set_cc)                 # CSRC count
    m = property(_get_m, _set_m)                    # marker bit
    pt = property(_get_pt, _set_pt)                 # payload type

    def __len__(self):
        return self.__hdr_len__ + len(self.csrc) + len(self.data)

    def __str__(self):
        return self.pack_hdr() + self.csrc + str(self.data)

    def unpack(self, buf):
        super(RTP, self).unpack(buf)
        # CSRC list length is cc entries * 4 bytes, immediately after the
        # fixed header; everything beyond it is payload.
        self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + self.cc * 4]
        self.data = buf[self.__hdr_len__ + self.cc * 4:]
| {
"repo_name": "af001/dpkt",
"path": "dpkt/rtp.py",
"copies": "17",
"size": "2265",
"license": "bsd-3-clause",
"hash": -4305897177762907000,
"line_mean": 31.3571428571,
"line_max": 79,
"alpha_frac": 0.5373068433,
"autogenerated": false,
"ratio": 2.5709421112372306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.026167773886582847,
"num_lines": 70
} |
# $Id$
"""Remote Authentication Dial-In User Service."""
import dpkt
# http://www.untruth.org/~josh/security/radius/radius-auth.html
# RFC 2865
class RADIUS(dpkt.Packet):
    """RADIUS packet (RFC 2865): code, identifier, length, authenticator,
    followed by a list of attributes."""
    __hdr__ = (
        ('code', 'B', 0),
        ('id', 'B', 0),
        ('len', 'H', 4),
        ('auth', '16s', '')
        )
    # Parsed attribute list; filled in by unpack().
    attrs = ''

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # The entire payload is the attribute list; consume it all.
        self.attrs = parse_attrs(self.data)
        self.data = ''
def parse_attrs(buf):
    """Parse attributes buffer into a list of (type, data) tuples.

    Each RADIUS attribute is a TLV: 1-byte type, 1-byte total length
    (including the 2 header bytes), then the value. Parsing stops at a
    malformed length (< 2) or a truncated trailer.
    """
    attrs = []
    # Robustness fix: require at least the 2 TLV header bytes; the
    # original "while buf" crashed with IndexError on a 1-byte trailer.
    while len(buf) >= 2:
        t = ord(buf[0])
        l = ord(buf[1])
        if l < 2:
            # Length includes the header itself; < 2 is malformed.
            break
        d, buf = buf[2:l], buf[l:]
        attrs.append((t, d))
    return attrs
# Codes
RADIUS_ACCESS_REQUEST = 1
RADIUS_ACCESS_ACCEPT = 2
RADIUS_ACCESS_REJECT = 3
RADIUS_ACCT_REQUEST = 4
RADIUS_ACCT_RESPONSE = 5
RADIUS_ACCT_STATUS = 6
RADIUS_ACCESS_CHALLENGE = 11
# Attributes
RADIUS_USER_NAME = 1
RADIUS_USER_PASSWORD = 2
RADIUS_CHAP_PASSWORD = 3
RADIUS_NAS_IP_ADDR = 4
RADIUS_NAS_PORT = 5
RADIUS_SERVICE_TYPE = 6
RADIUS_FRAMED_PROTOCOL = 7
RADIUS_FRAMED_IP_ADDR = 8
RADIUS_FRAMED_IP_NETMASK = 9
RADIUS_FRAMED_ROUTING = 10
RADIUS_FILTER_ID = 11
RADIUS_FRAMED_MTU = 12
RADIUS_FRAMED_COMPRESSION = 13
RADIUS_LOGIN_IP_HOST = 14
RADIUS_LOGIN_SERVICE = 15
RADIUS_LOGIN_TCP_PORT = 16
# unassigned
RADIUS_REPLY_MESSAGE = 18
RADIUS_CALLBACK_NUMBER = 19
RADIUS_CALLBACK_ID = 20
# unassigned
RADIUS_FRAMED_ROUTE = 22
RADIUS_FRAMED_IPX_NETWORK = 23
RADIUS_STATE = 24
RADIUS_CLASS = 25
RADIUS_VENDOR_SPECIFIC = 26
RADIUS_SESSION_TIMEOUT = 27
RADIUS_IDLE_TIMEOUT = 28
RADIUS_TERMINATION_ACTION = 29
RADIUS_CALLED_STATION_ID = 30
RADIUS_CALLING_STATION_ID = 31
RADIUS_NAS_ID = 32
RADIUS_PROXY_STATE = 33
RADIUS_LOGIN_LAT_SERVICE = 34
RADIUS_LOGIN_LAT_NODE = 35
RADIUS_LOGIN_LAT_GROUP = 36
RADIUS_FRAMED_ATALK_LINK = 37
RADIUS_FRAMED_ATALK_NETWORK = 38
RADIUS_FRAMED_ATALK_ZONE = 39
# 40-59 reserved for accounting
RADIUS_CHAP_CHALLENGE = 60
RADIUS_NAS_PORT_TYPE = 61
RADIUS_PORT_LIMIT = 62
RADIUS_LOGIN_LAT_PORT = 63
| {
"repo_name": "GTiroadkill/dpkt",
"path": "dpkt/radius.py",
"copies": "18",
"size": "2106",
"license": "bsd-3-clause",
"hash": -7291517830581431000,
"line_mean": 22.9318181818,
"line_max": 69,
"alpha_frac": 0.650997151,
"autogenerated": false,
"ratio": 2.4689331770222744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Remote Procedure Call."""
import struct
import dpkt
# RPC.dir
CALL = 0
REPLY = 1
# RPC.Auth.flavor
AUTH_NONE = AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# RPC.Reply.stat
MSG_ACCEPTED = 0
MSG_DENIED = 1
# RPC.Reply.Accept.stat
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4
SYSTEM_ERR = 5
# RPC.Reply.Reject.stat
RPC_MISMATCH = 0
AUTH_ERROR = 1
class RPC(dpkt.Packet):
    """ONC/Sun RPC message: xid plus direction (CALL or REPLY), with the
    body decoded into a nested Call or Reply object."""
    __hdr__ = (
        ('xid', 'I', 0),
        ('dir', 'I', CALL)
    )

    class Auth(dpkt.Packet):
        """Opaque auth structure: 4-byte flavor, 4-byte length, body."""
        __hdr__ = (('flavor', 'I', AUTH_NONE), )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            # Body is length-prefixed opaque data.
            n = struct.unpack('>I', self.data[:4])[0]
            self.data = self.data[4:4+n]
        def __len__(self):
            # flavor (4) + length word (4) + body
            return 8 + len(self.data)
        def __str__(self):
            return self.pack_hdr() + struct.pack('>I', len(self.data)) + \
                   str(self.data)

    class Call(dpkt.Packet):
        """RPC call body: rpcvers/prog/vers/proc plus cred and verf."""
        __hdr__ = (
            ('rpcvers', 'I', 2),
            ('prog', 'I', 0),
            ('vers', 'I', 0),
            ('proc', 'I', 0)
        )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            # Credentials and verifier follow the fixed header, in order.
            self.cred = RPC.Auth(self.data)
            self.verf = RPC.Auth(self.data[len(self.cred):])
            self.data = self.data[len(self.cred) + len(self.verf):]
        def __len__(self):
            return len(str(self))  # XXX
        def __str__(self):
            return dpkt.Packet.__str__(self) + \
                   str(getattr(self, 'cred', RPC.Auth())) + \
                   str(getattr(self, 'verf', RPC.Auth())) + \
                   str(self.data)

    class Reply(dpkt.Packet):
        """RPC reply body: accepted or denied, decoded accordingly."""
        __hdr__ = (('stat', 'I', MSG_ACCEPTED), )

        class Accept(dpkt.Packet):
            """Accepted reply: verifier, accept stat, optional mismatch info."""
            __hdr__ = (('stat', 'I', SUCCESS), )
            def unpack(self, buf):
                self.verf = RPC.Auth(buf)
                buf = buf[len(self.verf):]
                self.stat = struct.unpack('>I', buf[:4])[0]
                if self.stat == SUCCESS:
                    self.data = buf[4:]
                elif self.stat == PROG_MISMATCH:
                    # Server reports the supported version range.
                    self.low, self.high = struct.unpack('>II', buf[4:12])
                    self.data = buf[12:]
            def __len__(self):
                if self.stat == PROG_MISMATCH:
                    n = 8
                else:
                    n = 0
                return len(self.verf) + 4 + n + len(self.data)
            def __str__(self):
                if self.stat == PROG_MISMATCH:
                    return str(self.verf) + struct.pack('>III', self.stat,
                                                        self.low, self.high) + self.data
                return str(self.verf) + dpkt.Packet.__str__(self)

        class Reject(dpkt.Packet):
            """Denied reply: version mismatch range or auth error code."""
            __hdr__ = (('stat', 'I', AUTH_ERROR), )
            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                if self.stat == RPC_MISMATCH:
                    self.low, self.high = struct.unpack('>II', self.data[:8])
                    self.data = self.data[8:]
                elif self.stat == AUTH_ERROR:
                    self.why = struct.unpack('>I', self.data[:4])[0]
                    self.data = self.data[4:]
            def __len__(self):
                if self.stat == RPC_MISMATCH:
                    n = 8
                elif self.stat == AUTH_ERROR:
                    n = 4
                else:
                    n = 0
                return 4 + n + len(self.data)
            def __str__(self):
                if self.stat == RPC_MISMATCH:
                    return struct.pack('>III', self.stat, self.low,
                                       self.high) + self.data
                elif self.stat == AUTH_ERROR:
                    return struct.pack('>II', self.stat, self.why) + self.data
                return dpkt.Packet.__str__(self)

        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            if self.stat == MSG_ACCEPTED:
                self.data = self.accept = self.Accept(self.data)
            # BUG FIX: was "self.status", an attribute that never exists
            # (the header field is "stat"), so MSG_DENIED replies raised
            # AttributeError instead of being decoded.
            elif self.stat == MSG_DENIED:
                self.data = self.reject = self.Reject(self.data)

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Decode the body according to the message direction.
        if self.dir == CALL:
            self.data = self.call = self.Call(self.data)
        elif self.dir == REPLY:
            self.data = self.reply = self.Reply(self.data)
def unpack_xdrlist(cls, buf):
    """Unpack an XDR optional-data list of ``cls`` packets from ``buf``.

    Each element is preceded by the 4-byte boolean marker 1; the list
    ends at the 4-byte marker 0. Anything else is malformed.

    :raises dpkt.UnpackError: on a buffer that matches neither marker
    """
    l = []
    while buf:
        if buf.startswith('\x00\x00\x00\x01'):
            p = cls(buf[4:])
            l.append(p)
            # The parsed packet's trailing data is the rest of the list.
            buf = p.data
        elif buf.startswith('\x00\x00\x00\x00'):
            break
        else:
            # Py3-compatible raise form (was the Py2-only
            # "raise dpkt.UnpackError, msg" statement syntax).
            raise dpkt.UnpackError('invalid XDR list')
    return l
def pack_xdrlist(*args):
    """Pack values as an XDR optional-data list.

    Each element is preceded by the 4-byte boolean marker 1, and the list
    is terminated by the 4-byte marker 0, matching what unpack_xdrlist()
    expects.

    BUG FIX: the original used ``'\\x00\\x00\\x00\\x01'.join(...)``, which
    puts the marker *between* elements and omits it before the first one,
    so its output could not be round-tripped through unpack_xdrlist().
    """
    return ''.join('\x00\x00\x00\x01' + str(arg) for arg in args) + \
           '\x00\x00\x00\x00'
| {
"repo_name": "warjiang/dpkt",
"path": "dpkt/rpc.py",
"copies": "17",
"size": "4817",
"license": "bsd-3-clause",
"hash": 8962395186154282000,
"line_mean": 31.9931506849,
"line_max": 78,
"alpha_frac": 0.467718497,
"autogenerated": false,
"ratio": 3.4855282199710564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: $
#
# See http://pyunit.sourceforge.net/pyunit.html
#
import cgi, SubmitDatasetUtils, os, re
from rdflib import URIRef
def setDatasetsBaseDir(base):
    """Initialize module-global configuration for the dataset submission tests.

    Populates host/silo credentials, test file and directory paths, a name
    filter pattern, CGI form fixtures and RDF vocabulary URIs as module
    globals, all derived from *base*.

    :param base: base directory holding the test dataset files
    """
    global DatasetsBaseDir
    DatasetsBaseDir = base
    # Alternate test hosts, kept for reference:
    #global HostName = "zoo-admiral-behav.zoo.ox.ac.uk"
    #global HostName = "zoo-admiral-silk.zoo.ox.ac.uk"
    #global HostName = "zoo-admiral-devel.zoo.ox.ac.uk"
    #global HostName = "zoo-admiral-ibrg.zoo.ox.ac.uk"
    #global hostname = "zakynthos.zoo.ox.ac.uk"
    global HostName, SiloName, Username, Password, FileName
    global FilePath, FileMimeType, ZipMimeType
    global DirName, DirPath
    global DatasetsEmptyDirName, DatasetsEmptyDirPath
    global UpdatedTitle, UpdatedDescription, TestPat
    #HostName = "localhost"
    #HostName = "zoo-admiral-ibrg.zoo.ox.ac.uk"
    #HostName = "datastage-test.bodleian.ox.ac.uk"
    HostName = "10.0.0.140"
    SiloName = "admiral"
    Username = "admiral"
    Password = "admiral"
    FileName = "file1.txt"
    FilePath = DatasetsBaseDir + os.path.sep + FileName
    FileMimeType = "text/plain"
    ZipMimeType = "application/zip"
    DirName = "DatasetsTopDir"
    DirPath = DatasetsBaseDir + os.path.sep + DirName
    DatasetsEmptyDirName = "DatasetsEmptySubDir"
    DatasetsEmptyDirPath = DatasetsBaseDir + os.path.sep + DirName + os.path.sep + DatasetsEmptyDirName
    UpdatedTitle = "Updated Title"
    UpdatedDescription = "Updated Description"
    # pattern for names that do not end in ".zip" (lookbehind applied at $)
    TestPat = re.compile("^.*$(?<!\.zip)")
    global ManifestName, ManifestFilePath
    ManifestName = "manifest.rdf"
    ManifestFilePath = DatasetsBaseDir + os.path.sep + DirName + os.path.sep + ManifestName
    # CGI form fixtures simulating a dataset submission from the browser
    global formdata, updatedformdata
    formdata = \
        { 'datDir'      : cgi.MiniFieldStorage('datDir'      , DirPath)
        , 'datId'       : cgi.MiniFieldStorage('datId'       , "SubmissionToolTest")
        , 'title'       : cgi.MiniFieldStorage('title'       , "Submission tool test title")
        , 'description' : cgi.MiniFieldStorage('description' , "Submission tool test description")
        , 'user'        : cgi.MiniFieldStorage('user'        , Username)
        , 'pass'        : cgi.MiniFieldStorage('pass'        , Password)
        , 'endpointhost': cgi.MiniFieldStorage('endpointhost', HostName)
        , 'basepath'    : cgi.MiniFieldStorage('basepath'    , "/"+SiloName+"/")
        , 'submit'      : cgi.MiniFieldStorage('submit'      , "Submit")
        , 'directory'   : cgi.MiniFieldStorage('directory'   , DirPath)
        }
    # same submission with updated title/description (and no 'directory' field)
    updatedformdata = \
        { 'datDir'      : cgi.MiniFieldStorage('datDir'      , DirPath)
        , 'datId'       : cgi.MiniFieldStorage('datId'       , "SubmissionToolTest")
        , 'title'       : cgi.MiniFieldStorage('title'       , "Submission tool updated test title")
        , 'description' : cgi.MiniFieldStorage('description' , "Submission tool updated test description")
        , 'user'        : cgi.MiniFieldStorage('user'        , Username)
        , 'pass'        : cgi.MiniFieldStorage('pass'        , Password)
        , 'endpointhost': cgi.MiniFieldStorage('endpointhost', HostName)
        , 'basepath'    : cgi.MiniFieldStorage('basepath'    , "/"+SiloName+"/")
        , 'submit'      : cgi.MiniFieldStorage('submit'      , "Submit")
        }
    # convenience values extracted back out of the form fixture
    global DatasetId, DatasetDir, Title, Description, User, ElementValueList, ElementValueUpdatedList
    DatasetId = SubmitDatasetUtils.getFormParam('datId', formdata)
    DatasetDir = SubmitDatasetUtils.getFormParam('datDir', formdata)
    Title = SubmitDatasetUtils.getFormParam('title', formdata)
    Description = SubmitDatasetUtils.getFormParam('description', formdata)
    User = SubmitDatasetUtils.getFormParam('user', formdata)
    ElementValueList = [User, DatasetId, Title, Description]
    ElementValueUpdatedList = [User, DatasetId, UpdatedTitle, UpdatedDescription]
    # RDF vocabulary namespaces and element URIs used to check manifests
    global dcterms, oxds
    dcterms = URIRef("http://purl.org/dc/terms/")
    oxds = URIRef("http://vocab.ox.ac.uk/dataset/schema#")
    global NamespaceDictionary
    NamespaceDictionary = {
        "dcterms" : dcterms ,
        "oxds"    : oxds
        }
    global ElementCreatorUri,ElementIdentifierUri,ElementTitleUri,ElementDescriptionUri,ElementUriList
    ElementCreatorUri = URIRef(dcterms + "creator")
    ElementIdentifierUri = URIRef(dcterms + "identifier")
    ElementTitleUri = URIRef(dcterms + "title")
    ElementDescriptionUri = URIRef(dcterms + "description")
    ElementUriList = [ElementCreatorUri, ElementIdentifierUri, ElementTitleUri, ElementDescriptionUri]
    return
| {
"repo_name": "bhavanaananda/DataStage",
"path": "src/SubmitDatasetHandler/tests/TestConfig.py",
"copies": "1",
"size": "5825",
"license": "mit",
"hash": -3460042218677464000,
"line_mean": 58.4387755102,
"line_max": 136,
"alpha_frac": 0.5210300429,
"autogenerated": false,
"ratio": 4.05076495132128,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5071794994221279,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Snoop file format."""
import sys, time
import dpkt
# RFC 1761
# RFC 1761
SNOOP_MAGIC = 0x736E6F6F70000000L    # the 8 bytes 'snoop\0\0\0' as a big-endian value
SNOOP_VERSION = 2

# datalink types for the file header's 'linktype' field (RFC 1761)
SDL_8023 = 0     # IEEE 802.3
SDL_8024 = 1     # IEEE 802.4 token bus
SDL_8025 = 2     # IEEE 802.5 token ring
SDL_8026 = 3     # IEEE 802.6 MAN
SDL_ETHER = 4    # Ethernet
SDL_HDLC = 5
SDL_CHSYNC = 6   # character synchronous
SDL_IBMCC = 7    # IBM channel-to-channel
SDL_FDDI = 8
SDL_OTHER = 9

# datalink type -> link-layer header length in bytes (for Reader.dloff)
dltoff = { SDL_ETHER:14 }
class PktHdr(dpkt.Packet):
    """snoop packet header."""
    # snoop records are big-endian
    __byte_order__ = '!'
    __hdr__ = (
        ('orig_len', 'I', 0),    # packet length on the wire
        ('incl_len', 'I', 0),    # bytes of packet data stored in this record
        ('rec_len', 'I', 0),     # total record length incl. header and padding
        ('cum_drops', 'I', 0),   # cumulative drops since capture start
        ('ts_sec', 'I', 0),      # timestamp, seconds
        ('ts_usec', 'I', 0),     # timestamp, microseconds
        )
class FileHdr(dpkt.Packet):
    """snoop file header."""
    # the file header is big-endian like the packet records
    __byte_order__ = '!'
    __hdr__ = (
        ('magic', 'Q', SNOOP_MAGIC),     # 'snoop\0\0\0' signature
        ('v', 'I', SNOOP_VERSION),       # format version (2)
        ('linktype', 'I', SDL_ETHER),    # datalink type, one of SDL_*
        )
class Writer(object):
    """Simple snoop dumpfile writer."""

    def __init__(self, fileobj, linktype=SDL_ETHER):
        """Write a snoop file header for *linktype* to *fileobj*."""
        self.__f = fileobj
        fh = FileHdr(linktype=linktype)
        self.__f.write(str(fh))

    def writepkt(self, pkt, ts=None):
        """Append one packet record, padded to a 4-byte boundary.

        :param pkt: packet object or raw bytes (str() is taken)
        :param ts: capture timestamp in seconds; defaults to time.time()
        """
        if ts is None:
            ts = time.time()
        s = str(pkt)
        n = len(s)
        pad_len = 4 - n % 4 if n % 4 else 0
        # ts_usec is the fractional second in microseconds; the original
        # computed int((int(ts) - float(ts)) * 1e6), which negates the
        # fraction and stored a negative microsecond count
        ph = PktHdr(orig_len=n, incl_len=n,
                    rec_len=PktHdr.__hdr_len__ + n + pad_len,
                    ts_sec=int(ts),
                    ts_usec=int((float(ts) - int(ts)) * 1000000.0))
        self.__f.write(str(ph))
        self.__f.write(s + '\0' * pad_len)

    def close(self):
        self.__f.close()
class Reader(object):
    """Simple pypcap-compatible snoop file reader."""

    def __init__(self, fileobj):
        """Parse the snoop file header from *fileobj* and prepare to iterate."""
        self.name = fileobj.name
        self.fd = fileobj.fileno()
        self.__f = fileobj
        buf = self.__f.read(FileHdr.__hdr_len__)
        self.__fh = FileHdr(buf)
        self.__ph = PktHdr
        if self.__fh.magic != SNOOP_MAGIC:
            # call-form raise works on both Python 2 and 3
            # (was the py2-only "raise ValueError, 'invalid snoop header'")
            raise ValueError('invalid snoop header')
        self.dloff = dltoff[self.__fh.linktype]
        self.filter = ''

    def fileno(self):
        return self.fd

    def datalink(self):
        return self.__fh.linktype

    def setfilter(self, value, optimize=1):
        # NOTE(review): filtering is unsupported; this returns (not raises)
        # the exception class, kept as-is for pypcap-API compatibility
        return NotImplementedError

    def readpkts(self):
        return list(self)

    def dispatch(self, cnt, callback, *args):
        """Invoke callback(ts, pkt, *args) for up to cnt packets (all if cnt <= 0)."""
        if cnt > 0:
            # the original called self.next(), which does not exist: Reader
            # is iterable (via the __iter__ generator) but not an iterator,
            # so cnt > 0 always raised AttributeError
            it = iter(self)
            for _ in range(cnt):
                ts, pkt = next(it)
                callback(ts, pkt, *args)
        else:
            for ts, pkt in self:
                callback(ts, pkt, *args)

    def loop(self, callback, *args):
        self.dispatch(0, callback, *args)

    def __iter__(self):
        # rewind past the file header and stream (timestamp, data) pairs
        self.__f.seek(FileHdr.__hdr_len__)
        while 1:
            buf = self.__f.read(PktHdr.__hdr_len__)
            if not buf:
                break
            hdr = self.__ph(buf)
            buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__)
            yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
| {
"repo_name": "warjiang/dpkt",
"path": "dpkt/snoop.py",
"copies": "22",
"size": "2963",
"license": "bsd-3-clause",
"hash": -4804749727115225000,
"line_mean": 24.1101694915,
"line_max": 78,
"alpha_frac": 0.4971312859,
"autogenerated": false,
"ratio": 3.1091290661070303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: $
#
# Test configuration parameters
#
class TestConfig:
    """Static configuration values for the ADMIRAL CIFS/WebDAV file share tests."""
    # Alternate test hosts, kept for reference:
    #hostname = "zoo-admiral-behav.zoo.ox.ac.uk"
    #hostname = "zoo-admiral-silk.zoo.ox.ac.uk"
    #hostname = "zoo-admiral-devel.zoo.ox.ac.uk"
    #hostname = "zoo-admiral-ibrg.zoo.ox.ac.uk"
    #hostname = "zakynthos.zoo.ox.ac.uk"
    HostName = "datastage-test.bodleian.ox.ac.uk"  # host used in the WebDAV base URL
    hostname = "10.0.0.140"                        # host used for CIFS mounts
    cifssharename = "data"                  # exported CIFS share name
    cifsmountpoint = "mountadmiral"         # local mount point for CIFS tests
    webdavmountpoint = "mountadmiralwebdav" # local mount point for WebDAV tests
    webdavbaseurl = "http://"+HostName+"/data/"
    readmefile = "ADMIRAL.README"
    readmetext = "This directory is the root of the ADMIRAL shared file system.\n"
    # test account credentials (name/password pairs)
    userAname = "TestUser1"
    userApass = "user1"
    userBname = "TestUser2"
    userBpass = "user2"
    userDname = "TestUserD"
    userDpass = "userd"
    userRGleadername = "TestLeader"
    userRGleaderpass = "leader"
    collabname = "TestCollab"
    collabpass = "collab"

# End.
| {
"repo_name": "bhavanaananda/DataStage",
"path": "test/FileShare/tests/TestConfig.py",
"copies": "1",
"size": "1074",
"license": "mit",
"hash": -6675034401838946000,
"line_mean": 30.5882352941,
"line_max": 88,
"alpha_frac": 0.5763500931,
"autogenerated": false,
"ratio": 2.871657754010695,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3948007847110695,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Transmission Control Protocol."""
import dpkt
# TCP control flags
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
TCP_PORT_MAX = 65535 # maximum port
TCP_WIN_MAX = 65535 # maximum (unscaled) window
class TCP(dpkt.Packet):
    """Transmission Control Protocol header (RFC 793)."""
    __hdr__ = (
        ('sport', 'H', 0xdead),
        ('dport', 'H', 0),
        ('seq', 'I', 0xdeadbeef),  # 'L' long suffix dropped: same value, py3-compatible
        ('ack', 'I', 0),
        ('off_x2', 'B', ((5 << 4) | 0)),  # data offset (32-bit words) in the high nibble
        ('flags', 'B', TH_SYN),
        ('win', 'H', TCP_WIN_MAX),
        ('sum', 'H', 0),
        ('urp', 'H', 0)
        )
    opts = ''  # raw TCP options bytes (between the fixed header and payload)

    def _get_off(self): return self.off_x2 >> 4
    def _set_off(self, off): self.off_x2 = (off << 4) | (self.off_x2 & 0xf)
    # header length in 32-bit words
    off = property(_get_off, _set_off)

    def __len__(self):
        return self.__hdr_len__ + len(self.opts) + len(self.data)

    def __str__(self):
        return self.pack_hdr() + self.opts + str(self.data)

    def unpack(self, buf):
        """Split *buf* into fixed header, options and payload.

        :raises dpkt.UnpackError: if the data offset is below the minimum
        """
        dpkt.Packet.unpack(self, buf)
        # options length = header length in bytes minus the fixed 20 bytes
        ol = ((self.off_x2 >> 4) << 2) - self.__hdr_len__
        if ol < 0:
            # call-form raise works on both Python 2 and 3
            # (was the py2-only "raise dpkt.UnpackError, '...'")
            raise dpkt.UnpackError('invalid header length')
        self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
        self.data = buf[self.__hdr_len__ + ol:]
# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
TCP_OPT_EOL = 0 # end of option list
TCP_OPT_NOP = 1 # no operation
TCP_OPT_MSS = 2 # maximum segment size
TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
TCP_OPT_SACK = 5 # SACK, RFC 2018
TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
TCP_OPT_POSVC = 10 # partial order service, RFC 1693
TCP_OPT_CC = 11 # connection count, RFC 1644
TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
TCP_OPT_SKEETER = 16 # Skeeter
TCP_OPT_BUBBA = 17 # Bubba
TCP_OPT_TRAILSUM = 18 # trailer checksum
TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
TCP_OPT_SCPS = 20 # SCPS capabilities
TCP_OPT_SNACK = 21 # selective negative acks
TCP_OPT_REC = 22 # record boundaries
TCP_OPT_CORRUPT = 23 # corruption experienced
TCP_OPT_SNAP = 24 # SNAP
TCP_OPT_TCPCOMP = 26 # TCP compression filter
TCP_OPT_MAX = 27
def parse_opts(buf):
    """Parse TCP option buffer into a list of (option, data) tuples.

    Malformed input appends a None marker and stops parsing:
    - a multi-byte option truncated before its length byte, or
    - a length field < 2, which could never consume its own header and
      previously made the loop spin forever.
    """
    opts = []
    while buf:
        o = ord(buf[0])
        if o > TCP_OPT_NOP:
            try:
                l = ord(buf[1])
            except IndexError:
                # was "except ValueError": indexing past the end of a string
                # raises IndexError, so truncated options crashed the caller
                opts.append(None)  # XXX
                break
            if l < 2:
                # kind+length already occupy 2 bytes; anything smaller is
                # invalid and would not advance the buffer
                opts.append(None)  # XXX
                break
            d, buf = buf[2:l], buf[l:]
        else:
            # EOL / NOP are single-byte options with no data
            d, buf = '', buf[1:]
        opts.append((o, d))
    return opts
| {
"repo_name": "af001/dpkt",
"path": "dpkt/tcp.py",
"copies": "17",
"size": "3179",
"license": "bsd-3-clause",
"hash": 2078466677274853400,
"line_mean": 31.4387755102,
"line_max": 75,
"alpha_frac": 0.5769109783,
"autogenerated": false,
"ratio": 2.6804384485666106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.034845464191941025,
"num_lines": 98
} |
# $Id$
"""Trivial File Transfer Protocol."""
import struct
import dpkt
# Opcodes
OP_RRQ = 1 # read request
OP_WRQ = 2 # write request
OP_DATA = 3 # data packet
OP_ACK = 4 # acknowledgment
OP_ERR = 5 # error code
# Error codes
EUNDEF = 0 # not defined
ENOTFOUND = 1 # file not found
EACCESS = 2 # access violation
ENOSPACE = 3 # disk full or allocation exceeded
EBADOP = 4 # illegal TFTP operation
EBADID = 5 # unknown transfer ID
EEXISTS = 6 # file already exists
ENOUSER = 7 # no such user
class TFTP(dpkt.Packet):
    """Trivial File Transfer Protocol message (RFC 1350)."""
    __hdr__ = (('opcode', 'H', 1), )

    def unpack(self, buf):
        """Decode the opcode-specific body into attributes."""
        dpkt.Packet.unpack(self, buf)
        if self.opcode in (OP_RRQ, OP_WRQ):
            # request: NUL-terminated filename and transfer mode
            l = self.data.split('\x00')
            self.filename = l[0]
            self.mode = l[1]
            self.data = ''
        elif self.opcode in (OP_DATA, OP_ACK):
            # struct.unpack returns a tuple; take the scalar so that
            # __str__'s struct.pack('>H', self.block) does not raise
            self.block = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
        elif self.opcode == OP_ERR:
            # same tuple-vs-scalar fix for the error code
            self.errcode = struct.unpack('>H', self.data[:2])[0]
            self.errmsg = self.data[2:].split('\x00')[0]
            self.data = ''

    def __len__(self):
        return len(str(self))

    def __str__(self):
        if self.opcode in (OP_RRQ, OP_WRQ):
            s = '%s\x00%s\x00' % (self.filename, self.mode)
        elif self.opcode in (OP_DATA, OP_ACK):
            s = struct.pack('>H', self.block)
        elif self.opcode == OP_ERR:
            s = struct.pack('>H', self.errcode) + ('%s\x00' % self.errmsg)
        else:
            s = ''
        return self.pack_hdr() + s + self.data
| {
"repo_name": "xldrx/dpkt",
"path": "dpkt/tftp.py",
"copies": "16",
"size": "1676",
"license": "bsd-3-clause",
"hash": -4751037487932629000,
"line_mean": 29.4727272727,
"line_max": 74,
"alpha_frac": 0.5232696897,
"autogenerated": false,
"ratio": 3.1863117870722433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011383082045086299,
"num_lines": 55
} |
# $Id$
#
"""unit testing code for 3D stuff
"""
from rdkit import RDConfig
import unittest, os
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import TorsionFingerprints
class TestCase(unittest.TestCase):
  """Tests for conformer RMS helpers and torsion fingerprint deviation (TFD)."""

  def testConformerRMS(self):
    """GetConformerRMS: prealigned flag, agreement with GetBestRMS, self-RMS of 0."""
    m1 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
    cids = AllChem.EmbedMultipleConfs(m1, 2)
    m2 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
    m2.AddConformer(m1.GetConformer(id=1))
    # test that the prealigned flag is working
    rms1 = AllChem.GetConformerRMS(m1, 0, 1, prealigned=True)
    rms2 = AllChem.GetConformerRMS(m1, 0, 1, prealigned=False)
    self.assertTrue((rms1 > rms2))
    # test that RMS is the same as calculated by AlignMol()
    self.assertAlmostEqual(rms2, AllChem.GetBestRMS(m2, m1, 1, 0), 3)
    # the RMS with itself must be zero
    rms2 = AllChem.GetConformerRMS(m1, 0, 0, prealigned=True)
    self.assertAlmostEqual(rms2, 0.0, 4)

  def testConformerRMSMatrix(self):
    """GetConformerRMSMatrix: size, element order, and the prealigned option."""
    m1 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
    cids = AllChem.EmbedMultipleConfs(m1, 3)
    m2 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
    m2.AddConformer(m1.GetConformer(id=0))
    # test that the RMS matrix has the correct size
    rmat = AllChem.GetConformerRMSMatrix(m1)
    self.assertEqual(len(rmat), 3)
    # test that the elements are in the right order
    self.assertAlmostEqual(rmat[0], AllChem.GetBestRMS(m1, m2, 1, 0), 3)
    self.assertAlmostEqual(rmat[1], AllChem.GetBestRMS(m1, m2, 2, 0), 3)
    # test the prealigned option
    rmat2 = AllChem.GetConformerRMSMatrix(m1, prealigned=True)
    self.assertAlmostEqual(rmat[0], rmat2[0])

  def testTorsionFingerprints(self):
    """Torsion lists, weights, angles, TFD values and the wrapper functions."""
    # we use the xray structure from the paper (JCIM, 52, 1499, 2012): 1DWD
    refFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '1DWD_ligand.pdb')
    ref = Chem.MolFromSmiles(
      'NC(=[NH2+])c1ccc(C[C@@H](NC(=O)CNS(=O)(=O)c2ccc3ccccc3c2)C(=O)N2CCCCC2)cc1')
    mol = Chem.MolFromPDBFile(refFile)
    mol = AllChem.AssignBondOrdersFromTemplate(ref, mol)
    # the torsion lists
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol)
    self.assertEqual(len(tors_list), 11)
    self.assertEqual(len(tors_list_rings), 4)
    self.assertAlmostEqual(tors_list[-1][1], 180.0, 4)
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, maxDev='spec')
    self.assertAlmostEqual(tors_list[-1][1], 90.0, 4)
    self.assertRaises(ValueError, TorsionFingerprints.CalculateTorsionLists, mol, maxDev='test')
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, symmRadius=0)
    self.assertEqual(len(tors_list[0][0]), 2)
    # the weights
    weights = TorsionFingerprints.CalculateTorsionWeights(mol)
    self.assertAlmostEqual(weights[4], 1.0)
    self.assertEqual(len(weights), len(tors_list + tors_list_rings))
    weights = TorsionFingerprints.CalculateTorsionWeights(mol, 15, 14)
    self.assertAlmostEqual(weights[3], 1.0)
    self.assertRaises(ValueError, TorsionFingerprints.CalculateTorsionWeights, mol, 15, 3)
    # the torsion angles
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol)
    torsions = TorsionFingerprints.CalculateTorsionAngles(mol, tors_list, tors_list_rings)
    self.assertEqual(len(weights), len(torsions))
    self.assertAlmostEqual(torsions[2][0][0], 232.5346, 4)
    # the torsion fingerprint deviation
    tfd = TorsionFingerprints.CalculateTFD(torsions, torsions)
    self.assertAlmostEqual(tfd, 0.0)
    refFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '1PPC_ligand.pdb')
    mol2 = Chem.MolFromPDBFile(refFile)
    mol2 = AllChem.AssignBondOrdersFromTemplate(ref, mol2)
    torsions2 = TorsionFingerprints.CalculateTorsionAngles(mol2, tors_list, tors_list_rings)
    weights = TorsionFingerprints.CalculateTorsionWeights(mol)
    tfd = TorsionFingerprints.CalculateTFD(torsions, torsions2, weights=weights)
    self.assertAlmostEqual(tfd, 0.0691, 4)
    tfd = TorsionFingerprints.CalculateTFD(torsions, torsions2)
    self.assertAlmostEqual(tfd, 0.1115, 4)
    # the wrapper functions
    tfd = TorsionFingerprints.GetTFDBetweenMolecules(mol, mol2)
    self.assertAlmostEqual(tfd, 0.0691, 4)
    mol.AddConformer(mol2.GetConformer(), assignId=True)
    mol.AddConformer(mol2.GetConformer(), assignId=True)
    tfd = TorsionFingerprints.GetTFDBetweenConformers(mol, confIds1=[0], confIds2=[1, 2])
    self.assertEqual(len(tfd), 2)
    self.assertAlmostEqual(tfd[0], 0.0691, 4)
    tfdmat = TorsionFingerprints.GetTFDMatrix(mol)
    self.assertEqual(len(tfdmat), 3)

  def testTorsionFingerprintsAtomReordering(self):
    """TFD must be invariant to the atom ordering of the input files."""
    # we use the xray structure from the paper (JCIM, 52, 1499, 2012): 1DWD
    refFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '1DWD_ligand.pdb')
    ref = Chem.MolFromSmiles(
      'NC(=[NH2+])c1ccc(C[C@@H](NC(=O)CNS(=O)(=O)c2ccc3ccccc3c2)C(=O)N2CCCCC2)cc1')
    mol1 = Chem.MolFromPDBFile(refFile)
    mol1 = AllChem.AssignBondOrdersFromTemplate(ref, mol1)
    refFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '1DWD_ligand_reordered.pdb')
    mol2 = Chem.MolFromPDBFile(refFile)
    mol2 = AllChem.AssignBondOrdersFromTemplate(ref, mol2)
    tfd = TorsionFingerprints.GetTFDBetweenMolecules(mol1, mol2)
    self.assertEqual(tfd, 0.0)

  def testTorsionFingerprintsColinearBonds(self):
    """Handling of single bonds adjacent to (terminal) triple bonds."""
    # test that single bonds adjacent to triple bonds are ignored
    mol = Chem.MolFromSmiles('CCC#CCC')
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol,
                                                                           ignoreColinearBonds=True)
    self.assertEqual(len(tors_list), 0)
    weights = TorsionFingerprints.CalculateTorsionWeights(mol, ignoreColinearBonds=True)
    self.assertEqual(len(weights), 0)
    # test that they are not ignored, but alternative atoms searched for
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(
      mol, ignoreColinearBonds=False)
    self.assertEqual(len(tors_list), 1)
    self.assertEqual(tors_list[0][0][0], (0, 1, 4, 5))
    weights = TorsionFingerprints.CalculateTorsionWeights(mol, ignoreColinearBonds=False)
    self.assertEqual(len(weights), 1)
    # test that single bonds adjacent to terminal triple bonds are always ignored
    mol = Chem.MolFromSmiles('C#CCC')
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol,
                                                                           ignoreColinearBonds=True)
    self.assertEqual(len(tors_list), 0)
    tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(
      mol, ignoreColinearBonds=False)
    self.assertEqual(len(tors_list), 0)
if __name__ == '__main__':  # allow running this test module directly
  unittest.main()
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/UnitTestMol3D.py",
"copies": "1",
"size": "6852",
"license": "bsd-3-clause",
"hash": 4290156925621778400,
"line_mean": 43.2064516129,
"line_max": 100,
"alpha_frac": 0.7129305312,
"autogenerated": false,
"ratio": 2.9194716659565403,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9118082563020602,
"avg_score": 0.002863926827187478,
"num_lines": 155
} |
# $Id$
#
# Usage:
# <Directory /where/ever>
# PythonOutputFilter gzipfilter
# SetOutputFilter gzipfilter
# </Directory>
from mod_python import apache
import os
import sys
import gzip
import cStringIO
from mod_python import apache
def compress(s):
    """Gzip-compress the byte string *s* and return the compressed payload."""
    # io.BytesIO works on both Python 2.6+ and Python 3; cStringIO (used
    # before) no longer exists on Python 3
    import io
    sio = io.BytesIO()
    f = gzip.GzipFile(mode='wb', fileobj=sio)
    f.write(s)
    f.close()
    return sio.getvalue()
def accepts_gzip(req):
    """Return a true value if the client's Accept-Encoding header lists gzip.

    :param req: mod_python request; headers_in is its header table
    """
    # dict.has_key() is Python-2-only; the `in` operator works everywhere
    if 'accept-encoding' in req.headers_in:
        encodings = req.headers_in['accept-encoding']
        return (encodings.find("gzip") != -1)
    return 0
###
### main filter function
###
def outputfilter(filter):
    """mod_python output filter that gzip-compresses the response body."""
    if (filter.req.main or
        not accepts_gzip(filter.req)):
        # Presence of filter.req.main tells us that
        # we are in a subrequest. We don't want to compress
        # the data more than once, so we pass_on() in
        # subrequests. We also pass_on() if the client
        # does not accept gzip encoding, of course.
        filter.pass_on()
    else:
        if not filter.req.sent_bodyct:
            # the above test allows us to set the encoding once
            # rather than every time the filter is invoked
            filter.req.headers_out['content-encoding'] = 'gzip'
        # loop through content, compressing
        # NOTE(review): each chunk becomes its own gzip member; the
        # concatenation is still a valid multi-member gzip stream
        s = filter.read()
        while s:
            s = compress(s)
            filter.write(s)
            s = filter.read()
        if s is None:
            # this means we received an EOS, so we pass it on
            # by closing the filter
            filter.close()
| {
"repo_name": "Distrotech/mod_python",
"path": "examples/gzipfilter.py",
"copies": "1",
"size": "1620",
"license": "apache-2.0",
"hash": 2685012090594292700,
"line_mean": 21.5,
"line_max": 63,
"alpha_frac": 0.5827160494,
"autogenerated": false,
"ratio": 3.9416058394160585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024321888816059,
"avg_score": null,
"num_lines": null
} |
# $Id: $
#
# Utility functions for testing HTTP
#
import os
import sys
import httplib
import urllib2
from TestConfig import TestConfig
# Execute a specified HTTP method using a supplied urllib2 opener object,
# following a single HTTP 301 redirection response
def do_cifsMount(areaName, userName, userPass):
    """Mount the test CIFS share area via mount.cifs.

    :param areaName: share sub-area to mount
    :param userName: CIFS user name
    :param userPass: CIFS password
    :returns: os.system status word
    """
    #print "do_cifsMount: "+areaName+", "+userName+", "+userPass+", "+TestConfig.hostname
    mountcommand = ( '/sbin/mount.cifs //%(host)s/%(share)s/%(area)s %(mountpt)s -o rw,user=%(user)s,password=%(pass)s,nounix,forcedirectio' %
        { 'host': TestConfig.hostname
        , 'share': TestConfig.cifssharename
        , 'area': areaName
        , 'user': userName
        , 'mountpt': TestConfig.cifsmountpoint
        , 'pass': userPass
        } )
    # parenthesized print is valid on both Python 2 and 3
    print(mountcommand)
    status = os.system(mountcommand)
    # os.system returns a 16-bit number, whose low byte is the signal number that killed
    # the process, and whose high byte is the exit status (if the signal number is zero);
    # the high bit of the low byte is set if a core file was produced
    return status
def do_cifsUnmount():
    """Unmount the test CIFS share mounted by do_cifsMount."""
    os.system('/sbin/umount.cifs ' + TestConfig.cifsmountpoint)
    return
def do_cifsCreateFile(fileName, createFileContent):
    """Create *fileName* on the mounted share with the given content.

    :returns: the content written (for chaining in test assertions)
    """
    ###print "do_cifsCreateFile: "+TestConfig.cifsmountpoint + '/' + fileName
    # 'with' guarantees the handle is closed even if write() raises;
    # open() itself raises on failure, so the old "assert f" was dead code
    with open(TestConfig.cifsmountpoint + '/' + fileName, 'w+') as f:
        f.write(createFileContent)
    return createFileContent
def do_cifsReadFile(fileName):
    """Read and return the full contents of *fileName* from the mounted share."""
    # 'with' guarantees the handle is closed even if read() raises
    with open(TestConfig.cifsmountpoint + '/' + fileName, 'r') as f:
        readFileContent = f.read()
    return readFileContent
def do_cifsUpdateFile(fileName, updateFileContent):
    """Append *updateFileContent* to *fileName* on the mounted share.

    :returns: the content appended
    """
    # 'with' guarantees the handle is closed even if write() raises
    with open(TestConfig.cifsmountpoint + '/' + fileName, 'a+') as f:
        f.write(updateFileContent)
    return updateFileContent
def do_cifsDeleteFile(fileName):
    """Delete *fileName* from the mounted share and verify it is gone.

    Returns an (errno, message) tuple from the last failed OS call: after a
    successful delete this is the ENOENT result of the verification stat;
    (0, "Success") survives only if no OS call failed at all.
    """
    deleteMessage = (0,"Success")
    # Test and delete file
    try:
        s = os.stat(TestConfig.cifsmountpoint + '/'+ fileName)
    except OSError as e:
        # file did not exist in the first place
        #print repr(e)
        deleteMessage = (e.errno,str(e))
#    except:
#        assert (False), "File "+ fileName+" not found or other stat error"
#        deleteMessage = str(e)
    else:
        try:
            os.remove(TestConfig.cifsmountpoint + '/'+ fileName)
        except OSError as e:
            deleteMessage = (e.errno,str(e))
        else:
            try:
                # verification: stat must now fail; a success means the
                # delete silently did not happen
                s = os.stat(TestConfig.cifsmountpoint + '/'+ fileName)
                assert (False), "File "+ fileName+" not deleted"
            except OSError as e:
                # expected: the file is gone
                deleteMessage = (e.errno,str(e))
#            except:
#                pass
    return deleteMessage
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/FileShare/tests/TestCifsUtils.py",
"copies": "2",
"size": "2808",
"license": "mit",
"hash": -5463849125869422000,
"line_mean": 32.4285714286,
"line_max": 142,
"alpha_frac": 0.6160968661,
"autogenerated": false,
"ratio": 3.8100407055630936,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5426137571663093,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""
Utility functions
"""
# Functions added here should really be as portable as possible
# and generally useful.
import os
import sys
import stat
import time
import logging
LOG = logging.getLogger(__name__)
def ensure_dir(dir_path):
    """
    Ensure a directory path exists (by creating it if it doesn't).

    :param dir_path: directory path to create
    :returns: True if the directory was created, False if it already existed
    :raises OSError: if the directory could not be created
    """
    # local logger for consistency with protected_path() below
    log = logging.getLogger(__name__)
    if not os.path.exists(dir_path):
        try:
            os.makedirs(dir_path)
            log.debug("created directory %s", dir_path)
            return True
        except OSError as e:
            # py2-only "except OSError, e" replaced with the portable form
            # FIX ME: Need error codes/etc so this will exit(<code>) or raise
            # an appropriate holland exception
            log.error("os.makedirs(%s): %s", dir_path, e)
            raise
    return False
def protected_path(path):
    """
    Take a path, and if the file/dir exists pass back a protected path
    (suffixed with the first free numeric extension).

    Returns:
        string = new file path

    Example:
        >>> mypath = '/tmp'
        >>> new_path = helpers.protected_path(mypath)
        >>> new_path
        '/tmp.0'
    """
    # (removed an unused local logger that was never written to)
    safety = 0
    safe_path = path
    while os.path.exists(safe_path):
        safe_path = "%s.%s" % (path, safety)
        safety += 1
    return safe_path
def format_bytes(bytes, precision=2):
    """
    Format an integer number of bytes to a human
    readable string.

    :param bytes: non-negative byte count
    :param precision: decimal places in the formatted value
    :raises ArithmeticError: if bytes is negative
    """
    import math
    if bytes < 0:
        raise ArithmeticError("Only Positive Integers Allowed")
    if bytes != 0:
        exponent = int(math.floor(math.log(bytes, 1024)))
    else:
        exponent = 0
    # float() guards against integer floor-division on Python 2 now that
    # the exponent (and thus 1024 ** exponent) is an int
    return "%.*f%s" % (
        precision,
        float(bytes) / (1024 ** exponent),
        ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'][exponent]
    )
def normpath(path):
    """Return *path* normalized and converted to an absolute path."""
    return os.path.abspath(os.path.normpath(path))
def relpath(path, start=os.curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    # absolute components of both paths, empty pieces dropped
    start_parts = [piece for piece in os.path.abspath(start).split(os.sep) if piece]
    path_parts = [piece for piece in os.path.abspath(path).split(os.sep) if piece]
    # how many leading components the two paths share
    shared = len(os.path.commonprefix([start_parts, path_parts]))
    # climb out of the unshared part of start, then descend into path
    pieces = [os.pardir] * (len(start_parts) - shared) + path_parts[shared:]
    if not pieces:
        return os.curdir
    return os.path.join(*pieces)
def getmount(path):
    """Return the mount point of a path

    :param path: path to find the mountpoint for

    :returns: str mounpoint path
    """
    current = os.path.realpath(path)
    # walk upward until a mount point (or the filesystem root) is reached
    while current != os.path.sep:
        if os.path.ismount(current):
            return current
        current = os.path.abspath(os.path.join(current, os.pardir))
    return current
def disk_capacity(target_path):
    """Find the total capacity of the filesystem that target_path is on

    :returns: integer number of bytes
    """
    st = os.statvfs(getmount(target_path))
    return st.f_frsize * st.f_blocks
def disk_free(target_path):
    """
    Find the amount of space free on a given path

    Path must exist.
    This method does not take into account quotas

    returns the size in bytes potentially available
    to a non privileged user
    """
    st = os.statvfs(getmount(target_path))
    return st.f_frsize * st.f_bavail
def directory_size(path):
    """
    Find the size of all files in a directory, recursively

    Returns the size in bytes on success

    :param path: directory to walk
    :returns: int total size of all regular files under path
    """
    total = 0
    for root, dirs, files in os.walk(path):
        for name in files:
            try:
                total += os.path.getsize(os.path.join(root, name))
            except OSError:
                # py2-only "except OSError, exc" replaced; a file may vanish
                # between walk() and stat(), so failures are simply skipped
                pass
    return total
| {
"repo_name": "m00dawg/holland",
"path": "holland/core/util/path.py",
"copies": "1",
"size": "3916",
"license": "bsd-3-clause",
"hash": 9066763566823183000,
"line_mean": 23.6289308176,
"line_max": 77,
"alpha_frac": 0.598825332,
"autogenerated": false,
"ratio": 3.7690086621751684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4867833994175168,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Virtual Router Redundancy Protocol."""
import dpkt
class VRRP(dpkt.Packet):
    """Virtual Router Redundancy Protocol advertisement (RFC 3768)."""
    __hdr__ = (
        ('vtype', 'B', 0x21),    # version (high nibble) / type (low nibble)
        ('vrid', 'B', 0),
        ('priority', 'B', 0),
        ('count', 'B', 0),       # number of IPv4 addresses that follow
        ('atype', 'B', 0),
        ('advtime', 'B', 0),
        ('sum', 'H', 0),
        )
    addrs = ()
    auth = ''

    def _get_v(self):
        return self.vtype >> 4
    def _set_v(self, v):
        # version is the HIGH nibble: clear it and keep the type nibble.
        # (was "(self.vtype & ~0xf) | (v << 4)", which cleared the type
        # nibble and OR-ed the new version into the stale old one)
        self.vtype = (self.vtype & 0x0f) | (v << 4)
    v = property(_get_v, _set_v)

    def _get_type(self):
        return self.vtype & 0xf
    def _set_type(self, v):
        # type is the LOW nibble: clear it and keep the version nibble.
        # (was "(self.vtype & ~0xf0) | (v & 0xf)", which kept the old type bits)
        self.vtype = (self.vtype & 0xf0) | (v & 0xf)
    type = property(_get_type, _set_type)

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        l = []
        for off in range(0, 4 * self.count, 4):
            l.append(self.data[off:off+4])
        self.addrs = l
        # slice relative to the whole address block so count == 0 works
        # (the old "self.data[off+4:]" raised NameError when count was 0)
        self.auth = self.data[4 * self.count:]
        self.data = ''

    def __len__(self):
        return self.__hdr_len__ + (4 * self.count) + len(self.auth)

    def __str__(self):
        data = ''.join(self.addrs) + self.auth
        if not self.sum:
            self.sum = dpkt.in_cksum(self.pack_hdr() + data)
        return self.pack_hdr() + data
| {
"repo_name": "warjiang/dpkt",
"path": "dpkt/vrrp.py",
"copies": "17",
"size": "1230",
"license": "bsd-3-clause",
"hash": 6549160418411411000,
"line_mean": 25.170212766,
"line_max": 67,
"alpha_frac": 0.4707317073,
"autogenerated": false,
"ratio": 2.956730769230769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009102882932670166,
"num_lines": 47
} |
# $Id$
"""Web Template system"""
import sys, os, re
import jon.cgi as cgi
_code_cache = {}
_replace_validchunk = re.compile("^[A-Za-z0-9_]+$")
def replace(wt, template, namespace):
    """Substitute variables in template and output with jcgi.write

    Variables are indicated by surrounding them with "$$".
    If the attribute is callable it is invoked and its result used.
    The default encoding is to call jcgi.html_encode
    unless the variable is prefixed with "%" in which case it is jcgi.url_encode
    or the variable is prefixed with "=" in which case no encoding is used.
    If the variable is empty (i.e. the template contains "$$$$") then a literal
    "$$" is output.
    """
    html = 0
    # split() alternates literal text and variable names; the first chunk
    # is always literal, so a flag is toggled each iteration
    for chunk in template.split("$$"):
        html = not html
        if html:
            wt.req.write(chunk)
        else:
            if chunk == "":
                # "$$$$" in the template emits a literal "$$"
                wt.req.write("$$")
            else:
                encode = cgi.html_encode
                if chunk[0] == "=":
                    encode = lambda x: x  # "=name": raw output, no escaping
                    chunk = chunk[1:]
                elif chunk[0] == "%":
                    encode = cgi.url_encode  # "%name": URL-escaped output
                    chunk = chunk[1:]
                if not _replace_validchunk.match(chunk):
                    raise ValueError("'%s' is not a valid identifier" % chunk)
                if callable(getattr(namespace, chunk)):
                    out = getattr(namespace, chunk)()
                else:
                    out = getattr(namespace, chunk)
                # unicode values pass straight through; everything else
                # is stringified before encoding
                if not isinstance(out, unicode):
                    out = str(out)
                wt.req.write(encode(out))
class TemplateCode(object):
    """Base class for code-behind objects bound to a wt template block."""
    # when true, main() receives a file-like object instead of a string
    template_as_file = 0
    def __init__(self, outer, wt=None):
        # outer is the enclosing TemplateCode instance (None at top level)
        self.outer = outer
        if wt:
            self.wt = wt
        else:
            self.wt = self.outer.wt
        self.req = self.wt.req
    def process(self, template, selected=None):
        """Process nested wt block markers of *template* against this object."""
        process(self.wt, template, self, selected)
    def main(self, template):
        """Entry point invoked with this block's template text."""
        self.process(template)
class GlobalTemplate(TemplateCode):
    """Top-level template object that wraps the page in a site-wide template."""
    def main(self, template):
        # remember the original page template; the _page block substitutes
        # it back into the wrapper later
        self._pageTemplate = template
        if hasattr(self, "get_template"):
            # subclass supplies the wrapper template directly
            self.process(self.get_template())
        else:
            if self.template_as_file:
                self.process(open(self.template_name(), "rb"))
            else:
                encoding = self.wt.get_template_encoding()
                if encoding is None:
                    self.process(open(self.template_name(), "rb").read())
                else:
                    # decode the raw template bytes with the configured encoding
                    self.process(unicode(open(self.template_name(), "rb").read(),
                                         encoding))
    def template_name(self):
        """Filename of the site-wide wrapper template."""
        return self.wt.etc + "/template.html"
class page(TemplateCode):
    """Default page block; applications subclass this for per-page behaviour."""
    pass
class _page(TemplateCode):
    """Internal block that substitutes the original page template."""
    def main(self, template):
        # Contents of the page block are ignored, the original template
        # is substituted instead
        obj = self.outer.page(self.outer)
        if obj.template_as_file:
            # was "import StringIO as cStringIO", which binds the name
            # cStringIO and left the following StringIO reference unbound
            # (NameError); use the same aliasing as process()
            import cStringIO as StringIO
            obj.main(StringIO.StringIO(self.outer._pageTemplate))
        else:
            obj.main(self.outer._pageTemplate)
_process_sb = re.compile("<!--wt:([A-Za-z0-9_]+)(/)?-->")
def process(wt, template, namespace, selected=None):
    """Expand every "<!--wt:name-->...<!--wt:/name-->" block in *template*.

    Text between blocks is passed through replace(); each block is
    rendered by instantiating `namespace.<name>` and calling its main().
    `selected` restricts which blocks are rendered: None renders all,
    otherwise a single block name or a list/tuple of names.
    Raises ValueError when a block has no matching end marker.
    """
    pos = 0
    while 1:
        start = _process_sb.search(template, pos)
        if not start:
            break
        name = start.group(1)
        if start.lastindex == 2: # shorttag
            end = start.end()
            endmark = ""
        else:
            endmark = "<!--wt:/%s-->" % name
            end = template.find(endmark, start.end())
            if end == -1:
                raise ValueError("No end block for %s" % name)
        replace(wt, template[pos:start.start()], namespace)
        # "_null" blocks are always skipped; otherwise honour the
        # `selected` filter. (Idiom fix: `is None` instead of `== None`,
        # isinstance() instead of type() comparisons.)
        if name != "_null" and (selected is None or selected == name or
                (isinstance(selected, (list, tuple)) and name in selected)):
            obj = getattr(namespace, name)(namespace, wt)
            if obj.template_as_file:
                import cStringIO as StringIO
                obj.main(StringIO.StringIO(template[start.end():end]))
            else:
                obj.main(template[start.end():end])
            del obj
        pos = end + len(endmark)
    replace(wt, template[pos:], namespace)
class Handler(cgi.Handler):
    """CGI handler that compiles a page's code file and renders its template."""

    # When true, compiled page namespaces are kept in _code_cache keyed by
    # the code filename, so the file is compiled/exec'd only once.
    cache_code = 0

    def _get_template(self):
        """Locate the template filename from the CGI environment.

        Tries WT_TEMPLATE_FILENAME with 0..3 "REDIRECT_" prefixes (Apache
        prepends one per internal redirect). Raises if none is set.
        """
        for i in range(4):
            template = self.req.environ.get("REDIRECT_" * i + "WT_TEMPLATE_FILENAME")
            if template:
                return template
        raise Exception("Couldn't determine template filename")

    def _get_etc(self):
        """Return the "etc" directory: sibling of the document root."""
        sp = os.path.split(self.req.environ["DOCUMENT_ROOT"])
        if sp[1] == "":
            # DOCUMENT_ROOT had a trailing slash; split again.
            sp = os.path.split(sp[0])
        return sp[0] + "/etc"

    def _get_code(self):
        """Return the filename of the page's Python code."""
        code = self.req.environ.get("PATH_TRANSLATED")
        if code is None:
            code = self.req.environ["SCRIPT_FILENAME"]
        return code

    def get_template_encoding(self):
        """Override to return the template's character encoding (None = bytes)."""
        return None

    def pre_load(self):
        """Hook called before the template/code are located."""
        pass

    def pre_request(self, obj):
        """Hook called with the page object before rendering."""
        pass

    def post_request(self, obj):
        """Hook called with the page object after rendering."""
        pass

    def process(self, req):
        """Handle one request: compile (or fetch cached) page code, then
        render the template through its `main` object."""
        self.req = req
        self.etc = self._get_etc()
        self.pre_load()
        self.template = self._get_template()
        codefname = self._get_code()
        try:
            namespace = _code_cache[codefname]
        except KeyError:
            # Fresh namespace exposing this module as "wt" to page code.
            namespace = { "wt": sys.modules[__name__] }
            code = compile(open(codefname, "r").read(), codefname, "exec")
            exec code in namespace
            del code
            if self.cache_code:
                _code_cache[codefname] = namespace
        obj = namespace["main"](None, self)
        self.pre_request(obj)
        if obj.template_as_file:
            obj.main(open(self.template, "rb"))
        else:
            encoding = self.get_template_encoding()
            if encoding is None:
                obj.main(open(self.template, "rb").read())
            else:
                obj.main(unicode(open(self.template, "rb").read(), encoding))
        self.post_request(obj)
# Handler variant with the debug behaviour mixed in from jon.cgi.
class DebugHandler(cgi.DebugHandlerMixIn, Handler):
    pass
| {
"repo_name": "jribbens/jonpy",
"path": "jon/wt/__init__.py",
"copies": "1",
"size": "5588",
"license": "mit",
"hash": -8935102069301511000,
"line_mean": 27.2222222222,
"line_max": 80,
"alpha_frac": 0.6088045812,
"autogenerated": false,
"ratio": 3.5912596401028276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47000642213028276,
"avg_score": null,
"num_lines": null
} |
# $Id$
# workaround to use adodbapi and other modules not
# included with DeVIDE
#import sys
#sys.path = sys.path + ['', 'H:\\bld\\bin\\Wrapping\\CSwig\\Python\\RelWithDebInfo', 'H:\\opt\\python23\\Lib\\site-packages\\adodbapi', 'C:\\WINNT\\System32\\python23.zip', 'H:\\', 'H:\\opt\\python23\\DLLs', 'H:\\opt\\python23\\lib', 'H:\\opt\\python23\\lib\\plat-win', 'H:\\opt\\python23\\lib\\lib-tk', 'H:\\opt\\python23', 'H:\\opt\\python23\\Lib\\site-packages\\win32', 'H:\\opt\\python23\\Lib\\site-packages\\win32\\lib', 'H:\\opt\\python23\\Lib\\site-packages\\Pythonwin', 'H:\\opt\\python23\\lib\\site-packages\\adodbapi']
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import fixitk as itk
import wx
from datetime import date
from adodbapi import *
from modules.Insight.typeModules.transformStackClass import transformStackClass
class reconstructionRDR(ScriptedConfigModuleMixin, ModuleBase):
"""Fetches a transform stack from an MS Access database
$Revision: 1.1 $
"""
def __init__(self, module_manager):
# call the parent constructor
ModuleBase.__init__(self, module_manager)
# this is our output
self._transformStack = transformStackClass( self )
# module_utils.setup_vtk_object_progress(self, self._reader,
# 'Fetching transformStack from database...')
self._config.databaseFile = ""
self._config.reconstructionName = "--- select a value ---"
configList = [
('Database:', 'databaseFile', 'base:str', 'filebrowser',
'Database from which to fetch a reconstruction''s transformStack.',
{'fileMode' : wx.OPEN,
'fileMask' :
'MDB files (*.mdb)|*.mdb|All files (*.*)|*.*'}),
('Reconstruction:', 'reconstructionName', 'base:str', 'choice',
'Specific reconstruction to use.', ("--- select a value ---",) ) ]
ScriptedConfigModuleMixin.__init__(self, configList)
self._viewFrame = self._createViewFrame({'Module (self)' : self})
self.config_to_logic()
self.syncViewWithLogic()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
# for input_idx in range(len(self.get_input_descriptions())):
# self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
# del self._reader
def get_input_descriptions(self):
return ()
def set_input(self, idx, inputStream):
raise Exception
def get_output_descriptions(self):
return ('2D Transform Stack',)
def get_output(self, idx):
return self._transformStack
def logic_to_config(self):
#self._config.filePrefix = self._reader.GetFilePrefix()
#self._config.filePattern = self._reader.GetFilePattern()
#self._config.firstSlice = self._reader.GetFileNameSliceOffset()
#e = self._reader.GetDataExtent()
#self._config.lastSlice = self._config.firstSlice + e[5] - e[4]
#self._config.spacing = self._reader.GetDataSpacing()
#self._config.fileLowerLeft = bool(self._reader.GetFileLowerLeft())
pass
def config_to_logic(self):
# get choice widget binding
choiceBind = self._widgets[self._configList[1][0:5]]
# abuse
choiceBind.Clear()
choiceBind.Append( "--- select a value ---" )
reconstructionName = "--- select a value ---"
# attempt to append a list at once
if len( self._config.databaseFile ):
for e in self._getAvailableReconstructions():
choiceBind.Append( e )
def execute_module(self):
if len( self._config.databaseFile ):
self._fetchTransformStack()
def _getAvailableReconstructions( self ):
# connect to the database
connectString = r"Provider=Microsoft.Jet.OLEDB.4.0;Data Source=" + self._config.databaseFile + ";"
try:
connection = adodbapi.connect( connectString )
except adodbapi.Error:
raise IOError, "Could not open database."
cursor = connection.cursor()
cursor.execute( "SELECT name FROM reconstruction ORDER BY run_date" )
reconstruction_list = cursor.fetchall()
cursor.close()
connection.close()
# cast list of 1-tuples to list
return [ e[ 0 ] for e in reconstruction_list ]
def _fetchTransformStack( self ):
# connect to the database
connectString = r"Provider=Microsoft.Jet.OLEDB.4.0;Data Source=" + self._config.databaseFile + ";"
try:
connection = adodbapi.connect( connectString )
except adodbapi.Error:
raise IOError, "Could not open database."
# try to figure out reconstruction ID from recontruction name
print "NAME: " + self._config.reconstructionName
cursor = connection.cursor()
cursor.execute( "SELECT id FROM reconstruction WHERE name=?", [ self._config.reconstructionName ] )
row = cursor.fetchone()
if row == None:
raise IOError, "Reconstruction not found."
print "reconstructionID: %i" % row[ 0 ]
# build transformStack
cursor.execute( "SELECT angle, centerx, centery, tx, ty FROM centeredRigid2DTransform WHERE reconstruction_id=? ORDER BY seq_nr", [ row[ 0 ] ] )
for row in cursor.fetchall():
trfm = itk.itkCenteredRigid2DTransform_New()
params = trfm.GetParameters()
for i in range(0,5):
params.SetElement( i, row[ i ] )
trfm.SetParameters( params )
self._transformStack.append( trfm )
cursor.close()
connection.close()
| {
"repo_name": "codester2/devide",
"path": "modules/user/reconstructionRDR.py",
"copies": "7",
"size": "5329",
"license": "bsd-3-clause",
"hash": -2715287726698264000,
"line_mean": 32.30625,
"line_max": 528,
"alpha_frac": 0.695440045,
"autogenerated": false,
"ratio": 3.277367773677737,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.07790635410691431,
"num_lines": 160
} |
# $Id$
# Train a mixture of Gaussians to approximate a multi-mode dataset.
# It seems fairly easy to fall into some local minimum. Good solutions
# have errors around -200.
# This example reproduces Fig. 5.21 from Bishop (2006).
__author__ = 'Martin Felder'
import pylab as p
import numpy as np
from pybrain.structure.modules import LinearLayer, BiasUnit, SigmoidLayer
from pybrain.structure import FullConnection, FeedForwardNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers.mixturedensity import RPropMinusTrainerMix, BackpropTrainerMix
from pybrain.structure.modules.mixturedensity import MixtureDensityLayer
def multigaussian(x, mean, stddev):
    """Evaluate uncorrelated Gaussian densities at a given scalar point.

    x: scalar
    mean: vector of means
    stddev: vector of standard deviations
    """
    z = (x - mean) / stddev
    normalizer = np.sqrt(2. * np.pi) * stddev
    return np.exp(-0.5 * z ** 2) / normalizer
if __name__ == '__main__':
    # build a network
    n = FeedForwardNetwork()
    # linear input layer
    n.addInputModule(LinearLayer(1, name='in'))
    # output layer of type 'outclass'
    N_GAUSSIANS = 3
    n.addOutputModule(MixtureDensityLayer(dim=1, name='out', mix=N_GAUSSIANS))
    # add bias module and connection to out module
    n.addModule(BiasUnit(name = 'bias'))
    n.addConnection(FullConnection(n['bias'], n['out']))
    # arbitrary number of hidden layers of type 'hiddenclass'
    n.addModule(SigmoidLayer(5, name='hidden'))
    n.addConnection(FullConnection(n['bias'], n['hidden']))
    # network with hidden layer(s), connections
    # from in to first hidden and last hidden to out
    n.addConnection(FullConnection(n['in'], n['hidden']))
    n.addConnection(FullConnection(n['hidden'], n['out']))
    n.sortModules()
    # Small random initial weights.
    n._setParameters(np.random.uniform(-0.1, 0.1, size=n.paramdim))
    # build some data: the classic multi-valued "inverse" curve
    # x = y + 0.3 sin(2*pi*y) + noise (cf. Bishop 2006, Fig. 5.21).
    y = np.arange(0.0, 1.0, 0.005).reshape(200,1)
    x = (
        y +
        0.3 * np.sin(2 * np.pi * y) +
        np.random.uniform(-0.1, 0.1, y.size).reshape(y.size, 1)
    )
    dataset = SupervisedDataSet(1, 1)
    dataset.setField('input', x)
    dataset.setField('target', y)
    # train the network
    trainer = RPropMinusTrainerMix(n, dataset=dataset, verbose=True,
                                   weightdecay=0.05)
    trainer.trainEpochs(200)
    # plot the density and other stuff
    p.subplot(2, 2, 3)
    dens = []
    newx = np.arange(0.0, 1.0, 0.01)
    newx = newx.reshape(newx.size, 1)
    dataset.setField('input', newx)
    out = n.activateOnDataset(dataset)
    # Each output row packs the mixture parameters; judging from the
    # slicing below the layout is [mixing coeffs | stddevs | means],
    # N_GAUSSIANS entries each -- TODO confirm against MixtureDensityLayer.
    for pars in out:
        stds = pars[N_GAUSSIANS:N_GAUSSIANS*2]
        means = pars[N_GAUSSIANS*2:N_GAUSSIANS*3]
        line = multigaussian(newx, means, stds)
        # Weighted sum of the component densities.
        density = line[:,0] * pars[0]
        for gaussian in range(1, N_GAUSSIANS):
            density += line[:, gaussian] * pars[gaussian]
        dens.append(density)
    newx = newx.flatten()
    dens = np.array(dens).transpose()
    p.contourf(newx, newx, dens, 30)
    p.title("cond. probab. dens.")
    p.subplot(221)
    out = np.array(out)
    p.plot(newx, out[:,0:3])
    p.title("mixing coefficient")
    p.subplot(222)
    p.plot(newx, out[:,6:9])
    p.title("means of Gaussians")
    p.subplot(224)
    p.scatter(x.flatten(), y.flatten(),
              marker='o', edgecolor='g', facecolors='none')
    # NOTE(review): p.hold() was removed in matplotlib >= 3.0 -- this
    # script targets an old pylab API.
    p.hold(True)
    # Conditional mode: the y with the highest density for each x.
    cmode = dens.argmax(axis=0)
    p.plot(newx, newx[cmode], "or", markersize=3)
    p.xlim(0, 1)
    p.ylim(0, 1)
    p.title("data and cond. mode")
    p.show()
| {
"repo_name": "yonglehou/pybrain",
"path": "examples/supervised/neuralnets+svm/example_mixturedensity.py",
"copies": "26",
"size": "3548",
"license": "bsd-3-clause",
"hash": -6189743814080675000,
"line_mean": 32.4716981132,
"line_max": 95,
"alpha_frac": 0.6364148816,
"autogenerated": false,
"ratio": 3.1763652641002684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016599186622437395,
"num_lines": 106
} |
# $Id$
import sys
# Make the pygglib package importable when running this script directly.
if sys.platform == 'win32':
    sys.path.append(".\\..") # - for Windows
else:
    sys.path.append("../") # - for Linux
from pygglib import GGSession
from GGConstans import *
from Contacts import *
import time
def login_ok(sender, args):
    """Callback for 'on_login_ok': announce a successful login."""
    print 'Zalogowano :>'  # Polish: "Logged in :>"
def print_msg(sender, args):
    """Callback for 'on_msg_recv': dump the incoming message to stdout."""
    print 'Message received:'
    print 'sender:', args.sender
    print 'seq:', args.seq
    print 'msg_class:', GGMsgTypes.reverse_lookup(args.msg_class)
    print 'message:', args.message
    print
def print_unknown_packet(sender, args):
    """Callback for 'on_unknown_packet': log the packet's type and length."""
    print 'Unknow packet received: type: %d, length: %d' % (args.type, args.length)
    print
def notify_reply(sender, args):
    """Callback for 'on_notify_reply': confirm the contact-list notification."""
    print 'reply notified'
if __name__ == "__main__":
try:
contacts = ContactsList([Contact({'uin':3993939}), Contact({'uin':3217426}), Contact({'uin':4102378})])
session = GGSession(uin = 11327271, password = 'eto2007', contacts_list = contacts)
session.register('on_login_ok', login_ok)
session.register('on_msg_recv', print_msg)
session.register('on_unknown_packet', print_unknown_packet)
session.register('on_notify_reply', notify_reply)
session.login()
time.sleep(2)
print "uin: %d, status: %s description: %s" % (session.contacts_list[3217426].uin, GGStatuses.reverse_lookup_without_mask(session.contacts_list[3217426].status), session.contacts_list[3217426].description)
print "uin: %d, status: %s description: %s" % (session.contacts_list[4102378].uin, GGStatuses.reverse_lookup_without_mask(session.contacts_list[4102378].status), session.contacts_list[4102378].description)
print "uin: %d, status: %s description: %s" % (session.contacts_list[3993939].uin, GGStatuses.reverse_lookup_without_mask(session.contacts_list[3993939].status), session.contacts_list[3993939].description)
time.sleep(4)
session.logout()
finally:
try:
session.logout()
except: #znaczy, ze nie jestesmy zalogowani
pass
| {
"repo_name": "jakubkosinski/pygglib",
"path": "tests/contacts_tests.py",
"copies": "1",
"size": "1946",
"license": "mit",
"hash": -1986436749114092500,
"line_mean": 36.1568627451,
"line_max": 207,
"alpha_frac": 0.6978417266,
"autogenerated": false,
"ratio": 2.913173652694611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8730175632761561,
"avg_score": 0.07616794930660993,
"num_lines": 51
} |
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are guaranteed
to exist. See the algorithms_guaranteed and algorithms_available attributes
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
# "guaranteed" = always present; "available" may grow with OpenSSL names
# (see the _hashlib import below).
algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)
# Legacy alias kept for backward compatibility.
algorithms = __always_supported
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
                                'algorithms_available', 'algorithms',
                                'pbkdf2_hmac')
def __get_builtin_constructor(name):
    """Return the built-in (non-OpenSSL) constructor for *name*.

    Imports the matching C extension lazily; raises ValueError when the
    name is unknown or the extension module is missing.
    """
    try:
        if name in ('SHA1', 'sha1'):
            import _sha
            return _sha.new
        elif name in ('MD5', 'md5'):
            import _md5
            return _md5.new
        elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
            import _sha256
            bs = name[3:]  # digest-size suffix: '256' or '224'
            if bs == '256':
                return _sha256.sha256
            elif bs == '224':
                return _sha256.sha224
        elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
            import _sha512
            bs = name[3:]  # digest-size suffix: '512' or '384'
            if bs == '512':
                return _sha512.sha512
            elif bs == '384':
                return _sha512.sha384
    except ImportError:
        pass # no extension module, this hash is unsupported.
    raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
    """Return the OpenSSL constructor for *name*, or the built-in fallback."""
    try:
        f = getattr(_hashlib, 'openssl_' + name)
        # Allow the C module to raise ValueError. The function will be
        # defined but the hash not actually available thanks to OpenSSL.
        f()
        # Use the C function directly (very fast)
        return f
    except (AttributeError, ValueError):
        return __get_builtin_constructor(name)
# Fallback `new()` used when the _hashlib (OpenSSL) module is unavailable.
def __py_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    return __get_builtin_constructor(name)(string)
# OpenSSL-backed `new()` with a built-in fallback per algorithm name.
def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    try:
        return _hashlib.new(name, string)
    except ValueError:
        # If the _hashlib module (OpenSSL) doesn't support the named
        # hash, try using our builtin implementations.
        # This allows for SHA224/256 and SHA384/512 support even though
        # the OpenSSL library prior to 0.9.8 doesn't provide them.
        return __get_builtin_constructor(name)(string)
# Pick the OpenSSL-backed implementations when _hashlib is importable,
# otherwise fall back to the pure built-in constructors.
try:
    import _hashlib
    new = __hash_new
    __get_hash = __get_openssl_constructor
    algorithms_available = algorithms_available.union(
        _hashlib.openssl_md_meth_names)
except ImportError:
    new = __py_new
    __get_hash = __get_builtin_constructor
# Publish md5(), sha1(), ... as module-level names.
for __func_name in __always_supported:
    # try them all, some may not work due to the OpenSSL
    # version not supporting that algorithm.
    try:
        globals()[__func_name] = __get_hash(__func_name)
    except ValueError:
        import logging
        logging.exception('code for hash %s was not found.', __func_name)
try:
    # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
    from _hashlib import pbkdf2_hmac
except ImportError:
    import binascii
    import struct
    # Translation tables for the HMAC inner/outer key pads (0x36 / 0x5C).
    _trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256))
    _trans_36 = b"".join(chr(x ^ 0x36) for x in range(256))
    def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
        """Password based key derivation function 2 (PKCS #5 v2.0)
        This Python implementations based on the hmac module about as fast
        as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
        for long passwords.

        hash_name: digest name accepted by new(); password/salt: byte
        strings (buffer-compatible); iterations: >= 1; dklen: derived key
        length in bytes (defaults to the digest size). Returns bytes.
        """
        if not isinstance(hash_name, str):
            raise TypeError(hash_name)
        if not isinstance(password, (bytes, bytearray)):
            password = bytes(buffer(password))
        if not isinstance(salt, (bytes, bytearray)):
            salt = bytes(buffer(salt))
        # Fast inline HMAC implementation
        inner = new(hash_name)
        outer = new(hash_name)
        blocksize = getattr(inner, 'block_size', 64)
        if len(password) > blocksize:
            # HMAC rule: keys longer than the block size are hashed first.
            password = new(hash_name, password).digest()
        password = password + b'\x00' * (blocksize - len(password))
        inner.update(password.translate(_trans_36))
        outer.update(password.translate(_trans_5C))
        def prf(msg, inner=inner, outer=outer):
            # PBKDF2_HMAC uses the password as key. We can re-use the same
            # digest objects and just update copies to skip initialization.
            icpy = inner.copy()
            ocpy = outer.copy()
            icpy.update(msg)
            ocpy.update(icpy.digest())
            return ocpy.digest()
        if iterations < 1:
            raise ValueError(iterations)
        if dklen is None:
            dklen = outer.digest_size
        if dklen < 1:
            raise ValueError(dklen)
        # XOR-accumulate each iteration chain as a big integer, then
        # convert back to a fixed-width hex block.
        hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2)
        dkey = b''
        loop = 1
        while len(dkey) < dklen:
            prev = prf(salt + struct.pack(b'>I', loop))
            rkey = int(binascii.hexlify(prev), 16)
            for i in xrange(iterations - 1):
                prev = prf(prev)
                rkey ^= int(binascii.hexlify(prev), 16)
            loop += 1
            dkey += binascii.unhexlify(hex_format_string % rkey)
        return dkey[:dklen]
# Cleanup locals() -- remove the helper names so they do not leak into
# the module's public namespace.
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| {
"repo_name": "kubaszostak/gdal-dragndrop",
"path": "osgeo/apps/Python27/Lib/hashlib.py",
"copies": "7",
"size": "8063",
"license": "mit",
"hash": 2834929953636192000,
"line_mean": 34.4841628959,
"line_max": 84,
"alpha_frac": 0.6000248047,
"autogenerated": false,
"ratio": 4.0194416749750745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8119466479675074,
"avg_score": null,
"num_lines": null
} |
# $Id:$
from pyglet.media import Source, AudioFormat, AudioData
import ctypes
import os
import math
class ProceduralSource(Source):
    """Base class for generated mono audio sources.

    Subclasses implement _generate_data() to synthesize sample data on
    demand; this class handles offset bookkeeping, EOS detection and
    timestamp/duration computation.
    """

    def __init__(self, duration, sample_rate=44800, sample_size=16):
        # NOTE(review): 44800 looks like a typo for the standard 44100 Hz
        # -- confirm upstream before changing.
        self._duration = float(duration)
        self.audio_format = AudioFormat(
            channels=1,
            sample_size=sample_size,
            sample_rate=sample_rate)
        self._offset = 0
        self._bytes_per_sample = sample_size >> 3
        self._bytes_per_second = self._bytes_per_sample * sample_rate
        self._max_offset = int(self._bytes_per_second * self._duration)
        if self._bytes_per_sample == 2:
            # Keep the end offset aligned on a 16-bit sample boundary.
            self._max_offset &= 0xfffffffe

    def _get_audio_data(self, bytes):
        # Clamp the request to the remaining data; None signals exhaustion.
        bytes = min(bytes, self._max_offset - self._offset)
        if bytes <= 0:
            return None
        timestamp = float(self._offset) / self._bytes_per_second
        duration = float(bytes) / self._bytes_per_second
        data = self._generate_data(bytes, self._offset)
        self._offset += bytes
        is_eos = self._offset >= self._max_offset
        return AudioData(data,
                         bytes,
                         timestamp,
                         duration,
                         is_eos)

    def _generate_data(self, bytes, offset):
        '''Generate `bytes` bytes of data.
        Return data as ctypes array or string.
        '''
        raise NotImplementedError('abstract')

    def _seek(self, timestamp):
        self._offset = int(timestamp * self._bytes_per_second)
        # Bound within duration
        self._offset = min(max(self._offset, 0), self._max_offset)
        # Align to sample
        if self._bytes_per_sample == 2:
            self._offset &= 0xfffffffe
class Silence(ProceduralSource):
    """Generate silence (a constant mid-level signal)."""

    def _generate_data(self, bytes, offset):
        if self._bytes_per_sample == 1:
            # 8-bit samples are unsigned; silence sits at the midpoint
            # (127, matching the bias used by Sine).
            # BUG FIX: '\127' is an *octal* escape, i.e. chr(87) ('W'),
            # not 127 -- use the hex escape '\x7f'.
            return '\x7f' * bytes
        else:
            # 16-bit samples are signed; silence is zero.
            return '\0' * bytes
class WhiteNoise(ProceduralSource):
    """Generate white noise from OS-supplied random bytes."""

    def _generate_data(self, bytes, offset):
        # Random bytes are uniformly distributed, which is white noise
        # for both 8- and 16-bit sample formats.
        return os.urandom(bytes)
class Sine(ProceduralSource):
    """Generate a sine wave at a fixed frequency (Hz)."""

    def __init__(self, duration, frequency=440, **kwargs):
        super(Sine, self).__init__(duration, **kwargs)
        self.frequency = frequency

    def _generate_data(self, bytes, offset):
        # Pick sample width, bias and amplitude for the output format:
        # 8-bit is unsigned (biased around 127), 16-bit is signed.
        if self._bytes_per_sample == 1:
            start = offset
            samples = bytes
            bias = 127
            amplitude = 127
            data = (ctypes.c_ubyte * samples)()
        else:
            # Offsets are in bytes; halve for 16-bit sample indices.
            start = offset >> 1
            samples = bytes >> 1
            bias = 0
            amplitude = 32767
            data = (ctypes.c_short * samples)()
        # Phase increment per sample, in radians.
        step = self.frequency * (math.pi * 2) / self.audio_format.sample_rate
        for i in range(samples):
            # int() truncates toward zero when quantizing the sample.
            data[i] = int(math.sin(step * (i + start)) * amplitude + bias)
        return data
class Saw(ProceduralSource):
    """Generate a wave that ramps between min and max, reflecting at the
    bounds (the ramp direction flips whenever a bound is crossed)."""

    def __init__(self, duration, frequency=440, **kwargs):
        super(Saw, self).__init__(duration, **kwargs)
        self.frequency = frequency

    def _generate_data(self, bytes, offset):
        # XXX TODO consider offset
        # NOTE: `max`/`min` deliberately shadow the builtins inside this
        # method (kept from the original).
        if self._bytes_per_sample == 1:
            samples = bytes
            value = 127
            max = 255
            min = 0
            data = (ctypes.c_ubyte * samples)()
        else:
            samples = bytes >> 1
            value = 0
            max = 32767
            min = -32768
            data = (ctypes.c_short * samples)()
        # Per-sample increment; full range is traversed twice per period.
        # (Python 2 integer division here -- confirm if ported to py3.)
        step = (max - min) * 2 * self.frequency / self.audio_format.sample_rate
        for i in range(samples):
            value += step
            if value > max:
                # Reflect the overshoot back below the bound and reverse.
                value = max - (value - max)
                step = -step
            if value < min:
                value = min - (value - min)
                step = -step
            data[i] = value
        return data
class Square(ProceduralSource):
    """Generate a square wave at a fixed frequency (Hz)."""

    def __init__(self, duration, frequency=440, **kwargs):
        super(Square, self).__init__(duration, **kwargs)
        self.frequency = frequency

    def _generate_data(self, bytes, offset):
        # XXX TODO consider offset
        if self._bytes_per_sample == 1:
            samples = bytes
            value = 0
            amplitude = 255
            data = (ctypes.c_ubyte * samples)()
        else:
            samples = bytes >> 1
            value = -32768
            amplitude = 65535
            data = (ctypes.c_short * samples)()
        # Samples per half-cycle; the level toggles every `period` samples.
        # (Python 2 integer division -- confirm if ported to py3.)
        period = self.audio_format.sample_rate / self.frequency / 2
        count = 0
        for i in range(samples):
            count += 1
            if count == period:
                # Flip between the low and high levels.
                value = amplitude - value
                count = 0
            data[i] = value
        return data
| {
"repo_name": "certik/sympy-oldcore",
"path": "sympy/plotting/pyglet/media/procedural.py",
"copies": "1",
"size": "4932",
"license": "bsd-3-clause",
"hash": -8371373572432896000,
"line_mean": 31.1006711409,
"line_max": 79,
"alpha_frac": 0.5089213301,
"autogenerated": false,
"ratio": 4.299912816041848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308834146141848,
"avg_score": null,
"num_lines": null
} |
# $Id$
import sys
import unittest
# Make the pygglib package importable when running this script directly.
if sys.platform == 'win32':
    sys.path.append(".\\..") # - for Windows
else:
    sys.path.append("../") # - for Linux
from GGConstans import *
from Contacts import *
class ContactUnitTest(unittest.TestCase):
    """Unit tests for Contact construction and request-string round-trips."""

    # Attribute order used by Contact.request_string().
    _FIELDS = ("name", "surname", "nick", "shown_name", "mobilephone",
               "group", "uin", "email", "available", "available_source",
               "message", "message_source", "hidden", "telephone")

    def _full_values(self):
        # A complete, representative set of contact attributes.
        return {
            'uin': 1234, 'nick': 'Nick', 'shown_name': 'ShownName',
            'name': 'Name', 'surname': 'Surname',
            'mobilephone': '123456789', 'group': 'Group',
            'email': 'email@email', 'available': 2,
            'available_source': 'source', 'message': 1,
            'message_source': 'source', 'hidden': 0,
            'telephone': '123456789',
        }

    def _request_string(self, values):
        # Serialize exactly as Contact.request_string() does.
        return ";".join(str(values[field]) for field in self._FIELDS)

    def _assert_fields(self, contact, expected):
        # Compare every public attribute against the expected values.
        for attr in ("uin", "nick", "name", "surname", "mobilephone",
                     "group", "email", "available", "available_source",
                     "message", "message_source", "hidden", "telephone",
                     "shown_name"):
            self.assertEqual(getattr(contact, attr), expected[attr])

    def testCreateDictWithShownName(self):
        contact = Contact({'uin': 1234, 'nick': 'Nick', 'shown_name': 'Test'})
        self._assert_fields(contact, {
            'uin': 1234, 'nick': 'Nick', 'shown_name': 'Test',
            'name': "", 'surname': "", 'mobilephone': "", 'group': "",
            'email': "", 'available': 0, 'available_source': "",
            'message': 0, 'message_source': "", 'hidden': 0,
            'telephone': "",
        })

    def testCreateDictWithoutShownName(self):
        values = self._full_values()
        del values['shown_name']
        contact = Contact(values)
        # Without an explicit shown_name the uin is used as a string.
        values['shown_name'] = str(values['uin'])
        self._assert_fields(contact, values)

    def testCreateString(self):
        values = self._full_values()
        contact = Contact({'request_string': self._request_string(values)})
        self._assert_fields(contact, values)

    def testRequestString(self):
        values = self._full_values()
        request_string = self._request_string(values)
        contact = Contact({'request_string': request_string})
        self.assertEqual(contact.request_string(), request_string)
class ContactsListUnitTest(unittest.TestCase):
    """Unit tests for ContactsList, driven by the kontakty.txt fixture."""

    def setUp(self):
        fixture = open('kontakty.txt')
        self.request_string = fixture.read()
        fixture.close()
        self.lines = self.request_string.split("\n")
        # One Contact per non-empty fixture line.
        self.contacts = [Contact({'request_string': line})
                         for line in self.lines if line != '']

    def testAddContact(self):
        roster = ContactsList()
        first = self.contacts[0]
        roster.add_contact(first)
        self.assertEqual(len(roster), 1)
        self.assertEqual(roster[first.uin], first)
        # Adding everything (including the duplicate first contact) must
        # leave exactly one entry per uin.
        for entry in self.contacts:
            roster.add_contact(entry)
        self.assertEqual(len(roster), len(self.contacts))
        for entry in self.contacts:
            self.assertEqual(roster[entry.uin], entry)

    def testRemoveContact(self):
        roster = ContactsList(self.request_string)
        removed = self.contacts[0]
        roster.remove_contact(removed.uin)
        self.assertEqual(roster[removed.uin], None)
        self.assertEqual(len(roster), len(self.contacts) - 1)

    def testRequestString(self):
        roster = ContactsList(self.request_string)
        # Import/export must round-trip byte for byte.
        self.assertEqual(roster.export_request_string(), self.request_string)

    def testContainsContact(self):
        roster = ContactsList()
        entry = Contact({"uin": 1234, "shown_name": "Trol"})
        self.assertFalse(1234 in roster)
        self.assertFalse(entry in roster)
        roster.add_contact(entry)
        # Membership works both by uin and by Contact instance.
        self.assertTrue(1234 in roster)
        self.assertTrue(entry in roster)
if __name__ == "__main__":
suite1 = unittest.makeSuite(ContactUnitTest)
unittest.TextTestRunner().run(suite1)
suite2 = unittest.makeSuite(ContactsListUnitTest)
unittest.TextTestRunner().run(suite2)
| {
"repo_name": "jakubkosinski/pygglib",
"path": "tests/contacts_unit_test.py",
"copies": "1",
"size": "6829",
"license": "mit",
"hash": 3774160536988546600,
"line_mean": 37.0228571429,
"line_max": 193,
"alpha_frac": 0.6046273246,
"autogenerated": false,
"ratio": 4.081888822474596,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5186516147074596,
"avg_score": null,
"num_lines": null
} |
# $Id: 0d2e5b9d01530c575fc4f6834113699dda23cc4a $
"""
Input/Output utility methods and classes.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import zipfile
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['AutoFlush', 'MultiWriter', 'PushbackFile', 'Zip']
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class AutoFlush(object):
    """
    Wrap a file-like object so that every ``write()`` is immediately
    followed by a ``flush()``. For example, to force standard output to
    flush after every write:

    .. python::

        import sys
        from grizzled.io import AutoFlush

        sys.stdout = AutoFlush(sys.stdout)
    """
    def __init__(self, f):
        """
        Wrap *f*, a file-like object providing ``write()`` and ``flush()``.

        :Parameters:
            f : file
                the file-like object to wrap
        """
        self.__file = f

    def write(self, buf):
        """
        Write *buf* to the file, flushing immediately afterward.

        :Parameters:
            buf : str or bytes
                buffer to write
        """
        target = self.__file
        target.write(buf)
        target.flush()

    def flush(self):
        """
        Explicitly flush the wrapped file.
        """
        self.__file.flush()

    def truncate(self, size=-1):
        """
        Truncate the underlying file. Might fail.

        :Parameters:
            size : int
                where to truncate; a negative value means the file's
                current position
        """
        where = self.__file.tell() if size < 0 else size
        self.__file.truncate(where)

    def tell(self):
        """
        Return the file's current position, if applicable.

        :rtype: int
        :return: current file position
        """
        return self.__file.tell()

    def seek(self, offset, whence=os.SEEK_SET):
        """
        Reposition the file. ``whence`` is one of ``os.SEEK_SET`` (default,
        absolute), ``os.SEEK_CUR`` (relative to current position) or
        ``os.SEEK_END`` (relative to end of file). Note that not all file
        objects are seekable, and text-mode files only accept offsets
        previously returned by ``tell()``.

        :Parameters:
            offset : int
                where to seek
            whence : int
                see above
        """
        self.__file.seek(offset, whence)

    def fileno(self):
        """
        Return the integer file descriptor of the underlying file.

        :rtype: int
        :return: the file descriptor
        """
        return self.__file.fileno()
class MultiWriter(object):
    """
    Wraps multiple file-like objects so that they all may be written at once.
    For example, the following code arranges to have anything written to
    ``sys.stdout`` go to ``sys.stdout`` and to a temporary file:

    .. python::

        import sys
        from grizzled.io import MultiWriter

        sys.stdout = MultiWriter(sys.__stdout__, open('/tmp/log', 'w'))
    """
    def __init__(self, *args):
        """
        Create a new ``MultiWriter`` object to wrap one or more file-like
        objects.

        :Parameters:
            args : iterable
                One or more file-like objects to wrap
        """
        self.__targets = args

    def write(self, buf):
        """
        Write the specified buffer to every wrapped file.

        :Parameters:
            buf : str or bytes
                buffer to write
        """
        for target in self.__targets:
            target.write(buf)

    def flush(self):
        """
        Force a flush on every wrapped file.
        """
        for target in self.__targets:
            target.flush()

    def close(self):
        """
        Close all contained files.
        """
        for target in self.__targets:
            target.close()
class PushbackFile(object):
    """
    A read-only, file-like wrapper object that permits pushback: data may
    be pushed back onto the input stream and re-read (useful for
    lookahead-style parsing). The wrapped file's entire contents are read
    eagerly and buffered in memory.
    """
    def __init__(self, f):
        """
        Create a new ``PushbackFile`` object to wrap a file-like object.

        :Parameters:
            f : file
                A file-like object with a ``readlines()`` method. Its
                entire contents are consumed by this constructor.
        """
        # Buffer the whole input as a list of single characters so that
        # pushback() can cheaply prepend data.
        self.__buf = [c for c in ''.join(f.readlines())]

    def write(self, buf):
        """
        Write the specified buffer to the file. This method throws an
        unconditional exception, since ``PushbackFile`` objects are read-only.

        :Parameters:
            buf : str or bytes
                buffer to write

        :raise NotImplementedError: unconditionally
        """
        # Fix: 'raise E, msg' is Python 2-only syntax; the call form works
        # on both Python 2 and 3.
        raise NotImplementedError('PushbackFile is read-only')

    def pushback(self, s):
        """
        Push a character or string back onto the input stream.

        :Parameters:
            s : str
                the string to push back onto the input stream
        """
        self.__buf = [c for c in s] + self.__buf

    unread = pushback

    def read(self, n=-1):
        """
        Read *n* bytes from the open file.

        :Parameters:
            n : int
                Number of bytes to read. A negative number instructs
                ``read()`` to read all remaining bytes.

        :return: the bytes read
        """
        resultBuf = None
        if n > len(self.__buf):
            n = len(self.__buf)

        if (n < 0) or (n >= len(self.__buf)):
            resultBuf = self.__buf
            self.__buf = []
        else:
            resultBuf = self.__buf[0:n]
            self.__buf = self.__buf[n:]

        return ''.join(resultBuf)

    def readline(self, length=-1):
        """
        Read the next line from the file, including the trailing newline
        (if any). Returns an empty string at end of file.

        :Parameters:
            length : int
                a length hint, or negative if you don't care

        :rtype: str
        :return: the line
        """
        i = 0
        while i < len(self.__buf) and (self.__buf[i] != '\n'):
            i += 1

        result = self.__buf[0:i+1]
        self.__buf = self.__buf[i+1:]
        return ''.join(result)

    def readlines(self, sizehint=0):
        """
        Read all remaining lines in the file.

        :rtype: list
        :return: list of lines

        Bug fix: the original returned ``self.read(-1)`` — one big string —
        despite the documented list return type. It now returns a list of
        lines, matching the standard file protocol.
        """
        lines = []
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
        return lines

    def __iter__(self):
        return self

    def next(self):
        """A file object is its own iterator.

        :rtype: str
        :return: the next line from the file

        :raise StopIteration: end of file
        """
        line = self.readline()
        # Fix: compare with 'is None' rather than '== None'.
        if (line is None) or (len(line) == 0):
            raise StopIteration
        return line

    # Python 3 iterator protocol uses __next__; alias it so the class
    # iterates correctly on both major versions.
    __next__ = next

    def close(self):
        """Close the file. A no-op in this class."""
        pass

    def flush(self):
        """
        Force a flush. This method throws an unconditional exception, since
        ``PushbackFile`` objects are read-only.

        :raise NotImplementedError: unconditionally
        """
        raise NotImplementedError('PushbackFile is read-only')

    def truncate(self, size=-1):
        """
        Truncate the underlying file. This method throws an unconditional
        exception, since ``PushbackFile`` objects are read-only.

        :Parameters:
            size : int
                Where to truncate. If less than 0, then file's current
                position is used

        :raise NotImplementedError: unconditionally
        """
        raise NotImplementedError('PushbackFile is read-only')

    def tell(self):
        """
        Return the file's current position, if applicable. This method throws
        an unconditional exception, since ``PushbackFile`` objects are
        read-only.

        :rtype: int
        :return: Current file position

        :raise NotImplementedError: unconditionally
        """
        raise NotImplementedError('PushbackFile is not seekable')

    def seek(self, offset, whence=os.SEEK_SET):
        """
        Set the file's current position. This method throws an unconditional
        exception, since ``PushbackFile`` objects are not seekable.

        :Parameters:
            offset : int
                where to seek
            whence : int
                see above

        :raise NotImplementedError: unconditionally
        """
        raise NotImplementedError('PushbackFile is not seekable')

    def fileno(self):
        """
        Return the integer file descriptor used by the underlying file.
        Always -1: there is no underlying descriptor once the contents
        have been buffered.

        :rtype: int
        :return: the file descriptor
        """
        return -1
class Zip(zipfile.ZipFile):
    """
    ``Zip`` extends the standard ``zipfile.ZipFile`` class and provides a
    method to extract the contents of a zip file into a directory. Adapted
    from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252508.

    NOTE: ``extract()`` here shadows ``zipfile.ZipFile.extract()`` with a
    different signature; callers of this class use the directory form.
    """
    def __init__(self, file, mode="r",
                 compression=zipfile.ZIP_STORED,
                 allow_zip64=False):
        """
        Constructor. Initialize a new zip file.

        :Parameters:
            file : str
                path to zip file
            mode : str
                open mode. Valid values are 'r' (read), 'w' (write), and
                'a' (append)
            compression : int
                Compression type. Valid values: ``zipfile.ZIP_STORED``,
                ``zipfile.ZIP_DEFLATED``
            allow_zip64 : bool
                Whether or not Zip64 extensions are to be used
        """
        zipfile.ZipFile.__init__(self, file, mode, compression, allow_zip64)
        self.zipFile = file

    def extract(self, output_dir):
        """
        Unpack the zip file into the specified output directory.

        :Parameters:
            output_dir : str
                path to output directory. The directory is
                created if it doesn't already exist.
        """
        # The ':' check skips creation for bare drive specifications
        # (e.g. "C:" on Windows).
        if not output_dir.endswith(':') and not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Extract files to directory structure. Entries ending in '/' are
        # directory markers and carry no data.
        for name in self.namelist():
            if name.endswith('/'):
                continue
            directory = os.path.dirname(name)
            if directory:
                directory = os.path.join(output_dir, directory)
                if not os.path.exists(directory):
                    os.makedirs(directory)
            # Bug fix: the original left the output file open if read()
            # or write() raised; the 'with' block guarantees closure.
            with open(os.path.join(output_dir, name), 'wb') as outfile:
                outfile.write(self.read(name))
| {
"repo_name": "yencarnacion/jaikuengine",
"path": ".google_appengine/lib/grizzled/grizzled/io/__init__.py",
"copies": "19",
"size": "11715",
"license": "apache-2.0",
"hash": 8707027021779322000,
"line_mean": 27.7837837838,
"line_max": 91,
"alpha_frac": 0.518822023,
"autogenerated": false,
"ratio": 4.650655021834061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: 209a_reg_handle_423_ok.py 3105 2010-02-23 11:03:07Z bennylp $
#
# Scenario test: pjsua's re-registration behavior after "423 Interval Too
# Brief" responses from the registrar, driven by the inc_sip mock transport.
import inc_sip as sip
import inc_sdp as sdp

# Command-line arguments for the pjsua instance under test; $PORT is
# substituted by the test framework.
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
	"--realm=python --user=username --password=password " + \
	"--auto-update-nat=0"

# 423 Response without Min-Expires header
req1 = sip.RecvfromTransaction("Initial request", 423,
			       include=["REGISTER sip"],
			       exclude=[],
			       resp_hdr=[]
			  )

# Client should retry with Expires header containing special value (pjsip specific)
# This retry is answered with another 423, this time carrying Min-Expires.
req2 = sip.RecvfromTransaction("REGISTER retry after 423 response without Min-Expires header", 423,
			       include=["REGISTER sip", "Expires: 3601"],
			       exclude=[],
			       resp_hdr=["Min-Expires: 3612"]
			  )

# Client should retry with proper Expires header now
# (i.e. the Min-Expires value from the previous response).
req3 = sip.RecvfromTransaction("REGISTER retry after proper 423", 200,
			       include=["Expires: 3612"],
			       exclude=[],
			       expect="registration success"
			  )

recvfrom_cfg = sip.RecvfromCfg("Reregistration after 423 response",
			       pjsua, [req1, req2, req3])
| {
"repo_name": "yuezhou/telephony",
"path": "telephony/Classes/pjproject-2.2.1/tests/pjsua/scripts-recvfrom/209a_reg_handle_423_ok.py",
"copies": "58",
"size": "1054",
"license": "mit",
"hash": -6292262118407778000,
"line_mean": 33,
"line_max": 99,
"alpha_frac": 0.6821631879,
"autogenerated": false,
"ratio": 3.1091445427728615,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: 230_reg_bad_fail_stale_true.py 2392 2008-12-22 18:54:58Z bennylp $
import inc_sip as sip
import inc_sdp as sdp

# In this test we simulate broken server, where it always sends
# stale=true with all 401 responses. We should expect pjsip to
# retry the authentication until PJSIP_MAX_STALE_COUNT is
# exceeded. When pjsip retries the authentication, it should
# use the new nonce from server

# Command-line arguments for the pjsua instance under test; $PORT is
# substituted by the test framework.
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
	"--realm=python --user=username --password=password"

# Step 1: challenge the credential-less initial REGISTER with nonce "1".
req1 = sip.RecvfromTransaction("Initial request", 401,
			       include=["REGISTER sip"],
			       exclude=["Authorization"],
			       resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
			       )

# Step 2: client authenticates with nonce "1"; answer 401 with a new nonce
# and stale=true (the 'exclude' regex asserts only one Authorization header).
req2 = sip.RecvfromTransaction("First retry", 401,
			       include=["REGISTER sip", "Authorization", "nonce=\"1\""],
			       exclude=["Authorization:[\\s\\S]+Authorization:"],
			       resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
			       )

# Step 3: same again with nonce "2" -> nonce "3", still stale=true.
req3 = sip.RecvfromTransaction("Second retry retry", 401,
			       include=["REGISTER sip", "Authorization", "nonce=\"2\""],
			       exclude=["Authorization:[\\s\\S]+Authorization:"],
			       resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"]
			       )

# Step 4: after this challenge pjsip should give up with
# PJSIP_EAUTHSTALECOUNT rather than retrying forever.
req4 = sip.RecvfromTransaction("Third retry", 401,
			       include=["REGISTER sip", "Authorization", "nonce=\"3\""],
			       exclude=["Authorization:[\\s\\S]+Authorization:"],
			       resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"],
			       expect="PJSIP_EAUTHSTALECOUNT"
			       )

recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ",
			       pjsua, [req1, req2, req3, req4])
| {
"repo_name": "crissmoldovan/tisip",
"path": "iphone/pjsip/src/tests/pjsua/scripts-recvfrom/230_reg_bad_fail_stale_true.py",
"copies": "59",
"size": "1692",
"license": "mit",
"hash": 6406944162946690000,
"line_mean": 40.2682926829,
"line_max": 93,
"alpha_frac": 0.6731678487,
"autogenerated": false,
"ratio": 3.3307086614173227,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03550328188363841,
"num_lines": 41
} |
# $Id: 32c62bbf4b4a05fb16d4bc6894663370a8ae7ac9 $
#
# Nose program for testing grizzled.sys classes/functions
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from __future__ import absolute_import
import sys
from grizzled.system import *
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# (string version, sys.hexversion-style binary version) pairs used to
# verify round-tripping between the two representations.
VERSIONS = [('2.5.1', 0x020501f0),
            ('1.5', 0x010500f0),
            ('2.6', 0x020600f0),
            ('2.4.3', 0x020403f0)]
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class TestSys(object):
    """Nose tests for the grizzled.system version utilities."""

    def test_version_conversions(self):
        # Nose-style generator test: yields one check per known pair.
        for str_version, bin_version in VERSIONS:
            yield self.do_one_version_conversion, str_version, bin_version

    def do_one_version_conversion(self, string_version, binary_version):
        """Verify that one (string, binary) version pair round-trips."""
        converted_to_binary = python_version(string_version)
        converted_to_string = python_version_string(binary_version)
        assert converted_to_binary == binary_version
        assert converted_to_string == string_version

    def test_current_version(self):
        """ensure_version() must accept the running interpreter's version."""
        ensure_version(sys.hexversion)
        ensure_version(python_version_string(sys.hexversion))
        major, minor, patch, final, rem = sys.version_info
        binary_version = python_version('%d.%d.%d' % (major, minor, patch))

    def test_class_for_name(self):
        """class_for_name() resolves real classes and rejects bogus ones."""
        cls = class_for_name('grizzled.config.Configuration')
        got_name = '%s.%s' % (cls.__module__, cls.__name__)
        assert got_name == 'grizzled.config.Configuration'

        try:
            class_for_name('grizzled.foo.bar.baz')
            assert False
        except NameError:
            pass
        except ImportError:
            pass
| {
"repo_name": "pigeonflight/strider-plone",
"path": "docker/appengine/lib/grizzled/grizzled/test/system/Test.py",
"copies": "19",
"size": "1909",
"license": "mit",
"hash": 3809478660899397600,
"line_mean": 33.0892857143,
"line_max": 77,
"alpha_frac": 0.4567836564,
"autogenerated": false,
"ratio": 4.251670378619154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004134150873281308,
"num_lines": 56
} |
# $Id: 55b6d7323887ac09f6bfba365205e952533b847b $
"""
The ``grizzled.text`` package contains text-related classes and modules.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from StringIO import StringIO
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['hexdump']
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
REPEAT_FORMAT = '*** Repeated %d times'
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def hexdump(source, out, width=16, start=0, limit=None, show_repeats=False):
    """
    Produce a "standard" hexdump of the specified string or file-like
    object. The output consists of a series of lines like this::

        000000: 72 22 22 22 4f 53 20 72 6f 75  r'''OS rou
        00000a: 74 69 6e 65 73 20 66 6f 72 20  tines for
        000014: 4d 61 63 2c 20 4e 54 2c 20 6f  Mac, NT, o

    The output width (i.e., the number of decoded characters shown on a
    line) can be controlled with the ``width`` parameter.

    Adjacent repeated lines are collapsed by default. For example::

        000000: 00 00 00 00 00 00 00 00 00 00  ..........
        *** Repeated 203 times
        0007f8: 72 22 22 22 4f 53 20 72 6f 75  r'''OS rou

    This behavior can be disabled via the ``show_repeats`` parameter.

    :Parameters:
        source : str or file
            The object whose contents are to be dumped in hex. The
            object can be a string or a file-like object.
        out : file
            Where to dump the hex output
        width : int
            The number of dumped characters per line
        start : int
            Offset within ``input`` where reading should begin
        limit : int
            Total number of bytes to dump. Defaults to everything from
            ``start`` to the end.
        show_repeats : bool
            ``False`` to collapse repeated output lines, ``True`` to
            dump all lines, even if they're repeats.
    """
    # NOTE(review): assumes 'source' yields text characters (ord() is used
    # on each element); binary files should be pre-decoded — confirm.
    def printable(b):
        """Printable character for byte value b, or '.' if unprintable."""
        return chr(b) if 32 <= b <= 126 else '.'

    # Renamed from 'ascii' to avoid shadowing the builtin.
    ascii_map = [printable(c) for c in range(256)]

    pos = 0
    lastbuf = ''
    lastline = ''
    repeat_count = 0

    # Insert an extra space after this many bytes, for readability.
    # (Integer division: 'width / 2' yields a float on Python 3.)
    if width > 4:
        space_col = width // 2
    else:
        space_col = -1

    if isinstance(source, str):
        source = StringIO(source)

    if start:
        source.seek(start)
        pos = start

    hex_field_width = (width * 3) + 1

    def emit(s):
        # 'print >> out' is Python 2-only syntax; write() works everywhere.
        out.write('%s\n' % s)

    total_read = 0
    while True:
        if limit:
            to_read = min(limit - total_read, width)
        else:
            to_read = width

        buf = source.read(to_read)
        length = len(buf)
        total_read += length
        if length == 0:
            if repeat_count and (not show_repeats):
                # Flush pending collapsed repeats at EOF. Bug fix: the
                # original printed 'lastline' twice when repeat_count == 1.
                if repeat_count > 1:
                    emit(REPEAT_FORMAT % (repeat_count - 1))
                emit(lastline)
            break
        else:
            show_buf = True

            if buf == lastbuf:
                repeat_count += 1
                show_buf = False
            else:
                if repeat_count and (not show_repeats):
                    # Flush pending repeats before the new, different line.
                    if repeat_count == 1:
                        emit(lastline)
                    else:
                        emit(REPEAT_FORMAT % (repeat_count - 1))
                        # Bug fix: also show the final repeated line, as the
                        # EOF path does; otherwise one line went missing.
                        emit(lastline)
                repeat_count = 0

            # Build output line.
            hex_str = ""
            asc = ""
            for i in range(length):
                c = buf[i]
                if i == space_col:
                    hex_str = hex_str + " "
                hex_str = hex_str + ("%02x" % ord(c)) + " "
                asc = asc + ascii_map[ord(c)]
            line = "%06x: %-*s %s" % (pos, hex_field_width, hex_str, asc)
            if show_buf:
                emit(line)
            pos = pos + length
            lastbuf = buf
            lastline = line
def str2bool(s):
    """
    Convert a string to a boolean value. The supported conversions are:

        +--------------+---------------+
        | String       | Boolean value |
        +==============+===============+
        | "false"      | False         |
        +--------------+---------------+
        | "true"       | True          |
        +--------------+---------------+
        | "f"          | False         |
        +--------------+---------------+
        | "t"          | True          |
        +--------------+---------------+
        | "0"          | False         |
        +--------------+---------------+
        | "1"          | True          |
        +--------------+---------------+
        | "n"          | False         |
        +--------------+---------------+
        | "y"          | True          |
        +--------------+---------------+
        | "no"         | False         |
        +--------------+---------------+
        | "yes"        | True          |
        +--------------+---------------+
        | "off"        | False         |
        +--------------+---------------+
        | "on"         | True          |
        +--------------+---------------+

    Strings are compared in a case-blind fashion.

    **Note**: This function is not currently localizable.

    :Parameters:
        s : str
            The string to convert to boolean

    :rtype: bool
    :return: the corresponding boolean value

    :raise ValueError: unrecognized boolean string
    """
    try:
        # Bug fix: 'y' and 'n' were inverted ('y': False, 'n': True),
        # contradicting the documented table above.
        return {'false' : False,
                'true'  : True,
                'f'     : False,
                't'     : True,
                '0'     : False,
                '1'     : True,
                'no'    : False,
                'yes'   : True,
                'y'     : True,
                'n'     : False,
                'off'   : False,
                'on'    : True}[s.lower()]
    except KeyError:
        raise ValueError('Unrecognized boolean string: "%s"' % s)
| {
"repo_name": "Kazade/NeHe-Website",
"path": "google_appengine/lib/grizzled/grizzled/text/__init__.py",
"copies": "19",
"size": "6796",
"license": "bsd-3-clause",
"hash": -3050995639938421000,
"line_mean": 30.3179723502,
"line_max": 77,
"alpha_frac": 0.3893466745,
"autogenerated": false,
"ratio": 4.4769433465085635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002253928017753803,
"num_lines": 217
} |
# $Id: 5e4fe45ea4436e0dc7d3743bff9679e052071746 $
# ---------------------------------------------------------------------------
"""
A dummy database driver, useful for testing.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from grizzled.db.base import DBDriver
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
BINARY = 0
NUMBER = 1
STRING = 2
DATETIME = 3
ROWID = 4
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class DummyCursor(object):
    """Cursor returned by ``DummyDB``. Executes nothing and holds no
    results; every fetch raises ``ValueError``."""

    def close(self):
        """Close the cursor. No-op."""
        pass

    def execute(self, statement, parameters=None):
        """Pretend to execute a statement; records an empty result set."""
        self.rowcount = 0
        self.description = ""
        return None

    def fetchone(self):
        """Always raises: a dummy cursor never has results.

        (Converted from the Python 2-only ``raise E, msg`` syntax.)
        """
        raise ValueError("No results")

    def fetchall(self):
        """Always raises: a dummy cursor never has results."""
        raise ValueError("No results")

    def fetchmany(self, n):
        """Always raises: a dummy cursor never has results."""
        raise ValueError("No results")
class DummyDB(object):
    """Connection object for the dummy driver. Every operation is a
    no-op; ``cursor()`` hands back a fresh ``DummyCursor``."""

    def __init__(self):
        pass

    def cursor(self):
        """Return a new ``DummyCursor``."""
        return DummyCursor()

    def commit(self):
        """Commit the (nonexistent) current transaction. No-op."""
        pass

    def rollback(self):
        """Roll back the (nonexistent) current transaction. No-op."""
        pass

    def close(self):
        """Close the connection. No-op."""
        pass
class DummyDriver(DBDriver):
    """Dummy database driver, for testing."""

    def get_import(self):
        """Return the DB API module for this driver (this module itself)."""
        import dummydb
        return dummydb

    def get_display_name(self):
        """Return the human-readable driver name."""
        return "Dummy"

    def do_connect(self,
                   host="localhost",
                   port=None,
                   user='',
                   password='',
                   database='default'):
        """
        Open a dummy connection. All parameters are accepted for API
        compatibility and ignored.
        """
        # Bug fix: the original referenced the global name 'dummydb', but
        # that module is only imported locally inside get_import(), so
        # this method raised NameError. Route through get_import() instead.
        dbi = self.get_import()
        return dbi.DummyDB()
| {
"repo_name": "bratsche/Neutron-Drive",
"path": "google_appengine/lib/grizzled/grizzled/db/dummydb.py",
"copies": "19",
"size": "1861",
"license": "bsd-3-clause",
"hash": 6302335774715545000,
"line_mean": 21.975308642,
"line_max": 77,
"alpha_frac": 0.4051585169,
"autogenerated": false,
"ratio": 5.198324022346369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: 8066a5bbef6962141ae539bef06493250cbeab57 $
"""
SQLite3 extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
import re
from grizzled.db.base import (DBDriver, Error, Warning, TableMetadata,
IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class SQLite3Driver(DBDriver):
    """DB Driver for SQLite, using the pysqlite DB API module."""

    def get_import(self):
        """Return the underlying DB API module."""
        import sqlite3
        return sqlite3

    def get_display_name(self):
        """Return the human-readable driver name."""
        return "SQLite3"

    def do_connect(self,
                   host=None,
                   port=None,
                   user='',
                   password='',
                   database='default'):
        """
        Open a connection. Only ``database`` (the file path) is meaningful
        for SQLite; the other parameters are ignored.
        """
        dbi = self.get_import()
        # isolation_level=None puts the connection in autocommit mode.
        return dbi.connect(database=database, isolation_level=None)

    def get_rdbms_metadata(self, cursor):
        """Return (vendor, product, version) metadata for SQLite."""
        import sqlite3
        return RDBMSMetadata('SQLite', 'SQLite 3', sqlite3.sqlite_version)

    def get_tables(self, cursor):
        """Return the names of all tables in the database."""
        cursor.execute("select name from sqlite_master where type = 'table'")
        table_names = []
        rs = cursor.fetchone()
        while rs is not None:
            table_names += [rs[0]]
            rs = cursor.fetchone()

        return table_names

    def get_table_metadata(self, table, cursor):
        """Return a list of ``TableMetadata`` tuples, one per column."""
        self._ensure_valid_table(cursor, table)

        # The table_info pragma returns results looking something like this:
        #
        # cid name            type              notnull dflt_value pk
        # --- --------------- ----------------- ------- ---------- --
        # 0   id              integer           99      NULL       1
        # 1   action_time     datetime          99      NULL       0
        # 5   object_repr     varchar(200)      99      NULL       0

        cursor.execute('PRAGMA table_info(%s)' % table)
        rs = cursor.fetchone()
        result = []
        char_field_re = re.compile(r'(varchar|char)\((\d+)\)')
        while rs is not None:
            # 'col_id'/'col_type' renamed from 'id'/'type', which shadowed
            # builtins.
            (col_id, name, col_type, not_null, default_value, is_primary) = rs
            m = char_field_re.match(col_type)
            if m:
                col_type = m.group(1)
                try:
                    max_char_size = int(m.group(2))
                except ValueError:
                    # Bug fix: the original called an undefined 'log' name
                    # here (NameError) and left max_char_size unbound.
                    # Treat an unparseable size as "unknown" (0).
                    max_char_size = 0
            else:
                max_char_size = 0

            data = TableMetadata(name, col_type, max_char_size, 0, 0,
                                 not not_null)
            result += [data]
            rs = cursor.fetchone()

        return result

    def get_index_metadata(self, table, cursor):
        """Return a list of ``IndexMetadata`` tuples, one per index."""
        self._ensure_valid_table(cursor, table)

        # First, get the list of indexes for the table, using the appropriate
        # pragma. The pragma returns output like this:
        #
        # seq name    unique
        # --- ------- ------
        # 0   id      0
        # 1   name    0
        # 2   address 0

        result = []
        cursor.execute("PRAGMA index_list('%s')" % table)
        indexes = []
        rs = cursor.fetchone()
        while rs is not None:
            indexes += [(rs[1], rs[2])]
            rs = cursor.fetchone()

        # Now, get the data about each index, using another pragma. This
        # pragma returns data like this:
        #
        # seqno cid name
        # ----- --- ---------------
        # 0     3   content_type_id

        for name, unique in indexes:
            cursor.execute("PRAGMA index_info('%s')" % name)
            rs = cursor.fetchone()
            columns = []
            while rs is not None:
                columns += [rs[2]]
                rs = cursor.fetchone()

            description = 'UNIQUE' if unique else ''
            result += [IndexMetadata(name, columns, description)]

        return result
| {
"repo_name": "illicitonion/givabit",
"path": "lib/sdks/google_appengine_1.7.1/google_appengine/lib/grizzled/grizzled/db/sqlite.py",
"copies": "19",
"size": "4689",
"license": "apache-2.0",
"hash": -169029207604199140,
"line_mean": 32.2553191489,
"line_max": 79,
"alpha_frac": 0.4399658776,
"autogenerated": false,
"ratio": 4.588062622309198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: 969e4c5fd51bb174563d06c1357489c2742813ec $
"""
Base classes for enhanced DB drivers.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import re
import time
import os
import sys
from datetime import date, datetime
from collections import namedtuple
from grizzled.exception import ExceptionWithMessage
from grizzled.decorators import abstract
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['DBDriver', 'DB', 'Cursor', 'DBError', 'Error', 'Warning',
'TableMetadata', 'IndexMetadata', 'RDBMSMetadata']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class DBError(ExceptionWithMessage):
    """
    Root of the exception hierarchy for this package; all ``db``
    exceptions derive from it.
    """
    pass
class Error(DBError):
    """Raised when an operation in the ``db`` module fails."""
    pass
class Warning(DBError):
    # Docstring fix: the original said "error", copied verbatim from the
    # Error class above; this exception signals non-fatal warnings.
    """Thrown to indicate a warning in the ``db`` module."""
    pass
# Metadata describing one table column; see Cursor.get_table_metadata()
# for the meaning of each field.
TableMetadata = namedtuple('TableMetadata', ['column_name',
                                             'type_string',
                                             'max_char_size',
                                             'precision',
                                             'scale',
                                             'nullable'])

# Metadata describing one index; see Cursor.get_index_metadata().
IndexMetadata = namedtuple('IndexMetadata', ['index_name',
                                             'index_columns',
                                             'description'])

# Vendor/product/version information about the RDBMS itself; see
# Cursor.get_rdbms_metadata().
RDBMSMetadata = namedtuple('RDBMSMetadata', ['vendor', 'product', 'version'])
class Cursor(object):
"""
Class for DB cursors returned by the ``DB.cursor()`` method. This class
conforms to the Python DB cursor interface, including the following
attributes.
:IVariables:
description : tuple
A read-only attribute that is a sequence of 7-item tuples, one per
column, from the last query executed. The tuple values are:
*(name, typecode, displaysize, internalsize, precision, scale)*
rowcount : int
A read-only attribute that specifies the number of rows
fetched in the last query, or -1 if unknown. *Note*: It's best
not to rely on the row count, because some database drivers
(such as SQLite) don't report valid row counts.
"""
    def __init__(self, cursor, driver):
        """
        Create a new Cursor object, wrapping the underlying real DB API
        cursor.

        :Parameters:
            cursor
                the real DB API cursor object
            driver
                the driver that is creating this object
        """
        self.__cursor = cursor
        self.__driver = driver
        # Cached metadata from the most recent query; refreshed by
        # execute()/executemany() and exposed via the read-only properties.
        self.__description = None
        self.__rowcount = -1  # -1 means "row count unknown"
    def __get_description(self):
        # Accessor backing the read-only 'description' property below.
        return self.__description

    description = property(__get_description,
                           doc='The description field. See class docs.')

    def __get_rowcount(self):
        # Accessor backing the read-only 'rowcount' property below.
        return self.__rowcount

    rowcount = property(__get_rowcount,
                        doc='Number of rows from last query, or -1')
def close(self):
"""
Close the cursor.
:raise Warning: Non-fatal warning
:raise Error: Error; unable to close
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.close()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def execute(self, statement, parameters=None):
"""
Execute a SQL statement string with the given parameters.
'parameters' is a sequence when the parameter style is
'format', 'numeric' or 'qmark', and a dictionary when the
style is 'pyformat' or 'named'. See ``DB.paramstyle()``.
:Parameters:
statement : str
the SQL statement to execute
parameters : list
parameters to use, if the statement is parameterized
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
if parameters:
result = self.__cursor.execute(statement, parameters)
else:
result = self.__cursor.execute(statement)
try:
self.__rowcount = self.__cursor.rowcount
except AttributeError:
self.__rowcount = -1
self.__description = self.__cursor.description
return result
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
except:
raise Error(sys.exc_info()[1])
def executemany(self, statement, *parameters):
"""
Execute a SQL statement once for each item in the given parameters.
:Parameters:
statement : str
the SQL statement to execute
parameters : sequence
a sequence of sequences when the parameter style
is 'format', 'numeric' or 'qmark', and a sequence
of dictionaries when the style is 'pyformat' or
'named'.
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
result = self.__cursor.executemany(statement, *parameters)
self.__rowcount = self.__cursor.rowcount
self.__description = self.__cursor.description
return result
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
executeMany = executemany
def fetchone(self):
"""
Returns the next result set row from the last query, as a sequence
of tuples. Raises an exception if the last statement was not a query.
:rtype: tuple
:return: Next result set row
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.fetchone()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def fetchall(self):
"""
Returns all remaining result rows from the last query, as a sequence
of tuples. Raises an exception if the last statement was not a query.
:rtype: list of tuples
:return: List of rows, each represented as a tuple
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.fetchall()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
fetchAll = fetchall
def fetchmany(self, n):
"""
Returns up to n remaining result rows from the last query, as a
sequence of tuples. Raises an exception if the last statement was
not a query.
:Parameters:
n : int
maximum number of result rows to get
:rtype: list of tuples
:return: List of rows, each represented as a tuple
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
self.__cursor.fetchmany(n)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
fetchMany = fetchmany
def get_rdbms_metadata(self):
"""
Return data about the RDBMS: the product name, the version,
etc. The result is a named tuple, with the following fields:
vendor
The product vendor, if applicable, or ``None`` if not known
product
The name of the database product, or ``None`` if not known
version
The database product version, or ``None`` if not known
The fields may be accessed by position or name. This method
just calls through to the equivalent method in the underlying
``DBDriver`` implementation.
:rtype: named tuple
:return: the vendor information
"""
# Default implementation
dbi = self.__driver.get_import()
try:
return self.__driver.get_rdbms_metadata(self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def get_table_metadata(self, table):
"""
Get the metadata for a table. Returns a list of tuples, one for
each column. Each tuple consists of the following::
(column_name, type_string, max_char_size, precision, scale, nullable)
The tuple elements have the following meanings.
column_name
the name of the column
type_string
the column type, as a string
max_char_size
the maximum size for a character field, or ``None``
precision
the precision, for a numeric field; or ``None``
scale
the scale, for a numeric field; or ``None``
nullable
True if the column is nullable, False if it is not
The tuples are named tuples, so the fields may be referenced by the
names above or by position.
The data may come from the DB API's ``cursor.description`` field, or
it may be richer, coming from a direct SELECT against
database-specific tables.
:rtype: list
:return: list of tuples, as described above
:raise Warning: Non-fatal warning
:raise Error: Error
"""
# Default implementation
dbi = self.__driver.get_import()
try:
return self.__driver.get_table_metadata(table, self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def get_index_metadata(self, table):
"""
Get the metadata for the indexes for a table. Returns a list of
tuples, one for each index. Each tuple consists of the following::
(index_name, [index_columns], description)
The tuple elements have the following meanings.
index_name
the index name
index_columns
a list of column names
description
index description, or ``None``
The tuples are named tuples, so the fields may be referenced by the
names above or by position.
:rtype: list of tuples
:return: the list of tuples, or ``None`` if not supported in the
underlying database
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__driver.get_index_metadata(table, self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def get_tables(self):
"""
Get the list of tables in the database to which this cursor is
connected.
:rtype: list
:return: List of table names. The list will be empty if the database
contains no tables.
:raise NotImplementedError: Capability not supported by database driver
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__driver.get_tables(self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
class DB(object):
    """
    The object returned by a call to ``DBDriver.connect()``. ``DB`` wraps
    the real database object returned by the underlying Python DB API
    module's ``connect()`` method.
    """
    def __init__(self, db, driver):
        """
        Create a new DB object.

        :Parameters:
            db
                the underlying Python DB API database object
            driver : DBDriver
                the driver (i.e., the subclass of ``DBDriver``) that
                created the ``db`` object
        """
        self.__db = db
        self.__driver = driver
        dbi = driver.get_import()
        # Mirror the DB API type objects onto this instance, defaulting to
        # 0 when the module doesn't define one. getattr/setattr replaces
        # the old 'exec' string statements: no dynamic code execution, and
        # it works on both Python 2 and Python 3.
        for attr in ['BINARY', 'NUMBER', 'STRING', 'DATETIME', 'ROWID']:
            setattr(self, attr, getattr(dbi, attr, 0))

    def paramstyle(self):
        """
        Get the parameter style for the underlying DB API module, exactly
        as reported by that module's ``paramstyle`` attribute. One of:

        - ``format``: '%s' markers, e.g. ``...WHERE Bar=%s`` with ``[x]``
        - ``named``: ':name' markers, e.g. ``...WHERE Bar=:x`` with ``{'x': x}``
        - ``numeric``: ':n' markers (1-based), e.g. ``...WHERE Bar=:1`` with ``[x]``
        - ``pyformat``: '%(name)s' markers, e.g. ``...WHERE Bar=%(x)s`` with ``{'x': x}``
        - ``qmark``: '?' markers, e.g. ``...WHERE Bar=?`` with ``[x]``
        """
        return self.__driver.get_import().paramstyle

    def Binary(self, string):
        """
        Returns an object representing the given string of bytes as a BLOB,
        equivalent to the module-level ``Binary()`` method in an underlying
        DB API-compliant module.

        :Parameters:
            string : str
                the string to convert to a BLOB

        :rtype: object
        :return: the corresponding BLOB
        """
        return self.__driver.get_import().Binary(string)

    def Date(self, year, month, day):
        """
        Returns an object representing the specified date, equivalent to
        the module-level ``Date()`` method in an underlying DB
        API-compliant module.

        :Parameters:
            year
                the year
            month
                the month
            day
                the day of the month

        :return: an object containing the date
        """
        return self.__driver.get_import().Date(year, month, day)

    def DateFromTicks(self, secs):
        """
        Returns an object representing the date *secs* seconds after the
        epoch, equivalent to the module-level ``DateFromTicks()`` method in
        an underlying DB API-compliant module. For example:

        .. python::

            import time
            d = db.DateFromTicks(time.time())

        :Parameters:
            secs : int
                the seconds from the epoch

        :return: an object containing the date
        """
        # Local renamed from 'date' so it no longer shadows the
        # datetime.date class it is derived from.
        d = date.fromtimestamp(secs)
        return self.__driver.get_import().Date(d.year, d.month, d.day)

    def Time(self, hour, minute, second):
        """
        Returns an object representing the specified time, equivalent to
        the module-level ``Time()`` method in an underlying DB
        API-compliant module.

        :Parameters:
            hour
                the hour of the day
            minute
                the minute within the hour. 0 <= *minute* <= 59
            second
                the second within the minute. 0 <= *second* <= 59

        :return: an object containing the time
        """
        # Bug fix: the original referenced an undefined 'secs' variable
        # (raising NameError) instead of using its own arguments.
        return self.__driver.get_import().Time(hour, minute, second)

    def TimeFromTicks(self, secs):
        """
        Returns an object representing the time 'secs' seconds after the
        epoch, equivalent to the module-level ``TimeFromTicks()`` method in
        an underlying DB API-compliant module. For example:

        .. python::

            import time
            d = db.TimeFromTicks(time.time())

        :Parameters:
            secs : int
                the seconds from the epoch

        :return: an object containing the time
        """
        dt = datetime.fromtimestamp(secs)
        return self.__driver.get_import().Time(dt.hour, dt.minute, dt.second)

    def Timestamp(self, year, month, day, hour, minute, second):
        """
        Returns an object representing the specified time, equivalent to
        the module-level ``Timestamp()`` method in an underlying DB
        API-compliant module.

        :Parameters:
            year
                the year
            month
                the month
            day
                the day of the month
            hour
                the hour of the day
            minute
                the minute within the hour. 0 <= *minute* <= 59
            second
                the second within the minute. 0 <= *second* <= 59

        :return: an object containing the timestamp
        """
        return self.__driver.get_import().Timestamp(year, month, day,
                                                    hour, minute, second)

    def TimestampFromTicks(self, secs):
        """
        Returns an object representing the date and time ``secs`` seconds
        after the epoch, equivalent to the module-level
        ``TimestampFromTicks()`` method in an underlying DB API-compliant
        module. For example:

        .. python::

            import time
            d = db.TimestampFromTicks(time.time())

        :Parameters:
            secs : int
                the seconds from the epoch

        :return: an object containing the timestamp
        """
        # Bug fix: the original used datetime.now(), silently ignoring the
        # 'secs' argument.
        dt = datetime.fromtimestamp(secs)
        return self.__driver.get_import().Timestamp(dt.year, dt.month, dt.day,
                                                    dt.hour, dt.minute,
                                                    dt.second)

    def cursor(self):
        """
        Get a cursor suitable for accessing the database. The returned
        object conforms to the Python DB API cursor interface.

        :return: the cursor

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        dbi = self.__driver.get_import()
        try:
            return Cursor(self.__db.cursor(), self.__driver)
        # 'except X as e' is portable across Python 2.6+ and Python 3.
        except dbi.Warning as val:
            raise Warning(val)
        except dbi.Error as val:
            raise Error(val)

    def commit(self):
        """
        Commit the current transaction.

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        dbi = self.__driver.get_import()
        try:
            self.__db.commit()
        except dbi.Warning as val:
            raise Warning(val)
        except dbi.Error as val:
            raise Error(val)

    def rollback(self):
        """
        Roll the current transaction back.

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        dbi = self.__driver.get_import()
        try:
            self.__db.rollback()
        except dbi.Warning as val:
            raise Warning(val)
        except dbi.Error as val:
            raise Error(val)

    def close(self):
        """
        Close the database connection.

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        dbi = self.__driver.get_import()
        try:
            self.__db.close()
        except dbi.Warning as val:
            raise Warning(val)
        except dbi.Error as val:
            raise Error(val)
class DBDriver(object):
    """
    Base class for all DB drivers. Subclasses implement the abstract
    methods (``get_import()``, ``get_display_name()``, ``do_connect()``)
    and may override the metadata methods with database-specific versions.
    """
    @abstract
    def get_import(self):
        """
        Get a bound import for the underlying DB API module. All subclasses
        must provide an implementation of this method. Here's an example,
        assuming the real underlying Python DB API module is 'foosql':

        .. python::

            def get_import(self):
                import foosql
                return foosql

        :return: a bound module
        """
        pass

    def __display_name(self):
        # Private backing method for the 'display_name' property.
        return self.get_display_name()

    @abstract
    def get_display_name(self):
        """
        Get the driver's name, for display. The returned name ought to be
        a reasonable identifier for the database (e.g., 'SQL Server',
        'MySQL'). All subclasses must provide an implementation of this
        method.

        :rtype: str
        :return: the driver's displayable name
        """
        pass

    display_name = property(__display_name,
                            doc='get a displayable name for the driver')

    def connect(self,
                host='localhost',
                port=None,
                user=None,
                password='',
                database=None):
        """
        Connect to the underlying database. Subclasses should *not*
        override this method. Instead, a subclass should override the
        ``do_connect()`` method.

        :Parameters:
            host : str
                the host where the database lives
            port : int
                the TCP port to use when connecting, or ``None``
            user : str
                the user to use when connecting, or ``None``
            password : str
                the password to use when connecting, or ``None``
            database : str
                the name of the database to which to connect

        :rtype: ``DB``
        :return: a ``DB`` object representing the open database

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        dbi = self.get_import()
        try:
            self.__db = self.do_connect(host=host,
                                        port=port,
                                        user=user,
                                        password=password,
                                        database=database)
            return DB(self.__db, self)
        # 'except X as e' is portable across Python 2.6+ and Python 3.
        except dbi.Warning as val:
            raise Warning(val)
        except dbi.Error as val:
            raise Error(val)

    @abstract
    def do_connect(self,
                   host='localhost',
                   port=None,
                   user='',
                   password='',
                   database='default'):
        """
        Connect to the actual underlying database, using the driver.
        Subclasses must provide an implementation of this method, returning
        the result of the real DB API implementation's ``connect()``
        method. For instance:

        .. python::

            def do_connect():
                dbi = self.get_import()
                return dbi.connect(host=host, user=user, passwd=password,
                                   database=database)

        There is no need to catch exceptions; the ``DBDriver`` class's
        ``connect()`` method handles that. Parameters are as for
        ``connect()``.

        :rtype: object
        :return: a DB API-compliant object representing the open database

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        pass

    def get_rdbms_metadata(self, cursor):
        """
        Return data about the RDBMS as a named tuple with ``vendor``,
        ``product`` and ``version`` fields (product vendor, product name
        and product version; each ``None``/'unknown' when not known). The
        fields may be accessed by position or name.

        :Parameters:
            cursor : Cursor
                a ``Cursor`` object from a recent query

        :rtype: named tuple
        :return: the vendor information
        """
        # Default implementation: nothing is known.
        return RDBMSMetadata('unknown', 'unknown', 'unknown')

    def get_index_metadata(self, table, cursor):
        """
        Get the metadata for the indexes for a table, as a list of named
        tuples, one per index::

            (index_name, [index_columns], description)

        index_name
            the index name
        index_columns
            a list of column names
        description
            index description, or ``None``

        The default implementation of this method returns `None`.

        :Parameters:
            table : str
                table name
            cursor : Cursor
                a ``Cursor`` object from a recent query

        :rtype: list of tuples
        :return: the list of tuples, or ``None`` if not supported in the
                 underlying database

        :raise Warning: Non-fatal warning
        """
        return None

    def get_table_metadata(self, table, cursor):
        """
        Get the metadata for a table, as a list of named tuples, one per
        column::

            (column_name, type_string, max_char_size, precision, scale,
             nullable)

        column_name
            the name of the column
        type_string
            the column type, as a string
        max_char_size
            the maximum size for a character field, or ``None``
        precision
            the precision, for a numeric field; or ``None``
        scale
            the scale, for a numeric field; or ``None``
        nullable
            ``True`` if the column is nullable, ``False`` if it is not

        The default implementation uses the DB API's ``cursor.description``
        field. Subclasses are free to override this method to produce their
        own version that uses other means.

        :Parameters:
            table : str
                the table name for which metadata is desired
            cursor : Cursor
                a ``Cursor`` object from a recent query

        :rtype: list
        :return: list of tuples, as described above

        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        self._ensure_valid_table(cursor, table)
        dbi = self.get_import()
        # Selecting zero rows still populates cursor.description.
        cursor.execute('SELECT * FROM %s WHERE 1=0' % table)
        result = []
        for col in cursor.description:
            name = col[0]
            coltype = col[1]        # renamed from 'type': avoid shadowing builtin
            size = col[2]
            internal_size = col[3]
            precision = col[4]
            scale = col[5]
            nullable = col[6]
            # Bug fix: the original initialized 'sType' but assigned and
            # tested 'stype', so every column was reported as 'unknown'.
            stype = None
            try:
                if coltype == dbi.BINARY:
                    stype = 'blob'
                elif coltype == dbi.DATETIME:
                    stype = 'datetime'
                elif coltype == dbi.NUMBER:
                    stype = 'numeric'
                elif coltype == dbi.STRING:
                    sz = internal_size
                    if (sz is None) or (sz <= 0):
                        sz = size
                    stype = 'char' if sz == 1 else 'varchar'
                    size = sz
                elif coltype == dbi.ROWID:
                    stype = 'id'
            except AttributeError:
                # The DB API module doesn't define all the type constants.
                stype = None
            if not stype:
                stype = 'unknown (type code=%s)' % str(coltype)
            result.append(TableMetadata(name, stype, size, precision,
                                        scale, nullable))
        return result

    def get_tables(self, cursor):
        """
        Get the list of tables in the database.

        :Parameters:
            cursor : Cursor
                a ``Cursor`` object from a recent query

        :rtype: list
        :return: List of table names. The list will be empty if the database
                 contains no tables.

        :raise NotImplementedError: Capability not supported by database driver
        :raise Warning: Non-fatal warning
        :raise Error: Error
        """
        raise NotImplementedError

    def _ensure_valid_table(self, cursor, table_name):
        """
        Determines whether a table name represents a legal table in the
        current database, throwing an ``Error`` if not.

        :Parameters:
            cursor : Cursor
                an open ``Cursor``
            table_name : str
                the table name

        :raise Error: bad table name
        """
        if not self._is_valid_table(cursor, table_name):
            # raise Error(...) instead of the Python-2-only 'raise E, msg'.
            raise Error('No such table: "%s"' % table_name)

    def _is_valid_table(self, cursor, table_name):
        """
        Determines whether a table name represents a legal table in the
        current database.

        :Parameters:
            cursor : Cursor
                an open ``Cursor``
            table_name : str
                the table name

        :rtype: bool
        :return: ``True`` if the table is valid, ``False`` if not
        """
        return table_name in self.get_tables(cursor)
| {
"repo_name": "bratsche/Neutron-Drive",
"path": "google_appengine/lib/grizzled/grizzled/db/base.py",
"copies": "19",
"size": "32027",
"license": "bsd-3-clause",
"hash": 88891257386059500,
"line_mean": 31.6806122449,
"line_max": 82,
"alpha_frac": 0.5133793362,
"autogenerated": false,
"ratio": 4.995632506629231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008005296833933069,
"num_lines": 980
} |
# $Id: 9efdb91769a07b38061d1041ac0486b77f362738 $
"""
Oracle extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
from grizzled.db.base import (DBDriver, Error, Warning,
TableMetadata, IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
VENDOR = 'Oracle Corporation'
PRODUCT = 'Oracle'
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class OracleDriver(DBDriver):
    """DB Driver for Oracle, using the cx_Oracle DB API module."""

    def get_import(self):
        import cx_Oracle
        return cx_Oracle

    def get_display_name(self):
        return "Oracle"

    def do_connect(self,
                   host='localhost',
                   port=None,
                   user='',
                   password='',
                   database='default'):
        # cx_Oracle accepts a single 'user/password@dsn' connect string.
        dbi = self.get_import()
        return dbi.connect('%s/%s@%s' % (user, password, database))

    def get_tables(self, cursor):
        cursor.execute('select lower(table_name) from all_tables')
        table_names = []
        rs = cursor.fetchone()
        while rs is not None:
            name = rs[0]
            # Skip Oracle-internal tables, which contain "$".
            if name.find('$') < 0:
                table_names.append(name)
            rs = cursor.fetchone()
        return table_names

    def get_rdbms_metadata(self, cursor):
        cursor.execute("SELECT banner FROM v$version WHERE "
                       "banner LIKE 'Oracle%'")
        rs = cursor.fetchone()
        version = 'unknown' if rs is None else rs[0]
        return RDBMSMetadata(VENDOR, PRODUCT, version)

    def get_table_metadata(self, table, cursor):
        self._ensure_valid_table(cursor, table)
        # Use a named bind variable for the table name rather than string
        # interpolation, to avoid SQL injection.
        cursor.execute("select column_name, data_type, data_length, "
                       "data_precision, data_scale, nullable, "
                       "char_col_decl_length from all_tab_columns "
                       "where lower(table_name) = :tbl",
                       {'tbl': table.lower()})
        results = []
        rs = cursor.fetchone()
        while rs:
            column = rs[0]
            coltype = rs[1]
            data_length = rs[2]
            precision = rs[3]
            scale = rs[4]
            nullable = (rs[5] == 'Y')
            declared_char_length = rs[6]
            # Prefer the declared character length for char-like columns;
            # fall back to the raw data length.
            length = declared_char_length if declared_char_length \
                     else data_length
            results.append(TableMetadata(column,
                                         coltype,
                                         length,
                                         precision,
                                         scale,
                                         nullable))
            rs = cursor.fetchone()
        return results

    def get_index_metadata(self, table, cursor):
        self._ensure_valid_table(cursor, table)
        # First, issue a query to get the list of indexes and some
        # descriptive information. Bind the table name (see above).
        cursor.execute("select index_name, index_type, uniqueness, "
                       "max_extents, temporary from all_indexes where "
                       "lower(table_name) = :tbl", {'tbl': table.lower()})
        names = []
        description = {}
        rs = cursor.fetchone()
        while rs is not None:
            (name, index_type, unique, max_extents, temporary) = rs
            desc = 'Temporary ' if temporary == 'Y' else ''
            unique = unique.lower()
            if unique == 'nonunique':
                unique = 'non-unique'
            index_type = index_type.lower()
            desc += '%s %s index' % (index_type, unique)
            if max_extents:
                desc += ' (max_extents=%d)' % max_extents
            names.append(name)
            description[name] = desc
            rs = cursor.fetchone()

        # Next, get the columns in each index, in column order.
        cursor.execute("SELECT aic.index_name, aic.column_name, "
                       "aic.column_position, aic.descend, aic.table_owner, "
                       "CASE alc.constraint_type WHEN 'U' THEN 'UNIQUE' "
                       "WHEN 'P' THEN 'PRIMARY KEY' ELSE '' END "
                       "AS index_type FROM all_ind_columns aic "
                       "LEFT JOIN all_constraints alc "
                       "ON aic.index_name = alc.constraint_name AND "
                       "aic.table_name = alc.table_name AND "
                       "aic.table_owner = alc.owner "
                       "WHERE lower(aic.table_name) = :tbl "
                       "ORDER BY COLUMN_POSITION", {'tbl': table.lower()})
        columns = {}
        rs = cursor.fetchone()
        while rs is not None:
            index_name = rs[0]
            column_name = rs[1]
            asc = rs[3]
            columns.setdefault(index_name, []).append(
                '%s %s' % (column_name, asc))
            rs = cursor.fetchone()

        # Finally, assemble the result.
        return [IndexMetadata(name,
                              columns.get(name, []),
                              description.get(name, None))
                for name in names]
| {
"repo_name": "gauribhoite/personfinder",
"path": "env/google_appengine/lib/grizzled/grizzled/db/oracle.py",
"copies": "19",
"size": "5635",
"license": "apache-2.0",
"hash": 7818532810293178000,
"line_mean": 34.21875,
"line_max": 77,
"alpha_frac": 0.4557231588,
"autogenerated": false,
"ratio": 4.548022598870056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: a384faeed9e9e609ca4b056c682c8ecda51f9e64 $
"""
MySQL extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
import re
from grizzled.db.base import (DBDriver, Error, Warning, TableMetadata,
IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
VENDOR = 'MySQL AB'
PRODUCT = 'MySQL'
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class MySQLDriver(DBDriver):
    """DB Driver for MySQL, using the MySQLdb DB API module."""

    # Parses type strings like "varchar(255)" into name and size parts.
    # Raw string avoids the invalid-escape-sequence deprecation for '\('.
    TYPE_RE = re.compile(r'([a-z]+)(\([0-9]+\))?')

    def get_import(self):
        import MySQLdb
        return MySQLdb

    def get_display_name(self):
        return "MySQL"

    def do_connect(self,
                   host="localhost",
                   port=None,
                   user="sa",
                   password="",
                   database="default"):
        dbi = self.get_import()
        return dbi.connect(host=host, user=user, passwd=password, db=database)

    def get_rdbms_metadata(self, cursor):
        cursor.execute('SELECT version()')
        rs = cursor.fetchone()
        version = 'unknown' if rs is None else rs[0]
        return RDBMSMetadata(VENDOR, PRODUCT, version)

    def get_table_metadata(self, table, cursor):
        # The table name was validated against get_tables() first, so the
        # interpolation below cannot inject arbitrary SQL.
        self._ensure_valid_table(cursor, table)
        dbi = self.get_import()
        cursor.execute('DESC %s' % table)
        rs = cursor.fetchone()
        results = []
        while rs is not None:
            column = rs[0]
            coltype = rs[1]
            null = (rs[2] != 'NO')
            # Robustness fix: initialize these before the match test; the
            # original left them unbound (NameError) when TYPE_RE did not
            # match the type string.
            max_char_size = None
            precision = None
            match = self.TYPE_RE.match(coltype)
            if match:
                coltype = match.group(1)
                size = match.group(2)
                if size:
                    size = size[1:-1]      # strip the surrounding parens
                if coltype in ['varchar', 'char']:
                    max_char_size = size
                else:
                    precision = size
            results.append(TableMetadata(column,
                                         coltype,
                                         max_char_size,
                                         precision,
                                         0,
                                         null))
            rs = cursor.fetchone()
        return results

    def get_index_metadata(self, table, cursor):
        self._ensure_valid_table(cursor, table)
        dbi = self.get_import()
        cursor.execute('SHOW INDEX FROM %s' % table)
        rs = cursor.fetchone()
        result = []
        columns = {}
        descr = {}
        while rs is not None:
            name = rs[2]
            columns.setdefault(name, []).append(rs[4])
            # Column 1 is a "non-unique" flag.
            if (not rs[1]) or (name.lower() == 'primary'):
                description = 'Unique'
            else:
                description = 'Non-unique'
            if rs[10] is not None:
                description += ', %s index' % rs[10]
            descr[name] = description
            rs = cursor.fetchone()
        # sorted() instead of keys()/sort(): works on both Python 2 and
        # Python 3 (dict.keys() is a view, not a list, on Python 3).
        for name in sorted(columns.keys()):
            result.append(IndexMetadata(name, columns[name], descr[name]))
        return result

    def get_tables(self, cursor):
        cursor.execute('SHOW TABLES')
        table_names = []
        rs = cursor.fetchone()
        while rs is not None:
            table_names.append(rs[0])
            rs = cursor.fetchone()
        return table_names
| {
"repo_name": "pigeonflight/strider-plone",
"path": "docker/appengine/lib/grizzled/grizzled/db/mysql.py",
"copies": "19",
"size": "4138",
"license": "mit",
"hash": -6384937807591979000,
"line_mean": 28.5571428571,
"line_max": 78,
"alpha_frac": 0.4294345094,
"autogenerated": false,
"ratio": 4.691609977324263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: admin.py e01f24bde54b 2009/09/06 13:55:06 jpartogi $
from django.contrib import admin
from django.core import urlresolvers
from django.utils.html import format_html
from django.utils.translation import ugettext as _

from contact_form.models import *
class SubjectAdmin(admin.ModelAdmin):
    """Admin for Subject: shows the title plus a link to its department."""
    list_display = ('title', 'department_url')

    def department_url(self, obj):
        """Render a link to the change page of the subject's department."""
        change_url = urlresolvers.reverse(
            'admin:contact_form_department_change', args=(obj.department.id,))
        # format_html HTML-escapes the department name (preventing stored
        # XSS via '%' interpolation) and returns a string already marked
        # safe for the admin changelist.
        return format_html('<a href="{0}">{1}</a>',
                           change_url, obj.department.name)
    department_url.allow_tags = True  # no-op with format_html; kept for old Django
    department_url.short_description = _('department')
class DepartmentAdmin(admin.ModelAdmin):
    # Columns shown on the department changelist page.
    list_display = ('name', 'email', 'phone')
class MessageAdmin(admin.ModelAdmin):
    """Admin for Message: changelist with sender link, filters and search."""
    list_display = ('subject', 'sender', 'created', 'is_spam')
    list_filter = ('created', 'subject')
    search_fields = ('sender_name', 'created')

    def sender(self, obj):
        """Render the message sender as a mailto: link."""
        # format_html HTML-escapes the address and name (preventing stored
        # XSS via '%' interpolation) and returns a safe string.
        return format_html('<a href="mailto:{0}">{1}</a>',
                           obj.sender_email, obj.sender_name)
    sender.allow_tags = True  # no-op with format_html; kept for old Django
    sender.short_description = _('sender')
# Register the contact-form models with the default admin site.
admin.site.register(Subject, SubjectAdmin)
admin.site.register(Department, DepartmentAdmin)
admin.site.register(Message, MessageAdmin)
| {
"repo_name": "jpartogi/django-contact-form",
"path": "contact_form/admin.py",
"copies": "1",
"size": "1209",
"license": "bsd-3-clause",
"hash": 4982201994005634000,
"line_mean": 35.6363636364,
"line_max": 108,
"alpha_frac": 0.699751861,
"autogenerated": false,
"ratio": 3.51453488372093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9695855370375306,
"avg_score": 0.003686274869124874,
"num_lines": 33
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils import nodes
class BaseAdmonition(Directive):
    """
    Shared implementation for all admonition directives. Subclasses set
    ``node_class`` (and, for the generic ``admonition``, the argument and
    option settings).
    """

    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = True

    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text)
        if self.arguments:
            # Titled (generic) admonition: parse the title as inline text.
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            admonition_node += nodes.title(title_text, '', *textnodes)
            admonition_node += messages
            # 'in' test replaces the Python-2-only dict.has_key(); this
            # also matches the other copies of this module.
            if 'class' in self.options:
                classes = self.options['class']
            else:
                classes = ['admonition-' + nodes.make_id(title_text)]
            admonition_node['classes'] += classes
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
class Admonition(BaseAdmonition):
    # Generic admonition: requires a title argument, accepts a :class: option.
    required_arguments = 1
    option_spec = {'class': directives.class_option}
    node_class = nodes.admonition


# Specific admonitions: no title; each maps directly to its doctree node.

class Attention(BaseAdmonition):
    node_class = nodes.attention


class Caution(BaseAdmonition):
    node_class = nodes.caution


class Danger(BaseAdmonition):
    node_class = nodes.danger


class Error(BaseAdmonition):
    node_class = nodes.error


class Hint(BaseAdmonition):
    node_class = nodes.hint


class Important(BaseAdmonition):
    node_class = nodes.important


class Note(BaseAdmonition):
    node_class = nodes.note


class Tip(BaseAdmonition):
    node_class = nodes.tip


class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "PatrickKennedy/Sybil",
"path": "docutils/parsers/rst/directives/admonitions.py",
"copies": "2",
"size": "2189",
"license": "bsd-2-clause",
"hash": 1355361466040021200,
"line_mean": 21.5670103093,
"line_max": 76,
"alpha_frac": 0.6418455916,
"autogenerated": false,
"ratio": 3.915921288014311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022411474675033616,
"num_lines": 97
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class BaseAdmonition(Directive):
    """
    Common machinery for admonition directives. Concrete subclasses supply
    ``node_class`` (and, for the generic form, argument/option settings).
    """

    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = True

    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""

    def run(self):
        self.assert_has_content()
        node = self.node_class('\n'.join(self.content))
        if self.arguments:
            # Titled admonition: parse the title as inline markup.
            title = self.arguments[0]
            title_nodes, sys_messages = self.state.inline_text(title,
                                                               self.lineno)
            node += nodes.title(title, '', *title_nodes)
            node += sys_messages
            css_classes = self.options.get('class')
            if css_classes is None:
                css_classes = ['admonition-' + nodes.make_id(title)]
            node['classes'] += css_classes
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Admonition(BaseAdmonition):
    # Generic admonition: requires a title argument, accepts a :class: option.
    required_arguments = 1
    option_spec = {'class': directives.class_option}
    node_class = nodes.admonition


# Specific admonitions: no title; each maps directly to its doctree node.

class Attention(BaseAdmonition):
    node_class = nodes.attention


class Caution(BaseAdmonition):
    node_class = nodes.caution


class Danger(BaseAdmonition):
    node_class = nodes.danger


class Error(BaseAdmonition):
    node_class = nodes.error


class Hint(BaseAdmonition):
    node_class = nodes.hint


class Important(BaseAdmonition):
    node_class = nodes.important


class Note(BaseAdmonition):
    node_class = nodes.note


class Tip(BaseAdmonition):
    node_class = nodes.tip


class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "kdwink/intellij-community",
"path": "python/helpers/py2only/docutils/parsers/rst/directives/admonitions.py",
"copies": "5",
"size": "2173",
"license": "apache-2.0",
"hash": 4439197025685575700,
"line_mean": 21.6354166667,
"line_max": 76,
"alpha_frac": 0.641509434,
"autogenerated": false,
"ratio": 3.922382671480144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 96
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils import nodes
class BaseAdmonition(Directive):
    """
    Common machinery for admonition directives. Concrete subclasses supply
    ``node_class`` (and, for the generic form, argument/option settings).
    """

    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = True

    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""

    def run(self):
        self.assert_has_content()
        node = self.node_class('\n'.join(self.content))
        if self.arguments:
            # Titled admonition: parse the title as inline markup.
            title = self.arguments[0]
            title_nodes, sys_messages = self.state.inline_text(title,
                                                               self.lineno)
            node += nodes.title(title, '', *title_nodes)
            node += sys_messages
            css_classes = self.options.get('class')
            if css_classes is None:
                css_classes = ['admonition-' + nodes.make_id(title)]
            node['classes'] += css_classes
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Admonition(BaseAdmonition):
    # Generic admonition: requires a title argument, accepts a :class: option.
    required_arguments = 1
    option_spec = {'class': directives.class_option}
    node_class = nodes.admonition


# Specific admonitions: no title; each maps directly to its doctree node.

class Attention(BaseAdmonition):
    node_class = nodes.attention


class Caution(BaseAdmonition):
    node_class = nodes.caution


class Danger(BaseAdmonition):
    node_class = nodes.danger


class Error(BaseAdmonition):
    node_class = nodes.error


class Hint(BaseAdmonition):
    node_class = nodes.hint


class Important(BaseAdmonition):
    node_class = nodes.important


class Note(BaseAdmonition):
    node_class = nodes.note


class Tip(BaseAdmonition):
    node_class = nodes.tip


class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "ivan-fedorov/intellij-community",
"path": "python/helpers/docutils/parsers/rst/directives/admonitions.py",
"copies": "51",
"size": "2182",
"license": "apache-2.0",
"hash": 4900746167438148000,
"line_mean": 21.4948453608,
"line_max": 76,
"alpha_frac": 0.6416131989,
"autogenerated": false,
"ratio": 3.9244604316546763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 97
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils import nodes
class BaseAdmonition(Directive):
    """Base class for admonition directives (note, warning, ...).

    Parses the directive body into an admonition node; an optional first
    argument becomes the admonition title.
    """
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = True
    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""
    def run(self):
        """Build and return the admonition node with parsed title and body."""
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text)
        if self.arguments:
            # The title is inline-parsed; parse errors come back as
            # system messages and are appended to the node.
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            admonition_node += nodes.title(title_text, '', *textnodes)
            admonition_node += messages
            if 'class' in self.options:
                classes = self.options['class']
            else:
                # derive a CSS class from the title, e.g. "admonition-my-title"
                classes = ['admonition-' + nodes.make_id(title_text)]
            admonition_node['classes'] += classes
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
# Concrete admonition directives.  ``Admonition`` is the generic directive
# and requires a title argument; the specific forms below map one-to-one
# onto the matching docutils node classes and take no title.
class Admonition(BaseAdmonition):
    required_arguments = 1
    option_spec = {'class': directives.class_option}
    node_class = nodes.admonition
class Attention(BaseAdmonition):
    node_class = nodes.attention
class Caution(BaseAdmonition):
    node_class = nodes.caution
class Danger(BaseAdmonition):
    node_class = nodes.danger
class Error(BaseAdmonition):
    node_class = nodes.error
class Hint(BaseAdmonition):
    node_class = nodes.hint
class Important(BaseAdmonition):
    node_class = nodes.important
class Note(BaseAdmonition):
    node_class = nodes.note
class Tip(BaseAdmonition):
    node_class = nodes.tip
class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/parsers/rst/directives/admonitions.py",
"copies": "2",
"size": "2279",
"license": "bsd-3-clause",
"hash": -6029460971646479000,
"line_mean": 21.4948453608,
"line_max": 76,
"alpha_frac": 0.6143045195,
"autogenerated": false,
"ratio": 4.047957371225578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 97
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class BaseAdmonition(Directive):
    """Base class for admonition directives (note, warning, ...).

    This variant supports the common 'class' and 'name' options via
    ``set_classes`` / ``add_name``; only the generic admonition directive
    takes a title argument.
    """
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True
    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""
    def run(self):
        """Build and return the admonition node with parsed title and body."""
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text, **self.options)
        self.add_name(admonition_node)
        if self.node_class is nodes.admonition:
            # only the generic directive carries a title argument
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            admonition_node += nodes.title(title_text, '', *textnodes)
            admonition_node += messages
            if not 'classes' in self.options:
                # derive a CSS class from the title, e.g. "admonition-my-title"
                admonition_node['classes'] += ['admonition-' +
                                               nodes.make_id(title_text)]
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
# Concrete admonition directives.  ``Admonition`` is the generic directive
# and requires a title argument; the specific forms below map one-to-one
# onto the matching docutils node classes and take no title.
class Admonition(BaseAdmonition):
    required_arguments = 1
    node_class = nodes.admonition
class Attention(BaseAdmonition):
    node_class = nodes.attention
class Caution(BaseAdmonition):
    node_class = nodes.caution
class Danger(BaseAdmonition):
    node_class = nodes.danger
class Error(BaseAdmonition):
    node_class = nodes.error
class Hint(BaseAdmonition):
    node_class = nodes.hint
class Important(BaseAdmonition):
    node_class = nodes.important
class Note(BaseAdmonition):
    node_class = nodes.note
class Tip(BaseAdmonition):
    node_class = nodes.tip
class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "chirilo/remo",
"path": "vendor-local/lib/python/docutils/parsers/rst/directives/admonitions.py",
"copies": "16",
"size": "2273",
"license": "bsd-3-clause",
"hash": -710290248243712000,
"line_mean": 22.6770833333,
"line_max": 76,
"alpha_frac": 0.6410030796,
"autogenerated": false,
"ratio": 3.9325259515570936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022644927536231884,
"num_lines": 96
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class BaseAdmonition(Directive):
    """Base class for admonition directives (note, warning, ...).

    This variant supports the common 'class' and 'name' options via
    ``set_classes`` / ``add_name``; only the generic admonition directive
    takes a title argument.
    """
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True
    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""
    def run(self):
        """Build and return the admonition node with parsed title and body."""
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text, **self.options)
        self.add_name(admonition_node)
        if self.node_class is nodes.admonition:
            # only the generic directive carries a title argument
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            admonition_node += nodes.title(title_text, '', *textnodes)
            admonition_node += messages
            if not 'classes' in self.options:
                # derive a CSS class from the title, e.g. "admonition-my-title"
                admonition_node['classes'] += ['admonition-' +
                                               nodes.make_id(title_text)]
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
# Concrete admonition directives.  ``Admonition`` is the generic directive
# and requires a title argument; the specific forms below map one-to-one
# onto the matching docutils node classes and take no title.
class Admonition(BaseAdmonition):
    required_arguments = 1
    node_class = nodes.admonition
class Attention(BaseAdmonition):
    node_class = nodes.attention
class Caution(BaseAdmonition):
    node_class = nodes.caution
class Danger(BaseAdmonition):
    node_class = nodes.danger
class Error(BaseAdmonition):
    node_class = nodes.error
class Hint(BaseAdmonition):
    node_class = nodes.hint
class Important(BaseAdmonition):
    node_class = nodes.important
class Note(BaseAdmonition):
    node_class = nodes.note
class Tip(BaseAdmonition):
    node_class = nodes.tip
class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "ajaxsys/dict-admin",
"path": "docutils/parsers/rst/directives/admonitions.py",
"copies": "2",
"size": "2369",
"license": "bsd-3-clause",
"hash": -5186080294969893000,
"line_mean": 22.6770833333,
"line_max": 76,
"alpha_frac": 0.6150274377,
"autogenerated": false,
"ratio": 4.02891156462585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.564393900232585,
"avg_score": null,
"num_lines": null
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BaseAdmonition(Directive):
    """Base class for admonition directives (note, warning, ...).

    This variant additionally records the source/line of the title node
    for better error reporting; only the generic admonition directive
    takes a title argument.
    """
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True
    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""
    def run(self):
        """Build and return the admonition node with parsed title and body."""
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text, **self.options)
        self.add_name(admonition_node)
        if self.node_class is nodes.admonition:
            # only the generic directive carries a title argument
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            title = nodes.title(title_text, '', *textnodes)
            # attach source position so downstream reporters can point
            # at the directive's title line
            title.source, title.line = (
                self.state_machine.get_source_and_line(self.lineno))
            admonition_node += title
            admonition_node += messages
            if not 'classes' in self.options:
                # derive a CSS class from the title, e.g. "admonition-my-title"
                admonition_node['classes'] += ['admonition-' +
                                               nodes.make_id(title_text)]
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
# Concrete admonition directives.  ``Admonition`` is the generic directive
# and requires a title argument; the specific forms below map one-to-one
# onto the matching docutils node classes and take no title.
class Admonition(BaseAdmonition):
    required_arguments = 1
    node_class = nodes.admonition
class Attention(BaseAdmonition):
    node_class = nodes.attention
class Caution(BaseAdmonition):
    node_class = nodes.caution
class Danger(BaseAdmonition):
    node_class = nodes.danger
class Error(BaseAdmonition):
    node_class = nodes.error
class Hint(BaseAdmonition):
    node_class = nodes.hint
class Important(BaseAdmonition):
    node_class = nodes.important
class Note(BaseAdmonition):
    node_class = nodes.note
class Tip(BaseAdmonition):
    node_class = nodes.tip
class Warning(BaseAdmonition):
    node_class = nodes.warning
| {
"repo_name": "signed/intellij-community",
"path": "python/helpers/py3only/docutils/parsers/rst/directives/admonitions.py",
"copies": "44",
"size": "2404",
"license": "apache-2.0",
"hash": -2864561348624824300,
"line_mean": 23.5306122449,
"line_max": 76,
"alpha_frac": 0.6335274542,
"autogenerated": false,
"ratio": 3.947454844006568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022182786157941436,
"num_lines": 98
} |
# $Id: ah.py 34 2007-01-28 07:54:20Z dugsong $
# -*- coding: utf-8 -*-
"""Authentication Header."""
from __future__ import absolute_import
from . import dpkt
class AH(dpkt.Packet):
    """Authentication Header.
    TODO: Longer class information....
    Attributes:
        __hdr__: Header fields of AH.
        auth: Authentication body.
        data: Message data.
    """
    __hdr__ = (
        ('nxt', 'B', 0),
        ('len', 'B', 0),  # payload length
        ('rsvd', 'H', 0),
        ('spi', 'I', 0),
        ('seq', 'I', 0)
    )
    auth = b''
    def unpack(self, buf):
        """Parse the AH header, split off the ICV, and decode the payload.

        NOTE(review): 'len' is used here as a raw byte count for the
        auth/ICV data; RFC 4302 defines it in 32-bit words minus 2 —
        confirm the intended semantics.
        """
        dpkt.Packet.unpack(self, buf)
        self.auth = self.data[:self.len]
        buf = self.data[self.len:]
        # deferred import to avoid a circular dependency with dpkt.ip
        from . import ip
        try:
            # decode the next-header payload; fall back to raw bytes if
            # the protocol is unknown or the payload is malformed
            self.data = ip.IP.get_proto(self.nxt)(buf)
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            self.data = buf
    def __len__(self):
        return self.__hdr_len__ + len(self.auth) + len(self.data)
    def __bytes__(self):
        return self.pack_hdr() + bytes(self.auth) + bytes(self.data)
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/ah.py",
"copies": "3",
"size": "1131",
"license": "bsd-3-clause",
"hash": -7692888085102468000,
"line_mean": 23.5869565217,
"line_max": 74,
"alpha_frac": 0.5207780725,
"autogenerated": false,
"ratio": 3.3461538461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5366931918653847,
"avg_score": null,
"num_lines": null
} |
# $Id: aim.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""AOL Instant Messenger."""
import dpkt
import struct
# OSCAR: http://iserverd1.khstu.ru/oscar/
class FLAP(dpkt.Packet):
    """FLAP framing layer of the OSCAR protocol.

    Attributes:
        __hdr__: ast ('*' marker byte), frame type, sequence number,
            and payload length.
    """
    __hdr__ = (
        ('ast', 'B', 0x2a),  # '*'
        ('type', 'B', 0),
        ('seq', 'H', 0),
        ('len', 'H', 0)
    )
    def unpack(self, buf):
        """Parse the FLAP header and validate marker byte and length.

        Raises:
            dpkt.UnpackError: if the leading marker byte is not 0x2a.
            dpkt.NeedData: if fewer than `len` payload bytes are present.
        """
        dpkt.Packet.unpack(self, buf)
        if self.ast != 0x2a:
            raise dpkt.UnpackError('invalid FLAP header')
        if len(self.data) < self.len:
            # instance-style raise: the `raise Exc, msg` form is
            # Python 2-only syntax and breaks under Python 3
            raise dpkt.NeedData('%d left, %d needed' % (len(self.data), self.len))
class SNAC(dpkt.Packet):
    """SNAC data unit carried inside a FLAP frame.

    Attributes:
        __hdr__: service family, subtype, flags, and request id.
    """
    __hdr__ = (
        ('family', 'H', 0),
        ('subtype', 'H', 0),
        ('flags', 'H', 0),
        ('reqid', 'I', 0)
    )
def tlv(buf):
    """Split one TLV (type, length, value) record off the front of *buf*.

    Returns a 4-tuple ``(type, length, value, remainder)``.  Raises
    dpkt.UnpackError on a truncated header and dpkt.NeedData when fewer
    than *length* value bytes are available.
    """
    hdr_len = 4
    try:
        tlv_type, tlv_len = struct.unpack('>HH', buf[:hdr_len])
    except struct.error:
        raise dpkt.UnpackError
    value = buf[hdr_len:hdr_len + tlv_len]
    if len(value) < tlv_len:
        raise dpkt.NeedData
    remainder = buf[hdr_len + tlv_len:]
    return tlv_type, tlv_len, value, remainder
# TOC 1.0: http://jamwt.com/Py-TOC/PROTOCOL
# TOC 2.0: http://www.firestuff.org/projects/firetalk/doc/toc2.txt
| {
"repo_name": "lkash/test",
"path": "dpkt/aim.py",
"copies": "6",
"size": "1128",
"license": "bsd-3-clause",
"hash": -4040564666547426000,
"line_mean": 20.6923076923,
"line_max": 82,
"alpha_frac": 0.5044326241,
"autogenerated": false,
"ratio": 2.6985645933014353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019889815672948206,
"num_lines": 52
} |
# $Id: aim.py 23 2006-11-08 15:45:33Z dugsong $
"""AOL Instant Messenger."""
import dpkt
import struct
# OSCAR: http://iserverd1.khstu.ru/oscar/
class FLAP(dpkt.Packet):
    """FLAP framing layer of the OSCAR protocol.

    Attributes:
        __hdr__: ast ('*' marker byte), frame type, sequence number,
            and payload length.
    """
    __hdr__ = (
        ('ast', 'B', 0x2a),  # '*'
        ('type', 'B', 0),
        ('seq', 'H', 0),
        ('len', 'H', 0)
    )
    def unpack(self, buf):
        """Parse the FLAP header and validate marker byte and length.

        Raises:
            dpkt.UnpackError: if the leading marker byte is not 0x2a.
            dpkt.NeedData: if fewer than `len` payload bytes are present.
        """
        dpkt.Packet.unpack(self, buf)
        if self.ast != 0x2a:
            raise dpkt.UnpackError('invalid FLAP header')
        if len(self.data) < self.len:
            # instance-style raise: the `raise Exc, msg` form is
            # Python 2-only syntax and breaks under Python 3
            raise dpkt.NeedData('%d left, %d needed' % (len(self.data), self.len))
class SNAC(dpkt.Packet):
    """SNAC data unit carried inside a FLAP frame.

    Attributes:
        __hdr__: service family, subtype, flags, and request id.
    """
    __hdr__ = (
        ('family', 'H', 0),
        ('subtype', 'H', 0),
        ('flags', 'H', 0),
        ('reqid', 'I', 0)
    )
def tlv(buf):
    """Decode a single TLV record from the start of *buf*.

    Returns the tuple ``(type, length, value, rest-of-buffer)``; raises
    dpkt.UnpackError if the 4-byte header is truncated, dpkt.NeedData if
    the value is shorter than the declared length.
    """
    header_size = 4
    try:
        rec_type, rec_len = struct.unpack('>HH', buf[:header_size])
    except struct.error:
        raise dpkt.UnpackError
    body = buf[header_size:header_size + rec_len]
    if len(body) < rec_len:
        raise dpkt.NeedData
    tail = buf[header_size + rec_len:]
    return (rec_type, rec_len, body, tail)
# TOC 1.0: http://jamwt.com/Py-TOC/PROTOCOL
# TOC 2.0: http://www.firestuff.org/projects/firetalk/doc/toc2.txt
| {
"repo_name": "openhardnudd/QMarkdowner",
"path": "dpkt/aim.py",
"copies": "15",
"size": "1099",
"license": "mit",
"hash": -344682316206020540,
"line_mean": 22.3829787234,
"line_max": 82,
"alpha_frac": 0.508644222,
"autogenerated": false,
"ratio": 2.6739659367396595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit import Geometry
def AlignDepict(mol,core,corePattern=None,acceptFailure=False):
  """
  Arguments:
    - mol: the molecule to be aligned, this will come back
           with a single conformer.
    - core: a molecule with the core atoms to align to;
            this should have a depiction.
    - corePattern: (optional) an optional molecule to be used to
                   generate the atom mapping between the molecule
                   and the core.

  Raises ValueError when the pattern/core atom counts differ, when the
  core does not match the pattern, or (unless acceptFailure is True)
  when mol does not contain the core substructure.
  """
  if core and corePattern:
    # the pattern must be atom-for-atom compatible with the core for the
    # index mapping below to make sense
    if not core.GetNumAtoms(onlyExplicit=True)==corePattern.GetNumAtoms(onlyExplicit=True):
      raise ValueError('When a pattern is provided, it must have the same number of atoms as the core')
    coreMatch = core.GetSubstructMatch(corePattern)
    if not coreMatch:
      raise ValueError("Core does not map to itself")
  else:
    coreMatch = range(core.GetNumAtoms(onlyExplicit=True))
  if corePattern:
    match = mol.GetSubstructMatch(corePattern)
  else:
    match = mol.GetSubstructMatch(core)
  if not match:
    if not acceptFailure:
      raise ValueError('Substructure match with core not found.')
    else:
      # fall back to an unconstrained depiction
      coordMap={}
  else:
    # pin each matched atom of mol to the 2D position of the
    # corresponding core atom
    conf = core.GetConformer()
    coordMap={}
    for i,idx in enumerate(match):
      pt3 = conf.GetAtomPosition(coreMatch[i])
      pt2 = Geometry.Point2D(pt3.x,pt3.y)
      coordMap[idx] = pt2
  rdDepictor.Compute2DCoords(mol,clearConfs=True,coordMap=coordMap,canonOrient=False)
if __name__=='__main__':
  # Command-line driver: align <mol> to the depiction of <core>.
  # Usage: AlignDepict [-p SMARTS|--pattern=SMARTS] [--smiles] [-o out] core mol
  import sys,getopt
  def Usage():
    # placeholder: no usage text implemented yet
    pass
  args,extras = getopt.getopt(sys.argv[1:],'p:ho:',['smiles','pattern='])
  if len(extras)!=2:
    print('ERROR: Not enough arguments', file=sys.stderr)
    Usage()
    sys.exit(1)
  patt = None
  useSmiles = False
  outF=None
  for arg,val in args:
    if arg=='-h':
      Usage()
      sys.exit(0)
    elif arg=='-p' or arg=='--pattern':
      patt = Chem.MolFromSmarts(val)
    elif arg=='--smiles':
      useSmiles = True
    elif arg=='-o':
      outF = val
  # first positional argument: the core (mol file, or SMILES with --smiles)
  if not useSmiles:
    core = Chem.MolFromMolFile(extras[0])
  else:
    core = Chem.MolFromSmiles(extras[0])
  rdDepictor.Compute2DCoords(core)
  # second positional argument: the molecule to align
  if not useSmiles:
    mol = Chem.MolFromMolFile(extras[1])
  else:
    mol = Chem.MolFromSmiles(extras[1])
  AlignDepict(mol,core,patt)
  if outF:
    outF = open(outF,'w+')
  else:
    outF = sys.stdout
  print(Chem.MolToMolBlock(mol), file=outF)
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/ChemUtils/AlignDepict.py",
"copies": "4",
"size": "2682",
"license": "bsd-3-clause",
"hash": -706042912084674300,
"line_mean": 26.9375,
"line_max": 103,
"alpha_frac": 0.6510067114,
"autogenerated": false,
"ratio": 3.2747252747252746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.05448416277367132,
"num_lines": 96
} |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit import Geometry
def AlignDepict(mol, core, corePattern=None, acceptFailure=False):
  """Generate 2D coordinates for *mol* aligned to the depiction of *core*.

  Arguments:
    - mol: the molecule to be aligned, this will come back
      with a single conformer.
    - core: a molecule with the core atoms to align to;
      this should have a depiction.
    - corePattern: (optional) an optional molecule to be used to
      generate the atom mapping between the molecule
      and the core.
    - acceptFailure: when True, a failed substructure match falls back
      to an unconstrained depiction instead of raising.
  """
  if core and corePattern:
    nCore = core.GetNumAtoms(onlyExplicit=True)
    nPattern = corePattern.GetNumAtoms(onlyExplicit=True)
    if nCore != nPattern:
      raise ValueError(
        'When a pattern is provided, it must have the same number of atoms as the core')
    coreMatch = core.GetSubstructMatch(corePattern)
    if not coreMatch:
      raise ValueError("Core does not map to itself")
  else:
    coreMatch = range(core.GetNumAtoms(onlyExplicit=True))
  # map mol's atoms through the pattern when one is given, else directly
  query = corePattern if corePattern else core
  match = mol.GetSubstructMatch(query)
  coordMap = {}
  if match:
    # pin each matched atom of mol to the 2D position of the
    # corresponding core atom
    conf = core.GetConformer()
    for i, molIdx in enumerate(match):
      corePos = conf.GetAtomPosition(coreMatch[i])
      coordMap[molIdx] = Geometry.Point2D(corePos.x, corePos.y)
  elif not acceptFailure:
    raise ValueError('Substructure match with core not found.')
  rdDepictor.Compute2DCoords(mol, clearConfs=True, coordMap=coordMap, canonOrient=False)
if __name__ == '__main__':
  # Command-line driver: align <mol> to the depiction of <core>.
  # Usage: AlignDepict [-p SMARTS|--pattern=SMARTS] [--smiles] [-o out] core mol
  import sys, getopt
  def Usage():
    # placeholder: no usage text implemented yet
    pass
  args, extras = getopt.getopt(sys.argv[1:], 'p:ho:', ['smiles', 'pattern='])
  if len(extras) != 2:
    print('ERROR: Not enough arguments', file=sys.stderr)
    Usage()
    sys.exit(1)
  patt = None
  useSmiles = False
  outF = None
  for arg, val in args:
    if arg == '-h':
      Usage()
      sys.exit(0)
    elif arg == '-p' or arg == '--pattern':
      patt = Chem.MolFromSmarts(val)
    elif arg == '--smiles':
      useSmiles = True
    elif arg == '-o':
      outF = val
  # first positional argument: the core (mol file, or SMILES with --smiles)
  if not useSmiles:
    core = Chem.MolFromMolFile(extras[0])
  else:
    core = Chem.MolFromSmiles(extras[0])
  rdDepictor.Compute2DCoords(core)
  # second positional argument: the molecule to align
  if not useSmiles:
    mol = Chem.MolFromMolFile(extras[1])
  else:
    mol = Chem.MolFromSmiles(extras[1])
  AlignDepict(mol, core, patt)
  if outF:
    outF = open(outF, 'w+')
  else:
    outF = sys.stdout
  print(Chem.MolToMolBlock(mol), file=outF)
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/ChemUtils/AlignDepict.py",
"copies": "1",
"size": "2717",
"license": "bsd-3-clause",
"hash": -4428389248506454500,
"line_mean": 27.3020833333,
"line_max": 93,
"alpha_frac": 0.6426205374,
"autogenerated": false,
"ratio": 3.301336573511543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9324462519167038,
"avg_score": 0.023898918348901056,
"num_lines": 96
} |
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit import Geometry
def AlignDepict(mol,core,corePattern=None,acceptFailure=False):
  """
  Arguments:
    - mol: the molecule to be aligned, this will come back
           with a single conformer.
    - core: a molecule with the core atoms to align to;
            this should have a depiction.
    - corePattern: (optional) an optional molecule to be used to
                   generate the atom mapping between the molecule
                   and the core.

  Raises ValueError when the pattern/core atom counts differ, when the
  core does not match the pattern, or (unless acceptFailure is True)
  when mol does not contain the core substructure.
  NOTE: this copy uses Python 2-only `raise Exc, msg` syntax.
  """
  if core and corePattern:
    # the pattern must be atom-for-atom compatible with the core for the
    # index mapping below to make sense
    if not core.GetNumAtoms(onlyExplicit=True)==corePattern.GetNumAtoms(onlyExplicit=True):
      raise ValueError,'When a pattern is provided, it must have the same number of atoms as the core'
    coreMatch = core.GetSubstructMatch(corePattern)
    if not coreMatch:
      raise ValueError,"Core does not map to itself"
  else:
    coreMatch = range(core.GetNumAtoms(onlyExplicit=True))
  if corePattern:
    match = mol.GetSubstructMatch(corePattern)
  else:
    match = mol.GetSubstructMatch(core)
  if not match:
    if not acceptFailure:
      raise ValueError,'Substructure match with core not found.'
    else:
      # fall back to an unconstrained depiction
      coordMap={}
  else:
    # pin each matched atom of mol to the 2D position of the
    # corresponding core atom
    conf = core.GetConformer()
    coordMap={}
    for i,idx in enumerate(match):
      pt3 = conf.GetAtomPosition(coreMatch[i])
      pt2 = Geometry.Point2D(pt3.x,pt3.y)
      coordMap[idx] = pt2
  rdDepictor.Compute2DCoords(mol,clearConfs=True,coordMap=coordMap,canonOrient=False)
if __name__=='__main__':
  # Command-line driver: align <mol> to the depiction of <core>.
  # Usage: AlignDepict [-p SMARTS|--pattern=SMARTS] [--smiles] [-o out] core mol
  # NOTE: Python 2-only (`print >>` statement syntax).
  import sys,getopt
  def Usage():
    # placeholder: no usage text implemented yet
    pass
  args,extras = getopt.getopt(sys.argv[1:],'p:ho:',['smiles','pattern='])
  if len(extras)!=2:
    print >>sys.stderr,'ERROR: Not enough arguments'
    Usage()
    sys.exit(1)
  patt = None
  useSmiles = False
  outF=None
  for arg,val in args:
    if arg=='-h':
      Usage()
      sys.exit(0)
    elif arg=='-p' or arg=='--pattern':
      patt = Chem.MolFromSmarts(val)
    elif arg=='--smiles':
      useSmiles = True
    elif arg=='-o':
      outF = val
  # first positional argument: the core (mol file, or SMILES with --smiles)
  if not useSmiles:
    core = Chem.MolFromMolFile(extras[0])
  else:
    core = Chem.MolFromSmiles(extras[0])
  rdDepictor.Compute2DCoords(core)
  # second positional argument: the molecule to align
  if not useSmiles:
    mol = Chem.MolFromMolFile(extras[1])
  else:
    mol = Chem.MolFromSmiles(extras[1])
  AlignDepict(mol,core,patt)
  if outF:
    outF = open(outF,'w+')
  else:
    outF = sys.stdout
  print >>outF,Chem.MolToMolBlock(mol)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/ChemUtils/AlignDepict.py",
"copies": "2",
"size": "2633",
"license": "bsd-3-clause",
"hash": -1663847178255756000,
"line_mean": 26.7157894737,
"line_max": 102,
"alpha_frac": 0.6490695025,
"autogenerated": false,
"ratio": 3.26270136307311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9628166508832319,
"avg_score": 0.05672087134815843,
"num_lines": 95
} |
"""A widget which controls the alignment of its child."""
from Bin import Bin
from Constants import *
import base
class Alignment (Bin):
    """Alignment (width, height) -> Alignment
    A Bin widget class which controls the alignment of its child.
    The Alignment widget allows its child to be aligned at its edges
    using the 'align' attribute and set_align() method. Dependant on the
    alignment type (see also ALIGN_TYPES) the child will be placed
    differently within the Alignment.
    alignment.align = ALIGN_TOP
    alignment.set_align (ALIGN_TOP)
    However, not every alignment make sense, so a ALIGN_TOP | ALIGN_BOTTOM
    would cause the widget to be placed at the top. The priority
    order for the alignment follows. The lower the value, the higher the
    priority.
    Alignment Priority
    -----------------------
    ALIGN_TOP 0
    ALIGN_BOTTOM 1
    ALIGN_LEFT 0
    ALIGN_RIGHT 1
    ALIGN_NONE 2
    Default action (invoked by activate()):
    None
    Mnemonic action (invoked by activate_mnemonic()):
    None
    Attributes:
    align - Alignment of the child.
    """
    def __init__ (self, width, height):
        Bin.__init__ (self)
        # default: no explicit alignment, child stays centered
        self._align = ALIGN_NONE
        self.minsize = width, height
    def set_focus (self, focus=True):
        """A.set_focus (...) -> bool
        Overrides the default widget input focus.
        Alignment widgets cannot be focused by default, thus this method
        always returns False and does not do anything.
        """
        return False
    def set_align (self, align):
        """A.set_align (...) -> None
        Sets the alignment for the child.
        Raises a TypeError, if the passed argument is not a value from
        ALIGN_TYPES.
        """
        if not constants_is_align (align):
            raise TypeError ("align must be a value from ALIGN_TYPES")
        self._align = align
        # mark the widget for redraw so the new alignment takes effect
        self.dirty = True
    def draw_bg (self):
        """A.draw_bg () -> Surface
        Draws the Alignment background surface and returns it.
        Creates the visible surface of the Alignment and returns it to the
        caller.
        """
        return base.GlobalStyle.engine.draw_alignment (self)
    def draw (self):
        """B.draw () -> None
        Draws the Alignment surface and places its child on it.
        """
        Bin.draw (self)
        rect = self.image.get_rect ()
        if self.child:
            # start centered, then override per axis below
            self.child.center = rect.center
            # vertical axis: TOP is checked first, so it wins over BOTTOM
            if self.align & ALIGN_TOP == ALIGN_TOP:
                self.child.top = rect.top + self.padding
            elif self.align & ALIGN_BOTTOM == ALIGN_BOTTOM:
                self.child.bottom = rect.bottom - self.padding
            # horizontal axis: LEFT is checked first, so it wins over RIGHT
            if self.align & ALIGN_LEFT == ALIGN_LEFT:
                self.child.left = rect.left + self.padding
            elif self.align & ALIGN_RIGHT == ALIGN_RIGHT:
                self.child.right = rect.right - self.padding
            self.image.blit (self.child.image, self.child.rect)
    align = property (lambda self: self._align,
                      lambda self, var: self.set_align (var),
                      doc = "The alignment to use for the child.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Alignment.py",
"copies": "1",
"size": "4643",
"license": "bsd-2-clause",
"hash": 6918360537415412000,
"line_mean": 34.992248062,
"line_max": 78,
"alpha_frac": 0.6515184148,
"autogenerated": false,
"ratio": 4.2247497725204735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5376268187320473,
"avg_score": null,
"num_lines": null
} |
""" AMQP based Event Bus plugin based on the QPID library. """
from peloton.plugins import PelotonPlugin
from peloton.events import AbstractEventBusPlugin
from peloton.events import AbstractEventHandler
from peloton.exceptions import MessagingError
from peloton.exceptions import ConfigurationError
from twisted.internet import reactor
from twisted.internet.defer import DeferredQueue
import qpid
from qpid.queue import Closed
from qpid.client import Client
from qpid.content import Content
import cPickle as pickle
import threading
import sys
import os
import time
class AMQPEventBus(PelotonPlugin,AbstractEventBusPlugin):
    """AMQP event-bus plugin built on Python-QPID; most probably backed
    by RabbitMQ but potentially any AMQP 0-8 provider.

    QPID is not Twisted based and blocks while receiving messages, so one
    daemon thread is started per consumed queue (_startListener); received
    messages are marshalled back into the Twisted reactor via
    callFromThread and dispatched from _processQueue.

    This plugin needs to be superceded by one based on a Twisted AMQP
    protocol handler for greater efficiency in this environment. As it
    stands one thread is used per routing_key being listened for and in
    the event that subscribers do not de-register, threads will be consumed
    at an un-wholesome rate.

    @todo - purging of threads with no real listeners behind them?
    """
    def initialise(self):
        """Read broker settings from the kernel and set up routing tables."""
        self.vhost = self.kernel.settings.messagingVHost
        self.host = self.kernel.settings.messagingHost
        hp = self.host.split(':')
        if len(hp) == 1:
            self.port = 5672  # default AMQP port
        else:
            self.host = hp[0]
            try:
                self.port = int(hp[1])
            except ValueError:
                raise ConfigurationError("Invalid port number for AMQP host: %s " % hp[1])
        # NB: THIS HANDLER DOES NOT SUPPORT REALM
        self.realm = self.kernel.settings.messagingRealm
        self.domain = self.kernel.settings.domain
        self.node_guid = self.kernel.profile.guid
        # key is ctag; value is list of handler objects
        self.handlersByCtag = {}
        # key is <exchange>.<routing_key>; value is (ctag, qname, queue)
        self.ctagByQueue = {}
        # key is handler; value is list of (exchange, routing_key, ctag, qname)
        self.registeredHandlers = {}
    def start(self):
        """Connect to the broker and declare the standard topic exchanges."""
        specDir = os.sep.join(qpid.__file__.split('/')[:-2])+"/amqp_specs"
        self.connection = Client(self.host, self.port,
                                 spec=qpid.spec.load('file://%s/amqp0-8.xml' % specDir),
                                 vhost=self.vhost)
        self.connection.start({ 'LOGIN': self.config.username,
                                'PASSWORD': self.config.password})
        exchanges = [('domain_control','topic'),
                     ('logging', 'topic'),
                     ('events', 'topic')]
        self.registeredExchanges = []
        self.mqueue = DeferredQueue()
        self.mqueue.get().addCallback(self._processQueue)
        self.channel = self.connection.channel(1)
        self.channel.channel_open()
        for x,t in exchanges:
            self.channel.exchange_declare(exchange=x, type=t, auto_delete=False)
            self.registeredExchanges.append(x)
    def stop(self):
        """Close all consumer queues; failures are ignored (best effort)."""
        for _, _, q in self.ctagByQueue.values():
            try:
                q.close()
            except:
                # best-effort shutdown: the queue may already be closed
                pass
    def register(self, key, handler, exchange='events'):
        """ Register to receive events from the specified exchange (default 'events')
with all messages to be handled by a peloton.events.AbstractEventHandler instance."""
        if exchange not in self.registeredExchanges:
            raise MessagingError("Exchange %s not valid" % exchange)
        if not isinstance(handler, AbstractEventHandler):
            raise MessagingError("Subscription to %s.%s attempted with invalid handler: %s" % (exchange, key, str(handler)))
        # routing keys are namespaced by domain
        key = "%s.%s" % (self.domain, key)
        queue = "%s.%s" % (exchange,key)
        if queue not in self.ctagByQueue:
            # first subscriber for this exchange/key: declare an exclusive
            # queue, bind it, start consuming and spin up a listener thread
            try:
                qname, _, _ = self.channel.queue_declare(exclusive=True).fields
                self.channel.queue_bind(queue=qname, exchange=exchange, routing_key=key)
                ctag = self.channel.basic_consume(queue=qname, no_ack=True).consumer_tag
                q = self.connection.queue(ctag)
                self.ctagByQueue[queue] = (ctag, qname, q)
                self._startListener(ctag, q)
            except Exception:
                # the original message here ("Message published to closed
                # exchange") was copy-pasted from fireEvent and misleading
                self.logger.error("Unable to declare/bind consumer queue for %s/%s " % (exchange, key))
                raise MessagingError("Unable to declare/bind consumer queue for %s/%s " % (exchange, key))
        else:
            ctag, qname, _ = self.ctagByQueue[queue]
        record = (exchange, key, ctag, qname)
        try:
            queues = self.registeredHandlers[handler]
            if record not in queues:
                queues.append(record)
        except KeyError:
            self.registeredHandlers[handler]=[record]
        try:
            handlers = self.handlersByCtag[ctag]
            if handler not in handlers:
                handlers.append(handler)
        except KeyError:
            # first handler for this consumer tag (was a py2-only
            # over-broad `except Exception, ex` with ex unused)
            self.handlersByCtag[ctag] = [handler]
    def deregister(self, handler):
        """ Remove this handler from all listened for queues """
        for exchange, key, ctag, qname in self.registeredHandlers[handler]:
            queue = "%s.%s" % (exchange,key)
            self.handlersByCtag[ctag].remove(handler)
            if not self.handlersByCtag[ctag]:
                # last handler gone: tear down the consumer and its thread
#                self.channel.queue_delete(queue=qname)
                del(self.handlersByCtag[ctag])
                _, _, q = self.ctagByQueue[queue]
                q.close()
                del(self.ctagByQueue[queue])
        del(self.registeredHandlers[handler])
    def fireEvent(self, key, exchange='events', **kwargs):
        """ Fire an event with routing key 'key' on the specified
exchange using kwargs to build the event message. """
        if exchange not in self.registeredExchanges:
            raise MessagingError("Exchange %s not valid" % exchange)
        # every message is stamped with the sending node's GUID
        kwargs.update({'sender_guid' : self.node_guid})
        msg = Content(pickle.dumps(kwargs))
        try:
            self.channel.basic_publish(content=msg, exchange=exchange,
                                       routing_key='%s.%s' % (self.domain, key))
        except Exception:
            self.logger.error("Message published to closed exchange: %s/%s " % (exchange, key))
            raise MessagingError("Message published to closed exchange: %s/%s " % (exchange, key))
    def _startListener(self, ctag, q):
        """ Start a thread that listens for messages on a given
channel. On receipt of a message it is pushed onto self.mqueue for
processing in the main Twisted event loop. """
        def _qListener(mq):
            while True:
                try:
                    v = q.get()
                except Closed:
                    # listener has been stopped
                    break
                except:
                    self.logger.error("Qlistener forced close on %s" % ctag)
                    break
                # hand the message to the reactor thread safely
                reactor.callFromThread(mq.put, v)
        mt = threading.Thread(target=_qListener, args=(self.mqueue,))
        mt.setDaemon(True)
        mt.start()
    def _processQueue(self, msg):
        """ Find all handlers for this message based on the consumer tag and
pass the message to them. """
        # re-arm the queue callback before dispatching
        self.mqueue.get().addCallback(self._processQueue)
        ctag, _, _, exchange, routing_key = msg.fields
        # remove the domain name from the key
        routing_key = '.'.join(routing_key.split('.')[1:])
        content = pickle.loads(msg.content.body)
        if ctag in self.handlersByCtag:
            # may have been deleted already.
            handlersToGo = []
            for handler in self.handlersByCtag[ctag]:
                try:
                    handler.eventReceived(content, exchange=exchange,
                                          key=routing_key, ctag=ctag)
                except:
                    # error in handler; remove it:
                    self.logger.debug("Defunct error handler: removing.")
                    handlersToGo.append(handler)
            for h in handlersToGo:
                self.deregister(h)
    def getRegisteredExchanges(self):
        """Return the list of exchange names declared at start()."""
        return self.registeredExchanges
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/plugins/amqpQpid.py",
"copies": "1",
"size": "8542",
"license": "bsd-3-clause",
"hash": 4001911569498086400,
"line_mean": 39.4834123223,
"line_max": 124,
"alpha_frac": 0.6023179583,
"autogenerated": false,
"ratio": 4.1607403799318075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5263058338231807,
"avg_score": null,
"num_lines": null
} |
"""$Id: application_test.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for testing that all output messages are defined properly"""
from base import BaseFormatter
import feedvalidator
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class Formatter(BaseFormatter):
    """Formatter that resolves each event to its raw message template.

    Used by the test suite to verify that every event class has a message
    defined in the active language module.
    """
    def getMessage(self, event):
        """Return the formatted message for *event*, or None when no
        template exists for its class or any ancestor class.

        Walks the class hierarchy breadth-first so the most specific
        message wins.
        """
        classes = [event.__class__]
        while classes:
            # `in` works on Python 2 and 3; dict.has_key() is py2-only
            if classes[0] in lang.messages:
                return lang.messages[classes[0]] % event.params
            classes = classes + list(classes[0].__bases__)
            del classes[0]
        return None
    def format(self, event):
        """returns the formatted representation of a single event"""
        return self.getMessage(event)
| {
"repo_name": "stone5495/NewsBlur",
"path": "vendor/feedvalidator/formatter/application_test.py",
"copies": "16",
"size": "1034",
"license": "mit",
"hash": -3366803700111865000,
"line_mean": 35.9285714286,
"line_max": 94,
"alpha_frac": 0.6731141199,
"autogenerated": false,
"ratio": 3.435215946843854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: arp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Address Resolution Protocol."""
from __future__ import absolute_import
from . import dpkt
# Hardware address format (IANA ARP hardware-type values)
ARP_HRD_ETH = 0x0001 # ethernet hardware
ARP_HRD_IEEE802 = 0x0006 # IEEE 802 hardware
# Protocol address format (EtherType value)
ARP_PRO_IP = 0x0800 # IP protocol
# ARP operation ("ha" = hardware address, "pa" = protocol address)
ARP_OP_REQUEST = 1 # request to resolve ha given pa
ARP_OP_REPLY = 2 # response giving hardware address
ARP_OP_REVREQUEST = 3 # request to resolve pa given ha (RARP)
ARP_OP_REVREPLY = 4 # response giving protocol address (RARP)
class ARP(dpkt.Packet):
    """Address Resolution Protocol.

    See more about the ARP on \
    https://en.wikipedia.org/wiki/Address_Resolution_Protocol

    Attributes:
        __hdr__: Header fields of ARP.
    """
    __hdr__ = (
        ('hrd', 'H', ARP_HRD_ETH),
        ('pro', 'H', ARP_PRO_IP),
        ('hln', 'B', 6),  # hardware address length
        ('pln', 'B', 4),  # protocol address length
        ('op', 'H', ARP_OP_REQUEST),
        # Defaults for 's' fields must be bytes, not str: struct packing
        # of 's' format codes requires a bytes object on Python 3.
        ('sha', '6s', b''),
        ('spa', '4s', b''),
        ('tha', '6s', b''),
        ('tpa', '4s', b'')
    )
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/arp.py",
"copies": "3",
"size": "1143",
"license": "bsd-3-clause",
"hash": 179643461044020160,
"line_mean": 26.2142857143,
"line_max": 61,
"alpha_frac": 0.5870516185,
"autogenerated": false,
"ratio": 3.1487603305785123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004761904761904762,
"num_lines": 42
} |
# $Id: asn1.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Abstract Syntax Notation #1."""
from __future__ import absolute_import
from __future__ import print_function
import struct
import time
from . import dpkt
from .compat import compat_ord
# Type class -- the top two bits of the BER identifier octet
CLASSMASK = 0xc0
UNIVERSAL = 0x00
APPLICATION = 0x40
CONTEXT = 0x80
PRIVATE = 0xc0
# Constructed (vs. primitive) -- bit 6 of the identifier octet
CONSTRUCTED = 0x20
# Universal-class tags (low five bits of the identifier octet)
TAGMASK = 0x1f
INTEGER = 2
BIT_STRING = 3 # arbitrary bit string
OCTET_STRING = 4 # arbitrary octet string
NULL = 5
OID = 6 # object identifier
SEQUENCE = 16 # ordered collection of types
SET = 17 # unordered collection of types
PRINT_STRING = 19 # printable string
T61_STRING = 20 # T.61 (8-bit) character string
IA5_STRING = 22 # ASCII
UTC_TIME = 23 # UTCTime string, decoded by utctime()
def utctime(buf):
    """Convert an ASN.1 UTCTime string to a float of seconds since the Epoch.

    Args:
        buf: A string of the form "yymnddhhmm[ss]" optionally followed
            by 'Z' or an explicit "+hhmm"/"-hhmm" timezone offset.

    Returns:
        A floating point number, indicates seconds since the Epoch.
    """
    yy = int(buf[:2])
    mn = int(buf[2:4])
    dd = int(buf[4:6])
    hh = int(buf[6:8])
    mm = int(buf[8:10])
    try:
        ss = int(buf[10:12])
        buf = buf[12:]
    except (TypeError, ValueError):
        # Missing seconds field: int('') raises ValueError, which the
        # original TypeError-only clause never caught.
        ss = 0
        buf = buf[10:]
    # Apply an explicit timezone offset if present; guard against an
    # exhausted buffer (a time string with no trailing 'Z' or offset
    # previously raised IndexError here).
    if buf and buf[0] == '+':
        hh -= int(buf[1:3])
        mm -= int(buf[3:5])
    elif buf and buf[0] == '-':
        hh += int(buf[1:3])
        mm += int(buf[3:5])
    return time.mktime((2000 + yy, mn, dd, hh, mm, ss, 0, 0, 0))
def decode(buf):
    """Sleazy ASN.1 decoder.

    Args:
        buf: A bytes buffer holding BER/DER encoded ASN.1 data.

    Returns:
        A list of (id, value) tuples; constructed values appear as
        nested lists.

    Raises:
        UnpackError: on an unsupported long-form length or an excessive
            integer length.
    """
    msg = []
    while buf:
        # Identifier octet: constructed flag plus 5-bit tag number.
        t = compat_ord(buf[0])
        constructed = t & CONSTRUCTED
        tag = t & TAGMASK
        # First length octet; values >= 128 select the long form.
        l = compat_ord(buf[1])
        c = 0
        if constructed and l == 128:
            # XXX - constructed, indefinite length
            msg.append((t, decode(buf[2:])))
        elif l >= 128:
            # Long form: the low 7 bits give the number of length octets.
            c = l & 127
            if c == 1:
                l = compat_ord(buf[2])
            elif c == 2:
                l = struct.unpack('>H', buf[2:4])[0]
            elif c == 3:
                # NOTE(review): reads from offset 1 and masks with 0xfff,
                # unlike the other cases -- looks suspicious; confirm
                # against the BER length-octet rules before changing.
                l = struct.unpack('>I', buf[1:5])[0] & 0xfff
                c = 2
            elif c == 4:
                l = struct.unpack('>I', buf[2:6])[0]
            else:
                # XXX - can be up to 127 bytes, but...
                raise dpkt.UnpackError('excessive long-form ASN.1 length %d' % l)
        # Skip type, length
        buf = buf[2 + c:]
        # Parse content
        if constructed:
            msg.append((t, decode(buf)))
        elif tag == INTEGER:
            if l == 0:
                n = 0
            elif l == 1:
                n = compat_ord(buf[0])
            elif l == 2:
                n = struct.unpack('>H', buf[:2])[0]
            elif l == 3:
                n = struct.unpack('>I', buf[:4])[0] >> 8
            elif l == 4:
                n = struct.unpack('>I', buf[:4])[0]
            else:
                raise dpkt.UnpackError('excessive integer length > %d bytes' % l)
            msg.append((t, n))
        elif tag == UTC_TIME:
            msg.append((t, utctime(buf[:l])))
        else:
            # Any other tag is returned as its raw content bytes.
            msg.append((t, buf[:l]))
        # Skip content
        buf = buf[l:]
    return msg
def test_asn1():
    # Decode a captured LDAP searchRequest and compare the full tree.
    s = b'0\x82\x02Q\x02\x01\x0bc\x82\x02J\x04xcn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US\n\x01\x00\n\x01\x03\x02\x01\x00\x02\x01\x00\x01\x01\x00\x87\x0bobjectclass0\x82\x01\xb0\x04\rmemberOfGroup\x04\x03acl\x04\x02cn\x04\x05title\x04\rpostalAddress\x04\x0ftelephoneNumber\x04\x04mail\x04\x06member\x04\thomePhone\x04\x11homePostalAddress\x04\x0bobjectClass\x04\x0bdescription\x04\x18facsimileTelephoneNumber\x04\x05pager\x04\x03uid\x04\x0cuserPassword\x04\x08joinable\x04\x10associatedDomain\x04\x05owner\x04\x0erfc822ErrorsTo\x04\x08ErrorsTo\x04\x10rfc822RequestsTo\x04\nRequestsTo\x04\tmoderator\x04\nlabeledURL\x04\nonVacation\x04\x0fvacationMessage\x04\x05drink\x04\x0elastModifiedBy\x04\x10lastModifiedTime\x04\rmodifiersname\x04\x0fmodifytimestamp\x04\x0ccreatorsname\x04\x0fcreatetimestamp'
    assert (decode(s) == [(48, [(2, 11), (99, [(4, b'cn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US'), (10, b'\x00'), (10, b'\x03'), (2, 0), (2, 0), (1, b'\x00'), (135, b'objectclass'), (48, [(4, b'memberOfGroup'), (4, b'acl'), (4, b'cn'), (4, b'title'), (4, b'postalAddress'), (4, b'telephoneNumber'), (4, b'mail'), (4, b'member'), (4, b'homePhone'), (4, b'homePostalAddress'), (4, b'objectClass'), (4, b'description'), (4, b'facsimileTelephoneNumber'), (4, b'pager'), (4, b'uid'), (4, b'userPassword'), (4, b'joinable'), (4, b'associatedDomain'), (4, b'owner'), (4, b'rfc822ErrorsTo'), (4, b'ErrorsTo'), (4, b'rfc822RequestsTo'), (4, b'RequestsTo'), (4, b'moderator'), (4, b'labeledURL'), (4, b'onVacation'), (4, b'vacationMessage'), (4, b'drink'), (4, b'lastModifiedBy'), (4, b'lastModifiedTime'), (4, b'modifiersname'), (4, b'modifytimestamp'), (4, b'creatorsname'), (4, b'createtimestamp')])])])])
# Allow running the self-test directly from the command line.
if __name__ == '__main__':
    test_asn1()
    print('Tests Successful...')
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/asn1.py",
"copies": "3",
"size": "5434",
"license": "apache-2.0",
"hash": 8840830073447924000,
"line_mean": 36.2191780822,
"line_max": 976,
"alpha_frac": 0.5794994479,
"autogenerated": false,
"ratio": 2.8904255319148935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4969924979814893,
"avg_score": null,
"num_lines": null
} |
# $Id: asn1.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Abstract Syntax Notation #1."""
import struct
import time
import dpkt
# Type class -- the top two bits of the BER identifier octet
CLASSMASK = 0xc0
UNIVERSAL = 0x00
APPLICATION = 0x40
CONTEXT = 0x80
PRIVATE = 0xc0
# Constructed (vs. primitive) -- bit 6 of the identifier octet
CONSTRUCTED = 0x20
# Universal-class tags (low five bits of the identifier octet)
TAGMASK = 0x1f
INTEGER = 2
BIT_STRING = 3 # arbitrary bit string
OCTET_STRING = 4 # arbitrary octet string
NULL = 5
OID = 6 # object identifier
SEQUENCE = 16 # ordered collection of types
SET = 17 # unordered collection of types
PRINT_STRING = 19 # printable string
T61_STRING = 20 # T.61 (8-bit) character string
IA5_STRING = 22 # ASCII
UTC_TIME = 23 # UTCTime string, decoded by utctime()
def utctime(buf):
    """Convert an ASN.1 UTCTime string ("yymnddhhmm[ss][Z|(+|-)hhmm]")
    to a float of seconds since the Epoch."""
    yy = int(buf[:2])
    mn = int(buf[2:4])
    dd = int(buf[4:6])
    hh = int(buf[6:8])
    mm = int(buf[8:10])
    try:
        ss = int(buf[10:12])
        buf = buf[12:]
    except (TypeError, ValueError):
        # Missing seconds field: int('') raises ValueError, which the
        # original TypeError-only clause never caught.
        ss = 0
        buf = buf[10:]
    # Apply an explicit timezone offset if present; guard against an
    # exhausted buffer (no trailing 'Z' or offset raised IndexError).
    if buf and buf[0] == '+':
        hh -= int(buf[1:3])
        mm -= int(buf[3:5])
    elif buf and buf[0] == '-':
        hh += int(buf[1:3])
        mm += int(buf[3:5])
    return time.mktime((2000 + yy, mn, dd, hh, mm, ss, 0, 0, 0))
def decode(buf):
    """Sleazy ASN.1 decoder.
    Return list of (id, value) tuples from ASN.1 BER/DER encoded buffer.
    Constructed values appear as nested lists; raises dpkt.UnpackError
    on an unsupported long-form length or an excessive integer length.
    """
    msg = []
    while buf:
        # Identifier octet: constructed flag plus 5-bit tag number.
        t = ord(buf[0])
        constructed = t & CONSTRUCTED
        tag = t & TAGMASK
        # First length octet; values >= 128 select the long form.
        l = ord(buf[1])
        c = 0
        if constructed and l == 128:
            # XXX - constructed, indefinite length
            msg.append((t, decode(buf[2:])))
        elif l >= 128:
            # Long form: the low 7 bits give the number of length octets.
            c = l & 127
            if c == 1:
                l = ord(buf[2])
            elif c == 2:
                l = struct.unpack('>H', buf[2:4])[0]
            elif c == 3:
                # NOTE(review): reads from offset 1 and masks with 0xfff,
                # unlike the other cases -- looks suspicious; confirm
                # against the BER length-octet rules before changing.
                l = struct.unpack('>I', buf[1:5])[0] & 0xfff
                c = 2
            elif c == 4:
                l = struct.unpack('>I', buf[2:6])[0]
            else:
                # XXX - can be up to 127 bytes, but...
                raise dpkt.UnpackError('excessive long-form ASN.1 length %d' % l)
        # Skip type, length
        buf = buf[2+c:]
        # Parse content
        if constructed:
            msg.append((t, decode(buf)))
        elif tag == INTEGER:
            if l == 0:
                n = 0
            elif l == 1:
                n = ord(buf[0])
            elif l == 2:
                n = struct.unpack('>H', buf[:2])[0]
            elif l == 3:
                n = struct.unpack('>I', buf[:4])[0] >> 8
            elif l == 4:
                n = struct.unpack('>I', buf[:4])[0]
            else:
                raise dpkt.UnpackError('excessive integer length > %d bytes' % l)
            msg.append((t, n))
        elif tag == UTC_TIME:
            msg.append((t, utctime(buf[:l])))
        else:
            # Any other tag is returned as its raw content.
            msg.append((t, buf[:l]))
        # Skip content
        buf = buf[l:]
    return msg
def test_asn1():
    # Decode a captured LDAP searchRequest and compare the full tree.
    s = '0\x82\x02Q\x02\x01\x0bc\x82\x02J\x04xcn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US\n\x01\x00\n\x01\x03\x02\x01\x00\x02\x01\x00\x01\x01\x00\x87\x0bobjectclass0\x82\x01\xb0\x04\rmemberOfGroup\x04\x03acl\x04\x02cn\x04\x05title\x04\rpostalAddress\x04\x0ftelephoneNumber\x04\x04mail\x04\x06member\x04\thomePhone\x04\x11homePostalAddress\x04\x0bobjectClass\x04\x0bdescription\x04\x18facsimileTelephoneNumber\x04\x05pager\x04\x03uid\x04\x0cuserPassword\x04\x08joinable\x04\x10associatedDomain\x04\x05owner\x04\x0erfc822ErrorsTo\x04\x08ErrorsTo\x04\x10rfc822RequestsTo\x04\nRequestsTo\x04\tmoderator\x04\nlabeledURL\x04\nonVacation\x04\x0fvacationMessage\x04\x05drink\x04\x0elastModifiedBy\x04\x10lastModifiedTime\x04\rmodifiersname\x04\x0fmodifytimestamp\x04\x0ccreatorsname\x04\x0fcreatetimestamp'
    assert (decode(s) == [(48, [(2, 11), (99, [(4, 'cn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US'), (10, '\x00'), (10, '\x03'), (2, 0), (2, 0), (1, '\x00'), (135, 'objectclass'), (48, [(4, 'memberOfGroup'), (4, 'acl'), (4, 'cn'), (4, 'title'), (4, 'postalAddress'), (4, 'telephoneNumber'), (4, 'mail'), (4, 'member'), (4, 'homePhone'), (4, 'homePostalAddress'), (4, 'objectClass'), (4, 'description'), (4, 'facsimileTelephoneNumber'), (4, 'pager'), (4, 'uid'), (4, 'userPassword'), (4, 'joinable'), (4, 'associatedDomain'), (4, 'owner'), (4, 'rfc822ErrorsTo'), (4, 'ErrorsTo'), (4, 'rfc822RequestsTo'), (4, 'RequestsTo'), (4, 'moderator'), (4, 'labeledURL'), (4, 'onVacation'), (4, 'vacationMessage'), (4, 'drink'), (4, 'lastModifiedBy'), (4, 'lastModifiedTime'), (4, 'modifiersname'), (4, 'modifytimestamp'), (4, 'creatorsname'), (4, 'createtimestamp')])])])])
if __name__ == '__main__':
test_asn1()
print 'Tests Successful...' | {
"repo_name": "jameslittle/dpkt",
"path": "dpkt/asn1.py",
"copies": "6",
"size": "4886",
"license": "bsd-3-clause",
"hash": 7953910655257532000,
"line_mean": 39.3884297521,
"line_max": 937,
"alpha_frac": 0.5681539091,
"autogenerated": false,
"ratio": 2.8589818607372735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004106797546780353,
"num_lines": 121
} |
# $Id: asn1.py 23 2006-11-08 15:45:33Z dugsong $
"""Abstract Syntax Notation #1."""
import struct, time
import dpkt
# Type class -- the top two bits of the BER identifier octet
CLASSMASK = 0xc0
UNIVERSAL = 0x00
APPLICATION = 0x40
CONTEXT = 0x80
PRIVATE = 0xc0
# Constructed (vs. primitive) -- bit 6 of the identifier octet
CONSTRUCTED = 0x20
# Universal-class tags (low five bits of the identifier octet)
TAGMASK = 0x1f
INTEGER = 2
BIT_STRING = 3 # arbitrary bit string
OCTET_STRING = 4 # arbitrary octet string
NULL = 5
OID = 6 # object identifier
SEQUENCE = 16 # ordered collection of types
SET = 17 # unordered collection of types
PRINT_STRING = 19 # printable string
T61_STRING = 20 # T.61 (8-bit) character string
IA5_STRING = 22 # ASCII
UTC_TIME = 23 # UTCTime string, decoded by utctime()
def utctime(buf):
    """Convert an ASN.1 UTCTime string ("yymnddhhmm[ss][Z|(+|-)hhmm]")
    to a float of seconds since the Epoch."""
    yy = int(buf[:2])
    # The month must keep its own name: the original reused 'mm' for
    # both month and minutes, so the minutes value clobbered the month
    # before the mktime() call below.
    mn = int(buf[2:4])
    dd = int(buf[4:6])
    hh = int(buf[6:8])
    mm = int(buf[8:10])
    try:
        ss = int(buf[10:12])
        buf = buf[12:]
    except (TypeError, ValueError):
        # Missing seconds field: int('') raises ValueError, which the
        # original TypeError-only clause never caught.
        ss = 0
        buf = buf[10:]
    # Apply an explicit timezone offset if present; guard against an
    # exhausted buffer (no trailing 'Z' or offset raised IndexError).
    if buf and buf[0] == '+':
        hh -= int(buf[1:3])
        mm -= int(buf[3:5])
    elif buf and buf[0] == '-':
        hh += int(buf[1:3])
        mm += int(buf[3:5])
    return time.mktime((2000 + yy, mn, dd, hh, mm, ss, 0, 0, 0))
def decode(buf):
    """Sleazy ASN.1 decoder.
    Return list of (id, value) tuples from ASN.1 BER/DER encoded buffer.
    Constructed values appear as nested lists; raises dpkt.UnpackError
    on an unsupported long-form length or an excessive integer length.
    """
    msg = []
    while buf:
        # Identifier octet: constructed flag plus 5-bit tag number.
        t = ord(buf[0])
        constructed = t & CONSTRUCTED
        tag = t & TAGMASK
        # First length octet; values >= 128 select the long form.
        l = ord(buf[1])
        c = 0
        if constructed and l == 128:
            # XXX - constructed, indefinite length
            # Bug fix: list.append takes exactly one argument, so the
            # pair must be a tuple (as in the definite-length branch
            # below); the original msg.append(t, ...) raised TypeError.
            msg.append((t, decode(buf[2:])))
        elif l >= 128:
            # Long form: the low 7 bits give the number of length octets.
            c = l & 127
            if c == 1:
                l = ord(buf[2])
            elif c == 2:
                l = struct.unpack('>H', buf[2:4])[0]
            elif c == 3:
                l = struct.unpack('>I', buf[1:5])[0] & 0xfff
                c = 2
            elif c == 4:
                l = struct.unpack('>I', buf[2:6])[0]
            else:
                # XXX - can be up to 127 bytes, but...
                raise dpkt.UnpackError('excessive long-form ASN.1 length %d' % l)
        # Skip type, length
        buf = buf[2+c:]
        # Parse content
        if constructed:
            msg.append((t, decode(buf)))
        elif tag == INTEGER:
            if l == 0:
                n = 0
            elif l == 1:
                n = ord(buf[0])
            elif l == 2:
                n = struct.unpack('>H', buf[:2])[0]
            elif l == 3:
                n = struct.unpack('>I', buf[:4])[0] >> 8
            elif l == 4:
                n = struct.unpack('>I', buf[:4])[0]
            else:
                raise dpkt.UnpackError('excessive integer length > %d bytes' % l)
            msg.append((t, n))
        elif tag == UTC_TIME:
            msg.append((t, utctime(buf[:l])))
        else:
            # Any other tag is returned as its raw content.
            msg.append((t, buf[:l]))
        # Skip content
        buf = buf[l:]
    return msg
# Self-test: decode a captured LDAP searchRequest and compare the tree.
if __name__ == '__main__':
    import unittest
    class ASN1TestCase(unittest.TestCase):
        def test_asn1(self):
            s = '0\x82\x02Q\x02\x01\x0bc\x82\x02J\x04xcn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US\n\x01\x00\n\x01\x03\x02\x01\x00\x02\x01\x00\x01\x01\x00\x87\x0bobjectclass0\x82\x01\xb0\x04\rmemberOfGroup\x04\x03acl\x04\x02cn\x04\x05title\x04\rpostalAddress\x04\x0ftelephoneNumber\x04\x04mail\x04\x06member\x04\thomePhone\x04\x11homePostalAddress\x04\x0bobjectClass\x04\x0bdescription\x04\x18facsimileTelephoneNumber\x04\x05pager\x04\x03uid\x04\x0cuserPassword\x04\x08joinable\x04\x10associatedDomain\x04\x05owner\x04\x0erfc822ErrorsTo\x04\x08ErrorsTo\x04\x10rfc822RequestsTo\x04\nRequestsTo\x04\tmoderator\x04\nlabeledURL\x04\nonVacation\x04\x0fvacationMessage\x04\x05drink\x04\x0elastModifiedBy\x04\x10lastModifiedTime\x04\rmodifiersname\x04\x0fmodifytimestamp\x04\x0ccreatorsname\x04\x0fcreatetimestamp'
            # NOTE(review): failUnless is a deprecated alias of
            # assertTrue (removed in Python 3.12) -- consider updating.
            self.failUnless(decode(s) == [(48, [(2, 11), (99, [(4, 'cn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US'), (10, '\x00'), (10, '\x03'), (2, 0), (2, 0), (1, '\x00'), (135, 'objectclass'), (48, [(4, 'memberOfGroup'), (4, 'acl'), (4, 'cn'), (4, 'title'), (4, 'postalAddress'), (4, 'telephoneNumber'), (4, 'mail'), (4, 'member'), (4, 'homePhone'), (4, 'homePostalAddress'), (4, 'objectClass'), (4, 'description'), (4, 'facsimileTelephoneNumber'), (4, 'pager'), (4, 'uid'), (4, 'userPassword'), (4, 'joinable'), (4, 'associatedDomain'), (4, 'owner'), (4, 'rfc822ErrorsTo'), (4, 'ErrorsTo'), (4, 'rfc822RequestsTo'), (4, 'RequestsTo'), (4, 'moderator'), (4, 'labeledURL'), (4, 'onVacation'), (4, 'vacationMessage'), (4, 'drink'), (4, 'lastModifiedBy'), (4, 'lastModifiedTime'), (4, 'modifiersname'), (4, 'modifytimestamp'), (4, 'creatorsname'), (4, 'createtimestamp')])])])])
    unittest.main()
| {
"repo_name": "jacklee0810/QMarkdowner",
"path": "dpkt/asn1.py",
"copies": "15",
"size": "4995",
"license": "mit",
"hash": 7053437734507659000,
"line_mean": 40.974789916,
"line_max": 953,
"alpha_frac": 0.5611611612,
"autogenerated": false,
"ratio": 2.8973317865429236,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: asn1.py 271 2006-01-11 16:03:33Z dugsong $
"""Abstract Syntax Notation #1."""
import struct, time
import dpkt
# Type class
CLASSMASK = 0xc0
UNIVERSAL = 0x00
APPLICATION = 0x40
CONTEXT = 0x80
PRIVATE = 0xc0
# Constructed (vs. primitive)
CONSTRUCTED = 0x20
# Universal-class tags
TAGMASK = 0x1f
INTEGER = 2
BIT_STRING = 3 # arbitrary bit string
OCTET_STRING = 4 # arbitrary octet string
NULL = 5
OID = 6 # object identifier
SEQUENCE = 16 # ordered collection of types
SET = 17 # unordered collection of types
PRINT_STRING = 19 # printable string
T61_STRING = 20 # T.61 (8-bit) character string
IA5_STRING = 22 # ASCII
UTC_TIME = 23
def utctime(buf):
"""Convert ASN.1 UTCTime string to UTC float."""
yy = int(buf[:2])
mm = int(buf[2:4])
dd = int(buf[4:6])
hh = int(buf[6:8])
mm = int(buf[8:10])
try:
ss = int(buf[10:12])
buf = buf[12:]
except TypeError:
ss = 0
buf = buf[10:]
if buf[0] == '+':
hh -= int(buf[1:3])
mm -= int(buf[3:5])
elif buf[0] == '-':
hh += int(buf[1:3])
mm += int(buf[3:5])
return time.mktime((2000 + yy, mm, dd, hh, mm, ss, 0, 0, 0))
def decode(buf):
"""Sleazy ASN.1 decoder.
Return list of (id, value) tuples from ASN.1 BER/DER encoded buffer.
"""
msg = []
while buf:
t = ord(buf[0])
constructed = t & CONSTRUCTED
tag = t & TAGMASK
l = ord(buf[1])
c = 0
if constructed and l == 128:
# XXX - constructed, indefinite length
msg.append(t, decode(buf[2:]))
elif l >= 128:
c = l & 127
if c == 1:
l = ord(buf[2])
elif c == 2:
l = struct.unpack('>H', buf[2:4])[0]
elif c == 3:
l = struct.unpack('>I', buf[1:5])[0] & 0xfff
c = 2
elif c == 4:
l = struct.unpack('>I', buf[2:6])[0]
else:
# XXX - can be up to 127 bytes, but...
raise dpkt.UnpackError('excessive long-form ASN.1 length %d' % l)
# Skip type, length
buf = buf[2+c:]
# Parse content
if constructed:
msg.append((t, decode(buf)))
elif tag == INTEGER:
if l == 0:
n = 0
elif l == 1:
n = ord(buf[0])
elif l == 2:
n = struct.unpack('>H', buf[:2])[0]
elif l == 3:
n = struct.unpack('>I', buf[:4])[0] >> 8
elif l == 4:
n = struct.unpack('>I', buf[:4])[0]
else:
raise dpkt.UnpackError('excessive integer length > %d bytes' % l)
msg.append((t, n))
elif tag == UTC_TIME:
msg.append((t, utctime(buf[:l])))
else:
msg.append((t, buf[:l]))
# Skip content
buf = buf[l:]
return msg
if __name__ == '__main__':
import unittest
class ASN1TestCase(unittest.TestCase):
def test_asn1(self):
s = '0\x82\x02Q\x02\x01\x0bc\x82\x02J\x04xcn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US\n\x01\x00\n\x01\x03\x02\x01\x00\x02\x01\x00\x01\x01\x00\x87\x0bobjectclass0\x82\x01\xb0\x04\rmemberOfGroup\x04\x03acl\x04\x02cn\x04\x05title\x04\rpostalAddress\x04\x0ftelephoneNumber\x04\x04mail\x04\x06member\x04\thomePhone\x04\x11homePostalAddress\x04\x0bobjectClass\x04\x0bdescription\x04\x18facsimileTelephoneNumber\x04\x05pager\x04\x03uid\x04\x0cuserPassword\x04\x08joinable\x04\x10associatedDomain\x04\x05owner\x04\x0erfc822ErrorsTo\x04\x08ErrorsTo\x04\x10rfc822RequestsTo\x04\nRequestsTo\x04\tmoderator\x04\nlabeledURL\x04\nonVacation\x04\x0fvacationMessage\x04\x05drink\x04\x0elastModifiedBy\x04\x10lastModifiedTime\x04\rmodifiersname\x04\x0fmodifytimestamp\x04\x0ccreatorsname\x04\x0fcreatetimestamp'
self.failUnless(decode(s) == [(48, [(2, 11), (99, [(4, 'cn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US'), (10, '\x00'), (10, '\x03'), (2, 0), (2, 0), (1, '\x00'), (135, 'objectclass'), (48, [(4, 'memberOfGroup'), (4, 'acl'), (4, 'cn'), (4, 'title'), (4, 'postalAddress'), (4, 'telephoneNumber'), (4, 'mail'), (4, 'member'), (4, 'homePhone'), (4, 'homePostalAddress'), (4, 'objectClass'), (4, 'description'), (4, 'facsimileTelephoneNumber'), (4, 'pager'), (4, 'uid'), (4, 'userPassword'), (4, 'joinable'), (4, 'associatedDomain'), (4, 'owner'), (4, 'rfc822ErrorsTo'), (4, 'ErrorsTo'), (4, 'rfc822RequestsTo'), (4, 'RequestsTo'), (4, 'moderator'), (4, 'labeledURL'), (4, 'onVacation'), (4, 'vacationMessage'), (4, 'drink'), (4, 'lastModifiedBy'), (4, 'lastModifiedTime'), (4, 'modifiersname'), (4, 'modifytimestamp'), (4, 'creatorsname'), (4, 'createtimestamp')])])])])
unittest.main()
| {
"repo_name": "MercenaryLogic/StompingGround",
"path": "stompingground/dpkt/asn1.py",
"copies": "1",
"size": "4996",
"license": "mit",
"hash": -6388156514376681000,
"line_mean": 40.9831932773,
"line_max": 953,
"alpha_frac": 0.5612489992,
"autogenerated": false,
"ratio": 2.896231884057971,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3957480883257971,
"avg_score": null,
"num_lines": null
} |
# $Id: AtlasUtil.py 153446 2015-02-22 16:28:34Z xju $
#
# Module providing some convenience functions for decorating ATLAS plots.
#
##
# @short Function producing the "ATLAS Preliminary" sign
#
# There is a usual format for the "ATLAS Preliminary" sign on the plots,
# this function takes care of using that. Plus, it makes it simple to
# remove/change the "Preliminary" part if/when necessary.
#
# @param x The X position of the text in the [0,1] interval
# @param y The Y position of the text in the [0,1] interval
# @param color The color of the text
def AtlasLabel( x, y, color = 1, experiment="Atlas", preliminary=False, public=False ):
    """Draw the "ATLAS <status>" label at NDC position (x, y).

    The status suffix is "Internal" by default, "Preliminary" when
    requested, and empty for public plots (public wins over preliminary).
    """
    if public:
        docStatus = ""
    elif preliminary:
        docStatus = "Preliminary"
    else:
        docStatus = "Internal"
    textMap = {
        'Atlas':'ATLAS #font[42]{%s}' % docStatus,
        'AtlasCms':'ATLAS+CMS #font[42]{%s}' % docStatus,
    }
    # ROOT is imported lazily so the module loads without it.
    import ROOT
    latex = ROOT.TLatex()
    latex.SetNDC()
    latex.SetTextFont( 72 )
    latex.SetTextColor( color )
    latex.DrawLatex( x, y, textMap[experiment] )
    return
##
# @short Function drawing generic text on the plots
#
# This is just to save the user a few lines of code in his/her script
# when putting some additional comments on a plot.
#
# @param x The X position of the text in the [0,1] interval
# @param y The Y position of the text in the [0,1] interval
# @param text The text to be displayed
# @param color The color of the text
def DrawTextOneLine( x, y, text, color = 1, size = 0.04, NDC = True, halign = "left", valign = "bottom", skipLines = 0 ):
    """Draw a single line of text and return the TLatex object used.

    halign/valign select the ROOT text-alignment code; skipLines shifts
    the text downwards by that many (scaled) line heights.
    """
    align = 10 * {"left":1, "center":2, "right":3}[halign] \
            + {"bottom":1, "center":2, "top":3}[valign]
    # Top-aligned text uses a slightly tighter line height.
    scaleLineHeight = 0.8 if valign == "top" else 1.0
    if skipLines:
        text = "#lower[%.1f]{%s}" % (skipLines*scaleLineHeight,text)
    import ROOT
    latex = ROOT.TLatex()
    if NDC:
        latex.SetNDC()
    latex.SetTextAlign( align )
    latex.SetTextColor( color )
    latex.SetTextSize( size )
    latex.DrawLatex( x, y, text )
    return latex
def DrawText( x, y, text, color = 1, size = 0.04, NDC = True, halign = "left", valign = "bottom" ):
    """Draw (possibly multi-line) text; return the list of TLatex objects.

    In NDC mode each subsequent line is drawn at a lower y position;
    otherwise lines are stacked via DrawTextOneLine's skipLines argument.
    """
    drawn = []
    skipLines = 0
    for line in text.split('\n'):
        drawn.append( DrawTextOneLine( x, y, line, color, size, NDC,
                                       halign, valign, skipLines ) )
        if NDC == True:
            # Step down by one line height, scaled to the text size.
            y -= 0.05 * size/0.04
        else:
            skipLines += 1
    return drawn
##
# @short Function drawing the luminosity value on the plots
#
# This is just a convenience function for putting a pretty note
# on the plots of how much luminosity was used to produce them.
#
# @param x The X position of the text in the [0,1] interval
# @param y The Y position of the text in the [0,1] interval
# @param lumi The luminosity value in 1/pb
# @param color The color of the text
def DrawLuminosity( x, y, lumi, color = 1 ):
    """Draw an integrated-luminosity note, with lumi given in 1/pb."""
    label = "#intLdt = " + str( lumi ) + " pb^{-1}"
    return DrawText( x, y, label, color )
##
# @short Function drawing the luminosity value on the plots in fb-1
#
# This is just a convenience function for putting a pretty note
# on the plots of how much luminosity was used to produce them.
#
# @param x The X position of the text in the [0,1] interval
# @param y The Y position of the text in the [0,1] interval
# @param lumi The luminosity value in 1/fb
# @param color The color of the text
def DrawLuminosityFb( x, y, lumi, color = 1, sqrts = 7, size = 0.04, twoLines = True ):
    """Draw the luminosity (in 1/fb) together with the sqrt(s) energy.

    Float arguments are formatted to one (lumi) / zero (sqrts) decimal
    places; twoLines splits the note onto two lines.
    """
    lumiText = "%.1f" % lumi if isinstance( lumi, float ) else lumi
    sqrtsText = "%.0f" % sqrts if isinstance( sqrts, float ) else sqrts
    note = "#intLdt = %s fb^{-1}, #sqrt{s} = %s TeV" % (lumiText, sqrtsText)
    if twoLines:
        note = note.replace( ", ", "\n " )
    return DrawText( x, y, note, color, size )
def DrawLuminosityFbSplit( x, y, lumi, color = 1, sqrts = 7, size = 0.04, twoLines = True ):
    """Draw one luminosity note per centre-of-mass energy.

    lumi may be a number or a string like "4.7 + 20.3"; sqrts a number
    or a string like "7 and 8". The i-th luminosity is paired with the
    i-th energy.
    """
    if isinstance( lumi, (float, int) ):
        lumi = "%.1f" % lumi
    if isinstance( sqrts, (float, int) ):
        sqrts = "%.0f" % sqrts
    pieces = []
    for l, s in zip( lumi.split(" + "), sqrts.split(" and ") ):
        pieces.append( "#sqrt{s} = %s TeV: #lower[-0.17]{#scale[0.57]{#int}}Ldt = %s fb^{-1}" % (s,l) )
    note = ", ".join( pieces )
    if twoLines:
        note = note.replace( ", ", "\n" )
    return DrawText( x, y, note, color, size )
| {
"repo_name": "xju2/monojet",
"path": "scripts/AtlasUtil.py",
"copies": "1",
"size": "4336",
"license": "mit",
"hash": -3696687701166592000,
"line_mean": 37.7142857143,
"line_max": 122,
"alpha_frac": 0.6409132841,
"autogenerated": false,
"ratio": 2.9516678012253235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4092581085325323,
"avg_score": null,
"num_lines": null
} |
"""$Id: author.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# author element.
#
class author(validatorBase):
  """Validates an author element (RSS, Atom and FOAF vocabularies)."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
  def validate(self):
    # An author must carry a name (either RSS <name> or Atom <name>).
    if "name" not in self.children and "atom_name" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"name"}))
  def do_name(self):
    return nonhtml(), nonemail(), nonblank(), noduplicates()
  def do_email(self):
    return addr_spec(), noduplicates()
  def do_uri(self):
    return nonblank(), rfc3987(), nows(), noduplicates()
  def do_foaf_workplaceHomepage(self):
    return rdfResourceURI()
  def do_foaf_homepage(self):
    return rdfResourceURI()
  def do_foaf_weblog(self):
    return rdfResourceURI()
  def do_foaf_plan(self):
    return text()
  def do_foaf_firstName(self):
    return text()
  def do_xhtml_div(self):
    # Imported lazily to avoid a circular import with the content module.
    from content import diveater
    return diveater()
  # RSS/Atom support
  do_atom_name = do_name
  do_atom_email = do_email
  do_atom_uri = do_uri
| {
"repo_name": "Einsteinish/PyTune3",
"path": "vendor/feedvalidator/author.py",
"copies": "16",
"size": "1386",
"license": "mit",
"hash": -925066896212175900,
"line_mean": 25.1509433962,
"line_max": 94,
"alpha_frac": 0.6630591631,
"autogenerated": false,
"ratio": 3.10762331838565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03190387284078172,
"num_lines": 53
} |
"""Basic object class, used as abstract class definition for event
capable objects."""
from ocempgui.events import EventManager, EventCallback, INotifyable
class BaseObject (INotifyable):
"""BaseObject () -> BaseObject
An object class, which is able to receive events.
The BaseObject provides a basic set of methods, which enable it to
be suitable for event driven systems. It is able to listen to
specific event types and runs connected callback functions upon
their occurance.
It is designed for usage with the EventManager class from the
ocempgui.events package and needs to be inherited to be fully
usable. It can be easily connected to an instance of the
EventManager via the 'manager' attribute (or using the set_manager()
method), which also will remove it from another instance, it was
connected to before. Thus the BaseObject can be only connected to
ONE EventManager instance at a time by default.
The BaseObject class does not provide any predefined signals, it
listens on (those will be called slots here). Instead an inherited
class has to provide its own signal types within the private
'_signals' dictionary. The entries within the '_signals' dictionary
need to be key-value pairs, which have a list as value and a free
choosable type as key (if the default EventCallback class is
used). A typical example about how to create own signal slots
follows:
class OwnObject (BaseObject):
...
def __init__ (self):
BaseObject.__init__ (self)
self._signals['ping'] = []
self._signals['pong'] = []
The OwnObject class can listen to signals, which are strings being
'ping' and 'pong'. It is now possible to connect a callback to those
signals:
obj = OwnObject ()
obj.connect_signal ('ping', cb_func, ...)
obj.connect_signal ('pong', cb_func, ...)
Any instance of the BaseObject class should be explicitly destroyed
using the destroy() method, if it is not needed anymore. This method
takes care of the deletion any callback objects and removes the
object from the connected event manager.
Attributes:
manager - The event manager for emitting events.
"""
def __init__ (self):
self._signals = {}
self._manager = None
def connect_signal (self, signal, callback, *data):
"""B.connect_signal (...) -> EventCallback
Connects a function or method to a signal.
The function or method is invoked as soon as the signal is
emitted on the object. If *data is supplied, it will be passed
as argument(s) to the connected function. The returned
EventCallback can be used to disconnect the function using
disconnect_signal().
"""
ev = EventCallback (signal, callback, *data)
self._signals[signal].append (ev)
return ev
def disconnect_signal (self, event):
"""B.disconnect_signal (...) -> None
Removes a connected EventCallback from the object.
"""
self._signals[event.signal].remove (event)
def run_signal_handlers (self, signal, *data):
"""B.run_signal_handlers (...) -> None
Invokes all connected EventCallbacks for a specific signal.
The method invokes all connected callbacks for the given
signal. Additional data will be passed to the callback invoke,
if given.
"""
for callback in self._signals[signal]:
callback.run (*data)
def set_event_manager (self, manager):
"""B.set_event_manager (...) -> None
Sets the event manager to use by the object.
In case the new event manager to set differs from the current
event manager, the object will be removed from the current one
and added to the new event manager.
It is possible to remove the object only by passing a None value
to the method. The object then will remove itself from the
connected event manager only.
Raises a TypeError, if the passed manager does not inherit
from the EventManager class.
"""
if (manager != None) and not isinstance (manager, EventManager):
raise TypeError ("manager must inherit from EventManager")
if (self._manager != None) and (self._manager != manager):
self._manager.remove_object (self)
self._manager = manager
# An empty list or pygame.sprite.Group evaluates to False in a
# boolean expression, thus we need to explicitly check for such
# objects.
if self._manager != None:
self._manager.add_object (self, *self._signals.keys ())
def emit (self, signal, data):
"""B.emit (...) -> bool
Emits a signal through the connected event manager.
Emits a signal using the connected event manager (if any), and
returns True upon success or False upon an error.
"""
if self.manager != None:
self.manager.emit (signal, data)
return True
return False
def destroy (self):
    """B.destroy () -> None
    Destroys the object and disconnects it from its event manager.
    This method should be called, if the object is not needed
    anymore.
    """
    # Drop the signal table first, so no further callbacks can be
    # dispatched while the object detaches itself.
    del self._signals
    if self.manager != None:
        self.manager.remove_object (self)
    del self._manager
# Read/write property for the event manager; assignment is routed
# through set_event_manager() so (de)registration stays consistent.
manager = property (lambda self: self._manager,
                    lambda self, var: self.set_event_manager (var),
                    doc = "The event manager to use by the object.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/object/BaseObject.py",
"copies": "1",
"size": "7111",
"license": "bsd-2-clause",
"hash": -2290178485999679700,
"line_mean": 39.4034090909,
"line_max": 78,
"alpha_frac": 0.6717761215,
"autogenerated": false,
"ratio": 4.617532467532468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5789308589032467,
"avg_score": null,
"num_lines": null
} |
""" Base classes for, e.g., Kernels and Workers
@todo: Check to see if we need to reset sys.exit and set
handlers every time - it is most likely that these
mods are preserved in the fork so they shouldn't need to
be done in any extension of HandlerBase.
"""
import sys
import peloton.utils.logging as logging
from peloton.utils.config import PelotonSettings
import signal
from twisted.internet import reactor
class HandlerBase(object):
    """Common base for long-running Peloton processes (kernels, workers).

    Responsibilities:
      - keep a reference to the runtime settings and profile
      - hide sys.exit so service code cannot shut a node down by
        accident (see _trapExit)
      - install signal handlers for clean shutdown / reload once the
        twisted reactor is running
    """
    def __init__(self, settings=None):
        # NOTE(review): the default used to be a shared mutable {} -- a
        # classic Python pitfall. Use None as the sentinel instead; the
        # behaviour for callers passing no argument is unchanged.
        if settings is None:
            settings = {}
        self.settings = settings
        if settings.has_key('profile'):
            self.profile = settings.profile
        else:
            self.profile = PelotonSettings()
        self.logger = logging.getLogger()
        # hide sys.exit
        self._trapExit()
        # ensure that handlers only installed when things are OK
        reactor.callWhenRunning(self._setSignalHandlers)
    def _trapExit(self):
        """ Move sys.exit to sys.realexit and put a dummy
        into sys.exit. This prevents service writers from accidentally
        closing a node down."""
        def dummyExit():
            raise Exception("sys.exit disabled to prevent accidental node shutdown.")
        sys.realexit = sys.exit
        sys.exit = dummyExit
    def _signalClosedown(self, num, frame):
        """ Handle SIGINT/TERM by scheduling closedown() on the reactor. """
        # delay helps ensure things closedown neatly... think
        # the shutdown tramples on event handler code. Not sure.
        # Anyhow... it helps.
        if not self.__CLOSING_DOWN__:
            self.__CLOSING_DOWN__ = True
            reactor.callLater(0.1, self.closedown)
    def _signalReload(self, num, frame):
        """ Reaction to a SIGHUP: need to re-start so as to re-load configuration
        files etc."""
        raise NotImplementedError("SIGHUP handler not yet written.")
    def _setSignalHandlers(self):
        """Set signal traps for INT and TERM to the _signalClosedown method
        that tidies up behind itself, and HUP to _signalReload."""
        self.__CLOSING_DOWN__ = False
        signal.signal(signal.SIGINT, self._signalClosedown)
        signal.signal(signal.SIGTERM, self._signalClosedown)
        signal.signal(signal.SIGHUP, self._signalReload)
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/base.py",
"copies": "1",
"size": "2386",
"license": "bsd-3-clause",
"hash": 6860580205269928000,
"line_mean": 36.8888888889,
"line_max": 85,
"alpha_frac": 0.6504610226,
"autogenerated": false,
"ratio": 4.1280276816609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016649900367227126,
"num_lines": 63
} |
"""$Id: base.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Base class for output classes"""
from UserList import UserList
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
from feedvalidator.logging import Info, Warning, Error
class BaseFormatter(UserList):
    """Base class for output formatters.

    Wraps a list of logged events (Info/Warning/Error) and formats
    them lazily: indexing the formatter returns the formatted
    representation of the corresponding event. Localized message
    templates come from the ``lang`` i18n module.
    """
    def __getitem__(self, i):
        return self.format(self.data[i])
    def getErrors(self):
        """Return the formatted representations of all Error events."""
        return [self.format(msg) for msg in self.data if isinstance(msg,Error)]
    def getWarnings(self):
        """Return the formatted representations of all Warning events."""
        return [self.format(msg) for msg in self.data if isinstance(msg,Warning)]
    def getLine(self, event):
        """Return the localized 'line N' fragment, or '' if unknown."""
        # was: event.params.has_key('line') -- 'in' works on py2 and py3
        if 'line' not in event.params: return ''
        return lang.line % event.params
    def getColumn(self, event):
        """Return the localized 'column N' fragment, or '' if unknown."""
        if 'column' not in event.params: return ''
        return lang.column % event.params
    def getLineAndColumn(self, event):
        """Return 'line N, column M:' or '' when no position is known."""
        line = self.getLine(event)
        if not line: return ''
        column = self.getColumn(event)
        return '%s, %s:' % (line, column)
    def getCount(self, event):
        """Return the localized occurrence-count fragment for collapsed
        duplicate events, or '' when the event occurred at most once."""
        if 'msgcount' not in event.params: return ''
        count = int(event.params['msgcount'])
        if count <= 1: return ''
        return lang.occurances % event.params
    def getMessageClass(self, event):
        """Walk the event's class hierarchy (breadth-first) and return the
        first class with a localized message; falls back to a diagnostic
        string when no message is defined."""
        classes = [event.__class__]
        while len(classes):
            if classes[0] in lang.messages:
                return classes[0]
            classes = classes + list(classes[0].__bases__)
            del classes[0]
        return "Undefined message: %s[%s]" % (event.__class__, event.params)
    def getMessage(self, event):
        """Return the localized, parameter-substituted message for the
        event, using the same breadth-first lookup as getMessageClass."""
        classes = [event.__class__]
        while len(classes):
            if classes[0] in lang.messages:
                return lang.messages[classes[0]] % event.params
            classes = classes + list(classes[0].__bases__)
            del classes[0]
        return "Undefined message: %s[%s]" % (event.__class__, event.params)
    def format(self, event):
        """returns the formatted representation of a single event"""
        # was: `event` -- backquotes are a syntax error on Python 3;
        # repr() is the exact equivalent on Python 2.
        return repr(event)
| {
"repo_name": "nriley/NewsBlur",
"path": "vendor/feedvalidator/formatter/base.py",
"copies": "16",
"size": "2249",
"license": "mit",
"hash": -1223273077245527000,
"line_mean": 32.5671641791,
"line_max": 94,
"alpha_frac": 0.6496220542,
"autogenerated": false,
"ratio": 3.4493865030674846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""$Id: base.py 744 2007-03-24 11:57:16Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 744 $"
__date__ = "$Date: 2007-03-24 11:57:16 +0000 (Sat, 24 Mar 2007) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
from logging import NonCanonicalURI, NotUTF8
import re
# references:
# http://web.resource.org/rss/1.0/modules/standard.html
# http://web.resource.org/rss/1.0/modules/proposed.html
# http://dmoz.org/Reference/Libraries/Library_and_Information_Science/Technical_Services/Cataloguing/Metadata/RDF/Applications/RSS/Specifications/RSS1.0_Modules/
# Map of known namespace URI -> preferred (canonical) prefix. Used to
# recognize extension vocabularies and to report non-standard or
# reserved prefix bindings. Note that several URIs intentionally share
# a prefix (e.g. both threading namespaces map to "thr").
namespaces = {
    "http://www.bloglines.com/about/specs/fac-1.0": "access",
    "http://webns.net/mvcb/": "admin",
    "http://purl.org/rss/1.0/modules/aggregation/": "ag",
    "http://purl.org/rss/1.0/modules/annotate/": "annotate",
    "http://media.tangent.org/rss/1.0/": "audio",
    "http://backend.userland.com/blogChannelModule": "blogChannel",
    "http://web.resource.org/cc/": "cc",
    "http://www.microsoft.com/schemas/rss/core/2005": "cf",
    "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
    "http://purl.org/rss/1.0/modules/company": "company",
    "http://purl.org/rss/1.0/modules/content/": "content",
    "http://my.theinfo.org/changed/1.0/rss/": "cp",
    "http://purl.org/dc/elements/1.1/": "dc",
    "http://purl.org/dc/terms/": "dcterms",
    "http://purl.org/rss/1.0/modules/email/": "email",
    "http://purl.org/rss/1.0/modules/event/": "ev",
    "http://www.w3.org/2003/01/geo/wgs84_pos#": "geo",
    "http://geourl.org/rss/module/": "geourl",
    "http://www.georss.org/georss": "georss",
    "http://www.opengis.net/gml": "gml",
    "http://postneo.com/icbm": "icbm",
    "http://purl.org/rss/1.0/modules/image/": "image",
    "http://www.itunes.com/dtds/podcast-1.0.dtd": "itunes",
    "http://xmlns.com/foaf/0.1/": "foaf",
    "http://purl.org/rss/1.0/modules/link/": "l",
    "http://search.yahoo.com/mrss/": "media",
    "http://a9.com/-/spec/opensearch/1.1/": "opensearch",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://www.w3.org/2000/01/rdf-schema#": "rdfs",
    "http://purl.org/rss/1.0/modules/reference/": "ref",
    "http://purl.org/rss/1.0/modules/richequiv/": "reqv",
    "http://purl.org/rss/1.0/modules/rss091#": "rss091",
    "http://purl.org/rss/1.0/modules/search/": "search",
    "http://purl.org/rss/1.0/modules/slash/": "slash",
    "http://purl.org/rss/1.0/modules/servicestatus/": "ss",
    "http://hacks.benhammersley.com/rss/streaming/": "str",
    "http://purl.org/rss/1.0/modules/subscription/": "sub",
    "http://purl.org/rss/1.0/modules/syndication/": "sy",
    "http://purl.org/rss/1.0/modules/taxonomy/": "taxo",
    "http://purl.org/rss/1.0/modules/threading/": "thr",
    "http://purl.org/syndication/thread/1.0": "thr",
    "http://madskills.com/public/xml/rss/module/trackback/": "trackback",
    "http://wellformedweb.org/CommentAPI/": "wfw",
    "http://purl.org/rss/1.0/modules/wiki/": "wiki",
    "http://www.usemod.com/cgi-bin/mb.pl?ModWiki": "wiki",
    "http://schemas.xmlsoap.org/soap/envelope/": "soap",
    "http://www.w3.org/2005/Atom": "atom",
    "http://www.w3.org/1999/xhtml": "xhtml",
    "http://my.netscape.com/rdf/simple/0.9/": "rss090",
    "http://purl.org/net/rss1.1#": "rss11",
    "http://base.google.com/ns/1.0": "g",
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://openid.net/xmlns/1.0": "openid",
    "xri://$xrd*($v*2.0)": "xrd",
    "xri://$xrds": "xrds",
}
def near_miss(ns):
    """Normalize a namespace URI for fuzzy comparison.

    Strips everything after the last word character (e.g. a trailing
    '/' or '#') and lowercases the rest, so close variants of a known
    namespace URI compare equal. If the value contains no word
    character at all, or is not a string, it is returned unchanged.
    """
    try:
        # raw string avoids the invalid-escape warning for "\w"
        return re.match(r".*\w", ns).group().lower()
    except (AttributeError, TypeError):
        # AttributeError: no match (re.match returned None);
        # TypeError: non-string input. Previously a bare except.
        return ns
nearly_namespaces = dict([(near_miss(u),p) for u,p in namespaces.items()])
# Attributes in the XML namespace (xml:base, xml:lang, xml:space) are
# allowed on any element and are never reported as unexpected.
stdattrs = [(u'http://www.w3.org/XML/1998/namespace', u'base'),
            (u'http://www.w3.org/XML/1998/namespace', u'lang'),
            (u'http://www.w3.org/XML/1998/namespace', u'space')]
#
# From the SAX parser's point of view, this class is the one responsible for
# handling SAX events. In actuality, all this class does is maintain a
# pushdown stack of the *real* content handlers, and delegates sax events
# to the current one.
#
class SAXDispatcher(ContentHandler):
    """Top-level SAX handler.

    Maintains a pushdown stack of the *real* content handlers and
    delegates every SAX event to the handlers currently on top of the
    stack. Validation findings are collected in loggedEvents.
    """

    # When set, repeated occurrences of an equivalent event are
    # collapsed into one logged event with an incremented 'msgcount'.
    firstOccurrenceOnly = 0

    def __init__(self, base, selfURIs, encoding):
        from root import root
        ContentHandler.__init__(self)
        # Fallback position info for when the locator is unavailable.
        self.lastKnownLine = 1
        self.lastKnownColumn = 0
        self.loggedEvents = []
        self.feedType = 0
        # IDNA-encode the base URI when possible (non-ASCII hostnames).
        try:
            self.xmlBase = base.encode('idna')
        except:
            self.xmlBase = base
        self.selfURIs = selfURIs
        self.encoding = encoding
        # Stack of handler lists; starts with the document root handler.
        self.handler_stack=[[root(self, base)]]
        self.literal_entities=[]
        self.defaultNamespaces = []

    # experimental RSS-Profile draft 1.06 support
    def setLiterals(self, literals):
        # Remember entity names which are to be treated literally.
        for literal in literals:
            if literal not in self.literal_entities:
                self.literal_entities.append(literal)

    def setDocumentLocator(self, locator):
        self.locator = locator
        ContentHandler.setDocumentLocator(self, self.locator)

    def setFirstOccurrenceOnly(self, firstOccurrenceOnly=1):
        self.firstOccurrenceOnly = firstOccurrenceOnly

    def startPrefixMapping(self, prefix, uri):
        # Record the mapping on every handler on top of the stack.
        for handler in iter(self.handler_stack[-1]):
            handler.namespace[prefix] = uri
        # A namespace URI containing whitespace is invalid.
        if uri and len(uri.split())>1:
            from xml.sax import SAXException
            self.error(SAXException('Invalid Namespace: %s' % uri))
        if prefix in namespaces.values():
            # A well-known prefix bound to an unexpected URI.
            if not namespaces.get(uri,'') == prefix and prefix:
                from logging import ReservedPrefix
                preferredURI = [key for key, value in namespaces.items() if value == prefix][0]
                self.log(ReservedPrefix({'prefix':prefix, 'ns':preferredURI}))
        elif prefix=='wiki' and uri.find('usemod')>=0:
            from logging import ObsoleteWikiNamespace
            self.log(ObsoleteWikiNamespace({'preferred':namespaces[uri], 'ns':uri}))
        elif namespaces.has_key(uri):
            # A well-known URI bound to a non-standard prefix.
            if not namespaces[uri] == prefix and prefix:
                from logging import NonstdPrefix
                self.log(NonstdPrefix({'preferred':namespaces[uri], 'ns':uri}))

    def namespaceFor(self, prefix):
        return None

    def startElementNS(self, name, qname, attrs):
        self.lastKnownLine = self.locator.getLineNumber()
        self.lastKnownColumn = self.locator.getColumnNumber()
        # SAX passes name as (uri, localname); unpack it.
        qname, name = name
        for handler in iter(self.handler_stack[-1]):
            handler.startElementNS(name, qname, attrs)
        if len(attrs):
            # Filter attributes down to those no handler expects.
            present = attrs.getNames()
            unexpected = filter(lambda x: x not in stdattrs, present)
            for handler in iter(self.handler_stack[-1]):
                ean = handler.getExpectedAttrNames()
                if ean: unexpected = filter(lambda x: x not in ean, unexpected)
            for u in unexpected:
                if u[0] and near_miss(u[0]) not in nearly_namespaces:
                    # Unknown namespace: tolerated as an extension
                    # attribute on RSS 2.0 feeds.
                    feedtype=self.getFeedType()
                    if (not qname) and feedtype and (feedtype==TYPE_RSS2):
                        from logging import InvalidExtensionAttr
                        self.log(InvalidExtensionAttr({"attribute":u, "element":name}))
                        continue
                from logging import UnexpectedAttribute
                if not u[0]: u=u[1]
                self.log(UnexpectedAttribute({"parent":name, "attribute":u, "element":name}))

    def resolveEntity(self, publicId, systemId):
        if not publicId and not systemId:
            import cStringIO
            return cStringIO.StringIO()
        # NOTE(review): self.xmlvalidator appears to be assigned outside
        # this class; any failure here is deliberately ignored.
        try:
            def log(exception):
                from logging import SAXError
                self.log(SAXError({'exception':str(exception)}))
            if self.xmlvalidator:
                self.xmlvalidator(log)
            self.xmlvalidator=0
        except:
            pass
        if (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
            systemId=='http://my.netscape.com/publish/formats/rss-0.91.dtd'):
            # The one DTD that is accepted (though deprecated).
            from logging import ValidDoctype, DeprecatedDTD
            self.log(ValidDoctype({}))
            self.log(DeprecatedDTD({}))
        else:
            from logging import ContainsSystemEntity
            self.lastKnownLine = self.locator.getLineNumber()
            self.lastKnownColumn = self.locator.getColumnNumber()
            self.log(ContainsSystemEntity({}))
        # Never fetch external entities; hand back an empty stream.
        from StringIO import StringIO
        return StringIO()

    def skippedEntity(self, name):
        from logging import ValidDoctype
        # Entities known from HTML are fine if a valid doctype was seen.
        if [e for e in self.loggedEvents if e.__class__ == ValidDoctype]:
            from htmlentitydefs import name2codepoint
            if name in name2codepoint: return
        from logging import UndefinedNamedEntity
        self.log(UndefinedNamedEntity({'value':name}))

    def characters(self, string):
        self.lastKnownLine = self.locator.getLineNumber()
        self.lastKnownColumn = self.locator.getColumnNumber()
        for handler in iter(self.handler_stack[-1]):
            handler.characters(string)

    def endElementNS(self, name, qname):
        self.lastKnownLine = self.locator.getLineNumber()
        self.lastKnownColumn = self.locator.getColumnNumber()
        qname, name = name
        for handler in iter(self.handler_stack[-1]):
            handler.endElementNS(name, qname)
        # Pop the handlers installed for this element.
        del self.handler_stack[-1]

    def push(self, handlers, name, attrs, parent):
        # Accepts a single handler or an iterable of handlers; each is
        # bound to the element and prevalidated before being stacked.
        if hasattr(handlers,'__iter__'):
            for handler in iter(handlers):
                handler.setElement(name, attrs, parent)
                handler.value=""
                handler.prevalidate()
        else:
            handlers.setElement(name, attrs, parent)
            handlers.value=""
            handlers.prevalidate()
            handlers = [handlers]
        self.handler_stack.append(handlers)

    def log(self, event, offset=(0,0)):
        # Record a validation event, annotating it with position info
        # and (optionally) collapsing duplicates.
        def findDuplicate(self, event):
            duplicates = [e for e in self.loggedEvents if e.__class__ == event.__class__]
            if duplicates and (event.__class__ in [NonCanonicalURI]):
                return duplicates[0]
            for dup in duplicates:
                for k, v in event.params.items():
                    if k != 'value':
                        if not k in dup.params or dup.params[k] != v: break
                else:
                    return dup
        # Normalize the 'element' param to a prefix:name display form.
        if event.params.has_key('element') and event.params['element']:
            if not isinstance(event.params['element'],tuple):
                event.params['element']=':'.join(event.params['element'].split('_', 1))
            elif event.params['element'][0]==u'http://www.w3.org/XML/1998/namespace':
                event.params['element'] = 'xml:' + event.params['element'][-1]
        if self.firstOccurrenceOnly:
            dup = findDuplicate(self, event)
            if dup:
                dup.params['msgcount'] = dup.params['msgcount'] + 1
                return
            event.params['msgcount'] = 1
        try:
            line = self.locator.getLineNumber() + offset[0]
            backupline = self.lastKnownLine
            column = (self.locator.getColumnNumber() or 0) + offset[1]
            backupcolumn = self.lastKnownColumn
        except AttributeError:
            line = backupline = column = backupcolumn = 1
        event.params['line'] = line
        event.params['backupline'] = backupline
        event.params['column'] = column
        event.params['backupcolumn'] = backupcolumn
        self.loggedEvents.append(event)

    def error(self, exception):
        # Log the parse error, then abort parsing by re-raising.
        from logging import SAXError
        self.log(SAXError({'exception':str(exception)}))
        raise exception
    # Treat fatal errors and warnings exactly like errors.
    fatalError=error
    warning=error

    def getFeedType(self):
        return self.feedType

    def setFeedType(self, feedType):
        self.feedType = feedType
#
# This base class for content handlers keeps track of such administrative
# details as the parent of the current element, and delegating both log
# and push events back up the stack. It will also concatenate up all of
# the SAX events associated with character data into a value, handling such
# things as CDATA and entities.
#
# Subclasses are expected to declare "do_name" methods for every
# element that they support. These methods are expected to return the
# appropriate handler for the element.
#
# The name of the element and the names of the children processed so
# far are also maintained.
#
# Hooks are also provided for subclasses to do "prevalidation" and
# "validation".
#
from logging import TYPE_RSS2
class validatorBase(ContentHandler):
    """Base class for element content handlers.

    Keeps track of the parent element, accumulates character data into
    'value', and dispatches child elements to do_* methods declared by
    subclasses. Subclasses may override prevalidate()/validate() as
    hooks run before/after the element content is processed.
    """

    def __init__(self):
        ContentHandler.__init__(self)
        self.value = ""
        self.attrs = None
        self.children = []
        self.isValid = 1
        self.name = None
        self.itunes = False
        self.namespace = {}

    def setElement(self, name, attrs, parent):
        # Bind this handler to a concrete element occurrence.
        self.name = name
        self.attrs = attrs
        self.parent = parent
        self.dispatcher = parent.dispatcher
        self.line = self.dispatcher.locator.getLineNumber()
        self.col = self.dispatcher.locator.getColumnNumber()
        # xml:lang is inherited from the parent unless overridden.
        self.xmlLang = parent.xmlLang
        # Resolve xml:base against the parent's base, validating it as
        # an IRI first.
        if attrs and attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'base')):
            self.xmlBase=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'base'))
            from validators import rfc3987
            self.validate_attribute((u'http://www.w3.org/XML/1998/namespace',u'base'),
                                    rfc3987)
            from urlparse import urljoin
            self.xmlBase = urljoin(parent.xmlBase, self.xmlBase)
        else:
            self.xmlBase = parent.xmlBase
        return self

    def simplename(self, name):
        # (uri, localname) -> "prefix:localname" using known prefixes.
        if not name[0]: return name[1]
        return namespaces.get(name[0], name[0]) + ":" + name[1]

    def namespaceFor(self, prefix):
        # Walk up the handler chain to resolve a prefix binding.
        if self.namespace.has_key(prefix):
            return self.namespace[prefix]
        elif self.parent:
            return self.parent.namespaceFor(prefix)
        else:
            return None

    def validate_attribute(self, name, rule):
        # 'rule' may be a validator class or instance; run it against
        # the attribute's value as if it were a child element.
        if not isinstance(rule,validatorBase): rule = rule()
        if isinstance(name,str): name = (None,name)
        rule.setElement(self.simplename(name), {}, self)
        rule.value=self.attrs.getValue(name)
        rule.validate()

    def validate_required_attribute(self, name, rule):
        if self.attrs and self.attrs.has_key(name):
            self.validate_attribute(name, rule)
        else:
            from logging import MissingAttribute
            self.log(MissingAttribute({"attr": self.simplename(name)}))

    def validate_optional_attribute(self, name, rule):
        if self.attrs and self.attrs.has_key(name):
            self.validate_attribute(name, rule)

    def getExpectedAttrNames(self):
        # Subclasses return the attribute names they accept; the bare
        # None expression means "no expectations declared".
        None

    def unknown_starttag(self, name, qname, attrs):
        from validators import any
        return any(self, name, qname, attrs)

    def startElementNS(self, name, qname, attrs):
        # Pick up and validate an explicit xml:lang, if present.
        if attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'lang')):
            self.xmlLang=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'lang'))
            if self.xmlLang:
                from validators import iso639_validate
                iso639_validate(self.log, self.xmlLang, "xml:lang", name)
        from validators import eater
        feedtype=self.getFeedType()
        # Elements without a namespace are only acceptable on RSS 2.0.
        if (not qname) and feedtype and (feedtype!=TYPE_RSS2):
            from logging import UndeterminableVocabulary
            self.log(UndeterminableVocabulary({"parent":self.name, "element":name, "namespace":'""'}))
            qname="null"
        if qname in self.dispatcher.defaultNamespaces: qname=None
        # Map nearly-matching namespace URIs onto the preferred prefix
        # and fold it into the element name (e.g. "dc_title").
        nm_qname = near_miss(qname)
        if nearly_namespaces.has_key(nm_qname):
            prefix = nearly_namespaces[nm_qname]
            qname, name = None, prefix + "_" + name
            if prefix == 'itunes' and not self.itunes and not self.parent.itunes:
                if hasattr(self, 'setItunes'): self.setItunes(True)
        # ensure all attribute namespaces are properly defined
        for (namespace,attr) in attrs.keys():
            if ':' in attr and not namespace:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":self.name, "element":attr}))
        if qname=='http://purl.org/atom/ns#':
            from logging import ObsoleteNamespace
            self.log(ObsoleteNamespace({"element":"feed"}))
        # Scan attribute values for windows-1252 / replacement chars.
        for key, string in attrs.items():
            for c in string:
                if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
                    from validators import BadCharacters
                    self.log(BadCharacters({"parent":name, "element":key[-1]}))
        if qname:
            # Element from an unrecognized namespace.
            handler = self.unknown_starttag(name, qname, attrs)
            name="unknown_"+name
        else:
            try:
                self.child=name
                if name.startswith('dc_'):
                    # handle "Qualified" Dublin Core
                    handler = getattr(self, "do_" + name.replace("-","_").split('.')[0])()
                else:
                    handler = getattr(self, "do_" + name.replace("-","_"))()
            except AttributeError:
                # No do_* method: classify why the element is unknown.
                if name.find(':') != -1:
                    from logging import MissingNamespace
                    self.log(MissingNamespace({"parent":self.name, "element":name}))
                    handler = eater()
                elif name.startswith('xhtml_'):
                    from logging import MisplacedXHTMLContent
                    self.log(MisplacedXHTMLContent({"parent": ':'.join(self.name.split("_",1)), "element":name}))
                    handler = eater()
                else:
                    from logging import UndefinedElement
                    self.log(UndefinedElement({"parent": ':'.join(self.name.split("_",1)), "element":name}))
                    handler = eater()
        self.push(handler, name, attrs)
        # MAP - always append name, even if already exists (we need this to
        # check for too many hour elements in skipHours, and it doesn't
        # hurt anything else)
        self.children.append(name)

    def normalizeWhitespace(self):
        self.value = self.value.strip()

    def endElementNS(self, name, qname):
        self.normalizeWhitespace()
        self.validate()
        # Only log a ValidElement if no event flipped isValid to 0.
        if self.isValid and self.name:
            from validators import ValidElement
            self.log(ValidElement({"parent":self.parent.name, "element":name}))

    def textOK(self):
        # Default reaction to text content: report it as unexpected.
        # Subclasses that accept text override this with a no-op.
        from validators import UnexpectedText
        self.log(UnexpectedText({"element":self.name,"parent":self.parent.name}))

    def characters(self, string):
        if string.strip(): self.textOK()
        line=column=0
        pc=' '
        for c in string:
            # latin characters double encoded as utf-8
            if 0x80 <= ord(c) <= 0xBF:
                if 0xC2 <= ord(pc) <= 0xC3:
                    try:
                        string.encode('iso-8859-1').decode('utf-8')
                        from validators import BadCharacters
                        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,max(1,column-1)))
                    except:
                        pass
            pc = c
            # win1252
            if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
                from validators import BadCharacters
                self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,column))
            column=column+1
            # Track line breaks so logged offsets stay accurate.
            if ord(c) in (10,13):
                column=0
                line=line+1
        self.value = self.value + string

    def log(self, event, offset=(0,0)):
        # Tag the event with this element's name, forward it to the
        # dispatcher, and mark this element as invalid.
        if not event.params.has_key('element'):
            event.params['element'] = self.name
        self.dispatcher.log(event, offset)
        self.isValid = 0

    def setFeedType(self, feedType):
        self.dispatcher.setFeedType(feedType)

    def getFeedType(self):
        return self.dispatcher.getFeedType()

    def push(self, handler, name, value):
        self.dispatcher.push(handler, name, value, self)

    def leaf(self):
        # Default handler for leaf elements: plain text validator.
        from validators import text
        return text()

    def prevalidate(self):
        # Hook: runs right after setElement(), before any content.
        pass

    def validate(self):
        # Hook: runs when the element ends, before ValidElement logging.
        pass
| {
"repo_name": "epiphany27/NewsBlur",
"path": "vendor/feedvalidator/base.py",
"copies": "16",
"size": "19402",
"license": "mit",
"hash": -8884613260529229000,
"line_mean": 36.9686888454,
"line_max": 161,
"alpha_frac": 0.6388001237,
"autogenerated": false,
"ratio": 3.509768451519537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Globally used variables and objects by the widgets."""
from pygame import init as pygameinit, key
from Style import Style
from StyleInformation import StyleInformation
# Globally used style object for the widgets; None until init() creates
# the shared Style instance.
GlobalStyle = None
# Timer rate in ms to adjust the double-click speed; init() sets the
# 250 ms default, set_doubleclick_rate() adjusts it afterwards.
DoubleClickRate = 0
def init():
    """base.init () -> None

    Initializes the globally used variables and objects.

    Prepares the module level state used by the widgets package: the
    shared Style instance, the default double-click rate (250 ms) and
    the pygame engine, including keyboard repeat (500 ms delay, 30 ms
    interval).
    """
    global GlobalStyle, DoubleClickRate
    GlobalStyle = Style()
    DoubleClickRate = 250
    pygameinit()
    key.set_repeat(500, 30)
def set_doubleclick_rate(rate):
    """base.set_doubleclick_rate (...) -> None

    Sets the maximum time to elapse between two clicks for a double-click.

    Sets the maximum time in milliseconds that may elapse between two
    click events (SIG_MOUSEDOWN, SIG_MOUSEUP) for them to be identified
    as a double-click. The default is 250 ms.

    Raises a TypeError, if the passed argument is not an integer
    greater than 0.
    """
    if type(rate) is not int or rate < 1:
        raise TypeError("rate must be a positive integer greater than 0")
    global DoubleClickRate
    DoubleClickRate = rate
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/base.py",
"copies": "1",
"size": "2710",
"license": "bsd-2-clause",
"hash": 1223751144590743000,
"line_mean": 37.7142857143,
"line_max": 78,
"alpha_frac": 0.7431734317,
"autogenerated": false,
"ratio": 4.329073482428115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008258810185033121,
"num_lines": 70
} |
"""Basic widget class, used as an abstract definition for widgets."""
# TODO: Add ControlCollection class to the components.
from pygame import sprite, Rect, Surface
from pygame import error as PygameError
from ocempgui.object import BaseObject
from ocempgui.access import IIndexable
from Style import WidgetStyle
from Constants import *
import base
class BaseWidget (BaseObject, sprite.Sprite):
"""BaseWidget () -> BaseWidget
A basic widget class for user interface elements.
The BaseWidget is the most basic widget class, from which any other
widget class should be inherited. It provides the most basic
attributes and methods, every widget needs.
The widget is a visible (or non-visible) element on the display,
which allows the user to interact with it (active or passive) in a
specific way. It has several methods and attributes to allow
developers to control this interaction and supports accessibility
through the ocempgui.access module.
The widget can be placed on the display by accessing the various
attributes of its 'rect' attribute directly. It exposes the following
pygame.Rect attributes:
top, left, bottom, right,
topleft, bottomleft, topright, bottomright,
midtop, midleft, midbottom, midright,
center, centerx, centery,
size, width, height
Except the last three ones, 'size', 'width' and 'height' any of those
can be assigned similarly to the pygame.Rect:
widget.top = 10
widget.center = (10, 10)
...
Note: This will only work for toplevel widgets as widgets are placed
relative to their parent. Thus the 'top' attribute value of a
widget, which is packed into another one, refers to its parents
coordinates. So if it is placed four pixels to the left on its
parent, its 'top' value will be 4, while the parent might be placed
at e.g. 100, 100.
You can get the absolute coordinates, the widget is placed on the
display, by using the rect_to_client() method.
To get the actual dimensions of the widget, it provides the
read-only 'width', 'height' and 'size' attributes.
if (widget.width > 50) or (widget.height > 50):
...
if widget.size == (50, 50):
...
To force a specific minimum size to occupy by the widget, the
'minsize' attribute or the respective set_minimum_size() method can
be used. The occupied area of the widget will not be smaller than
the size, but can grow bigger.
widget.minsize = 100, 50
widget.set_minimum_size (10, 33)
The counterpart of 'minsize' is the 'maxsize' attribute, which
defines the maximum size, the widget can grow to. It will never
exceed that size.
widget.maxsize = 200, 200
widget.set_maximum_size (100, 22)
The 'image' and 'rect' attributes are used and needed by the
pygame.sprite system. 'image' refers to the visible surface of the
widget, which will be blitted on the display. 'rect' is a copy of
the pygame.Rect object indicating the occupied area of the
widget. The rect denotes the relative position of the widget on its
parent (as explained above).
The 'index' attribute and set_index() method set the navigation
index position for the widget. It is highly recommended to set this
value in order to provide a better accessibility (e.g. for keyboard
navigation). The attribute can be used in ocempgui.access.IIndexable
implementations for example.
widget.index = 3
widget.set_index (0)
Widgets support a 'style' attribute and create_style() method, which
enable them to use different look than default one without the need
to override their draw() method. The 'style' attribute of a widget
usually defaults to a None value and can be set using the
create_style() method. This causes the widget internals to setup the
specific style for the widget and can be accessed through the
'style' attribute later on. A detailed documentation of the style
can be found in the Style class.
if not widget.style:
widget.create_style () # Setup the style internals first.
widget.style['font']['size'] = 18
widget.create_style ()['font']['name'] = Arial
Widgets can be in different states, which cause the widgets to have
a certain behaviour and/or look. Dependant on the widget, the
actions it supports and actions, which have taken place, the state
of the widget can change. The actual state of the widget can be
looked up via the 'state' attribute and is one of the STATE_TYPES
constants.
if widget.state == STATE_INSENSITIVE:
print 'The widget is currently insensitive and does not react.'
Any widget supports layered drawing through the 'depth' attribute.
The higher the depth is, the higher the layer on the z-axis will be,
on which the widget will be drawn. Widgets might use the flag to set
themselves on top or bottom of the display.
# The widget will be placed upon all widgets with a depth lower than 4.
widget.depth = 4
widget.set_depth (4)
Widgets should set the 'dirty' attribute to True, whenever an update
of the widget surface is necessary, which includes redrawing the
widget (resp. calling draw_bg() and draw()). In user code, 'dirty'
usually does not need to be modified manually, but for own widget
implementations it is necessary (e.g. if a Label text changed).
If the 'parent' attribute of the widget is set, the parent will be
notified automatically, that it has to update portions of its
surface.
# Force redrawing the widget on the next update cycle of the render
# group.
widget.dirty = True
Widgets support a focus mode, which denotes that the widget has the
current input and action focus. Setting the focus can be done via
the 'focus' attribute or the set_focus() method.
widget.focus = True
widget.set_focus (True)
'sensitive' is an attribute, which can block the widget's reaction
upon events temporarily. It also influences the look of the widget
by using other style values (see STATE_INSENSITIVE in the Style
class).
widget.sensitive = False
widget.set_sensitive (False)
Each widget supports transparency, which also includes all children
which are drawn on it. By setting the 'opacity' attribute you can
adjust the degree of transparency of the widget. The allowed values
are ranged between 0 for fully transparent and 255 for fully opaque.
widget.opacity = 100
widget.set_opacity (25)
Widgets allow parent-child relationships via the 'parent' attribute.
Parental relationships are useful for container classes, which can
contain widgets and need to be informed, when the widget is
destroyed, for example. Take a look the Bin and Container classes
for details about possible implementations.
Do NOT modify the 'parent' attribute value, if you do not know, what
might happen.
Widgets support locking themselves self temporarily using the lock()
method. This is extremely useful to avoid multiple update/draw
calls, when certain operations take place on it. To unlock the
widget, the unlock() method should be called, which causes it to
update itself instantly.
widget.lock () # Acquire lock.
widget.focus = False # Normally update() would be called here.
widget.sensitive = False # Normally update() would be called here.
widget.unlock () # Release lock and call update().
When using the lock() method in your own code, you have to ensure,
that you unlock() the widget as soon as you do not need the lock
anymore. The state of the lock on a widget can be queried using the
'locked' attribute:
if widget.locked:
print 'The widget is currently locked'
Widgets can consist of other widgets. To guarantee that all of them
will be added to the same event management system, set the same
state, etc., the 'controls' attribute exists. It is a collection to
and from which widgets can be attached or detached. Several methods
make use of this attribute by iterating over the attached widgets
and invoking their methods to put them into the same state, etc. as
the main widget.
widget.controls.append (sub_widget)
for sub in widget.controls:
...
Default action (invoked by activate()):
None, will raise an NotImplementedError
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
SIG_FOCUSED - Invoked, when the widget received the focus
(widget.focus=True).
SIG_ENTER - Invoked, when the input device enters the widget.
SIG_LEAVE - Invoked, when the input device leaves the wigdet.
SIG_DESTROYED - Invoked, when the widget is destroyed.
Attributes:
minsize - Guaranteed size of the widget.
maxsize - Counterpart to size and denotes the maximum size the widget.
is allowed to occupy. Defaults to None usually.
image - The visible surface of the widget.
index - Navigation index of the widget.
style - The style to use for drawing the widget.
state - The current state of the widget.
depth - The z-axis layer depth of the widget.
dirty - Indicates, that the widget needs to be updated.
focus - Indicates, that the widget has the current input focus.
sensitive - Indicates, if the user can interact with the widget.
parent - Slot for the creation of parent-child relationships.
controls - Collection of attached controls for complex widgets.
tooltip - The tool tip text to display for the widget.
opacity - The degree of transparency to apply (0-255, 0 for fully
transparent, 255 for fully opaque).
indexable - The ocempgui.access.IIndexable implementation to use for
the 'index' attribute support.
entered - Indicates, that an input device is currently over the widget
(e.g. the mouse cursor).
locked - Indicates, whether the widget is locked.
rect - The area occupied by the widget.
x, y, ... - The widget allows to reposition itself through the various
width, ... attributes offered by its rect attribute.
size
"""
def __init__ (self):
    """Initialize the widget's default state and attribute slots."""
    BaseObject.__init__ (self)
    sprite.Sprite.__init__ (self)
    # Guaranteed sizes for the widget, see also the minsize/maxsize
    # attributes and set_*_size () methods.
    self._minwidth = 0
    self._minheight = 0
    self._maxwidth = 0
    self._maxheight = 0
    self._indexable = None               # IIndexable the widget registers with.
    self._image = None                   # Visible surface, created by draw ().
    self._rect = Rect (0, 0, 0, 0)       # Currently occupied area.
    self._oldrect = Rect (0, 0, 0, 0)    # Previous area, used by update ().
    self._opacity = 255                  # Fully opaque by default.
    self._style = None                   # Lazily created by create_style ().
    self._index = 0                      # Tab/navigation index.
    self._state = STATE_NORMAL
    self._focus = False
    self._entered = False
    self._sensitive = True
    self._controls = []                  # Attached child control widgets.
    self._depth = 0                      # Z-axis layer depth.
    self._dirty = True                   # Needs a redraw on next update.
    self._lock = 0                       # Update suspension counter, see lock ().
    self._bg = None                      # Cached background surface.
    self.parent = None
    # Accessibility.
    self._tooltip = None
    # Signals, the widget listens to by default
    self._signals[SIG_FOCUSED] = []
    self._signals[SIG_ENTER] = []
    self._signals[SIG_LEAVE] = []
    self._signals[SIG_DESTROYED] = []
def _get_rect_attr (self, attr):
    """W._get_rect_attr (...) -> var

    Get the wanted attribute value from the underlying rect.
    """
    return getattr (self._rect, attr)
def _set_rect_attr (self, attr, value):
    """W._set_rect_attr (...) -> None

    Set a specific attribute value on the underlying rect.

    Raises an AttributeError if the attr argument is the width,
    height or size.
    """
    if attr in ("width", "height", "size"):
        # The width and height are protected!
        raise AttributeError ("%s attribute is read-only" % attr)
    # TODO: This is just a hack around wrong positioning in
    # containers.
    self._oldrect = self.rect
    setattr (self._rect, attr, value)
    if (self.parent != None):
        if not isinstance (self.parent, BaseWidget):
            self.update ()
    else:
        # No parent: keep the old-rect slot in sync ourselves.
        self._oldrect = self.rect
def initclass (cls):
    """B.initclass () -> None

    Class method to expose the attributes of the own self.rect attribute.

    For every non-callable, non-dunder attribute of Rect, a property is
    installed on the class that forwards to _get_rect_attr () /
    _set_rect_attr ().  The method usually is called in the __init__.py
    script of the module.
    """
    attributes = dir (Rect)
    for attr in attributes:
        if not attr.startswith ("__") and \
           not callable (getattr (Rect, attr)):
            # Bind 'attr' as a default argument so each property
            # closes over its own attribute name.
            def get_attr (self, attr=attr):
                return cls._get_rect_attr (self, attr)
            def set_attr (self, value, attr=attr):
                return cls._set_rect_attr (self, attr, value)
            prop = property (get_attr, set_attr)
            setattr (cls, attr, prop)
initclass = classmethod (initclass)
def _get_rect (self):
    """W._get_rect () -> pygame.Rect

    Get a copy of the widget's rect, so callers cannot mutate the
    internal one.
    """
    return Rect (self._rect)
# DEPRECATED
def set_position (self, x, y):
    """W.set_position (...) -> None

    DEPRECATED - use the 'topleft' attribute instead.
    """
    # Python 2 script-era warning; kept for backwards compatibility.
    print "*** Warning: set_position() is deprecated, use the topleft"
    print "    attribute instead."
    self._set_rect_attr ("topleft", (x, y))
def rect_to_client (self, rect=None):
    """W.rect_to_client (...) -> pygame.Rect

    Return the absolute coordinates a rect is located at.

    The widget's own 'rect' attribute is relative to its parent; this
    method walks up the parent chain and accumulates the offsets,
    yielding the absolute position (and occupied size) on the screen
    for the passed rect, or for the widget itself if no rect is given.
    """
    parent = self.parent
    if parent and isinstance (parent, BaseWidget):
        # Fold the passed rect into a copy of our own and recurse
        # upwards through the parent chain.
        own = self.rect
        if rect is not None:
            own.x += rect.x
            own.y += rect.y
            own.width = rect.width
            own.height = rect.height
        return parent.rect_to_client (own)
    if rect is not None:
        # Topmost widget reached: shift the rect by our own offset.
        rect.x += self.x
        rect.y += self.y
        return rect
    return self.rect
def set_minimum_size (self, width, height):
    """W.set_minimum_size (...) -> None

    Set the minimum size the widget is guaranteed to occupy.

    The widget may exceed this size at any time, but its width and
    height will never drop below these values.

    Raises a TypeError, if the passed arguments are not integers.
    Raises a ValueError, if the passed arguments are not positive.
    """
    if type (width) != int or type (height) != int:
        raise TypeError ("width and height must be positive integers")
    if width < 0 or height < 0:
        raise ValueError ("width and height must be positive integers")
    self._minwidth = width
    self._minheight = height
    self.dirty = True
# DEPRECATED
def set_size (self, width, height):
    """W.set_size (...) -> None

    DEPRECATED - use set_minimum_size () instead.
    """
    # Python 2 script-era warning; forwards to the replacement API.
    print "*** Warning: set_size() is deprecated, use set_minimum_size()."
    self.set_minimum_size (width, height)
def set_maximum_size (self, width, height):
    """W.set_maximum_size (...) -> None

    Set the maximum size the widget is allowed to occupy.

    This is the counterpart to the set_minimum_size () method.

    Raises a TypeError, if the passed arguments are not integers.
    Raises a ValueError, if the passed arguments are not positive.
    """
    if type (width) != int or type (height) != int:
        raise TypeError ("width and height must be positive integers")
    if width < 0 or height < 0:
        raise ValueError ("width and height must be positive integers")
    self._maxwidth = width
    self._maxheight = height
    self.dirty = True
def check_sizes (self, width, height):
    """W.check_sizes (...) -> int, int

    Clamp the passed width and height to the widget's size limits.

    Values below the minimum size are raised to it and values above
    the maximum size are lowered to it; a limit of 0 means the bound
    is not set. Values within the limits are returned unchanged.
    """
    def clamp (value, lower, upper):
        # A bound of 0 means 'unbounded'; the lower bound wins.
        if lower != 0 and value < lower:
            return lower
        if upper != 0 and value > upper:
            return upper
        return value
    minwidth, minheight = self.minsize
    maxwidth, maxheight = self.maxsize
    return clamp (width, minwidth, maxwidth), \
           clamp (height, minheight, maxheight)
def set_index (self, index):
    """W.set_index (...) -> None

    Set the tab (navigation) index of the widget.

    The index position can be used by ocempgui.access.IIndexable
    implementations to allow easy navigation access and activation
    for the widgets.

    Raises a TypeError, if the passed argument is not a positive
    integer.
    """
    valid = type (index) == int and index >= 0
    if not valid:
        raise TypeError ("index must be a positive integer")
    self._index = index
def set_depth (self, depth):
    """W.set_depth (...) -> None

    Set the z-axis layer depth for the widget.

    This will need a renderer, which makes use of layers such as the
    Renderer class. By default, the higher the depth value, the higher
    the drawing layer of the widget is: a widget with a depth of 1 is
    placed upon widgets with a depth of 0. The depth is propagated to
    all attached controls.

    Raises a TypeError, if the passed argument is not an integer.
    """
    if type (depth) != int:
        raise TypeError ("depth must be an integer")
    self.lock ()
    old = self._depth
    self._depth = depth
    if isinstance (self.parent, BaseWidget):
        try:
            self.parent.update_layer (old, self)
        except Exception:
            # Best effort only: the parent may not track layers.
            # (Was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt.)
            pass
    for c in self._controls:
        c.set_depth (depth)
    self.unlock ()
def set_dirty (self, dirty, update=True):
    """W.set_dirty (...) -> None

    Mark the widget as dirty, so that it will be redrawn.

    When 'dirty' is set and 'update' is true, an update of the widget
    is triggered immediately.
    """
    self._dirty = dirty
    if not dirty:
        return
    if update:
        self.update ()
def set_event_manager (self, manager):
    """W.set_event_manager (...) -> None

    Set the event manager of the widget and its controls.

    Adds the widget to an event manager and causes each attached
    control to be added to the same one, too.
    """
    BaseObject.set_event_manager (self, manager)
    for control in self.controls:
        control.set_event_manager (manager)
def set_indexable (self, indexable):
    """W.set_indexable (...) -> None

    Set the IIndexable for the widget.

    The widget will invoke the add_index () method for itself on the
    IIndexable and hand the IIndexable on to all attached controls.

    Raises a TypeError, if the passed argument does not inherit from
    IIndexable.
    """
    if indexable and not isinstance (indexable, IIndexable):
        raise TypeError ("indexable must inherit from IIndexable")
    if self._indexable == indexable:
        return                        # Unchanged - nothing to do.
    # Deregister from the old IIndexable before switching over.
    if self._indexable != None:
        self._indexable.remove_index (self)
    self._indexable = indexable
    if indexable != None:
        indexable.add_index (self)
    for ctrl in self.controls:
        ctrl.set_indexable (indexable)
# DEPRECATED
def get_style (self):
    """W.get_style () -> WidgetStyle

    DEPRECATED - use the create_style () method instead.
    """
    # Python 2 script-era warning; forwards to the replacement API.
    print "*** Warning: get_style() is deprecated, use the create_style()"
    print "    method instead."
    return self.create_style ()
def create_style (self):
    """W.create_style () -> WidgetStyle

    Create and return the instance-specific style for the widget.

    On the first call a private copy of the class style is created
    from the global style database, so later modifications only affect
    this widget. Subsequent calls return the same WidgetStyle object.

    More information about how a style looks like and how to modify
    it can be found in the Style class documentation.
    """
    if self._style:
        return self._style
    # First access: copy the class-wide style and hook it up, so any
    # value change marks this widget dirty.
    self._style = base.GlobalStyle.copy_style (self.__class__)
    self._style.set_value_changed (lambda: self.set_dirty (True))
    return self._style
def set_style (self, style):
    """W.set_style (...) -> None

    Set the style of the widget.

    Sets the style of the widget to the passed style dictionary.
    This method currently does not perform any checks, whether the
    passed dictionary matches the criteria of the Style class.

    Raises a TypeError, if the passed argument is not a WidgetStyle
    object.
    """
    if not isinstance (style, WidgetStyle):
        raise TypeError ("style must be a WidgetStyle")
    self._style = style
    # Ensure the style notifies us on changes, so we repaint.
    if not self._style.get_value_changed ():
        self._style.set_value_changed (lambda: self.set_dirty (True))
    self.dirty = True
def set_focus (self, focus=True):
    """W.set_focus (...) -> bool

    Set or remove the input and action focus of the widget.

    Returns True upon success or False, if the focus could not be set
    (e.g. because the widget is insensitive).
    """
    if not self.sensitive:
        return False
    if focus and not self._focus:
        # Gaining focus: announce it to interested listeners.
        self._focus = True
        self.emit (SIG_FOCUSED, self)
        self.dirty = True
        self.run_signal_handlers (SIG_FOCUSED)
    elif not focus and self._focus:
        # Losing focus only needs a redraw.
        self._focus = False
        self.dirty = True
    return True
def set_entered (self, entered):
    """W.set_entered (...) -> None

    Set or unset the widget's 'entered' mode.

    Entering switches the state to STATE_ENTERED and announces the
    SIG_ENTER event; leaving resets the state to STATE_NORMAL and runs
    the SIG_LEAVE handlers.
    """
    if entered and not self._entered:
        self._entered = True
        self.state = STATE_ENTERED
        self.emit (SIG_ENTER, self)
        self.run_signal_handlers (SIG_ENTER)
    elif not entered and self._entered:
        self._entered = False
        self.state = STATE_NORMAL
        self.run_signal_handlers (SIG_LEAVE)
def set_sensitive (self, sensitive=True):
    """W.set_sensitive (...) -> None

    Set the sensitivity of the widget.

    In a sensitive state (the default), widgets can react upon user
    interaction while they will not do so in an insensitive state.

    To support the visibility of this, the widget style should support
    the STATE_INSENSITIVE flag, while inheriting widgets should check
    for the sensitivity to enable or disable the event mechanisms.
    """
    if sensitive != self._sensitive:
        if sensitive:
            self._sensitive = True
            self.state = STATE_NORMAL
        else:
            self._sensitive = False
            self.state = STATE_INSENSITIVE
    # Propagate to the attached controls in any case.
    for control in self.controls:
        control.set_sensitive (sensitive)
def set_state (self, state):
    """W.set_state (...) -> None

    Set the state of the widget.

    The state of the widget is mainly used for the visible or
    non-visible appearance of the widget, so that the user can
    determine the state of the widget easier.
    Usually this method should not be invoked by user code.

    Raises a ValueError, if the passed argument is not a value of
    the STATE_TYPES tuple.
    """
    if state not in STATE_TYPES:
        raise ValueError ("state must be a value from STATE_TYPES")
    # Trigger a redraw only on an actual change.
    if self._state != state:
        self._state = state
        self.dirty = True
def set_opacity (self, opacity):
    """W.set_opacity (...) -> None

    Set the opacity (alpha blending value) of the widget.

    The new value is applied by the unconditionally triggered
    update ().

    Raises a TypeError, if the passed argument is not an integer.
    """
    if type (opacity) != int:
        raise TypeError ("opacity must be an integer")
    self._opacity = opacity
    # NOTE: the previous implementation computed a 'dirty' comparison
    # flag here but never used it - that dead code has been removed;
    # the unconditional update () is kept.
    self.update ()
# DEPRECATED
def set_event_area (self, area):
    """W.set_event_area (...) -> None

    DEPRECATED - this is no longer used.
    """
    # Kept only to warn old callers; the 'area' argument is ignored.
    print "*** Warning: set_event_area() is no longer used!"
def lock (self):
    """W.lock () -> None

    Acquire a lock on the widget to suspend its updating methods.

    Locks are counted; each call must be balanced by an unlock ().
    """
    self._lock = self._lock + 1
def unlock (self):
    """W.unlock () -> None

    Release a previously set lock on the widget and update it
    instantly once the last lock is gone.
    """
    if self._lock <= 0:
        return
    self._lock -= 1
    if self._lock == 0:
        self.update ()
def set_tooltip (self, tooltip):
    """W.set_tooltip (...) -> None

    Set the tooltip information for the widget.

    Raises a TypeError, if the passed argument is not a string or
    unicode.
    """
    # Python 2: accepts both byte strings and unicode text.
    if type (tooltip) not in (str, unicode):
        raise TypeError ("text must be a string or unicode")
    self._tooltip = tooltip
def activate (self):
    """W.activate () -> None

    Invoke the default action of the widget.

    Always raises a NotImplementedError here; inherited widgets have
    to implement this method.
    """
    raise NotImplementedError
def activate_mnemonic (self, mnemonic):
    """W.activate_mnemonic (...) -> bool

    Activate the widget through the set mnemonic.

    The base implementation always returns False; widgets which need
    explicit mnemonic support have to override this method and return
    True upon successful activation.
    """
    return False
def draw_bg (self):
    """W.draw_bg () -> Surface

    Create the visible background surface of the widget and return it
    to the caller.

    Always raises a NotImplementedError here; inherited widgets have
    to implement this method.
    """
    raise NotImplementedError
def draw (self):
    """W.draw () -> None

    Draw the widget surface.

    Creates the visible surface of the widget via draw_bg () and
    updates its internals (cached background, blit surface, rect).
    """
    # Original surface.
    self._bg = self.draw_bg ()
    try:
        # Match the display pixel format for faster blits; this fails
        # if no display mode has been set yet.
        self._bg = self._bg.convert ()
    except PygameError: pass
    rect = self._bg.get_rect ()
    # Current surface for blits.
    self._image = Surface ((rect.width, rect.height))
    self._image.blit (self._bg, (0, 0))
    # Keep the current position - only the size may have changed.
    topleft = self._rect.topleft
    self._rect = rect
    self._rect.topleft = topleft
    self._oldrect = self.rect
def notify (self, event):
    """W.notify (...) -> None

    Notify the widget about an event.

    Note: Widgets, which are not visible (not shown) or are in a
    specific state (e.g. STATE_INSENSITIVE), usually do not receive
    any events. But dependent on the widget, this behaviour can be
    different, thus checking the visibility depends on the widget
    and implementation.
    """
    if not self.sensitive:
        return
    # Another widget received the focus/pointer - withdraw our own.
    if (event.signal == SIG_FOCUSED) and (event.data != self):
        self.focus = False
    elif (event.signal == SIG_ENTER) and (event.data != self):
        self.entered = False
def update (self, **kwargs):
    """W.update (...) -> None

    Update the widget.

    Updates the widget and causes its parent to update itself on
    demand.

    Supported keyword arguments:
    children - dict mapping child widgets to their dirty rects.
    resize   - True, if the size of a child changed.
    """
    if self.locked:
        return                  # Updates are suspended, see lock ().
    oldrect = Rect (self._oldrect)
    resize = kwargs.get ("resize", False)
    if not self.dirty:
        # The widget itself is unchanged - only repaint the areas the
        # passed children have dirtied.
        children = kwargs.get ("children", {})
        blit = self.image.blit
        items = children.items ()
        # Clean up the dirty areas on the widget.
        for child, rect in items:
            blit (self._bg, rect, rect)
        # Blit the changes.
        for child, rect in items:
            blit (child.image, child.rect)
        self._image.set_alpha (self.opacity)
        # If a parent's available, reassign the child rects, so that
        # they point to the absolute position on the widget and build
        # one matching them all for an update.
        if self.parent:
            vals = children.values ()
            rect = oldrect
            if len (vals) != 0:
                rect = vals[0]
                x = self.x
                y = self.y
                for r in vals:
                    r.x += x
                    r.y += y
                rect.unionall (vals[1:])
            self.parent.update (children={ self : rect }, resize=resize)
        self._lock = max (self._lock - 1, 0)
        return
    # Acquire lock to prevent recursion on drawing.
    self._lock += 1
    # Draw the widget.
    self.draw ()
    self._image.set_alpha (self.opacity)
    if self.parent != None:
        # The own size changed - tell the parent to resize as well.
        resize = oldrect != self._rect
        self.parent.update (children={ self : oldrect }, resize=resize)
    # Release previously set lock.
    self._lock = max (self._lock - 1, 0)
    self.dirty = False
def destroy (self):
    """W.destroy () -> None

    Destroy the widget and remove it from its event system.

    Causes the widget to destroy itself as well as its controls and
    removes all from the connected event manager and sprite groups
    using the sprite.kill () method.

    Raises an AttributeError, if the widget still has a parent
    relationship.
    """
    if isinstance (self.parent, BaseWidget):
        raise AttributeError ("widget still has a parent relationship")
    self.run_signal_handlers (SIG_DESTROYED)
    self.emit (SIG_DESTROYED, self)
    # Clear the associated controls.
    _pop = self._controls.pop
    while len (self._controls) > 0:
        control = _pop ()
        control.parent = None
        control.destroy ()
        del control
    del self._controls
    # Deregister from the IIndexable; clear the slot first so the
    # callback cannot recurse into this widget.
    if self._indexable != None:
        index = self._indexable
        self._indexable = None
        index.remove_index (self)
    if self._manager != None:
        self._manager.remove_object (self)
    BaseObject.destroy (self) # Clear BaseObject internals.
    self.kill () # Clear Sprite
    #del self.parent
    del self._indexable
    del self._bg
    del self._style
    del self._image
    del self._rect
    del self._oldrect
    del self
# Class-level property definitions exposing the private slots through
# the set_* methods documented above.
# NOTE: the 'position', 'minsize' and 'maxsize' setters use Python 2
# tuple parameter unpacking in their lambdas (removed in Python 3).
# DEPRECATED
position = property (lambda self: self.topleft,
                     lambda self, (x, y): self.set_position (x, y),
                     doc = "The position of the topleft corner.")
eventarea = property (lambda self: self.rect_to_client (),
                      lambda self, var: self.set_event_area (var),
                      doc = "The area, which gets the events.")
minsize = property (lambda self: (self._minwidth, self._minheight),
                    lambda self, (w, h): self.set_minimum_size (w, h),
                    doc = "The guaranteed size of the widget.")
maxsize = property (lambda self: (self._maxwidth, self._maxheight),
                    lambda self, (w, h): self.set_maximum_size (w, h),
                    doc = "The maximum size to occupy by the widget.")
image = property (lambda self: self._image,
                  doc = "The visible surface of the widget.")
rect = property (lambda self: self._get_rect (),
                 doc = "The area occupied by the widget.")
index = property (lambda self: self._index,
                  lambda self, var: self.set_index (var),
                  doc = "The tab index position of the widget.")
style = property (lambda self: self._style,
                  lambda self, var: self.set_style (var),
                  doc = "The style of the widget.")
state = property (lambda self: self._state,
                  lambda self, var: self.set_state (var),
                  doc = "The current state of the widget.")
focus = property (lambda self: self._focus,
                  lambda self, var: self.set_focus (var),
                  doc = "The focus of the widget.")
sensitive = property (lambda self: self._sensitive,
                      lambda self, var: self.set_sensitive (var),
                      doc = "The sensitivity of the widget.")
dirty = property (lambda self: self._dirty,
                  lambda self, var: self.set_dirty (var),
                  doc = """Indicates, whether the widget need to be
                  redrawn.""")
controls = property (lambda self: self._controls,
                     doc = "Widgets associated with the widget.")
depth = property (lambda self: self._depth,
                  lambda self, var: self.set_depth (var),
                  doc = "The z-axis layer depth of the widget.")
tooltip = property (lambda self: self._tooltip,
                    lambda self, var: self.set_tooltip (var),
                    doc = "The tool tip text to display for the widget.")
locked = property (lambda self: self._lock > 0,
                   doc = "Indicates, whether the widget is locked.")
indexable = property (lambda self: self._indexable,
                      lambda self, var: self.set_indexable (var),
                      doc = "The IIndexable, the widget is attached to.")
entered = property (lambda self: self._entered,
                    lambda self, var: self.set_entered (var),
                    doc = "Indicates, whether the widget is entered.")
opacity = property (lambda self: self._opacity,
                    lambda self, var: self.set_opacity (var),
                    doc = "The opacity of the widget.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/BaseWidget.py",
"copies": "1",
"size": "36876",
"license": "bsd-2-clause",
"hash": -1229466538969074200,
"line_mean": 36.5519348269,
"line_max": 78,
"alpha_frac": 0.6052174856,
"autogenerated": false,
"ratio": 4.498719043552519,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5603936529152519,
"avg_score": null,
"num_lines": null
} |
# $Id: benchmark.py 1757 2004-03-28 17:21:25Z fredrik $
# simple elementtree benchmark program
from elementtree import XMLTreeBuilder, SimpleXMLTreeBuilder
from elementtree import SgmlopXMLTreeBuilder
from xml.dom import minidom
import sys, time
# Benchmark target: first command line argument, or the bundled
# hamlet.xml document by default.
if len(sys.argv) > 1:
    file = sys.argv[1]
else:
    file = "hamlet.xml"
def benchmark(file, builder_module):
source = open(file, "rb")
t0 = time.time()
parser = builder_module.TreeBuilder()
while 1:
data = source.read(32768)
if not data:
break
parser.feed(data)
tree = parser.close()
t1 = time.time()
print "%s: %d nodes read in %.3f seconds" % (
builder_module.__name__, len(tree.getiterator()), t1-t0
)
raw_input("press return to continue...")
del tree
def benchmark_minidom(file):
    """Parse 'file' with xml.dom.minidom and report the elapsed time."""
    t0 = time.time()
    dom = minidom.parse(file)
    t1 = time.time()
    print "minidom tree read in %.3f seconds" % (t1-t0)
    raw_input("press return to continue...")
    # Drop the DOM tree to free memory.
    del dom
# Run each tree builder against the same document.
# (Python 2 script: 'except RuntimeError, v' syntax.)
benchmark(file, XMLTreeBuilder)
benchmark(file, SimpleXMLTreeBuilder) # use xmllib
try:
    benchmark(file, SgmlopXMLTreeBuilder) # use sgmlop
except RuntimeError, v:
    print "=== SgmlopXMLTreeBuilder not available (%s)" % v
benchmark_minidom(file)
| {
"repo_name": "yongshengwang/builthue",
"path": "desktop/core/ext-py/elementtree/benchmark.py",
"copies": "45",
"size": "1259",
"license": "apache-2.0",
"hash": 7933153415694996000,
"line_mean": 26.3695652174,
"line_max": 63,
"alpha_frac": 0.6648133439,
"autogenerated": false,
"ratio": 3.46831955922865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: benchmark.py 2923 2006-11-19 08:05:45Z fredrik $
# simple elementtree benchmark program
from elementtree import ElementTree, XMLTreeBuilder
try:
import cElementTree
except ImportError:
try:
from xml.etree import cElementTree
except ImportError:
cElementTree = None
try:
from elementtree import SimpleXMLTreeBuilder # xmllib
except ImportError:
SimpleXMLTreeBuilder = None
try:
from elementtree import SgmlopXMLTreeBuilder # sgmlop
except ImportError:
SgmlopXMLTreeBuilder = None
try:
from xml.dom import minidom # pyexpat+minidom
except ImportError:
minidom = None
import sys, time
# Benchmark target: first command line argument, or the bundled
# hamlet.xml document by default.
if len(sys.argv) > 1:
    file = sys.argv[1]
else:
    file = "hamlet.xml"
def benchmark(file, builder_module):
source = open(file, "rb")
t0 = time.time()
parser = builder_module.TreeBuilder()
while 1:
data = source.read(32768)
if not data:
break
parser.feed(data)
tree = parser.close()
t1 = time.time()
print "%s: %d nodes read in %.3f seconds" % (
builder_module.__name__, len(tree.getiterator()), t1-t0
)
raw_input("press return to continue...")
del tree
def benchmark_parse(file, driver):
    """Parse 'file' via the driver module's parse() and report timing."""
    t0 = time.time()
    tree = driver.parse(file)
    t1 = time.time()
    print driver.__name__ + ".parse done in %.3f seconds" % (t1-t0)
    raw_input("press return to continue...")
    # Drop the tree before the next benchmark to free memory.
    del tree
def benchmark_minidom(file):
    """Parse 'file' with xml.dom.minidom and report the elapsed time."""
    t0 = time.time()
    dom = minidom.parse(file)
    t1 = time.time()
    print "minidom tree read in %.3f seconds" % (t1-t0)
    raw_input("press return to continue...")
    # Drop the DOM tree to free memory.
    del dom
# Run the benchmarks with whichever parsers are available; the builder
# based tests are skipped on IronPython ("cli").
# (Python 2 script: 'except RuntimeError, v' syntax.)
benchmark_parse(file, ElementTree)
if cElementTree:
    benchmark_parse(file, cElementTree)
if sys.platform != "cli":
    benchmark(file, XMLTreeBuilder)
    benchmark(file, SimpleXMLTreeBuilder) # use xmllib
    try:
        benchmark(file, SgmlopXMLTreeBuilder) # use sgmlop
    except RuntimeError, v:
        print "=== SgmlopXMLTreeBuilder not available (%s)" % v
if minidom:
    benchmark_minidom(file)
else:
    print "=== minidom not available"
| {
"repo_name": "prats226/python-amazon-product-api-0.2.8",
"path": "tests/build/elementtree/benchmark.py",
"copies": "5",
"size": "2170",
"license": "bsd-3-clause",
"hash": -8944662118714484000,
"line_mean": 25.4683544304,
"line_max": 67,
"alpha_frac": 0.6382488479,
"autogenerated": false,
"ratio": 3.7030716723549486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6841320520254949,
"avg_score": null,
"num_lines": null
} |
# $Id: bgp.py 52 2008-08-25 22:22:34Z jon.oberheide $
"""Border Gateway Protocol."""
import dpkt
import struct, socket
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types (RFC 4271, section 4.1)
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types (RFC 4271, section 5; 14/15 from RFC 2858)
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types (Python 2 'L' long literals)
NO_EXPORT = 0xffffff01L
NO_ADVERTISE = 0xffffff02L
NO_EXPORT_SUBCONFED = 0xffffff03L
NO_PEER = 0xffffff04L
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes (RFC 4486)
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
# BGP message header (RFC 4271, section 4.1): 16-byte marker, 2-byte
# total length, 1-byte type. The marker MUST be set to all ones on the
# wire; the previous default of '\x01' * 16 serialized invalid messages
# for freshly built BGP() packets. Parsing is unaffected.
__hdr__ = (
    ('marker', '16s', '\xff' * 16),
    ('len', 'H', 0),
    ('type', 'B', OPEN)
    )
def unpack(self, buf):
    """Parse the BGP header and dispatch the payload by message type.

    The payload is truncated to the header-declared length and, for
    known types, replaced by the matching message object, which is
    also exposed as a named attribute (self.open, self.update, ...).
    """
    dpkt.Packet.unpack(self, buf)
    self.data = self.data[:self.len - self.__hdr_len__]
    if self.type == OPEN:
        self.data = self.open = self.Open(self.data)
    elif self.type == UPDATE:
        self.data = self.update = self.Update(self.data)
    elif self.type == NOTIFICATION:
        # Expose the correctly spelled attribute, but keep the
        # historical misspelled 'notifiation' alias for backward
        # compatibility with existing callers.
        self.data = self.notifiation = self.Notification(self.data)
        self.notification = self.data
    elif self.type == KEEPALIVE:
        self.data = self.keepalive = self.Keepalive(self.data)
    elif self.type == ROUTE_REFRESH:
        self.data = self.route_refresh = self.RouteRefresh(self.data)
class Open(dpkt.Packet):
    """BGP OPEN message (RFC 4271, section 4.2)."""
    __hdr__ = (
        ('v', 'B', 4),
        ('asn', 'H', 0),
        ('holdtime', 'H', 0),
        ('identifier', 'I', 0),
        ('param_len', 'B', 0)
        )
    __hdr_defaults__ = {
        'parameters': []
        }
    def unpack(self, buf):
        """Parse the fixed header, then param_len bytes of parameters."""
        dpkt.Packet.unpack(self, buf)
        l = []
        plen = self.param_len
        while plen > 0:
            param = self.Parameter(self.data)
            self.data = self.data[len(param):]
            plen -= len(param)
            l.append(param)
        self.data = self.parameters = l
    def __len__(self):
        return self.__hdr_len__ + \
               sum(map(len, self.parameters))
    def __str__(self):
        # Recompute param_len from the serialized parameters, so the
        # header stays consistent. (Python 2: str() yields bytes.)
        params = ''.join(map(str, self.parameters))
        self.param_len = len(params)
        return self.pack_hdr() + params
    class Parameter(dpkt.Packet):
        """Optional parameter TLV within an OPEN message."""
        __hdr__ = (
            ('type', 'B', 0),
            ('len', 'B', 0)
            )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            self.data = self.data[:self.len]
            if self.type == AUTHENTICATION:
                self.data = self.authentication = self.Authentication(self.data)
            elif self.type == CAPABILITY:
                self.data = self.capability = self.Capability(self.data)
        class Authentication(dpkt.Packet):
            """Deprecated authentication parameter."""
            __hdr__ = (
                ('code', 'B', 0),
                )
        class Capability(dpkt.Packet):
            """Capability advertisement (RFC 3392)."""
            __hdr__ = (
                ('code', 'B', 0),
                ('len', 'B', 0)
                )
            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                self.data = self.data[:self.len]
class Update(dpkt.Packet):
# UPDATE has no fixed header; the three sections default to empty.
__hdr_defaults__ = {
    'withdrawn': [],
    'attributes': [],
    'announced': []
    }
def unpack(self, buf):
    """Parse withdrawn routes, path attributes and announced routes.

    UPDATE layout (RFC 4271, section 4.3): a 2-byte withdrawn-routes
    length, the withdrawn prefixes, a 2-byte total path attribute
    length, the attributes, and the announced prefixes (NLRI) in the
    remainder of the message.
    """
    self.data = buf
    # Withdrawn Routes
    wlen = struct.unpack('>H', self.data[:2])[0]
    self.data = self.data[2:]
    l = []
    while wlen > 0:
        route = RouteIPV4(self.data)
        self.data = self.data[len(route):]
        wlen -= len(route)
        l.append(route)
    self.withdrawn = l
    # Path Attributes
    plen = struct.unpack('>H', self.data[:2])[0]
    self.data = self.data[2:]
    l = []
    while plen > 0:
        attr = self.Attribute(self.data)
        self.data = self.data[len(attr):]
        plen -= len(attr)
        l.append(attr)
    self.attributes = l
    # Announced Routes
    l = []
    while self.data:
        route = RouteIPV4(self.data)
        self.data = self.data[len(route):]
        l.append(route)
    self.announced = l
def __len__(self):
    # Two 2-byte length fields plus the three variable sections.
    return 2 + sum(map(len, self.withdrawn)) + \
           2 + sum(map(len, self.attributes)) + \
           sum(map(len, self.announced))
def __str__(self):
    # Serialize: withdrawn length + routes, attribute length +
    # attributes, then the announced NLRI. (Python 2: bytes.)
    return struct.pack('>H', sum(map(len, self.withdrawn))) + \
           ''.join(map(str, self.withdrawn)) + \
           struct.pack('>H', sum(map(len, self.attributes))) + \
           ''.join(map(str, self.attributes)) + \
           ''.join(map(str, self.announced))
class Attribute(dpkt.Packet):
# Fixed part of a path attribute: flags byte + type byte. The length
# field follows separately and is 1 or 2 bytes, depending on the
# extended-length flag (handled in unpack/__str__).
__hdr__ = (
    ('flags', 'B', 0),
    ('type', 'B', 0)
    )
# Flag-bit accessors (RFC 4271, section 4.3): optional (bit 7),
# transitive (bit 6), partial (bit 5), extended length (bit 4).
def _get_o(self):
    return (self.flags >> 7) & 0x1
def _set_o(self, o):
    self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
optional = property(_get_o, _set_o)
def _get_t(self):
    return (self.flags >> 6) & 0x1
def _set_t(self, t):
    self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
transitive = property(_get_t, _set_t)
def _get_p(self):
    return (self.flags >> 5) & 0x1
def _set_p(self, p):
    self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
partial = property(_get_p, _set_p)
def _get_e(self):
    return (self.flags >> 4) & 0x1
def _set_e(self, e):
    self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
extended_length = property(_get_e, _set_e)
def unpack(self, buf):
    """Parse flags/type, the 1- or 2-byte length, then the typed value."""
    dpkt.Packet.unpack(self, buf)
    if self.extended_length:
        self.len = struct.unpack('>H', self.data[:2])[0]
        self.data = self.data[2:]
    else:
        self.len = struct.unpack('B', self.data[:1])[0]
        self.data = self.data[1:]
    self.data = self.data[:self.len]
    # Dispatch on the attribute type; known types are also exposed
    # as named attributes (self.origin, self.as_path, ...).
    if self.type == ORIGIN:
        self.data = self.origin = self.Origin(self.data)
    elif self.type == AS_PATH:
        self.data = self.as_path = self.ASPath(self.data)
    elif self.type == NEXT_HOP:
        self.data = self.next_hop = self.NextHop(self.data)
    elif self.type == MULTI_EXIT_DISC:
        self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
    elif self.type == LOCAL_PREF:
        self.data = self.local_pref = self.LocalPref(self.data)
    elif self.type == ATOMIC_AGGREGATE:
        self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
    elif self.type == AGGREGATOR:
        self.data = self.aggregator = self.Aggregator(self.data)
    elif self.type == COMMUNITIES:
        self.data = self.communities = self.Communities(self.data)
    elif self.type == ORIGINATOR_ID:
        self.data = self.originator_id = self.OriginatorID(self.data)
    elif self.type == CLUSTER_LIST:
        self.data = self.cluster_list = self.ClusterList(self.data)
    elif self.type == MP_REACH_NLRI:
        self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
    elif self.type == MP_UNREACH_NLRI:
        self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
def __len__(self):
    # The length field itself takes 2 bytes when the extended-length
    # flag is set, 1 byte otherwise.
    if self.extended_length:
        attr_len = 2
    else:
        attr_len = 1
    return self.__hdr_len__ + \
           attr_len + \
           len(self.data)
def __str__(self):
    # Serialize flags/type, the correctly sized length field and the
    # attribute value. (Python 2: str() yields bytes.)
    if self.extended_length:
        attr_len_str = struct.pack('>H', self.len)
    else:
        attr_len_str = struct.pack('B', self.len)
    return self.pack_hdr() + \
           attr_len_str + \
           str(self.data)
class Origin(dpkt.Packet):
    """ORIGIN attribute: one of ORIGIN_IGP, ORIGIN_EGP, INCOMPLETE."""
    __hdr__ = (
        ('type', 'B', ORIGIN_IGP),
        )
class ASPath(dpkt.Packet):
    """AS_PATH attribute: a sequence of path segments."""
    __hdr_defaults__ = {
        'segments': []
        }
    def unpack(self, buf):
        self.data = buf
        l = []
        while self.data:
            seg = self.ASPathSegment(self.data)
            self.data = self.data[len(seg):]
            l.append(seg)
        self.data = self.segments = l
    def __len__(self):
        return sum(map(len, self.data))
    def __str__(self):
        return ''.join(map(str, self.data))
    class ASPathSegment(dpkt.Packet):
        """One path segment: type, AS count, then the AS numbers.

        NOTE(review): only 2-byte AS numbers are handled here; 4-byte
        AS numbers (RFC 4893) are not supported.
        """
        __hdr__ = (
            ('type', 'B', 0),
            ('len', 'B', 0)
            )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            l = []
            for i in range(self.len):
                AS = struct.unpack('>H', self.data[:2])[0]
                self.data = self.data[2:]
                l.append(AS)
            self.data = self.path = l
        def __len__(self):
            return self.__hdr_len__ + \
                   2 * len(self.path)
        def __str__(self):
            as_str = ''
            for AS in self.path:
                as_str += struct.pack('>H', AS)
            return self.pack_hdr() + \
                   as_str
class NextHop(dpkt.Packet):
    """NEXT_HOP attribute: IPv4 address of the next hop router."""
    __hdr__ = (
        ('ip', 'I', 0),
        )
            class MultiExitDisc(dpkt.Packet):
                # MULTI_EXIT_DISC attribute: 32-bit metric.
                __hdr__ = (
                    ('value', 'I', 0),
                )
            class LocalPref(dpkt.Packet):
                # LOCAL_PREF attribute: 32-bit degree of preference.
                __hdr__ = (
                    ('value', 'I', 0),
                )
            class AtomicAggregate(dpkt.Packet):
                # ATOMIC_AGGREGATE attribute: presence-only flag, empty body.
                def unpack(self, buf):
                    pass
                def __len__(self):
                    return 0
                def __str__(self):
                    return ''
            class Aggregator(dpkt.Packet):
                # AGGREGATOR attribute: 2-byte AS number plus 32-bit IPv4
                # address of the aggregating speaker.
                __hdr__ = (
                    ('asn', 'H', 0),
                    ('ip', 'I', 0)
                )
            class Communities(dpkt.Packet):
                # COMMUNITIES attribute: flat sequence of 4-byte values.
                __hdr_defaults__ = {
                    'list': []
                }
                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        val = struct.unpack('>I', self.data[:4])[0]
                        # Values in 0x0000xxxx and 0xffffxxxx are reserved
                        # ranges and are kept as opaque ReservedCommunity.
                        if (val >= 0x00000000L and val <= 0x0000ffffL) or \
                           (val >= 0xffff0000L and val <= 0xffffffffL):
                            comm = self.ReservedCommunity(self.data[:4])
                        else:
                            comm = self.Community(self.data[:4])
                        self.data = self.data[len(comm):]
                        l.append(comm)
                    self.data = self.list = l
                def __len__(self):
                    return sum(map(len, self.data))
                def __str__(self):
                    return ''.join(map(str, self.data))
                class Community(dpkt.Packet):
                    # Ordinary community: AS number + locally defined value.
                    __hdr__ = (
                        ('asn', 'H', 0),
                        ('value', 'H', 0)
                    )
                class ReservedCommunity(dpkt.Packet):
                    # Reserved community: single opaque 32-bit value.
                    __hdr__ = (
                        ('value', 'I', 0),
                    )
            class OriginatorID(dpkt.Packet):
                # ORIGINATOR_ID attribute: 32-bit router ID.
                __hdr__ = (
                    ('value', 'I', 0),
                )
class ClusterList(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
id = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l.append(id)
self.data = self.list = l
def __len__(self):
return 4 * len(self.list)
def __str__(self):
cluster_str = ''
for val in self.list:
cluster_str += struct.pack('>I', val)
return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.next_hop = self.data[:nlen]
self.data = self.data[nlen:]
# SNPAs
l = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l.append(snpa)
self.snpas = l
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Announced Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.announced = l
def __len__(self):
return self.__hdr_len__ + \
1 + len(self.next_hop) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __str__(self):
return self.pack_hdr() + \
struct.pack('B', len(self.next_hop)) + \
str(self.next_hop) + \
struct.pack('B', len(self.snpas)) + \
''.join(map(str, self.snpas)) + \
''.join(map(str, self.announced))
class SNPA:
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:(self.len + 1) / 2]
            class MPUnreachNLRI(dpkt.Packet):
                # MP_UNREACH_NLRI attribute: withdrawn routes for the given
                # AFI/SAFI.
                __hdr__ = (
                    ('afi', 'H', AFI_IPV4),
                    ('safi', 'B', SAFI_UNICAST),
                )
                def unpack(self, buf):
                    dpkt.Packet.unpack(self, buf)
                    # Pick the route parser matching the address family.
                    if self.afi == AFI_IPV4:
                        Route = RouteIPV4
                    elif self.afi == AFI_IPV6:
                        Route = RouteIPV6
                    else:
                        Route = RouteGeneric
                    # Withdrawn Routes
                    l = []
                    while self.data:
                        route = Route(self.data)
                        self.data = self.data[len(route):]
                        l.append(route)
                    self.data = self.withdrawn = l
                def __len__(self):
                    return self.__hdr_len__ + \
                           sum(map(len, self.data))
                def __str__(self):
                    return self.pack_hdr() + \
                           ''.join(map(str, self.data))
    class Notification(dpkt.Packet):
        # NOTIFICATION message: error code/subcode plus diagnostic data.
        __hdr__ = (
            ('code', 'B', 0),
            ('subcode', 'B', 0),
        )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            # Remaining bytes are the variable-length diagnostic payload.
            self.error = self.data
    class Keepalive(dpkt.Packet):
        # KEEPALIVE message: common header only, empty body.
        def unpack(self, buf):
            pass
        def __len__(self):
            return 0
        def __str__(self):
            return ''
    class RouteRefresh(dpkt.Packet):
        # ROUTE-REFRESH message: the AFI/SAFI pair being refreshed.
        __hdr__ = (
            ('afi', 'H', AFI_IPV4),
            ('rsvd', 'B', 0),
            ('safi', 'B', SAFI_UNICAST)
        )
class RouteGeneric(dpkt.Packet):
    # NLRI entry for an unrecognized AFI: prefix kept as raw bytes.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # len is in bits; the prefix occupies ceil(len / 8) bytes.
        self.data = self.prefix = self.data[:(self.len + 7) / 8]
class RouteIPV4(dpkt.Packet):
    # IPv4 NLRI entry: bit-length byte followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # len is in bits; the wire carries ceil(len / 8) bytes, which are
        # zero-padded here to a full 4-byte address.
        tmp = self.data[:(self.len + 7) / 8]
        tmp += (4 - len(tmp)) * '\x00'
        self.data = self.prefix = tmp
    def __repr__(self):
        cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
        return '%s(%s)' % (self.__class__.__name__, cidr)
    def __len__(self):
        return self.__hdr_len__ + \
               (self.len + 7) / 8
    def __str__(self):
        # Only the significant prefix bytes go back on the wire.
        return self.pack_hdr() + \
               self.prefix[:(self.len + 7) / 8]
class RouteIPV6(dpkt.Packet):
    # IPv6 NLRI entry: bit-length byte followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # len is in bits; zero-pad the wire bytes to a full 16-byte address.
        tmp = self.data[:(self.len + 7) / 8]
        tmp += (16 - len(tmp)) * '\x00'
        self.data = self.prefix = tmp
    def __len__(self):
        return self.__hdr_len__ + \
               (self.len + 7) / 8
    def __str__(self):
        # Only the significant prefix bytes go back on the wire.
        return self.pack_hdr() + \
               self.prefix[:(self.len + 7) / 8]
if __name__ == '__main__':
    import unittest

    class BGPTestCase(unittest.TestCase):
        """Round-trip and field-level checks against captured BGP messages.

        Uses the modern unittest assertion names; ``failUnless`` has been
        a deprecated alias since Python 2.7.
        """

        def testPack(self):
            # Re-serializing a parsed message must be byte-for-byte lossless.
            for raw in (self.bgp1, self.bgp2, self.bgp3, self.bgp4):
                self.assertEqual(raw, str(BGP(raw)))

        def testUnpack(self):
            # KEEPALIVE
            b1 = BGP(self.bgp1)
            self.assertEqual(b1.len, 19)
            self.assertEqual(b1.type, KEEPALIVE)
            self.assertTrue(b1.keepalive is not None)
            # UPDATE with path attributes and one announced route
            b2 = BGP(self.bgp2)
            self.assertEqual(b2.type, UPDATE)
            self.assertEqual(len(b2.update.withdrawn), 0)
            self.assertEqual(len(b2.update.announced), 1)
            self.assertEqual(len(b2.update.attributes), 9)
            a = b2.update.attributes[1]
            self.assertEqual(a.type, AS_PATH)
            self.assertEqual(a.len, 10)
            self.assertEqual(len(a.as_path.segments), 2)
            s = a.as_path.segments[0]
            self.assertEqual(s.type, AS_SET)
            self.assertEqual(s.len, 2)
            self.assertEqual(len(s.path), 2)
            self.assertEqual(s.path[0], 500)
            a = b2.update.attributes[6]
            self.assertEqual(a.type, COMMUNITIES)
            self.assertEqual(a.len, 12)
            self.assertEqual(len(a.communities.list), 3)
            c = a.communities.list[0]
            self.assertEqual(c.asn, 65215)
            self.assertEqual(c.value, 1)
            r = b2.update.announced[0]
            self.assertEqual(r.len, 22)
            self.assertEqual(r.prefix, '\xc0\xa8\x04\x00')
            # UPDATE carrying an MP_REACH_NLRI attribute
            b3 = BGP(self.bgp3)
            self.assertEqual(b3.type, UPDATE)
            self.assertEqual(len(b3.update.withdrawn), 0)
            self.assertEqual(len(b3.update.announced), 0)
            self.assertEqual(len(b3.update.attributes), 6)
            a = b3.update.attributes[0]
            self.assertFalse(a.optional)
            self.assertTrue(a.transitive)
            self.assertFalse(a.partial)
            self.assertFalse(a.extended_length)
            self.assertEqual(a.type, ORIGIN)
            self.assertEqual(a.len, 1)
            o = a.origin
            self.assertEqual(o.type, ORIGIN_IGP)
            a = b3.update.attributes[5]
            self.assertTrue(a.optional)
            self.assertFalse(a.transitive)
            self.assertFalse(a.partial)
            self.assertTrue(a.extended_length)
            self.assertEqual(a.type, MP_REACH_NLRI)
            self.assertEqual(a.len, 30)
            m = a.mp_reach_nlri
            self.assertEqual(m.afi, AFI_IPV4)
            self.assertEqual(len(m.snpas), 0)
            self.assertEqual(len(m.announced), 1)
            p = m.announced[0]
            self.assertEqual(p.len, 96)
            # OPEN with capability parameters
            b4 = BGP(self.bgp4)
            self.assertEqual(b4.len, 45)
            self.assertEqual(b4.type, OPEN)
            self.assertEqual(b4.open.asn, 237)
            self.assertEqual(b4.open.param_len, 16)
            self.assertEqual(len(b4.open.parameters), 3)
            p = b4.open.parameters[0]
            self.assertEqual(p.type, CAPABILITY)
            self.assertEqual(p.len, 6)
            c = p.capability
            self.assertEqual(c.code, CAP_MULTIPROTOCOL)
            self.assertEqual(c.len, 4)
            self.assertEqual(c.data, '\x00\x01\x00\x01')
            c = b4.open.parameters[2].capability
            self.assertEqual(c.code, CAP_ROUTE_REFRESH)
            self.assertEqual(c.len, 0)

        # Captured test messages: KEEPALIVE, two UPDATEs and an OPEN.
        bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
        bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
        bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
        bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'

    unittest.main()
| {
"repo_name": "guke001/QMarkdowner",
"path": "dpkt/bgp.py",
"copies": "10",
"size": "25737",
"license": "mit",
"hash": 5537722251092449000,
"line_mean": 32.8644736842,
"line_max": 501,
"alpha_frac": 0.4523448731,
"autogenerated": false,
"ratio": 3.549931034482759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9002275907582759,
"avg_score": null,
"num_lines": null
} |
# $Id: bgp.py 76 2011-01-06 15:51:30Z dugsong $
# -*- coding: utf-8 -*-
"""Border Gateway Protocol."""
from __future__ import print_function
from __future__ import absolute_import
import struct
import socket
from . import dpkt
from .decorators import deprecated
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types
NO_EXPORT = 0xffffff01
NO_ADVERTISE = 0xffffff02
NO_EXPORT_SUBCONFED = 0xffffff03
NO_PEER = 0xffffff04
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
    """Border Gateway Protocol.

    BGP is an inter-AS routing protocol.
    See more about the BGP on \
    https://en.wikipedia.org/wiki/Border_Gateway_Protocol

    Attributes:
        __hdr__: Header fields of BGP (marker, total length, message type).
    """

    __hdr__ = (
        # BUGFIX: the marker default must be bytes on Python 3 — struct's
        # '16s' format raises when asked to pack a str.
        ('marker', '16s', b'\xff' * 16),
        ('len', 'H', 0),
        ('type', 'B', OPEN)
    )

    def unpack(self, buf):
        """Parse the common header and dispatch the payload by type."""
        dpkt.Packet.unpack(self, buf)
        self.data = self.data[:self.len - self.__hdr_len__]
        if self.type == OPEN:
            self.data = self.open = self.Open(self.data)
        elif self.type == UPDATE:
            self.data = self.update = self.Update(self.data)
        elif self.type == NOTIFICATION:
            # BUGFIX: expose the correctly spelled attribute; the historical
            # misspelling is kept as an alias for backward compatibility.
            self.data = self.notification = self.Notification(self.data)
            self.notifiation = self.notification
        elif self.type == KEEPALIVE:
            self.data = self.keepalive = self.Keepalive(self.data)
        elif self.type == ROUTE_REFRESH:
            self.data = self.route_refresh = self.RouteRefresh(self.data)

    class Open(dpkt.Packet):
        # OPEN message: version, AS number, hold time, BGP identifier and
        # a list of optional parameters.
        __hdr__ = (
            ('v', 'B', 4),
            ('asn', 'H', 0),
            ('holdtime', 'H', 0),
            ('identifier', 'I', 0),
            ('param_len', 'B', 0)
        )
        __hdr_defaults__ = {
            'parameters': []
        }

        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            l = []
            plen = self.param_len
            while plen > 0:
                param = self.Parameter(self.data)
                self.data = self.data[len(param):]
                plen -= len(param)
                l.append(param)
            self.data = self.parameters = l

        def __len__(self):
            return self.__hdr_len__ + sum(map(len, self.parameters))

        def __bytes__(self):
            # param_len is refreshed so the header matches the serialized
            # parameter list.
            params = b''.join(map(bytes, self.parameters))
            self.param_len = len(params)
            return self.pack_hdr() + params

        class Parameter(dpkt.Packet):
            # Optional parameter TLV; payload is Authentication or Capability.
            __hdr__ = (
                ('type', 'B', 0),
                ('len', 'B', 0)
            )

            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                self.data = self.data[:self.len]
                if self.type == AUTHENTICATION:
                    self.data = self.authentication = self.Authentication(self.data)
                elif self.type == CAPABILITY:
                    self.data = self.capability = self.Capability(self.data)

            class Authentication(dpkt.Packet):
                __hdr__ = (
                    ('code', 'B', 0),
                )

            class Capability(dpkt.Packet):
                __hdr__ = (
                    ('code', 'B', 0),
                    ('len', 'B', 0)
                )

                def unpack(self, buf):
                    dpkt.Packet.unpack(self, buf)
                    self.data = self.data[:self.len]

    class Update(dpkt.Packet):
        # UPDATE message: withdrawn routes, path attributes, announced routes.
        __hdr_defaults__ = {
            'withdrawn': [],
            'attributes': [],
            'announced': []
        }

        def unpack(self, buf):
            self.data = buf
            # Withdrawn Routes: 2-byte total length, then IPv4 NLRI entries
            wlen = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
            l = []
            while wlen > 0:
                route = RouteIPV4(self.data)
                self.data = self.data[len(route):]
                wlen -= len(route)
                l.append(route)
            self.withdrawn = l
            # Path Attributes: 2-byte total length, then attribute TLVs
            plen = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
            l = []
            while plen > 0:
                attr = self.Attribute(self.data)
                self.data = self.data[len(attr):]
                plen -= len(attr)
                l.append(attr)
            self.attributes = l
            # Announced Routes: the remainder of the message
            l = []
            while self.data:
                route = RouteIPV4(self.data)
                self.data = self.data[len(route):]
                l.append(route)
            self.announced = l

        def __len__(self):
            return 2 + sum(map(len, self.withdrawn)) + \
                   2 + sum(map(len, self.attributes)) + \
                   sum(map(len, self.announced))

        def __bytes__(self):
            return struct.pack('>H', sum(map(len, self.withdrawn))) + \
                   b''.join(map(bytes, self.withdrawn)) + \
                   struct.pack('>H', sum(map(len, self.attributes))) + \
                   b''.join(map(bytes, self.attributes)) + \
                   b''.join(map(bytes, self.announced))

        class Attribute(dpkt.Packet):
            # Path attribute TLV; the flag bits below select the length-field
            # width and mark optional/transitive/partial semantics.
            __hdr__ = (
                ('flags', 'B', 0),
                ('type', 'B', 0)
            )

            @property
            def optional(self):
                return (self.flags >> 7) & 0x1

            @optional.setter
            def optional(self, o):
                self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)

            @property
            def transitive(self):
                return (self.flags >> 6) & 0x1

            @transitive.setter
            def transitive(self, t):
                self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)

            @property
            def partial(self):
                return (self.flags >> 5) & 0x1

            @partial.setter
            def partial(self, p):
                self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)

            @property
            def extended_length(self):
                return (self.flags >> 4) & 0x1

            @extended_length.setter
            def extended_length(self, e):
                self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)

            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                # The length field is 2 bytes when the extended-length flag
                # is set, 1 byte otherwise.
                if self.extended_length:
                    self.len = struct.unpack('>H', self.data[:2])[0]
                    self.data = self.data[2:]
                else:
                    self.len = struct.unpack('B', self.data[:1])[0]
                    self.data = self.data[1:]
                self.data = self.data[:self.len]
                if self.type == ORIGIN:
                    self.data = self.origin = self.Origin(self.data)
                elif self.type == AS_PATH:
                    self.data = self.as_path = self.ASPath(self.data)
                elif self.type == NEXT_HOP:
                    self.data = self.next_hop = self.NextHop(self.data)
                elif self.type == MULTI_EXIT_DISC:
                    self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
                elif self.type == LOCAL_PREF:
                    self.data = self.local_pref = self.LocalPref(self.data)
                elif self.type == ATOMIC_AGGREGATE:
                    self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
                elif self.type == AGGREGATOR:
                    self.data = self.aggregator = self.Aggregator(self.data)
                elif self.type == COMMUNITIES:
                    self.data = self.communities = self.Communities(self.data)
                elif self.type == ORIGINATOR_ID:
                    self.data = self.originator_id = self.OriginatorID(self.data)
                elif self.type == CLUSTER_LIST:
                    self.data = self.cluster_list = self.ClusterList(self.data)
                elif self.type == MP_REACH_NLRI:
                    self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
                elif self.type == MP_UNREACH_NLRI:
                    self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)

            def __len__(self):
                if self.extended_length:
                    attr_len = 2
                else:
                    attr_len = 1
                return self.__hdr_len__ + attr_len + len(self.data)

            def __bytes__(self):
                if self.extended_length:
                    attr_len_str = struct.pack('>H', self.len)
                else:
                    attr_len_str = struct.pack('B', self.len)
                return self.pack_hdr() + attr_len_str + bytes(self.data)

            class Origin(dpkt.Packet):
                # ORIGIN attribute: single octet, defaults to ORIGIN_IGP.
                __hdr__ = (
                    ('type', 'B', ORIGIN_IGP),
                )

            class ASPath(dpkt.Packet):
                # AS_PATH attribute: concatenation of ASPathSegment records.
                __hdr_defaults__ = {
                    'segments': []
                }

                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        seg = self.ASPathSegment(self.data)
                        self.data = self.data[len(seg):]
                        l.append(seg)
                    self.data = self.segments = l

                def __len__(self):
                    return sum(map(len, self.data))

                def __bytes__(self):
                    return b''.join(map(bytes, self.data))

                class ASPathSegment(dpkt.Packet):
                    # One segment: type, count, then 2-byte AS numbers.
                    __hdr__ = (
                        ('type', 'B', 0),
                        ('len', 'B', 0)
                    )

                    def unpack(self, buf):
                        dpkt.Packet.unpack(self, buf)
                        l = []
                        for i in range(self.len):
                            AS = struct.unpack('>H', self.data[:2])[0]
                            self.data = self.data[2:]
                            l.append(AS)
                        self.data = self.path = l

                    def __len__(self):
                        return self.__hdr_len__ + 2 * len(self.path)

                    def __bytes__(self):
                        as_str = b''
                        for AS in self.path:
                            as_str += struct.pack('>H', AS)
                        return self.pack_hdr() + as_str

            class NextHop(dpkt.Packet):
                # NEXT_HOP attribute: IPv4 address as a 32-bit integer.
                __hdr__ = (
                    ('ip', 'I', 0),
                )

            class MultiExitDisc(dpkt.Packet):
                # MULTI_EXIT_DISC attribute: 32-bit metric.
                __hdr__ = (
                    ('value', 'I', 0),
                )

            class LocalPref(dpkt.Packet):
                # LOCAL_PREF attribute: 32-bit degree of preference.
                __hdr__ = (
                    ('value', 'I', 0),
                )

            class AtomicAggregate(dpkt.Packet):
                # ATOMIC_AGGREGATE attribute: presence-only, empty body.
                def unpack(self, buf):
                    pass

                def __len__(self):
                    return 0

                def __bytes__(self):
                    return b''

            class Aggregator(dpkt.Packet):
                # AGGREGATOR attribute: 2-byte ASN + aggregator's IPv4 address.
                __hdr__ = (
                    ('asn', 'H', 0),
                    ('ip', 'I', 0)
                )

            class Communities(dpkt.Packet):
                # COMMUNITIES attribute: flat sequence of 4-byte values.
                __hdr_defaults__ = {
                    'list': []
                }

                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        val = struct.unpack('>I', self.data[:4])[0]
                        # 0x0000xxxx / 0xffffxxxx are reserved ranges
                        if (0x00000000 <= val <= 0x0000ffff) or (0xffff0000 <= val <= 0xffffffff):
                            comm = self.ReservedCommunity(self.data[:4])
                        else:
                            comm = self.Community(self.data[:4])
                        self.data = self.data[len(comm):]
                        l.append(comm)
                    self.data = self.list = l

                def __len__(self):
                    return sum(map(len, self.data))

                def __bytes__(self):
                    return b''.join(map(bytes, self.data))

                class Community(dpkt.Packet):
                    # Ordinary community: ASN + locally defined value.
                    __hdr__ = (
                        ('asn', 'H', 0),
                        ('value', 'H', 0)
                    )

                class ReservedCommunity(dpkt.Packet):
                    # Reserved community: single opaque 32-bit value.
                    __hdr__ = (
                        ('value', 'I', 0),
                    )

            class OriginatorID(dpkt.Packet):
                # ORIGINATOR_ID attribute: 32-bit router ID.
                __hdr__ = (
                    ('value', 'I', 0),
                )

            class ClusterList(dpkt.Packet):
                # CLUSTER_LIST attribute: sequence of 4-byte cluster IDs.
                __hdr_defaults__ = {
                    'list': []
                }

                def unpack(self, buf):
                    self.data = buf
                    l = []
                    while self.data:
                        # renamed from `id`, which shadowed the builtin
                        cluster_id = struct.unpack('>I', self.data[:4])[0]
                        self.data = self.data[4:]
                        l.append(cluster_id)
                    self.data = self.list = l

                def __len__(self):
                    return 4 * len(self.list)

                def __bytes__(self):
                    cluster_str = b''
                    for val in self.list:
                        cluster_str += struct.pack('>I', val)
                    return cluster_str

            class MPReachNLRI(dpkt.Packet):
                # MP_REACH_NLRI attribute: next hop, SNPAs and announced
                # routes for the given AFI/SAFI.
                __hdr__ = (
                    ('afi', 'H', AFI_IPV4),
                    ('safi', 'B', SAFI_UNICAST),
                )

                def unpack(self, buf):
                    dpkt.Packet.unpack(self, buf)
                    # Next Hop: 1-byte length, then that many address bytes
                    nlen = struct.unpack('B', self.data[:1])[0]
                    self.data = self.data[1:]
                    self.next_hop = self.data[:nlen]
                    self.data = self.data[nlen:]
                    # SNPAs: 1-byte count, then length-prefixed entries
                    l = []
                    num_snpas = struct.unpack('B', self.data[:1])[0]
                    self.data = self.data[1:]
                    for i in range(num_snpas):
                        snpa = self.SNPA(self.data)
                        self.data = self.data[len(snpa):]
                        l.append(snpa)
                    self.snpas = l
                    if self.afi == AFI_IPV4:
                        Route = RouteIPV4
                    elif self.afi == AFI_IPV6:
                        Route = RouteIPV6
                    else:
                        Route = RouteGeneric
                    # Announced Routes
                    l = []
                    while self.data:
                        route = Route(self.data)
                        self.data = self.data[len(route):]
                        l.append(route)
                    self.data = self.announced = l

                def __len__(self):
                    return self.__hdr_len__ + \
                           1 + len(self.next_hop) + \
                           1 + sum(map(len, self.snpas)) + \
                           sum(map(len, self.announced))

                def __bytes__(self):
                    return self.pack_hdr() + \
                           struct.pack('B', len(self.next_hop)) + \
                           bytes(self.next_hop) + \
                           struct.pack('B', len(self.snpas)) + \
                           b''.join(map(bytes, self.snpas)) + \
                           b''.join(map(bytes, self.announced))

                class SNPA(dpkt.Packet):
                    # BUGFIX: was a plain `object` subclass, so the
                    # self.SNPA(self.data) call above raised TypeError; it
                    # must derive from dpkt.Packet like every other record.
                    __hdr__ = (
                        ('len', 'B', 0),
                    )

                    def unpack(self, buf):
                        dpkt.Packet.unpack(self, buf)
                        # len is in semi-octets; round up to whole bytes
                        self.data = self.data[:(self.len + 1) // 2]

            class MPUnreachNLRI(dpkt.Packet):
                # MP_UNREACH_NLRI attribute: withdrawn routes per AFI/SAFI.
                __hdr__ = (
                    ('afi', 'H', AFI_IPV4),
                    ('safi', 'B', SAFI_UNICAST),
                )

                def unpack(self, buf):
                    dpkt.Packet.unpack(self, buf)
                    if self.afi == AFI_IPV4:
                        Route = RouteIPV4
                    elif self.afi == AFI_IPV6:
                        Route = RouteIPV6
                    else:
                        Route = RouteGeneric
                    # Withdrawn Routes
                    l = []
                    while self.data:
                        route = Route(self.data)
                        self.data = self.data[len(route):]
                        l.append(route)
                    self.data = self.withdrawn = l

                def __len__(self):
                    return self.__hdr_len__ + sum(map(len, self.data))

                def __bytes__(self):
                    return self.pack_hdr() + b''.join(map(bytes, self.data))

    class Notification(dpkt.Packet):
        # NOTIFICATION message: error code/subcode plus diagnostic data.
        __hdr__ = (
            ('code', 'B', 0),
            ('subcode', 'B', 0),
        )

        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            self.error = self.data

    class Keepalive(dpkt.Packet):
        # KEEPALIVE message: common header only, empty body.
        def unpack(self, buf):
            pass

        def __len__(self):
            return 0

        def __bytes__(self):
            return b''

    class RouteRefresh(dpkt.Packet):
        # ROUTE-REFRESH message: the AFI/SAFI pair being refreshed.
        __hdr__ = (
            ('afi', 'H', AFI_IPV4),
            ('rsvd', 'B', 0),
            ('safi', 'B', SAFI_UNICAST)
        )
class RouteGeneric(dpkt.Packet):
    # NLRI entry for an unrecognized AFI: prefix kept as raw bytes.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # len is in bits; the prefix occupies ceil(len / 8) bytes.
        self.data = self.prefix = self.data[:(self.len + 7) // 8]
class RouteIPV4(dpkt.Packet):
    # IPv4 NLRI entry: bit-length byte followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # len is in bits; the wire carries ceil(len / 8) bytes, which are
        # zero-padded here to a full 4-byte address.
        tmp = self.data[:(self.len + 7) // 8]
        tmp += (4 - len(tmp)) * b'\x00'
        self.data = self.prefix = tmp
    def __repr__(self):
        cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
        return '%s(%s)' % (self.__class__.__name__, cidr)
    def __len__(self):
        return self.__hdr_len__ + (self.len + 7) // 8
    def __bytes__(self):
        # Only the significant prefix bytes go back on the wire.
        return self.pack_hdr() + self.prefix[:(self.len + 7) // 8]
class RouteIPV6(dpkt.Packet):
    # IPv6 NLRI entry: bit-length byte followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # len is in bits; zero-pad the wire bytes to a full 16-byte address.
        tmp = self.data[:(self.len + 7) // 8]
        tmp += (16 - len(tmp)) * b'\x00'
        self.data = self.prefix = tmp
    def __len__(self):
        return self.__hdr_len__ + (self.len + 7) // 8
    def __bytes__(self):
        # Only the significant prefix bytes go back on the wire.
        return self.pack_hdr() + self.prefix[:(self.len + 7) // 8]
__bgp1 = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
__bgp2 = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
__bgp3 = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
__bgp4 = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
def test_pack():
    """Round-trip: parsing then re-serializing each sample must be lossless."""
    for raw in (__bgp1, __bgp2, __bgp3, __bgp4):
        assert raw == bytes(BGP(raw))
def test_unpack():
    """Field-level checks against the four captured sample messages."""
    # KEEPALIVE: header only
    b1 = BGP(__bgp1)
    assert (b1.len == 19)
    assert (b1.type == KEEPALIVE)
    assert (b1.keepalive is not None)
    # UPDATE with path attributes and one announced route
    b2 = BGP(__bgp2)
    assert (b2.type == UPDATE)
    assert (len(b2.update.withdrawn) == 0)
    assert (len(b2.update.announced) == 1)
    assert (len(b2.update.attributes) == 9)
    a = b2.update.attributes[1]
    assert (a.type == AS_PATH)
    assert (a.len == 10)
    assert (len(a.as_path.segments) == 2)
    s = a.as_path.segments[0]
    assert (s.type == AS_SET)
    assert (s.len == 2)
    assert (len(s.path) == 2)
    assert (s.path[0] == 500)
    a = b2.update.attributes[6]
    assert (a.type == COMMUNITIES)
    assert (a.len == 12)
    assert (len(a.communities.list) == 3)
    c = a.communities.list[0]
    assert (c.asn == 65215)
    assert (c.value == 1)
    r = b2.update.announced[0]
    assert (r.len == 22)
    assert (r.prefix == b'\xc0\xa8\x04\x00')
    # UPDATE carrying an MP_REACH_NLRI attribute
    b3 = BGP(__bgp3)
    assert (b3.type == UPDATE)
    assert (len(b3.update.withdrawn) == 0)
    assert (len(b3.update.announced) == 0)
    assert (len(b3.update.attributes) == 6)
    a = b3.update.attributes[0]
    assert (a.optional == False)
    assert (a.transitive == True)
    assert (a.partial == False)
    assert (a.extended_length == False)
    assert (a.type == ORIGIN)
    assert (a.len == 1)
    o = a.origin
    assert (o.type == ORIGIN_IGP)
    a = b3.update.attributes[5]
    assert (a.optional == True)
    assert (a.transitive == False)
    assert (a.partial == False)
    assert (a.extended_length == True)
    assert (a.type == MP_REACH_NLRI)
    assert (a.len == 30)
    m = a.mp_reach_nlri
    assert (m.afi == AFI_IPV4)
    assert (len(m.snpas) == 0)
    assert (len(m.announced) == 1)
    p = m.announced[0]
    assert (p.len == 96)
    # OPEN with capability parameters
    b4 = BGP(__bgp4)
    assert (b4.len == 45)
    assert (b4.type == OPEN)
    assert (b4.open.asn == 237)
    assert (b4.open.param_len == 16)
    assert (len(b4.open.parameters) == 3)
    p = b4.open.parameters[0]
    assert (p.type == CAPABILITY)
    assert (p.len == 6)
    c = p.capability
    assert (c.code == CAP_MULTIPROTOCOL)
    assert (c.len == 4)
    assert (c.data == b'\x00\x01\x00\x01')
    c = b4.open.parameters[2].capability
    assert (c.code == CAP_ROUTE_REFRESH)
    assert (c.len == 0)
if __name__ == '__main__':
    # Run the module self-tests when executed directly.
    test_pack()
    test_unpack()
    print('Tests Successful...')
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/bgp.py",
"copies": "3",
"size": "24436",
"license": "bsd-3-clause",
"hash": -375874084797032000,
"line_mean": 30.8591916558,
"line_max": 496,
"alpha_frac": 0.4681208054,
"autogenerated": false,
"ratio": 3.5003581148832548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001042839347119387,
"num_lines": 767
} |
# $Id: bgp.py 76 2011-01-06 15:51:30Z dugsong $
# -*- coding: utf-8 -*-
"""Border Gateway Protocol."""
import struct
import socket
import dpkt
from decorators import deprecated
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
AS4_PATH = 17
AS4_AGGREGATOR = 18
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types
NO_EXPORT = 0xffffff01L
NO_ADVERTISE = 0xffffff02L
NO_EXPORT_SUBCONFED = 0xffffff03L
NO_PEER = 0xffffff04L
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
__hdr__ = (
('marker', '16s', '\xff' * 16),
('len', 'H', 0),
('type', 'B', OPEN)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
if self.type == OPEN:
self.data = self.open = self.Open(self.data)
elif self.type == UPDATE:
self.data = self.update = self.Update(self.data)
elif self.type == NOTIFICATION:
self.data = self.notifiation = self.Notification(self.data)
elif self.type == KEEPALIVE:
self.data = self.keepalive = self.Keepalive(self.data)
elif self.type == ROUTE_REFRESH:
self.data = self.route_refresh = self.RouteRefresh(self.data)
    class Open(dpkt.Packet):
        # OPEN message: version, AS number, hold time, BGP identifier and a
        # list of optional parameters.
        __hdr__ = (
            ('v', 'B', 4),
            ('asn', 'H', 0),
            ('holdtime', 'H', 0),
            ('identifier', 'I', 0),
            ('param_len', 'B', 0)
        )
        __hdr_defaults__ = {
            'parameters': []
        }
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            # Consume param_len bytes worth of parameter TLVs.
            l = []
            plen = self.param_len
            while plen > 0:
                param = self.Parameter(self.data)
                self.data = self.data[len(param):]
                plen -= len(param)
                l.append(param)
            self.data = self.parameters = l
        def __len__(self):
            return self.__hdr_len__ + sum(map(len, self.parameters))
        def __str__(self):
            # param_len is refreshed so the header matches the serialized
            # parameter list.
            params = ''.join(map(str, self.parameters))
            self.param_len = len(params)
            return self.pack_hdr() + params
        class Parameter(dpkt.Packet):
            # Optional parameter TLV; payload is Authentication or Capability.
            __hdr__ = (
                ('type', 'B', 0),
                ('len', 'B', 0)
            )
            def unpack(self, buf):
                dpkt.Packet.unpack(self, buf)
                self.data = self.data[:self.len]
                if self.type == AUTHENTICATION:
                    self.data = self.authentication = self.Authentication(self.data)
                elif self.type == CAPABILITY:
                    self.data = self.capability = self.Capability(self.data)
            class Authentication(dpkt.Packet):
                # Authentication parameter: code byte plus opaque data.
                __hdr__ = (
                    ('code', 'B', 0),
                )
            class Capability(dpkt.Packet):
                # Capability TLV: code, length, opaque value.
                __hdr__ = (
                    ('code', 'B', 0),
                    ('len', 'B', 0)
                )
                def unpack(self, buf):
                    dpkt.Packet.unpack(self, buf)
                    self.data = self.data[:self.len]
    class Update(dpkt.Packet):
        # UPDATE message: withdrawn routes, path attributes, announced routes.
        __hdr_defaults__ = {
            'withdrawn': [],
            'attributes': [],
            'announced': []
        }
        def unpack(self, buf):
            self.data = buf
            # Withdrawn Routes: 2-byte total length, then IPv4 NLRI entries
            wlen = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
            l = []
            while wlen > 0:
                route = RouteIPV4(self.data)
                self.data = self.data[len(route):]
                wlen -= len(route)
                l.append(route)
            self.withdrawn = l
            # Path Attributes: 2-byte total length, then attribute TLVs
            plen = struct.unpack('>H', self.data[:2])[0]
            self.data = self.data[2:]
            l = []
            while plen > 0:
                attr = self.Attribute(self.data)
                self.data = self.data[len(attr):]
                plen -= len(attr)
                l.append(attr)
            self.attributes = l
            # Announced Routes: the remainder of the message
            l = []
            while self.data:
                route = RouteIPV4(self.data)
                self.data = self.data[len(route):]
                l.append(route)
            self.announced = l
        def __len__(self):
            return 2 + sum(map(len, self.withdrawn)) + \
                   2 + sum(map(len, self.attributes)) + \
                   sum(map(len, self.announced))
        def __str__(self):
            return struct.pack('>H', sum(map(len, self.withdrawn))) + \
                   ''.join(map(str, self.withdrawn)) + \
                   struct.pack('>H', sum(map(len, self.attributes))) + \
                   ''.join(map(str, self.attributes)) + \
                   ''.join(map(str, self.announced))
class Attribute(dpkt.Packet):
__hdr__ = (
('flags', 'B', 0),
('type', 'B', 0)
)
            # Attribute flag bits packed into `flags`: optional (bit 7),
            # transitive (bit 6), partial (bit 5), extended-length (bit 4).
            @property
            def optional(self):
                return (self.flags >> 7) & 0x1
            @optional.setter
            def optional(self, o):
                self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
            @property
            def transitive(self):
                return (self.flags >> 6) & 0x1
            @transitive.setter
            def transitive(self, t):
                self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
            @property
            def partial(self):
                return (self.flags >> 5) & 0x1
            @partial.setter
            def partial(self, p):
                self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
            @property
            def extended_length(self):
                return (self.flags >> 4) & 0x1
            @extended_length.setter
            def extended_length(self, e):
                self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
# Deprecated methods, will be removed in the future
# ======================================================
@deprecated('optional')
def _get_o(self):
return self.optional
@deprecated('optional')
def _set_o(self, o):
self.optional = o
@deprecated('transitive')
def _get_t(self):
return self.transitive
@deprecated('transitive')
def _set_t(self, t):
self.transitive = t
@deprecated('partial')
def _get_p(self):
return self.partial
@deprecated('partial')
def _set_p(self, p):
self.partial = p
@deprecated('extended_length')
def _get_e(self):
return self.extended_length
@deprecated('extended_length')
def _set_e(self, e):
self.extended_length = e
# ======================================================
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.extended_length:
self.len = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
else:
self.len = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.data = self.data[:self.len]
if self.type == ORIGIN:
self.data = self.origin = self.Origin(self.data)
elif self.type == AS_PATH:
self.data = self.as_path = self.ASPath(self.data)
elif self.type == NEXT_HOP:
self.data = self.next_hop = self.NextHop(self.data)
elif self.type == MULTI_EXIT_DISC:
self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
elif self.type == LOCAL_PREF:
self.data = self.local_pref = self.LocalPref(self.data)
elif self.type == ATOMIC_AGGREGATE:
self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
elif self.type == AGGREGATOR:
self.data = self.aggregator = self.Aggregator(self.data)
elif self.type == COMMUNITIES:
self.data = self.communities = self.Communities(self.data)
elif self.type == ORIGINATOR_ID:
self.data = self.originator_id = self.OriginatorID(self.data)
elif self.type == CLUSTER_LIST:
self.data = self.cluster_list = self.ClusterList(self.data)
elif self.type == MP_REACH_NLRI:
self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
elif self.type == MP_UNREACH_NLRI:
self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
elif self.type == AS4_PATH:
self.data = self.as_path = self.AS4Path(self.data)
def __len__(self):
if self.extended_length:
attr_len = 2
else:
attr_len = 1
return self.__hdr_len__ + attr_len + len(self.data)
def __str__(self):
if self.extended_length:
attr_len_str = struct.pack('>H', self.len)
else:
attr_len_str = struct.pack('B', self.len)
return self.pack_hdr() + attr_len_str + str(self.data)
class Origin(dpkt.Packet):
__hdr__ = (
('type', 'B', ORIGIN_IGP),
)
class ASPath(dpkt.Packet):
__hdr_defaults__ = {
'segments': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
seg = self.ASPathSegment(self.data)
self.data = self.data[len(seg):]
l.append(seg)
self.data = self.segments = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class ASPathSegment(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
# Hack! Length of the buffer is not an indicative
# of AS length. But, this hack helps parse
# more packets than it does otherwise, albeit some incorrectly.
self.as_len = 4
if self.len * self.as_len > len(self.data):
self.as_len = 2
for i in range(self.len):
if self.as_len == 4:
AS = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
else:
AS = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l.append(AS)
self.data = self.path = l
def __len__(self):
return self.__hdr_len__ + self.as_len * len(self.path)
def __str__(self):
as_str = ''
for AS in self.path:
if self.as_len == 4:
as_str += struct.pack('>I', AS)
else:
as_str += struct.pack('>H', AS)
return self.pack_hdr() + as_str
class AS4Path(dpkt.Packet):
__hdr_defaults__ = {
'segments': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
seg = self.AS4PathSegment(self.data)
self.data = self.data[len(seg):]
l.append(seg)
self.data = self.segments = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class AS4PathSegment(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
for i in range(self.len):
AS = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l.append(AS)
self.data = self.path = l
def __len__(self):
return self.__hdr_len__ + 4 * len(self.path)
def __str__(self):
as_str = ''
for AS in self.path:
as_str += struct.pack('>I', AS)
return self.pack_hdr() + as_str
class NextHop(dpkt.Packet):
__hdr__ = (
('ip', 'I', 0),
)
class MultiExitDisc(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class LocalPref(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class AtomicAggregate(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class Aggregator(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('ip', 'I', 0)
)
class Communities(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
val = struct.unpack('>I', self.data[:4])[0]
if (0x00000000L <= val <= 0x0000ffffL) or (0xffff0000L <= val <= 0xffffffffL):
comm = self.ReservedCommunity(self.data[:4])
else:
comm = self.Community(self.data[:4])
self.data = self.data[len(comm):]
l.append(comm)
self.data = self.list = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class Community(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('value', 'H', 0)
)
class ReservedCommunity(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class OriginatorID(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class ClusterList(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
id = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l.append(id)
self.data = self.list = l
def __len__(self):
return 4 * len(self.list)
def __str__(self):
cluster_str = ''
for val in self.list:
cluster_str += struct.pack('>I', val)
return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.next_hop = self.data[:nlen]
self.data = self.data[nlen:]
# SNPAs
l = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l.append(snpa)
self.snpas = l
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Announced Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.announced = l
def __len__(self):
return self.__hdr_len__ + \
1 + len(self.next_hop) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __str__(self):
return self.pack_hdr() + \
struct.pack('B', len(self.next_hop)) + \
str(self.next_hop) + \
struct.pack('B', len(self.snpas)) + \
''.join(map(str, self.snpas)) + \
''.join(map(str, self.announced))
class SNPA(object):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:(self.len + 1) / 2]
class MPUnreachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Withdrawn Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.withdrawn = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __str__(self):
return self.pack_hdr() + ''.join(map(str, self.data))
    class Notification(dpkt.Packet):
        # NOTIFICATION message: error code/subcode plus diagnostic data.
        __hdr__ = (
            ('code', 'B', 0),
            ('subcode', 'B', 0),
        )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            # Remaining bytes are the variable-length error payload.
            self.error = self.data
    class Keepalive(dpkt.Packet):
        # KEEPALIVE message: header only, no body.
        def unpack(self, buf):
            pass
        def __len__(self):
            return 0
        def __str__(self):
            return ''
    class RouteRefresh(dpkt.Packet):
        # ROUTE-REFRESH message: AFI, reserved byte, SAFI.
        __hdr__ = (
            ('afi', 'H', AFI_IPV4),
            ('rsvd', 'B', 0),
            ('safi', 'B', SAFI_UNICAST)
        )
class RouteGeneric(dpkt.Packet):
    # NLRI entry for an unknown AFI: prefix length in bits, then prefix bytes.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Keep only ceil(len/8) significant prefix bytes.
        self.data = self.prefix = self.data[:(self.len + 7) / 8]
class RouteIPV4(dpkt.Packet):
    # IPv4 NLRI entry: prefix length in bits followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        tmp = self.data[:(self.len + 7) / 8]
        # Zero-pad to a full 4-byte address so inet_ntoa and comparisons work.
        tmp += (4 - len(tmp)) * '\x00'
        self.data = self.prefix = tmp
    def __repr__(self):
        cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
        return '%s(%s)' % (self.__class__.__name__, cidr)
    def __len__(self):
        return self.__hdr_len__ + (self.len + 7) / 8
    def __str__(self):
        # Serialize only the significant prefix bytes, not the padding.
        return self.pack_hdr() + self.prefix[:(self.len + 7) / 8]
class RouteIPV6(dpkt.Packet):
    # IPv6 NLRI entry: prefix length in bits followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        tmp = self.data[:(self.len + 7) / 8]
        # Zero-pad to a full 16-byte address.
        tmp += (16 - len(tmp)) * '\x00'
        self.data = self.prefix = tmp
    def __len__(self):
        return self.__hdr_len__ + (self.len + 7) / 8
    def __str__(self):
        # Serialize only the significant prefix bytes, not the padding.
        return self.pack_hdr() + self.prefix[:(self.len + 7) / 8]
__bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
__bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
__bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
__bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
def test_pack():
    """Round-trip each sample message through BGP() and back to bytes."""
    for sample in (__bgp1, __bgp2, __bgp3, __bgp4):
        assert str(BGP(sample)) == sample
def test_unpack():
    # KEEPALIVE: fixed 19-byte message, no body.
    b1 = BGP(__bgp1)
    assert (b1.len == 19)
    assert (b1.type == KEEPALIVE)
    assert (b1.keepalive is not None)
    # UPDATE with nine path attributes and one announced IPv4 route.
    b2 = BGP(__bgp2)
    assert (b2.type == UPDATE)
    assert (len(b2.update.withdrawn) == 0)
    assert (len(b2.update.announced) == 1)
    assert (len(b2.update.attributes) == 9)
    a = b2.update.attributes[1]
    assert (a.type == AS_PATH)
    assert (a.len == 10)
    assert (len(a.as_path.segments) == 2)
    s = a.as_path.segments[0]
    assert (s.type == AS_SET)
    assert (s.len == 2)
    assert (len(s.path) == 2)
    assert (s.path[0] == 500)
    a = b2.update.attributes[6]
    assert (a.type == COMMUNITIES)
    assert (a.len == 12)
    assert (len(a.communities.list) == 3)
    c = a.communities.list[0]
    assert (c.asn == 65215)
    assert (c.value == 1)
    r = b2.update.announced[0]
    assert (r.len == 22)
    assert (r.prefix == '\xc0\xa8\x04\x00')
    # UPDATE carrying an MP_REACH_NLRI attribute (extended length).
    b3 = BGP(__bgp3)
    assert (b3.type == UPDATE)
    assert (len(b3.update.withdrawn) == 0)
    assert (len(b3.update.announced) == 0)
    assert (len(b3.update.attributes) == 6)
    a = b3.update.attributes[0]
    assert (a.optional == False)
    assert (a.transitive == True)
    assert (a.partial == False)
    assert (a.extended_length == False)
    assert (a.type == ORIGIN)
    assert (a.len == 1)
    o = a.origin
    assert (o.type == ORIGIN_IGP)
    a = b3.update.attributes[5]
    assert (a.optional == True)
    assert (a.transitive == False)
    assert (a.partial == False)
    assert (a.extended_length == True)
    assert (a.type == MP_REACH_NLRI)
    assert (a.len == 30)
    m = a.mp_reach_nlri
    assert (m.afi == AFI_IPV4)
    assert (len(m.snpas) == 0)
    assert (len(m.announced) == 1)
    p = m.announced[0]
    assert (p.len == 96)
    # OPEN with three optional parameters, including capabilities.
    b4 = BGP(__bgp4)
    assert (b4.len == 45)
    assert (b4.type == OPEN)
    assert (b4.open.asn == 237)
    assert (b4.open.param_len == 16)
    assert (len(b4.open.parameters) == 3)
    p = b4.open.parameters[0]
    assert (p.type == CAPABILITY)
    assert (p.len == 6)
    c = p.capability
    assert (c.code == CAP_MULTIPROTOCOL)
    assert (c.len == 4)
    assert (c.data == '\x00\x01\x00\x01')
    c = b4.open.parameters[2].capability
    assert (c.code == CAP_ROUTE_REFRESH)
    assert (c.len == 0)
# Run the self-tests when executed directly (Python 2 print statement).
if __name__ == '__main__':
    test_pack()
    test_unpack()
    print 'Tests Successful...'
| {
"repo_name": "jack8daniels2/dpkt",
"path": "dpkt/bgp.py",
"copies": "1",
"size": "27596",
"license": "bsd-3-clause",
"hash": -5448426694056687000,
"line_mean": 31.5041224971,
"line_max": 495,
"alpha_frac": 0.4509711552,
"autogenerated": false,
"ratio": 3.628188272416513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4579159427616513,
"avg_score": null,
"num_lines": null
} |
# $Id: bgp.py 76 2011-01-06 15:51:30Z dugsong $
"""Border Gateway Protocol."""
import dpkt
import struct, socket
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types
NO_EXPORT = 0xffffff01L
NO_ADVERTISE = 0xffffff02L
NO_EXPORT_SUBCONFED = 0xffffff03L
NO_PEER = 0xffffff04L
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
__hdr__ = (
('marker', '16s', '\xff' * 16),
('len', 'H', 0),
('type', 'B', OPEN)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
if self.type == OPEN:
self.data = self.open = self.Open(self.data)
elif self.type == UPDATE:
self.data = self.update = self.Update(self.data)
elif self.type == NOTIFICATION:
self.data = self.notifiation = self.Notification(self.data)
elif self.type == KEEPALIVE:
self.data = self.keepalive = self.Keepalive(self.data)
elif self.type == ROUTE_REFRESH:
self.data = self.route_refresh = self.RouteRefresh(self.data)
class Open(dpkt.Packet):
__hdr__ = (
('v', 'B', 4),
('asn', 'H', 0),
('holdtime', 'H', 0),
('identifier', 'I', 0),
('param_len', 'B', 0)
)
__hdr_defaults__ = {
'parameters': []
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
plen = self.param_len
while plen > 0:
param = self.Parameter(self.data)
self.data = self.data[len(param):]
plen -= len(param)
l.append(param)
self.data = self.parameters = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.parameters))
def __str__(self):
params = ''.join(map(str, self.parameters))
self.param_len = len(params)
return self.pack_hdr() + params
class Parameter(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
if self.type == AUTHENTICATION:
self.data = self.authentication = self.Authentication(self.data)
elif self.type == CAPABILITY:
self.data = self.capability = self.Capability(self.data)
class Authentication(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
)
class Capability(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
class Update(dpkt.Packet):
__hdr_defaults__ = {
'withdrawn': [],
'attributes': [],
'announced': []
}
def unpack(self, buf):
self.data = buf
# Withdrawn Routes
wlen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l = []
while wlen > 0:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
wlen -= len(route)
l.append(route)
self.withdrawn = l
# Path Attributes
plen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l = []
while plen > 0:
attr = self.Attribute(self.data)
self.data = self.data[len(attr):]
plen -= len(attr)
l.append(attr)
self.attributes = l
# Announced Routes
l = []
while self.data:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
l.append(route)
self.announced = l
def __len__(self):
return 2 + sum(map(len, self.withdrawn)) + \
2 + sum(map(len, self.attributes)) + \
sum(map(len, self.announced))
def __str__(self):
return struct.pack('>H', sum(map(len, self.withdrawn))) + \
''.join(map(str, self.withdrawn)) + \
struct.pack('>H', sum(map(len, self.attributes))) + \
''.join(map(str, self.attributes)) + \
''.join(map(str, self.announced))
class Attribute(dpkt.Packet):
__hdr__ = (
('flags', 'B', 0),
('type', 'B', 0)
)
def _get_o(self):
return (self.flags >> 7) & 0x1
def _set_o(self, o):
self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
optional = property(_get_o, _set_o)
def _get_t(self):
return (self.flags >> 6) & 0x1
def _set_t(self, t):
self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
transitive = property(_get_t, _set_t)
def _get_p(self):
return (self.flags >> 5) & 0x1
def _set_p(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
partial = property(_get_p, _set_p)
def _get_e(self):
return (self.flags >> 4) & 0x1
def _set_e(self, e):
self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
extended_length = property(_get_e, _set_e)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.extended_length:
self.len = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
else:
self.len = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.data = self.data[:self.len]
if self.type == ORIGIN:
self.data = self.origin = self.Origin(self.data)
elif self.type == AS_PATH:
self.data = self.as_path = self.ASPath(self.data)
elif self.type == NEXT_HOP:
self.data = self.next_hop = self.NextHop(self.data)
elif self.type == MULTI_EXIT_DISC:
self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
elif self.type == LOCAL_PREF:
self.data = self.local_pref = self.LocalPref(self.data)
elif self.type == ATOMIC_AGGREGATE:
self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
elif self.type == AGGREGATOR:
self.data = self.aggregator = self.Aggregator(self.data)
elif self.type == COMMUNITIES:
self.data = self.communities = self.Communities(self.data)
elif self.type == ORIGINATOR_ID:
self.data = self.originator_id = self.OriginatorID(self.data)
elif self.type == CLUSTER_LIST:
self.data = self.cluster_list = self.ClusterList(self.data)
elif self.type == MP_REACH_NLRI:
self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
elif self.type == MP_UNREACH_NLRI:
self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
def __len__(self):
if self.extended_length:
attr_len = 2
else:
attr_len = 1
return self.__hdr_len__ + \
attr_len + \
len(self.data)
def __str__(self):
if self.extended_length:
attr_len_str = struct.pack('>H', self.len)
else:
attr_len_str = struct.pack('B', self.len)
return self.pack_hdr() + \
attr_len_str + \
str(self.data)
class Origin(dpkt.Packet):
__hdr__ = (
('type', 'B', ORIGIN_IGP),
)
class ASPath(dpkt.Packet):
__hdr_defaults__ = {
'segments': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
seg = self.ASPathSegment(self.data)
self.data = self.data[len(seg):]
l.append(seg)
self.data = self.segments = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class ASPathSegment(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
for i in range(self.len):
AS = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l.append(AS)
self.data = self.path = l
def __len__(self):
return self.__hdr_len__ + \
2 * len(self.path)
def __str__(self):
as_str = ''
for AS in self.path:
as_str += struct.pack('>H', AS)
return self.pack_hdr() + \
as_str
class NextHop(dpkt.Packet):
__hdr__ = (
('ip', 'I', 0),
)
class MultiExitDisc(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class LocalPref(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class AtomicAggregate(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class Aggregator(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('ip', 'I', 0)
)
class Communities(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
val = struct.unpack('>I', self.data[:4])[0]
if (val >= 0x00000000L and val <= 0x0000ffffL) or \
(val >= 0xffff0000L and val <= 0xffffffffL):
comm = self.ReservedCommunity(self.data[:4])
else:
comm = self.Community(self.data[:4])
self.data = self.data[len(comm):]
l.append(comm)
self.data = self.list = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class Community(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('value', 'H', 0)
)
class ReservedCommunity(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class OriginatorID(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class ClusterList(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
id = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l.append(id)
self.data = self.list = l
def __len__(self):
return 4 * len(self.list)
def __str__(self):
cluster_str = ''
for val in self.list:
cluster_str += struct.pack('>I', val)
return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.next_hop = self.data[:nlen]
self.data = self.data[nlen:]
# SNPAs
l = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l.append(snpa)
self.snpas = l
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Announced Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.announced = l
def __len__(self):
return self.__hdr_len__ + \
1 + len(self.next_hop) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __str__(self):
return self.pack_hdr() + \
struct.pack('B', len(self.next_hop)) + \
str(self.next_hop) + \
struct.pack('B', len(self.snpas)) + \
''.join(map(str, self.snpas)) + \
''.join(map(str, self.announced))
class SNPA:
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:(self.len + 1) / 2]
class MPUnreachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Withdrawn Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.withdrawn = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.data))
def __str__(self):
return self.pack_hdr() + \
''.join(map(str, self.data))
class Notification(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('subcode', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.error = self.data
class Keepalive(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class RouteRefresh(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('rsvd', 'B', 0),
('safi', 'B', SAFI_UNICAST)
)
class RouteGeneric(dpkt.Packet):
    # NLRI entry for an unknown AFI: prefix length in bits, then prefix bytes.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Keep only ceil(len/8) significant prefix bytes.
        self.data = self.prefix = self.data[:(self.len + 7) / 8]
class RouteIPV4(dpkt.Packet):
    # IPv4 NLRI entry: prefix length in bits followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        tmp = self.data[:(self.len + 7) / 8]
        # Zero-pad to a full 4-byte address so inet_ntoa and comparisons work.
        tmp += (4 - len(tmp)) * '\x00'
        self.data = self.prefix = tmp
    def __repr__(self):
        cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
        return '%s(%s)' % (self.__class__.__name__, cidr)
    def __len__(self):
        return self.__hdr_len__ + \
               (self.len + 7) / 8
    def __str__(self):
        # Serialize only the significant prefix bytes, not the padding.
        return self.pack_hdr() + \
               self.prefix[:(self.len + 7) / 8]
class RouteIPV6(dpkt.Packet):
    # IPv6 NLRI entry: prefix length in bits followed by the packed prefix.
    __hdr__ = (
        ('len', 'B', 0),
    )
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        tmp = self.data[:(self.len + 7) / 8]
        # Zero-pad to a full 16-byte address.
        tmp += (16 - len(tmp)) * '\x00'
        self.data = self.prefix = tmp
    def __len__(self):
        return self.__hdr_len__ + \
               (self.len + 7) / 8
    def __str__(self):
        # Serialize only the significant prefix bytes, not the padding.
        return self.pack_hdr() + \
               self.prefix[:(self.len + 7) / 8]
# Self-tests (Python 2 unittest) run when the module is executed directly.
if __name__ == '__main__':
    import unittest
    class BGPTestCase(unittest.TestCase):
        def testPack(self):
            # Round-trip each sample message through BGP() and str().
            b1 = BGP(self.bgp1)
            self.failUnless(self.bgp1 == str(b1))
            b2 = BGP(self.bgp2)
            self.failUnless(self.bgp2 == str(b2))
            b3 = BGP(self.bgp3)
            self.failUnless(self.bgp3 == str(b3))
            b4 = BGP(self.bgp4)
            self.failUnless(self.bgp4 == str(b4))
        def testUnpack(self):
            # KEEPALIVE: fixed 19-byte message, no body.
            b1 = BGP(self.bgp1)
            self.failUnless(b1.len == 19)
            self.failUnless(b1.type == KEEPALIVE)
            self.failUnless(b1.keepalive is not None)
            # UPDATE with nine path attributes and one announced route.
            b2 = BGP(self.bgp2)
            self.failUnless(b2.type == UPDATE)
            self.failUnless(len(b2.update.withdrawn) == 0)
            self.failUnless(len(b2.update.announced) == 1)
            self.failUnless(len(b2.update.attributes) == 9)
            a = b2.update.attributes[1]
            self.failUnless(a.type == AS_PATH)
            self.failUnless(a.len == 10)
            self.failUnless(len(a.as_path.segments) == 2)
            s = a.as_path.segments[0]
            self.failUnless(s.type == AS_SET)
            self.failUnless(s.len == 2)
            self.failUnless(len(s.path) == 2)
            self.failUnless(s.path[0] == 500)
            a = b2.update.attributes[6]
            self.failUnless(a.type == COMMUNITIES)
            self.failUnless(a.len == 12)
            self.failUnless(len(a.communities.list) == 3)
            c = a.communities.list[0]
            self.failUnless(c.asn == 65215)
            self.failUnless(c.value == 1)
            r = b2.update.announced[0]
            self.failUnless(r.len == 22)
            self.failUnless(r.prefix == '\xc0\xa8\x04\x00')
            # UPDATE carrying an MP_REACH_NLRI attribute (extended length).
            b3 = BGP(self.bgp3)
            self.failUnless(b3.type == UPDATE)
            self.failUnless(len(b3.update.withdrawn) == 0)
            self.failUnless(len(b3.update.announced) == 0)
            self.failUnless(len(b3.update.attributes) == 6)
            a = b3.update.attributes[0]
            self.failUnless(a.optional == False)
            self.failUnless(a.transitive == True)
            self.failUnless(a.partial == False)
            self.failUnless(a.extended_length == False)
            self.failUnless(a.type == ORIGIN)
            self.failUnless(a.len == 1)
            o = a.origin
            self.failUnless(o.type == ORIGIN_IGP)
            a = b3.update.attributes[5]
            self.failUnless(a.optional == True)
            self.failUnless(a.transitive == False)
            self.failUnless(a.partial == False)
            self.failUnless(a.extended_length == True)
            self.failUnless(a.type == MP_REACH_NLRI)
            self.failUnless(a.len == 30)
            m = a.mp_reach_nlri
            self.failUnless(m.afi == AFI_IPV4)
            self.failUnless(len(m.snpas) == 0)
            self.failUnless(len(m.announced) == 1)
            p = m.announced[0]
            self.failUnless(p.len == 96)
            # OPEN with three optional parameters, including capabilities.
            b4 = BGP(self.bgp4)
            self.failUnless(b4.len == 45)
            self.failUnless(b4.type == OPEN)
            self.failUnless(b4.open.asn == 237)
            self.failUnless(b4.open.param_len == 16)
            self.failUnless(len(b4.open.parameters) == 3)
            p = b4.open.parameters[0]
            self.failUnless(p.type == CAPABILITY)
            self.failUnless(p.len == 6)
            c = p.capability
            self.failUnless(c.code == CAP_MULTIPROTOCOL)
            self.failUnless(c.len == 4)
            self.failUnless(c.data == '\x00\x01\x00\x01')
            c = b4.open.parameters[2].capability
            self.failUnless(c.code == CAP_ROUTE_REFRESH)
            self.failUnless(c.len == 0)
        # Captured sample messages: KEEPALIVE, two UPDATEs, one OPEN.
        bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
        bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
        bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
        bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
    unittest.main()
| {
"repo_name": "pquerna/tls-client-hello-stats",
"path": "third_party/dpkt/dpkt/bgp.py",
"copies": "5",
"size": "25731",
"license": "apache-2.0",
"hash": 2295317537831036200,
"line_mean": 32.8565789474,
"line_max": 501,
"alpha_frac": 0.4522560336,
"autogenerated": false,
"ratio": 3.5510626552580735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6503318688858074,
"avg_score": null,
"num_lines": null
} |
"""
This is a mod to make all the Twisted threads daemon threads
so that they do not block termination of the main process.
Even when threads are daemonised, when the reactor calls threadpool.stop() it
attempts to join() every thread and close it. If threads are blocked for
some reason this call will also block.
So we override stop() to set the workers count to zero. This bypasses some
Twisted code and the daemon threads simply die with the parent.
This module must be imported before any other code imports the thread pool.
"""
import twisted.python.threadpool
from twisted.python import threadable
threadable.init()
import Queue
import threading
from twisted import version as twistedVersion
from twisted.python import runtime
MIN_THREADS=5
MAX_THREADS=150
class DaemonThreadPool_2(twisted.python.threadpool.ThreadPool):
    """Daemonising replacement for Twisted 2.x's ThreadPool.

    Worker threads are created as daemon threads so they cannot keep the
    parent process alive, and stop() simply zeroes the worker count
    instead of join()ing workers that may be blocked.
    """

    def __init__(self, minthreads=5, maxthreads=20, name=None):
        """Create a new threadpool.
        @param minthreads: minimum number of threads in the pool
        @param maxthreads: maximum number of threads in the pool
        """
        assert minthreads >= 0, 'minimum is negative'
        assert minthreads <= maxthreads, 'minimum is greater than maximum'
        self.q = Queue.Queue(0)
        # Enforce the module-wide floor/ceiling regardless of the
        # caller's request ("big" thread pool).
        self.min = max(minthreads, MIN_THREADS)
        self.max = max(maxthreads, MAX_THREADS)
        self.name = name
        if runtime.platform.getType() == "java":
            self.waiters = twisted.python.threadpool.ThreadSafeList()
            self.threads = twisted.python.threadpool.ThreadSafeList()
            self.working = twisted.python.threadpool.ThreadSafeList()
        else:
            self.waiters = []
            self.threads = []
            self.working = []

    def startAWorker(self):
        self.workers += 1
        threadName = "PoolThread-%s-%s" % (self.name or id(self), self.workers)
        # Hand the worker a first job if one is already queued
        # (non-blocking get).
        try:
            job = self.q.get(0)
        except Queue.Empty:
            job = None
        worker = threading.Thread(target=self._worker, name=threadName,
                                  args=(job,))
        worker.setDaemon(True)
        self.threads.append(worker)
        worker.start()

    def stop(self):
        # Bypass ThreadPool.stop()'s join() of every worker; the daemon
        # threads die together with the parent process.
        self.workers = 0
class DaemonThreadPool_8(twisted.python.threadpool.ThreadPool):
    """Daemonising replacement for the ThreadPool of newer Twisted
    releases (workers are created through self.threadFactory).

    Workers run as daemon threads, and stop() zeroes the worker count
    instead of join()ing threads that may be blocked.
    """

    def __init__(self, minthreads=5, maxthreads=20, name=None):
        """Create a new threadpool.
        @param minthreads: minimum number of threads in the pool
        @param maxthreads: maximum number of threads in the pool
        """
        assert minthreads >= 0, 'minimum is negative'
        assert minthreads <= maxthreads, 'minimum is greater than maximum'
        self.q = Queue.Queue(0)
        # Enforce the module-wide floor/ceiling on pool size.
        self.min = max(minthreads, MIN_THREADS)
        self.max = max(maxthreads, MAX_THREADS)
        self.name = name
        if runtime.platform.getType() == "java":
            self.waiters = twisted.python.threadpool.ThreadSafeList()
            self.threads = twisted.python.threadpool.ThreadSafeList()
            self.working = twisted.python.threadpool.ThreadSafeList()
        else:
            self.waiters = []
            self.threads = []
            self.working = []

    def startAWorker(self):
        self.workers = self.workers + 1
        threadName = "PoolThread-%s-%s" % (self.name or id(self), self.workers)
        worker = self.threadFactory(target=self._worker, name=threadName)
        worker.setDaemon(True)
        self.threads.append(worker)
        worker.start()

    def stop(self):
        # Let daemon workers die with the parent instead of join()ing.
        self.workers = 0
if twistedVersion.major==2:
twisted.python.threadpool.ThreadPool = DaemonThreadPool_2
else:
twisted.python.threadpool.ThreadPool = DaemonThreadPool_8 | {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/utils/bigThreadPool.py",
"copies": "1",
"size": "3858",
"license": "bsd-3-clause",
"hash": 304122233344019650,
"line_mean": 35.7523809524,
"line_max": 86,
"alpha_frac": 0.6591498186,
"autogenerated": false,
"ratio": 3.940755873340143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008205106483884943,
"num_lines": 105
} |
"""An abstract widget, which can hold exactly one other widget."""
from BaseWidget import BaseWidget
class Bin (BaseWidget):
    """Bin () -> Bin

    A container widget class, which can hold one other widget.

    The Bin widget class is an abstract class, which can hold exactly
    one other widget. It is usable to serve as a container class, which
    can hold various types of widgets and allows inheritors to use their
    own look.

    The widget to hold can be set or removed using the 'child' attribute
    and set_child() method. The child will not be automatically modified
    by rebinding any of its attributes.

    bin.child = widget
    bin.set_child (widget)

    The 'padding' attribute and set_padding() method are used to place a
    certain amount of pixels between the child widget and the outer
    edges of the Bin.

    bin.padding = 10
    bin.set_padding (10)

    Binding the Bin to a new event manager using the 'manager' attribute
    or set_event_manager() method will cause the event manager of the
    child to be set to the same.

    Default action (invoked by activate()):
    None

    Mnemonic action (invoked by activate_mnemonic()):
    None

    Attributes:
    child   - The widget hold by the Bin.
    padding - Additional padding between the child and outer edges of
              the Bin. Default is 2.
    """
    def __init__ (self):
        BaseWidget.__init__ (self)
        self._child = None
        self._padding = 2

    def set_child (self, child=None):
        """B.set_child (...) -> None

        Sets (or resets) the child of the Bin.

        Creates a parent-child relationship from the Bin to the child by
        associating the Bin with the child and vice versa.

        Raises a TypeError, if the passed argument does not inherit
        from the BaseWidget class.
        Raises an Exception, if the passed argument is already
        attached to another parent.
        """
        # Validate the argument *before* acquiring the lock: previously
        # a raised TypeError/Exception occurred after self.lock(), so
        # unlock() was never reached and the Bin stayed locked forever.
        if child:
            if not isinstance (child, BaseWidget):
                raise TypeError ("child must inherit from BaseWidget")
            if child.parent:
                raise Exception ("child already has a parent")

        self.lock ()
        if child:
            child.parent = self
            # Propagate depth, event manager and sensitivity so the
            # child behaves consistently with its new container.
            if (child.depth != self.depth):
                child.set_depth (self.depth)
            if (self.manager != None) and not child.manager:
                child.set_event_manager (self.manager)
            # Set the states for the child.
            if not self.sensitive:
                child.set_sensitive (self.sensitive)

        # Detach a previously held child before storing the new one.
        if self._child:
            self._child.parent = None
        self._child = child
        self.dirty = True
        self.unlock ()

    def set_depth (self, depth):
        """B.set_depth (...) -> None

        Sets the depth of the Bin.

        Sets the depth of the Bin and its child to the given value.
        """
        self.lock ()
        BaseWidget.set_depth (self, depth)
        if self.child:
            self.child.set_depth (depth)
        self.unlock ()

    def set_indexable (self, indexable):
        """B.set_indexable (...) -> None

        Sets the indexable of the Bin.

        Adds the Bin to an IIndexable implementation and causes its child
        to be added to the same, too.
        """
        BaseWidget.set_indexable (self, indexable)
        if self.child:
            self.child.set_indexable (indexable)

    def set_event_manager (self, manager):
        """B.set_event_manager (...) -> None

        Sets the event manager of the Bin.

        Adds the Bin to an event manager and causes its child to be
        added to the same, too.
        """
        BaseWidget.set_event_manager (self, manager)
        if self.child:
            self.child.set_event_manager (manager)

    def set_sensitive (self, sensitive=True):
        """B.set_sensitive (...) -> None

        Sets the sensitivity of the Bin and its child.
        """
        self.lock ()
        BaseWidget.set_sensitive (self, sensitive)
        if self.child:
            self.child.set_sensitive (sensitive)
        self.unlock ()

    def set_padding (self, padding):
        """B.set_padding (...) -> None

        Sets the padding between the child and edges of the Bin.

        The padding value is the amount of pixels to place between the
        edges of the Bin and the contained child.

        Raises a TypeError, if the passed argument is not a positive
        integer.
        """
        # Deliberately a strict type() check (rejects bool subclasses).
        if (type (padding) != int) or (padding < 0):
            raise TypeError ("padding must be a positive integer")
        self._padding = padding
        self.dirty = True

    def destroy (self):
        """B.destroy () -> None

        Destroys the Bin and removes it from its event system.
        """
        if self.child:
            w = self.child
            w.parent = None
            self.child = None
            w.destroy ()
            del w
        BaseWidget.destroy (self)

    def update (self, **kwargs):
        """B.update (...) -> None

        Updates the Bin and refreshes its image and rect content.

        Updates the Bin and causes its parent to update itself on
        demand.
        """
        children = kwargs.get ("children", {})
        resize = kwargs.get ("resize", False)
        if self.locked:
            return
        # We have to check for possible size changes here!
        if resize:
            self.dirty = True
        else:
            BaseWidget.update (self, children=children, resize=resize)

    child = property (lambda self: self._child,
                      lambda self, var: self.set_child (var),
                      doc = "The widget hold by the Bin.")
    padding = property (lambda self: self._padding,
                        lambda self, var: self.set_padding (var),
                        doc = "Additional padding between child and borders.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Bin.py",
"copies": "1",
"size": "7346",
"license": "bsd-2-clause",
"hash": -1910476699253050000,
"line_mean": 33.6509433962,
"line_max": 78,
"alpha_frac": 0.6214266267,
"autogenerated": false,
"ratio": 4.492966360856269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5614392987556269,
"avg_score": null,
"num_lines": null
} |
# $Id: BMPRDR.py 1853 2006-02-01 17:16:28Z cpbotha $
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import wx
class BMPReader(ScriptedConfigModuleMixin, ModuleBase):
    # DeVIDE reader module: loads a numbered series of 2-D BMP slices
    # into a single 3-D vtkImageData volume using vtkBMPReader.

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        self._reader = vtk.vtkBMPReader()
        # Treat the file series as one 3-D dataset (slice per file).
        self._reader.SetFileDimensionality(3)
        self._reader.SetAllow8BitBMP(1)

        module_utils.setup_vtk_object_progress(self, self._reader,
                                               'Reading BMP images.')

        # Default configuration; mirrored to/from the reader by
        # logic_to_config() / config_to_logic() below.
        self._config.filePattern = '%03d.bmp'
        self._config.firstSlice = 0
        self._config.lastSlice = 1
        self._config.spacing = (1,1,1)
        self._config.fileLowerLeft = False

        # Declarative UI description consumed by ScriptedConfigModuleMixin.
        configList = [
            ('File pattern:', 'filePattern', 'base:str', 'filebrowser',
             'Filenames will be built with this. See module help.',
             {'fileMode' : wx.OPEN,
              'fileMask' :
              'BMP files (*.bmp)|*.bmp|All files (*.*)|*.*'}),
            ('First slice:', 'firstSlice', 'base:int', 'text',
             '%d will iterate starting at this number.'),
            ('Last slice:', 'lastSlice', 'base:int', 'text',
             '%d will iterate and stop at this number.'),
            ('Spacing:', 'spacing', 'tuple:float,3', 'text',
             'The 3-D spacing of the resultant dataset.'),
            ('Lower left:', 'fileLowerLeft', 'base:bool', 'checkbox',
             'Image origin at lower left? (vs. upper left)')]

        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'vtkBMPReader' : self._reader})

        self.sync_module_logic_with_config()

    def close(self):
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)

        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        ModuleBase.close(self)

        # get rid of our reference
        del self._reader

    def get_input_descriptions(self):
        # Source module: takes no inputs.
        return ()

    def set_input(self, idx, inputStream):
        # No inputs exist, so any attempt to connect one is an error.
        raise Exception

    def get_output_descriptions(self):
        return ('vtkImageData',)

    def get_output(self, idx):
        return self._reader.GetOutput()

    def logic_to_config(self):
        # Copy the reader's current state into self._config.
        #self._config.filePrefix = self._reader.GetFilePrefix()
        self._config.filePattern = self._reader.GetFilePattern()
        self._config.firstSlice = self._reader.GetFileNameSliceOffset()
        e = self._reader.GetDataExtent()
        # lastSlice is derived from the z-extent (e[4]..e[5]).
        self._config.lastSlice = self._config.firstSlice + e[5] - e[4]
        self._config.spacing = self._reader.GetDataSpacing()
        self._config.fileLowerLeft = bool(self._reader.GetFileLowerLeft())

    def config_to_logic(self):
        # Push self._config back into the VTK reader.
        #self._reader.SetFilePrefix(self._config.filePrefix)
        self._reader.SetFilePattern(self._config.filePattern)
        self._reader.SetFileNameSliceOffset(self._config.firstSlice)
        # Only the z-extent matters; x/y are taken from the images.
        self._reader.SetDataExtent(0,0,0,0,0,
                                   self._config.lastSlice -
                                   self._config.firstSlice)
        self._reader.SetDataSpacing(self._config.spacing)
        self._reader.SetFileLowerLeft(self._config.fileLowerLeft)

    def execute_module(self):
        self._reader.Update()
| {
"repo_name": "nagyistoce/devide",
"path": "modules/readers/BMPReader.py",
"copies": "7",
"size": "3607",
"license": "bsd-3-clause",
"hash": 4661768936220958000,
"line_mean": 34.7128712871,
"line_max": 74,
"alpha_frac": 0.5885777655,
"autogenerated": false,
"ratio": 3.9944629014396456,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013038085603261538,
"num_lines": 101
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):

    """Base class for directives (topic, sidebar) that build a titled,
    section-like body element outside the document's section hierarchy.

    Subclasses must set ``node_class`` to the node type to build.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option}
    has_content = True

    node_class = None
    """Node class to be used (must be set in subclasses)."""

    def run(self):
        # Only allowed where titles may occur, or directly inside a
        # sidebar (Sidebar.run() itself forbids sidebar nesting).
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # Sidebar uses this code.
        # NOTE: dict.has_key() was removed in Python 3; the `in`
        # operator is equivalent and matches the other copies of this
        # module.
        if 'subtitle' in self.options:
            textnodes, more_messages = self.state.inline_text(
                self.options['subtitle'], self.lineno)
            titles.append(nodes.subtitle(self.options['subtitle'], '',
                                         *textnodes))
            messages.extend(more_messages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        if text:
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):
    """Directive building a titled topic element; see BasePseudoSection."""
    node_class = nodes.topic
class Sidebar(BasePseudoSection):

    """Directive building a sidebar: a title, an optional subtitle and
    parsed body content, rendered apart from the main text flow."""

    node_class = nodes.sidebar

    option_spec = BasePseudoSection.option_spec.copy()
    option_spec['subtitle'] = directives.unchanged_required

    def run(self):
        # Sidebars must not nest inside other sidebars.
        if not isinstance(self.state_machine.node, nodes.sidebar):
            return BasePseudoSection.run(self)
        raise self.error('The "%s" directive may not be used within a '
                         'sidebar element.' % self.name)
class LineBlock(Directive):

    """Directive building a line block (a sequence of ``line`` nodes)
    from the directive content, preserving relative indentation."""

    option_spec = {'class': directives.class_option}
    has_content = True

    def run(self):
        self.assert_has_content()
        block = nodes.line_block(classes=self.options.get('class', []))
        result = [block]
        for raw_line in self.content:
            stripped = raw_line.strip()
            inline_nodes, msgs = self.state.inline_text(
                stripped, self.lineno + self.content_offset)
            line = nodes.line(raw_line, '', *inline_nodes)
            if stripped:
                # Remember the leading-whitespace depth of this line.
                line.indent = len(raw_line) - len(raw_line.lstrip())
            block += line
            result.extend(msgs)
            self.content_offset += 1
        self.state.nest_line_block_lines(block)
        return result
class ParsedLiteral(Directive):

    """Directive building a literal block whose content is nevertheless
    parsed for inline markup."""

    option_spec = {'class': directives.class_option}
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        raw = '\n'.join(self.content)
        inline_nodes, messages = self.state.inline_text(raw, self.lineno)
        literal = nodes.literal_block(raw, '', *inline_nodes, **self.options)
        literal.line = self.content_offset + 1
        return [literal] + messages
class Rubric(Directive):

    """Directive building a rubric: an informal heading that does not
    enter the document's section structure."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option}

    def run(self):
        set_classes(self.options)
        raw_title = self.arguments[0]
        inline_nodes, messages = self.state.inline_text(raw_title,
                                                        self.lineno)
        rubric = nodes.rubric(raw_title, '', *inline_nodes, **self.options)
        return [rubric] + messages
class BlockQuote(Directive):

    """Base for directives that wrap their content in a block quote and
    tag it with the class names listed in the ``classes`` attribute."""

    has_content = True
    classes = []

    def run(self):
        self.assert_has_content()
        elements = self.state.block_quote(self.content, self.content_offset)
        quotes = (e for e in elements
                  if isinstance(e, nodes.block_quote))
        for quote in quotes:
            quote['classes'] += self.classes
        return elements
class Epigraph(BlockQuote):
    """Block quote rendered as an epigraph (class 'epigraph')."""
    classes = ['epigraph']
class Highlights(BlockQuote):
    """Block quote summarizing a document's main points (class 'highlights')."""
    classes = ['highlights']
class PullQuote(BlockQuote):
    """Block quote pulled out for emphasis (class 'pull-quote')."""
    classes = ['pull-quote']
class Compound(Directive):

    """Directive building a compound paragraph: several body elements
    logically treated as a single paragraph."""

    option_spec = {'class': directives.class_option}
    has_content = True

    def run(self):
        self.assert_has_content()
        raw = '\n'.join(self.content)
        compound_node = nodes.compound(raw)
        compound_node['classes'] += self.options.get('class', [])
        self.state.nested_parse(self.content, self.content_offset,
                                compound_node)
        return [compound_node]
class Container(Directive):

    """Directive building a generic container element with an optional
    whitespace-separated list of class names as its argument."""

    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    has_content = True

    def run(self):
        self.assert_has_content()
        raw = '\n'.join(self.content)
        try:
            classes = (directives.class_option(self.arguments[0])
                       if self.arguments else [])
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))
        container_node = nodes.container(raw)
        container_node['classes'].extend(classes)
        self.state.nested_parse(self.content, self.content_offset,
                                container_node)
        return [container_node]
| {
"repo_name": "PatrickKennedy/Sybil",
"path": "docutils/parsers/rst/directives/body.py",
"copies": "2",
"size": "5778",
"license": "bsd-2-clause",
"hash": 8997563756861494000,
"line_mean": 29.09375,
"line_max": 78,
"alpha_frac": 0.6161301488,
"autogenerated": false,
"ratio": 3.9548254620123204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.557095561081232,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):

    """Shared implementation for Topic/Sidebar-style directives: one
    required title argument, an optional subtitle, parsed body content.

    The node type to build is supplied via ``node_class`` in subclasses.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option}
    has_content = True

    node_class = None

    def run(self):
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # The Sidebar subclass adds a 'subtitle' option to this spec.
        subtitle = self.options.get('subtitle')
        if subtitle is not None:
            subnodes, submessages = self.state.inline_text(subtitle,
                                                           self.lineno)
            titles.append(nodes.subtitle(subtitle, '', *subnodes))
            messages.extend(submessages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        if text:
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):
node_class = nodes.topic
class Sidebar(BasePseudoSection):
node_class = nodes.sidebar
option_spec = BasePseudoSection.option_spec.copy()
option_spec['subtitle'] = directives.unchanged_required
def run(self):
if isinstance(self.state_machine.node, nodes.sidebar):
raise self.error('The "%s" directive may not be used within a '
'sidebar element.' % self.name)
return BasePseudoSection.run(self)
class LineBlock(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
block = nodes.line_block(classes=self.options.get('class', []))
node_list = [block]
for line_text in self.content:
text_nodes, messages = self.state.inline_text(
line_text.strip(), self.lineno + self.content_offset)
line = nodes.line(line_text, '', *text_nodes)
if line_text.strip():
line.indent = len(line_text) - len(line_text.lstrip())
block += line
node_list.extend(messages)
self.content_offset += 1
self.state.nest_line_block_lines(block)
return node_list
class ParsedLiteral(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
text_nodes, messages = self.state.inline_text(text, self.lineno)
node = nodes.literal_block(text, '', *text_nodes, **self.options)
node.line = self.content_offset + 1
return [node] + messages
class Rubric(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
def run(self):
set_classes(self.options)
rubric_text = self.arguments[0]
textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
return [rubric] + messages
class BlockQuote(Directive):
has_content = True
classes = []
def run(self):
self.assert_has_content()
elements = self.state.block_quote(self.content, self.content_offset)
for element in elements:
if isinstance(element, nodes.block_quote):
element['classes'] += self.classes
return elements
class Epigraph(BlockQuote):
classes = ['epigraph']
class Highlights(BlockQuote):
classes = ['highlights']
class PullQuote(BlockQuote):
classes = ['pull-quote']
class Compound(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
node = nodes.compound(text)
node['classes'] += self.options.get('class', [])
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Container(Directive):
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = nodes.container(text)
node['classes'].extend(classes)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
| {
"repo_name": "MER-GROUP/intellij-community",
"path": "python/helpers/py2only/docutils/parsers/rst/directives/body.py",
"copies": "5",
"size": "5759",
"license": "apache-2.0",
"hash": -7099028375942207000,
"line_mean": 29.3105263158,
"line_max": 78,
"alpha_frac": 0.6157318979,
"autogenerated": false,
"ratio": 3.9580756013745706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7073807499274571,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):

    """Shared implementation for Topic/Sidebar-style directives: one
    required title argument, an optional subtitle, parsed body content.

    The node type to build is supplied via ``node_class`` in subclasses.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option}
    has_content = True

    node_class = None

    def run(self):
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # The Sidebar subclass adds a 'subtitle' option to this spec.
        subtitle = self.options.get('subtitle')
        if subtitle is not None:
            subnodes, submessages = self.state.inline_text(subtitle,
                                                           self.lineno)
            titles.append(nodes.subtitle(subtitle, '', *subnodes))
            messages.extend(submessages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        if text:
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):
node_class = nodes.topic
class Sidebar(BasePseudoSection):
node_class = nodes.sidebar
option_spec = BasePseudoSection.option_spec.copy()
option_spec['subtitle'] = directives.unchanged_required
def run(self):
if isinstance(self.state_machine.node, nodes.sidebar):
raise self.error('The "%s" directive may not be used within a '
'sidebar element.' % self.name)
return BasePseudoSection.run(self)
class LineBlock(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
block = nodes.line_block(classes=self.options.get('class', []))
node_list = [block]
for line_text in self.content:
text_nodes, messages = self.state.inline_text(
line_text.strip(), self.lineno + self.content_offset)
line = nodes.line(line_text, '', *text_nodes)
if line_text.strip():
line.indent = len(line_text) - len(line_text.lstrip())
block += line
node_list.extend(messages)
self.content_offset += 1
self.state.nest_line_block_lines(block)
return node_list
class ParsedLiteral(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
text_nodes, messages = self.state.inline_text(text, self.lineno)
node = nodes.literal_block(text, '', *text_nodes, **self.options)
node.line = self.content_offset + 1
return [node] + messages
class Rubric(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
def run(self):
set_classes(self.options)
rubric_text = self.arguments[0]
textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
return [rubric] + messages
class BlockQuote(Directive):
has_content = True
classes = []
def run(self):
self.assert_has_content()
elements = self.state.block_quote(self.content, self.content_offset)
for element in elements:
if isinstance(element, nodes.block_quote):
element['classes'] += self.classes
return elements
class Epigraph(BlockQuote):
classes = ['epigraph']
class Highlights(BlockQuote):
classes = ['highlights']
class PullQuote(BlockQuote):
classes = ['pull-quote']
class Compound(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
node = nodes.compound(text)
node['classes'] += self.options.get('class', [])
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Container(Directive):
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = nodes.container(text)
node['classes'].extend(classes)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
| {
"repo_name": "orekyuu/intellij-community",
"path": "python/helpers/docutils/parsers/rst/directives/body.py",
"copies": "51",
"size": "5771",
"license": "apache-2.0",
"hash": 5403713989357063000,
"line_mean": 29.0572916667,
"line_max": 78,
"alpha_frac": 0.6160110899,
"autogenerated": false,
"ratio": 3.958161865569273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 192
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):

    """Shared implementation for Topic/Sidebar-style directives: one
    required title argument, an optional subtitle, parsed body content.

    The node type to build is supplied via ``node_class`` in subclasses.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option}
    has_content = True

    node_class = None

    def run(self):
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # The Sidebar subclass adds a 'subtitle' option to this spec.
        subtitle = self.options.get('subtitle')
        if subtitle is not None:
            subnodes, submessages = self.state.inline_text(subtitle,
                                                           self.lineno)
            titles.append(nodes.subtitle(subtitle, '', *subnodes))
            messages.extend(submessages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        if text:
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):
node_class = nodes.topic
class Sidebar(BasePseudoSection):
node_class = nodes.sidebar
option_spec = BasePseudoSection.option_spec.copy()
option_spec['subtitle'] = directives.unchanged_required
def run(self):
if isinstance(self.state_machine.node, nodes.sidebar):
raise self.error('The "%s" directive may not be used within a '
'sidebar element.' % self.name)
return BasePseudoSection.run(self)
class LineBlock(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
block = nodes.line_block(classes=self.options.get('class', []))
node_list = [block]
for line_text in self.content:
text_nodes, messages = self.state.inline_text(
line_text.strip(), self.lineno + self.content_offset)
line = nodes.line(line_text, '', *text_nodes)
if line_text.strip():
line.indent = len(line_text) - len(line_text.lstrip())
block += line
node_list.extend(messages)
self.content_offset += 1
self.state.nest_line_block_lines(block)
return node_list
class ParsedLiteral(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
text_nodes, messages = self.state.inline_text(text, self.lineno)
node = nodes.literal_block(text, '', *text_nodes, **self.options)
node.line = self.content_offset + 1
return [node] + messages
class Rubric(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
def run(self):
set_classes(self.options)
rubric_text = self.arguments[0]
textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
return [rubric] + messages
class BlockQuote(Directive):
has_content = True
classes = []
def run(self):
self.assert_has_content()
elements = self.state.block_quote(self.content, self.content_offset)
for element in elements:
if isinstance(element, nodes.block_quote):
element['classes'] += self.classes
return elements
class Epigraph(BlockQuote):
classes = ['epigraph']
class Highlights(BlockQuote):
classes = ['highlights']
class PullQuote(BlockQuote):
classes = ['pull-quote']
class Compound(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
node = nodes.compound(text)
node['classes'] += self.options.get('class', [])
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Container(Directive):
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = nodes.container(text)
node['classes'].extend(classes)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/parsers/rst/directives/body.py",
"copies": "2",
"size": "5963",
"license": "bsd-3-clause",
"hash": -932805525182199800,
"line_mean": 29.0572916667,
"line_max": 78,
"alpha_frac": 0.5961764213,
"autogenerated": false,
"ratio": 4.059223961878829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655400383178829,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):

    # Base for titled pseudo-section directives (topic, sidebar).
    # This variant also supports the standard 'name' option; the built
    # node is registered via self.add_name() below.

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    node_class = None
    """Node class to be used (must be set in subclasses)."""

    def run(self):
        # Usable only where titles are allowed, or directly inside a
        # sidebar (sidebar nesting is rejected by Sidebar.run()).
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # Sidebar uses this code.
        if 'subtitle' in self.options:
            textnodes, more_messages = self.state.inline_text(
                self.options['subtitle'], self.lineno)
            titles.append(nodes.subtitle(self.options['subtitle'], '',
                                         *textnodes))
            messages.extend(more_messages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        # Register the optional 'name' so the node can be referenced.
        self.add_name(node)
        if text:
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):

    """Directive for a topic element: a titled body outside the section flow."""

    node_class = nodes.topic
class Sidebar(BasePseudoSection):

    """Directive for a sidebar; extends BasePseudoSection with 'subtitle'."""

    node_class = nodes.sidebar

    option_spec = BasePseudoSection.option_spec.copy()
    option_spec['subtitle'] = directives.unchanged_required

    def run(self):
        # Sidebars may not nest; delegate to the base class otherwise.
        if not isinstance(self.state_machine.node, nodes.sidebar):
            return BasePseudoSection.run(self)
        raise self.error('The "%s" directive may not be used within a '
                         'sidebar element.' % self.name)
class LineBlock(Directive):

    """Directive for a line block: preserves line breaks and indentation."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        block = nodes.line_block(classes=self.options.get('class', []))
        self.add_name(block)
        node_list = [block]
        for line_text in self.content:
            text_nodes, messages = self.state.inline_text(
                line_text.strip(), self.lineno + self.content_offset)
            line = nodes.line(line_text, '', *text_nodes)
            if line_text.strip():
                # Record leading whitespace so nesting can be reconstructed.
                line.indent = len(line_text) - len(line_text.lstrip())
            block += line
            node_list.extend(messages)
            self.content_offset += 1
        # Regroup lines into nested line_blocks according to indentation.
        self.state.nest_line_block_lines(block)
        return node_list
class ParsedLiteral(Directive):

    """Literal block whose content is parsed for inline markup."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        raw_text = '\n'.join(self.content)
        children, messages = self.state.inline_text(raw_text, self.lineno)
        literal = nodes.literal_block(raw_text, '', *children, **self.options)
        literal.line = self.content_offset + 1
        self.add_name(literal)
        return [literal] + messages
class MathBlock(Directive):

    """Directive for displayed math; blank lines separate math blocks."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    ## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'?
    # 'nowrap': directives.flag,
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        # join lines, separate blocks
        content = '\n'.join(self.content).split('\n\n')
        _nodes = []
        for block in content:
            if not block:
                continue
            node = nodes.math_block(self.block_text, block, **self.options)
            node.line = self.content_offset + 1
            self.add_name(node)
            _nodes.append(node)
        return _nodes
class Rubric(Directive):

    """Directive for a rubric: an informal heading outside the section flow."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}

    def run(self):
        set_classes(self.options)
        rubric_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
        rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
        self.add_name(rubric)
        return [rubric] + messages
class BlockQuote(Directive):

    """Base for block-quote directives; subclasses set `classes`."""

    has_content = True
    classes = []

    def run(self):
        self.assert_has_content()
        produced = self.state.block_quote(self.content, self.content_offset)
        # Tag only the actual block_quote nodes; system messages pass through.
        for quote in (e for e in produced
                      if isinstance(e, nodes.block_quote)):
            quote['classes'] += self.classes
        return produced
class Epigraph(BlockQuote):

    """Block quote rendered as an epigraph (class ``epigraph``)."""

    classes = ['epigraph']

class Highlights(BlockQuote):

    """Block quote summarizing a document's highlights."""

    classes = ['highlights']

class PullQuote(BlockQuote):

    """Block quote pulled out of the running text for emphasis."""

    classes = ['pull-quote']
class Compound(Directive):

    """Group multiple body elements into one logical (compound) paragraph."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        node = nodes.compound('\n'.join(self.content))
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        # Parse the directive content as body elements inside the node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Container(Directive):

    """General-purpose container; the optional argument lists class names."""

    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        if self.arguments:
            try:
                classes = directives.class_option(self.arguments[0])
            except ValueError:
                raise self.error(
                    'Invalid class attribute value for "%s" directive: "%s".'
                    % (self.name, self.arguments[0]))
        else:
            classes = []
        node = nodes.container(text)
        node['classes'].extend(classes)
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
| {
"repo_name": "paaschpa/badcomputering",
"path": "docutils/parsers/rst/directives/body.py",
"copies": "6",
"size": "6978",
"license": "bsd-3-clause",
"hash": -2293969529767857700,
"line_mean": 29.7400881057,
"line_max": 78,
"alpha_frac": 0.599885354,
"autogenerated": false,
"ratio": 4.001146788990826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7601032142990827,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):

    """Base for directives that create a titled, section-like body element.

    Subclasses must set `node_class` to the docutils node type to generate.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    node_class = None
    """Node class to be used (must be set in subclasses)."""

    def run(self):
        # Only allowed where a section title could appear (or in a sidebar).
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # Sidebar uses this code.
        if 'subtitle' in self.options:
            textnodes, more_messages = self.state.inline_text(
                self.options['subtitle'], self.lineno)
            titles.append(nodes.subtitle(self.options['subtitle'], '',
                                         *textnodes))
            messages.extend(more_messages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        if text:
            # Parse the directive content as body elements inside the node.
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):

    """Directive for a topic element: a titled body outside the section flow."""

    node_class = nodes.topic
class Sidebar(BasePseudoSection):

    """Directive for a sidebar; adds an optional 'subtitle' option."""

    node_class = nodes.sidebar

    option_spec = BasePseudoSection.option_spec.copy()
    option_spec['subtitle'] = directives.unchanged_required

    def run(self):
        # Sidebars may not nest.
        if isinstance(self.state_machine.node, nodes.sidebar):
            raise self.error('The "%s" directive may not be used within a '
                             'sidebar element.' % self.name)
        return BasePseudoSection.run(self)
class LineBlock(Directive):

    """Directive for a line block: preserves line breaks and indentation."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        block = nodes.line_block(classes=self.options.get('class', []))
        self.add_name(block)
        node_list = [block]
        for line_text in self.content:
            text_nodes, messages = self.state.inline_text(
                line_text.strip(), self.lineno + self.content_offset)
            line = nodes.line(line_text, '', *text_nodes)
            if line_text.strip():
                # Record leading whitespace so nesting can be reconstructed.
                line.indent = len(line_text) - len(line_text.lstrip())
            block += line
            node_list.extend(messages)
            self.content_offset += 1
        # Regroup lines into nested line_blocks according to indentation.
        self.state.nest_line_block_lines(block)
        return node_list
class ParsedLiteral(Directive):

    """Literal block whose content is parsed for inline markup."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        text_nodes, messages = self.state.inline_text(text, self.lineno)
        node = nodes.literal_block(text, '', *text_nodes, **self.options)
        node.line = self.content_offset + 1
        self.add_name(node)
        return [node] + messages
class MathBlock(Directive):

    """Directive for displayed math; blank lines separate math blocks."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    ## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'?
    # 'nowrap': directives.flag,
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        # join lines, separate blocks
        content = '\n'.join(self.content).split('\n\n')
        _nodes = []
        for block in content:
            if not block:
                continue
            node = nodes.math_block(self.block_text, block, **self.options)
            node.line = self.content_offset + 1
            self.add_name(node)
            _nodes.append(node)
        return _nodes
class Rubric(Directive):

    """Directive for a rubric: an informal heading outside the section flow."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}

    def run(self):
        set_classes(self.options)
        rubric_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
        rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
        self.add_name(rubric)
        return [rubric] + messages
class BlockQuote(Directive):

    """Base for block-quote directives; subclasses set `classes`."""

    has_content = True
    classes = []

    def run(self):
        self.assert_has_content()
        elements = self.state.block_quote(self.content, self.content_offset)
        # Tag only the actual block_quote nodes; system messages pass through.
        for element in elements:
            if isinstance(element, nodes.block_quote):
                element['classes'] += self.classes
        return elements
class Epigraph(BlockQuote):

    """Block quote rendered as an epigraph (class ``epigraph``)."""

    classes = ['epigraph']

class Highlights(BlockQuote):

    """Block quote summarizing a document's highlights."""

    classes = ['highlights']

class PullQuote(BlockQuote):

    """Block quote pulled out of the running text for emphasis."""

    classes = ['pull-quote']
class Compound(Directive):

    """Group multiple body elements into one logical (compound) paragraph."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        node = nodes.compound(text)
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        # Parse the directive content as body elements inside the node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Container(Directive):

    """General-purpose container; the optional argument lists class names."""

    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        try:
            if self.arguments:
                # class_option raises ValueError on invalid class names.
                classes = directives.class_option(self.arguments[0])
            else:
                classes = []
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))
        node = nodes.container(text)
        node['classes'].extend(classes)
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
| {
"repo_name": "ajaxsys/dict-admin",
"path": "docutils/parsers/rst/directives/body.py",
"copies": "2",
"size": "7205",
"license": "bsd-3-clause",
"hash": -4755607485531948000,
"line_mean": 29.7400881057,
"line_max": 78,
"alpha_frac": 0.5809854268,
"autogenerated": false,
"ratio": 4.086783891094725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5667769317894725,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
from docutils.utils.code_analyzer import Lexer, LexerError, NumberLines
class BasePseudoSection(Directive):

    """Base for directives that create a titled, section-like body element.

    Subclasses must set `node_class` to the docutils node type to generate.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    node_class = None
    """Node class to be used (must be set in subclasses)."""

    def run(self):
        # Only allowed where a section title could appear (or in a sidebar).
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # Sidebar uses this code.
        if 'subtitle' in self.options:
            textnodes, more_messages = self.state.inline_text(
                self.options['subtitle'], self.lineno)
            titles.append(nodes.subtitle(self.options['subtitle'], '',
                                         *textnodes))
            messages.extend(more_messages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        if text:
            # Parse the directive content as body elements inside the node.
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):

    """Directive for a topic element: a titled body outside the section flow."""

    node_class = nodes.topic
class Sidebar(BasePseudoSection):

    """Directive for a sidebar; adds an optional 'subtitle' option."""

    node_class = nodes.sidebar

    option_spec = BasePseudoSection.option_spec.copy()
    option_spec['subtitle'] = directives.unchanged_required

    def run(self):
        # Sidebars may not nest.
        if isinstance(self.state_machine.node, nodes.sidebar):
            raise self.error('The "%s" directive may not be used within a '
                             'sidebar element.' % self.name)
        return BasePseudoSection.run(self)
class LineBlock(Directive):

    """Directive for a line block: preserves line breaks and indentation."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        block = nodes.line_block(classes=self.options.get('class', []))
        self.add_name(block)
        node_list = [block]
        for line_text in self.content:
            text_nodes, messages = self.state.inline_text(
                line_text.strip(), self.lineno + self.content_offset)
            line = nodes.line(line_text, '', *text_nodes)
            if line_text.strip():
                # Record leading whitespace so nesting can be reconstructed.
                line.indent = len(line_text) - len(line_text.lstrip())
            block += line
            node_list.extend(messages)
            self.content_offset += 1
        # Regroup lines into nested line_blocks according to indentation.
        self.state.nest_line_block_lines(block)
        return node_list
class ParsedLiteral(Directive):

    """Literal block whose content is parsed for inline markup."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        text_nodes, messages = self.state.inline_text(text, self.lineno)
        node = nodes.literal_block(text, '', *text_nodes, **self.options)
        node.line = self.content_offset + 1
        self.add_name(node)
        return [node] + messages
class CodeBlock(Directive):

    """Parse and mark up content of a code block.

    Configuration setting: syntax_highlight
    Highlight Code content with Pygments?
    Possible values: ('long', 'short', 'none')
    """

    optional_arguments = 1
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged,
                   'number-lines': directives.unchanged # integer or None
                  }
    has_content = True

    def run(self):
        self.assert_has_content()
        # The optional argument names the source language for highlighting.
        if self.arguments:
            language = self.arguments[0]
        else:
            language = ''
        set_classes(self.options)
        classes = ['code']
        if language:
            classes.append(language)
        if 'classes' in self.options:
            classes.extend(self.options['classes'])

        # set up lexical analyzer
        try:
            tokens = Lexer('\n'.join(self.content), language,
                           self.state.document.settings.syntax_highlight)
        except LexerError as error:
            raise self.warning(error)

        if 'number-lines' in self.options:
            # optional argument `startline`, defaults to 1
            try:
                startline = int(self.options['number-lines'] or 1)
            except ValueError:
                raise self.error(':number-lines: with non-integer start value')
            endline = startline + len(self.content)
            # add linenumber filter:
            tokens = NumberLines(tokens, startline, endline)

        node = nodes.literal_block('\n'.join(self.content), classes=classes)
        self.add_name(node)
        # if called from "include", set the source
        if 'source' in self.options:
            node.attributes['source'] = self.options['source']
        # analyze content and add nodes for every token
        # NOTE: the loop variable deliberately reuses (shadows) `classes`;
        # the literal_block above has already consumed the outer value.
        for classes, value in tokens:
            # print (classes, value)
            if classes:
                node += nodes.inline(value, value, classes=classes)
            else:
                # insert as Text to decrease the verbosity of the output
                node += nodes.Text(value, value)
        return [node]
class MathBlock(Directive):

    """Directive for displayed math; blank lines separate math blocks."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    ## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'?
    # 'nowrap': directives.flag,
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        # join lines, separate blocks
        content = '\n'.join(self.content).split('\n\n')
        _nodes = []
        for block in content:
            if not block:
                continue
            node = nodes.math_block(self.block_text, block, **self.options)
            node.line = self.content_offset + 1
            self.add_name(node)
            _nodes.append(node)
        return _nodes
class Rubric(Directive):

    """Directive for a rubric: an informal heading outside the section flow."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}

    def run(self):
        set_classes(self.options)
        rubric_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
        rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
        self.add_name(rubric)
        return [rubric] + messages
class BlockQuote(Directive):

    """Base for block-quote directives; subclasses set `classes`."""

    has_content = True
    classes = []

    def run(self):
        self.assert_has_content()
        elements = self.state.block_quote(self.content, self.content_offset)
        # Tag only the actual block_quote nodes; system messages pass through.
        for element in elements:
            if isinstance(element, nodes.block_quote):
                element['classes'] += self.classes
        return elements
class Epigraph(BlockQuote):

    """Block quote rendered as an epigraph (class ``epigraph``)."""

    classes = ['epigraph']

class Highlights(BlockQuote):

    """Block quote summarizing a document's highlights."""

    classes = ['highlights']

class PullQuote(BlockQuote):

    """Block quote pulled out of the running text for emphasis."""

    classes = ['pull-quote']
class Compound(Directive):

    """Group multiple body elements into one logical (compound) paragraph."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        node = nodes.compound(text)
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        # Parse the directive content as body elements inside the node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Container(Directive):

    """General-purpose container; the optional argument lists class names."""

    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        try:
            if self.arguments:
                # class_option raises ValueError on invalid class names.
                classes = directives.class_option(self.arguments[0])
            else:
                classes = []
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))
        node = nodes.container(text)
        node['classes'].extend(classes)
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
| {
"repo_name": "mdaniel/intellij-community",
"path": "python/helpers/py3only/docutils/parsers/rst/directives/body.py",
"copies": "44",
"size": "9232",
"license": "apache-2.0",
"hash": 4638055173871111000,
"line_mean": 31.1672473868,
"line_max": 79,
"alpha_frac": 0.593262565,
"autogenerated": false,
"ratio": 4.132497761862131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
from docutils.utils.code_analyzer import Lexer, LexerError, NumberLines
class BasePseudoSection(Directive):

    """Base for directives that create a titled, section-like body element.

    Subclasses must set `node_class` to the docutils node type to generate.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    node_class = None
    """Node class to be used (must be set in subclasses)."""

    def run(self):
        # Only allowed where a section title could appear (or in a sidebar).
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        self.assert_has_content()
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text, self.lineno)
        titles = [nodes.title(title_text, '', *textnodes)]
        # Sidebar uses this code.
        if 'subtitle' in self.options:
            textnodes, more_messages = self.state.inline_text(
                self.options['subtitle'], self.lineno)
            titles.append(nodes.subtitle(self.options['subtitle'], '',
                                         *textnodes))
            messages.extend(more_messages)
        text = '\n'.join(self.content)
        node = self.node_class(text, *(titles + messages))
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        if text:
            # Parse the directive content as body elements inside the node.
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Topic(BasePseudoSection):

    """Directive for a topic element: a titled body outside the section flow."""

    node_class = nodes.topic
class Sidebar(BasePseudoSection):

    """Directive for a sidebar; adds an optional 'subtitle' option."""

    node_class = nodes.sidebar

    option_spec = BasePseudoSection.option_spec.copy()
    option_spec['subtitle'] = directives.unchanged_required

    def run(self):
        # Sidebars may not nest.
        if isinstance(self.state_machine.node, nodes.sidebar):
            raise self.error('The "%s" directive may not be used within a '
                             'sidebar element.' % self.name)
        return BasePseudoSection.run(self)
class LineBlock(Directive):

    """Directive for a line block: preserves line breaks and indentation."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        block = nodes.line_block(classes=self.options.get('class', []))
        self.add_name(block)
        node_list = [block]
        for line_text in self.content:
            text_nodes, messages = self.state.inline_text(
                line_text.strip(), self.lineno + self.content_offset)
            line = nodes.line(line_text, '', *text_nodes)
            if line_text.strip():
                # Record leading whitespace so nesting can be reconstructed.
                line.indent = len(line_text) - len(line_text.lstrip())
            block += line
            node_list.extend(messages)
            self.content_offset += 1
        # Regroup lines into nested line_blocks according to indentation.
        self.state.nest_line_block_lines(block)
        return node_list
class ParsedLiteral(Directive):

    """Literal block whose content is parsed for inline markup."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        text_nodes, messages = self.state.inline_text(text, self.lineno)
        node = nodes.literal_block(text, '', *text_nodes, **self.options)
        node.line = self.content_offset + 1
        self.add_name(node)
        return [node] + messages
class CodeBlock(Directive):

    """Parse and mark up content of a code block.

    Configuration setting: syntax_highlight
    Highlight Code content with Pygments?
    Possible values: ('long', 'short', 'none')
    """

    # NOTE: this copy is Python-2-only code ('except E, err' syntax below).
    optional_arguments = 1
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged,
                   'number-lines': directives.unchanged # integer or None
                  }
    has_content = True

    def run(self):
        self.assert_has_content()
        # The optional argument names the source language for highlighting.
        if self.arguments:
            language = self.arguments[0]
        else:
            language = ''
        set_classes(self.options)
        classes = ['code']
        if language:
            classes.append(language)
        if 'classes' in self.options:
            classes.extend(self.options['classes'])

        # set up lexical analyzer
        try:
            tokens = Lexer(u'\n'.join(self.content), language,
                           self.state.document.settings.syntax_highlight)
        except LexerError, error:
            raise self.warning(error)

        if 'number-lines' in self.options:
            # optional argument `startline`, defaults to 1
            try:
                startline = int(self.options['number-lines'] or 1)
            except ValueError:
                raise self.error(':number-lines: with non-integer start value')
            endline = startline + len(self.content)
            # add linenumber filter:
            tokens = NumberLines(tokens, startline, endline)

        node = nodes.literal_block('\n'.join(self.content), classes=classes)
        self.add_name(node)
        # if called from "include", set the source
        if 'source' in self.options:
            node.attributes['source'] = self.options['source']
        # analyze content and add nodes for every token
        # NOTE: the loop variable deliberately reuses (shadows) `classes`;
        # the literal_block above has already consumed the outer value.
        for classes, value in tokens:
            # print (classes, value)
            if classes:
                node += nodes.inline(value, value, classes=classes)
            else:
                # insert as Text to decrease the verbosity of the output
                node += nodes.Text(value, value)
        return [node]
class MathBlock(Directive):

    """Directive for displayed math; blank lines separate math blocks."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    ## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'?
    # 'nowrap': directives.flag,
    has_content = True

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        # join lines, separate blocks
        content = '\n'.join(self.content).split('\n\n')
        _nodes = []
        for block in content:
            if not block:
                continue
            node = nodes.math_block(self.block_text, block, **self.options)
            node.line = self.content_offset + 1
            self.add_name(node)
            _nodes.append(node)
        return _nodes
class Rubric(Directive):

    """Directive for a rubric: an informal heading outside the section flow."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}

    def run(self):
        set_classes(self.options)
        rubric_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
        rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
        self.add_name(rubric)
        return [rubric] + messages
class BlockQuote(Directive):

    """Base for block-quote directives; subclasses set `classes`."""

    has_content = True
    classes = []

    def run(self):
        self.assert_has_content()
        elements = self.state.block_quote(self.content, self.content_offset)
        # Tag only the actual block_quote nodes; system messages pass through.
        for element in elements:
            if isinstance(element, nodes.block_quote):
                element['classes'] += self.classes
        return elements
class Epigraph(BlockQuote):

    """Block quote rendered as an epigraph (class ``epigraph``)."""

    classes = ['epigraph']

class Highlights(BlockQuote):

    """Block quote summarizing a document's highlights."""

    classes = ['highlights']

class PullQuote(BlockQuote):

    """Block quote pulled out of the running text for emphasis."""

    classes = ['pull-quote']
class Compound(Directive):

    """Group multiple body elements into one logical (compound) paragraph."""

    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        node = nodes.compound(text)
        node['classes'] += self.options.get('class', [])
        self.add_name(node)
        # Parse the directive content as body elements inside the node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class Container(Directive):

    """General-purpose container; the optional argument lists class names."""

    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'name': directives.unchanged}
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)
        try:
            if self.arguments:
                # class_option raises ValueError on invalid class names.
                classes = directives.class_option(self.arguments[0])
            else:
                classes = []
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))
        node = nodes.container(text)
        node['classes'].extend(classes)
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
| {
"repo_name": "xfournet/intellij-community",
"path": "python/helpers/py2only/docutils/parsers/rst/directives/body.py",
"copies": "128",
"size": "9243",
"license": "apache-2.0",
"hash": 368462770430755200,
"line_mean": 30.9826989619,
"line_max": 79,
"alpha_frac": 0.5934220491,
"autogenerated": false,
"ratio": 4.128182224207236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006022624511035185,
"num_lines": 289
} |
# Boost library support module.
#
# This module allows using the Boost library from boost-build projects.
# The location of a boost source tree or the path to a pre-built
# version of the library can be configured from either site-config.jam
# or user-config.jam. If no location is configured the module looks for
# a BOOST_ROOT environment variable, which should point to a boost source
# tree. As a last resort it tries to use pre-built libraries from the standard
# search path of the compiler.
#
# If the location to a source tree is known, the module can be configured
# from the *-config.jam files:
#
# using boost : 1.35 : <root>/path-to-boost-root ;
#
# If the location to a pre-built version is known:
#
# using boost : 1.34
# : <include>/usr/local/include/boost_1_34
# <library>/usr/local/lib
# ;
#
# It is legal to configure more than one boost library version in the config
# files. The version identifier is used to disambiguate between them.
# The first configured version becomes the default.
#
# To use a boost library you need to put a 'use' statement into your
# Jamfile:
#
# import boost ;
#
# boost.use-project 1.35 ;
#
# If you don't care about a specific version you just can omit the version
# part, in which case the default is picked up:
#
# boost.use-project ;
#
# The library can be referenced with the project identifier '/boost'. To
# reference the program_options you would specify:
#
# exe myexe : mysrc.cpp : <library>/boost//program_options ;
#
# Note that the requirements are automatically transformed into suitable
# tags to find the correct pre-built library.
#
import re
import bjam
from b2.build import alias, property, property_set, feature
from b2.manager import get_manager
from b2.tools import builtin, common
from b2.util import bjam_signature, regex
# TODO: This is currently necessary in Python Port, but was not in Jam.
feature.feature('layout', ['system', 'versioned', 'tag'], ['optional'])
feature.feature('root', [], ['optional', 'free'])
feature.feature('build-id', [], ['optional', 'free'])

# Module-level configuration state.
__initialized = None          # version the '/boost' project was initialized with
__boost_auto_config = property_set.create([property.Property('layout', 'system')])
__boost_configured = {}       # version id -> PropertySet of configured options
__boost_default = None        # first configured version (the default)
__build_id = None             # custom build id from <build-id>, if any
__debug = None                # cached --debug-configuration flag (see debug())
def debug():
    """Return True when '--debug-configuration' was passed on the command line.

    The answer is computed once and cached in the module-level __debug flag.
    """
    global __debug
    if __debug is None:
        argv = bjam.variable("ARGV")
        __debug = "--debug-configuration" in argv
    return __debug
# Configuration of the boost library to use.
#
# This can either be a boost source tree or
# pre-built libraries. The 'version' parameter must be a valid boost
# version number, e.g. 1.35, if specifying a pre-built version with
# versioned layout. It may be a symbolic name, e.g. 'trunk' if specifying
# a source tree. The options are specified as named parameters (like
# properties). The following parameters are available:
#
# <root>/path-to-boost-root: Specify a source tree.
#
# <include>/path-to-include: The include directory to search.
#
# <library>/path-to-library: The library directory to search.
#
# <layout>system or <layout>versioned.
#
# <build-id>my_build_id: The custom build id to use.
#
def init(version, options = None):
    """Register a boost configuration under ``version``.

    version -- single-element list holding the version id (e.g. ['1.35']).
    options -- optional list of property strings such as '<root>/path' or
               '<include>/path'; parsed and stored as a PropertySet.

    The first configured version becomes the module-wide default. Reports
    an error (via the manager's error handler) if ``version`` was already
    configured.
    """
    assert(isinstance(version,list))
    assert(len(version)==1)
    version = version[0]
    if version in __boost_configured:
        get_manager().errors()("Boost {} already configured.".format(version));
    else:
        global __boost_default
        if debug():
            # Single-argument print() form is valid on both Python 2 and 3.
            if not __boost_default:
                print("notice: configuring default boost library {}".format(version))
            print("notice: configuring boost library {}".format(version))

        if not __boost_default:
            __boost_default = version
        properties = []
        # BUG FIX: 'options' defaults to None; iterating it unguarded raised
        # TypeError whenever the caller supplied no options.
        for option in options or []:
            properties.append(property.create_from_string(option))
        __boost_configured[ version ] = property_set.PropertySet(properties)
projects = get_manager().projects()
rules = projects.project_rules()
# Use a certain version of the library.
#
# The use-project rule causes the module to define a boost project of
# searchable pre-built boost libraries, or references a source tree
# of the boost library. If the 'version' parameter is omitted either
# the configured default (first in config files) is used or an auto
# configuration will be attempted.
#
@bjam_signature(([ "version", "?" ], ))
def use_project(version = None):
    """Make the configured boost ``version`` available as project '/boost'.

    Falls back to the configured default version, then to 'auto_config'.
    A second call with a different version is an error: the boost project
    may only be initialized once per build.
    """
    projects.push_current( projects.current() )
    if not version:
        version = __boost_default
    if not version:
        version = "auto_config"

    global __initialized
    if __initialized:
        if __initialized != version:
            get_manager().errors()('Attempt to use {} with different parameters'.format('boost'))
    else:
        if version in __boost_configured:
            opts = __boost_configured[ version ]
            root = opts.get('<root>' )
            inc = opts.get('<include>')
            lib = opts.get('<library>')

            if debug():
                # BUG FIX: was 'opt.raw()' -- a NameError; the property set
                # is bound to 'opts'. Also use the py2/py3-compatible
                # single-argument print() form.
                print("notice: using boost library {} {}".format( version, opts.raw() ))

            global __layout
            global __version_tag
            # BUG FIX: without this 'global', __build_id below was a local
            # assignment that silently left the module-level value unset.
            global __build_id
            __layout = opts.get('<layout>')
            if not __layout:
                __layout = 'versioned'
            __build_id = opts.get('<build-id>')
            __version_tag = re.sub("[*\\/:.\"\' ]", "_", version)
            __initialized = version

            # Exactly one of {<root>} or {<include> plus <library>} may be given.
            if ( root and inc ) or \
               ( root and lib ) or \
               ( lib and not inc ) or \
               ( not lib and inc ):
                # BUG FIX: corrected '<inlude>' typo in the error message.
                get_manager().errors()("Ambiguous parameters, use either <root> or <include> with <library>.")
            elif not root and not inc:
                root = bjam.variable("BOOST_ROOT")

            module = projects.current().project_module()

            if root:
                bjam.call('call-in-module', module, 'use-project', ['boost', root])
            else:
                projects.initialize(__name__)
                if version == '0.0.1':
                    boost_0_0_1( inc, lib )
                else:
                    boost_std( inc, lib )
        else:
            get_manager().errors()("Reference to unconfigured boost version.")
    projects.pop_current()

rules.add_rule( 'boost.use-project', use_project )
def boost_std(inc = None, lib = None):
    """Register the default definitions for pre-built Boost libraries:
    a searchable 'boost' project, the standard name-tagging rule and a
    'headers' alias.

    inc -- iterable of include paths (or None).
    lib -- iterable of library search paths (or None).
    """
    # ROBUSTNESS FIX: treat a missing include/library configuration as
    # empty instead of raising TypeError while iterating None.
    rules.project(
        ['boost'],
        ['usage-requirements'] + ['<include>{}'.format(i) for i in (inc or [])] + ['<define>BOOST_ALL_NO_LIB'],
        ['requirements'] + ['<search>{}'.format(l) for l in (lib or [])])
    # TODO: There should be a better way to add a Python function into a
    # project requirements property set.
    tag_prop_set = property_set.create([property.Property('<tag>', tag_std)])
    attributes = projects.attributes(projects.current().project_module())
    attributes.requirements = attributes.requirements.refine(tag_prop_set)
    alias('headers')
def boost_lib(lib_name, dyn_link_macro):
    """Declare one or more pre-built Boost library targets whose users
    get the given auto-link macro defined when linking shared."""
    if isinstance(lib_name, str):
        lib_name = [lib_name]
    builtin.lib(lib_name,
                usage_requirements=['<link>shared:<define>{}'.format(dyn_link_macro)])

# Library-name -> dynamic-link-macro table, registered in one loop
# instead of one call per line.
# NOTE(review): 'locale' maps to BOOST_LOG_DYN_LINK below, which looks
# like a copy/paste from the 'log' entry -- confirm the intended macro.
for _name, _macro in [
        ('date_time',           'BOOST_DATE_TIME_DYN_LINK'),
        ('filesystem',          'BOOST_FILE_SYSTEM_DYN_LINK'),
        ('graph',               'BOOST_GRAPH_DYN_LINK'),
        ('graph_parallel',      'BOOST_GRAPH_DYN_LINK'),
        ('iostreams',           'BOOST_IOSTREAMS_DYN_LINK'),
        ('locale',              'BOOST_LOG_DYN_LINK'),
        ('log',                 'BOOST_LOG_DYN_LINK'),
        ('log_setup',           'BOOST_LOG_DYN_LINK'),
        ('math_tr1',            'BOOST_MATH_TR1_DYN_LINK'),
        ('math_tr1f',           'BOOST_MATH_TR1_DYN_LINK'),
        ('math_tr1l',           'BOOST_MATH_TR1_DYN_LINK'),
        ('math_c99',            'BOOST_MATH_TR1_DYN_LINK'),
        ('math_c99f',           'BOOST_MATH_TR1_DYN_LINK'),
        ('math_c99l',           'BOOST_MATH_TR1_DYN_LINK'),
        ('mpi',                 'BOOST_MPI_DYN_LINK'),
        ('program_options',     'BOOST_PROGRAM_OPTIONS_DYN_LINK'),
        ('python',              'BOOST_PYTHON_DYN_LINK'),
        ('python3',             'BOOST_PYTHON_DYN_LINK'),
        ('random',              'BOOST_RANDOM_DYN_LINK'),
        ('regex',               'BOOST_REGEX_DYN_LINK'),
        ('serialization',       'BOOST_SERIALIZATION_DYN_LINK'),
        ('wserialization',      'BOOST_SERIALIZATION_DYN_LINK'),
        ('signals',             'BOOST_SIGNALS_DYN_LINK'),
        ('system',              'BOOST_SYSTEM_DYN_LINK'),
        ('unit_test_framework', 'BOOST_TEST_DYN_LINK'),
        ('prg_exec_monitor',    'BOOST_TEST_DYN_LINK'),
        ('test_exec_monitor',   'BOOST_TEST_DYN_LINK'),
        ('thread',              'BOOST_THREAD_DYN_DLL'),
        ('wave',                'BOOST_WAVE_DYN_LINK'),
]:
    boost_lib(_name, _macro)
def boost_0_0_1( inc, lib ):
print "You are trying to use an example placeholder for boost libs." ;
# Copy this template to another place (in the file boost.jam)
# and define a project and libraries modelled after the
# boost_std rule. Please note that it is also possible to have
# a per version taging rule in case they are different between
# versions.
def tag_std(name, type, prop_set):
    """Decorate a Boost library file name according to the configured
    __layout ('system', 'tagged' or 'versioned').

    Prefixes the base name with 'boost_' (and 'lib' for static Windows
    builds) and then delegates to the matching tag_* helper.
    """
    name = 'boost_' + name
    if 'static' in prop_set.get('<link>') and 'windows' in prop_set.get('<target-os>'):
        name = 'lib' + name

    if __layout == 'system':
        # Boost releases before 1.39 still used tagged names even for
        # the 'system' layout.
        match = re.search('^([0-9]+)_([0-9]+)', __version_tag)
        if match and match.group(1) == '1' and int(match.group(2)) < 39:
            return tag_tagged(name, type, prop_set)
        return tag_system(name, type, prop_set)
    if __layout == 'tagged':
        return tag_tagged(name, type, prop_set)
    if __layout == 'versioned':
        return tag_versioned(name, type, prop_set)

    get_manager().errors()("Missing layout")
    return None
def tag_maybe(param):
    """Return *param* as a single '-'-prefixed name component, or an
    empty list when param is empty/None."""
    if param:
        return ['-{}'.format(param)]
    return []
def tag_system(name, type, prop_set):
    """Format a library name for the 'system' layout: just the base
    name plus an optional build id, with no toolset/threading/runtime
    decoration."""
    components = ['<base>'] + tag_maybe(__build_id)
    return common.format_name(components, name, type, prop_set)
def tag_tagged(name, type, prop_set):
    """Format a library name for the 'tagged' layout: base name plus
    threading/runtime tags and an optional build id.

    BUG FIX: this function was mistakenly declared as a second
    ``tag_system``, which both shadowed the real tag_system above and
    left ``tag_tagged`` (called from tag_std) undefined, causing a
    NameError for the 'tagged' layout.  Renamed to the intended name.
    """
    return common.format_name(['<base>', '<threading>', '<runtime>'] + tag_maybe(__build_id), name, type, prop_set)
def tag_versioned(name, type, prop_set):
    """Format a library name for the 'versioned' layout: base name plus
    toolset/threading/runtime tags, the version tag and an optional
    build id."""
    components = ['<base>', '<toolset>', '<threading>', '<runtime>']
    components += tag_maybe(__version_tag)
    components += tag_maybe(__build_id)
    return common.format_name(components, name, type, prop_set)
| {
"repo_name": "flingone/frameworks_base_cmds_remoted",
"path": "libs/boost/tools/build/src/contrib/boost.py",
"copies": "2",
"size": "11493",
"license": "apache-2.0",
"hash": -8273881732779942000,
"line_mean": 39.1935483871,
"line_max": 133,
"alpha_frac": 0.5923605673,
"autogenerated": false,
"ratio": 3.538485221674877,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014912686075544135,
"num_lines": 279
} |
"""An abstract widget for button type widget implementations."""
from pygame import time as PygameTime
from pygame import K_SPACE, K_KP_ENTER, K_RETURN
from Bin import Bin
from Constants import *
import base
class ButtonBase (Bin):
    """
    ButtonBase () -> ButtonBase

    An abstract base class that implements basic button logic.

    The ButtonBase class is an abstract class, that implements a minimal
    set of events and methods to make it suitable for button type
    widgets. It implements the most important mouse signal types and how
    to handle them..

    Default action (invoked by activate()):
    The ButtonBase emulates a SIG_CLICKED event and runs the connected
    callbacks.

    Mnemonic action (invoked by activate_mnemonic()):
    The ButtonBase invokes the activate_mnemonic() method of its child
    (if any).

    Signals:
    SIG_MOUSEDOWN - Invoked, when a mouse button is pressed on the
                    ButtonBase.
    SIG_MOUSEUP   - Invoked, when a mouse button is released on the
                    ButtonBase.
    SIG_MOUSEMOVE - Invoked, when the mouse moves over the ButtonBase.
    SIG_CLICKED   - Invoked, when the left mouse button is pressed AND
                    released over the ButtonBase.
    """
    def __init__ (self):
        Bin.__init__ (self)
        # Internal click detector: True between a left-button press on
        # the widget and the matching release.
        self.__click = False

        # Signals, the button listens to.
        self._signals[SIG_MOUSEDOWN] = []
        self._signals[SIG_MOUSEUP] = []
        self._signals[SIG_MOUSEMOVE] = []
        self._signals[SIG_KEYDOWN] = None # Dummy for keyboard activation.
        self._signals[SIG_CLICKED] = []

    def activate_mnemonic (self, mnemonic):
        """B.activate_mnemonic (...) -> bool

        Activates the mnemonic of the ButtonBase its child.

        Returns True, if the child handled the mnemonic, else False.
        """
        if self.child:
            return self.child.activate_mnemonic (mnemonic)
        return False

    def activate (self):
        """B.activate () -> None

        Activates the ButtonBase default action.

        Activates the ButtonBase default action. This usually means a
        click, emulated by setting the state to STATE_ACTIVE, forcing an
        update, setting the state back to STATE_NORMAL and running the
        attached callbacks for the SIG_CLICKED event.
        """
        if not self.sensitive:
            return
        # Flash the pressed state briefly so the emulated click is
        # visible before the handlers run.
        self.lock ()
        self.focus = True
        self.state = STATE_ACTIVE
        self.unlock ()
        PygameTime.delay (50)
        self.state = STATE_NORMAL
        self.run_signal_handlers (SIG_CLICKED)

    def notify (self, event):
        """B.notify (...) -> None

        Notifies the ButtonBase about an event.

        Tracks press/release pairs of the left mouse button to emit
        SIG_CLICKED, updates the widget state on hover, and activates
        the button on space/return/enter while focused.
        """
        if not self.sensitive:
            return

        if event.signal in SIGNALS_MOUSE:
            eventarea = self.rect_to_client ()

            if event.signal == SIG_MOUSEDOWN:
                if eventarea.collidepoint (event.data.pos):
                    self.focus = True
                    # The button only acts upon left clicks.
                    if event.data.button == 1:
                        self.__click = True
                        self.state = STATE_ACTIVE
                    self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
                    event.handled = True

            elif event.signal == SIG_MOUSEUP:
                if eventarea.collidepoint (event.data.pos):
                    self.run_signal_handlers (SIG_MOUSEUP, event.data)
                    if event.data.button == 1:
                        # Release inside the widget: drop back to the
                        # hovered or normal state.
                        if self.state == STATE_ACTIVE:
                            self.state = STATE_ENTERED
                        else:
                            self.state = STATE_NORMAL
                        # Check for a previous left click.
                        if self.__click:
                            self.__click = False
                            self.run_signal_handlers (SIG_CLICKED)
                    event.handled = True
                elif event.data.button == 1:
                    # Reset the 'clicked' state for the button, if the mouse
                    # button 1 is released at another location.
                    self.__click = False
                    self.state = STATE_NORMAL

            elif event.signal == SIG_MOUSEMOVE:
                if eventarea.collidepoint (event.data.pos):
                    # Show the pressed look while dragging with the
                    # button held down, else just the hover look.
                    if not self.__click:
                        self.state = STATE_ENTERED
                    else:
                        self.state = STATE_ACTIVE
                    self.run_signal_handlers (SIG_MOUSEMOVE, event.data)
                    self.entered = True
                    event.handled = True
                else:
                    self.entered = False

        elif (event.signal == SIG_KEYDOWN) and self.focus:
            if event.data.key in (K_SPACE, K_KP_ENTER, K_RETURN):
                # Activate the focused button, if the user presses
                # space, return or enter.
                self.activate ()
                event.handled = True

        Bin.notify (self, event)
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/ButtonBase.py",
"copies": "1",
"size": "6588",
"license": "bsd-2-clause",
"hash": 1115756313914449900,
"line_mean": 38.686746988,
"line_max": 78,
"alpha_frac": 0.5925925926,
"autogenerated": false,
"ratio": 4.6199158485273495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.571250844112735,
"avg_score": null,
"num_lines": null
} |
"""A widget, which acts upon different mouse events."""
from ButtonBase import ButtonBase
from Label import Label
from Constants import *
import base
class Button (ButtonBase):
    """Button (text=None) -> Button ()

    A widget class, which can react upon mouse events.

    The Button widget can listen to mouse events such as clicks or a
    pressed button and can display a short text.

    The text to display on the Button can be set using the 'text'
    attribute or the set_text() method. The text is displayed using a
    Label widget, which is placed upon the Button surface, thus all
    text capabilities of the Label, such as mnemonics, can be applied
    to the Button as well.

    button.text = '#Click me'       # Use the C character as mnemonic.
    button.text = 'Button'          # A simple text.
    button.set_text ('Button ##1')  # Creates the text 'Button #1'

    To operate on the displayed Label directly (which is NOT
    recommended), the 'child' attribute and set_child() method can be
    used. They have a slightly different behaviour than the methods of
    the ButtonBase class and allow only Label widgets to be assigned to
    the Button. Additionally the Label its 'widget' attribute will be
    bound to the Button.

    button.child = Label ('#Button')
    button.set_child (None)

    The Button supports different border types by setting its 'border'
    attribute to a valid value of the BORDER_TYPES constants.

    button.border = BORDER_SUNKEN
    button.set_border (BORDER_SUNKEN)

    Note: Changing the 'state' attribute of the Button will also
    affect the state of the Label placed on the Button.

    Default action (invoked by activate()):
    See the ButtonBase class.

    Mnemonic action (invoked by activate_mnemonic()):
    See the ButtonBase class.

    Attributes:
    text   - The text to display on the Button.
    border - The border style to set for the Button.
    """
    def __init__ (self, text=None):
        ButtonBase.__init__ (self)
        # Border style, one of the BORDER_TYPES constants.
        self._border = BORDER_RAISED
        self.set_text (text)

    def set_text (self, text=None):
        """B.set_text (...) -> None

        Sets the text to display on the Button.

        Sets the text to display on the Button by referring to the
        'text' attribute of its child Label.  Passing None removes the
        child Label entirely.
        """
        if text != None:
            if self.child:
                # Reuse the existing Label instead of recreating it.
                self.child.set_text (text)
            else:
                self.child = Label (text)
        else:
            self.child = None

    def get_text (self):
        """B.get_text () -> string

        Returns the set text of the Button.

        Returns the text set on the Label of the Button, or "" when no
        Label child is present.
        """
        if self.child:
            return self.child.text
        return ""

    def set_child (self, child=None):
        """B.set_child (...) -> None

        Sets the Label to display on the Button.

        Creates a parent-child relationship from the Button to a Label
        and causes the Label to set its mnemonic widget to the Button.

        Raises a TypeError, if the passed argument does not inherit
        from the Label class.
        """
        self.lock ()
        if child and not isinstance (child, Label):
            raise TypeError ("child must inherit from Label")
        ButtonBase.set_child (self, child)
        if child:
            child.set_widget (self)
            # Inherit the Button's style, if the Label has none yet.
            if not child.style:
                child.style = self.style or \
                              base.GlobalStyle.get_style (self.__class__)
        self.unlock ()

    def set_state (self, state):
        """B.set_state (...) -> None

        Sets the state of the Button.

        Sets the state of the Button and causes its child to set its
        state to the same value.
        """
        if self.state == state:
            return
        self.lock ()
        if self.child:
            self.child.state = state
        ButtonBase.set_state (self, state)
        self.unlock ()

    def set_border (self, border):
        """B.set_border (...) -> None

        Sets the border type to be used by the Button.

        Raises a ValueError, if the passed argument is not a value from
        BORDER_TYPES
        """
        if border not in BORDER_TYPES:
            raise ValueError ("border must be a value from BORDER_TYPES")
        self._border = border
        self.dirty = True

    def draw_bg (self):
        """B.draw_bg () -> Surface

        Draws the Button background surface and returns it.

        Creates the visible surface of the Button and returns it to the
        caller.
        """
        return base.GlobalStyle.engine.draw_button (self)

    def draw (self):
        """B.draw () -> None

        Draws the Button surface and places its Label on it.
        """
        ButtonBase.draw (self)
        if self.child:
            # Center the Label on the freshly drawn background.
            self.child.center = self.image.get_rect ().center
            self.image.blit (self.child.image, self.child.rect)

    text = property (lambda self: self.get_text (),
                     lambda self, var: self.set_text (var),
                     doc = "The text of the Button.")
    border = property (lambda self: self._border,
                       lambda self, var: self.set_border (var),
                       doc = "The border style to set for the Button.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Button.py",
"copies": "1",
"size": "6750",
"license": "bsd-2-clause",
"hash": -4605172156315822600,
"line_mean": 34.7142857143,
"line_max": 78,
"alpha_frac": 0.6377777778,
"autogenerated": false,
"ratio": 4.383116883116883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008533758875063385,
"num_lines": 189
} |