Dataset schema (one row per source file):

  repo_name : string, 5-100 chars
  path      : string, 4-375 chars
  copies    : string, 1-5 chars
  size      : string, 4-7 chars
  content   : string, 666 chars to 1M
  license   : string, one of 15 license classes
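A minimal sketch of consuming rows with this schema. It assumes the dump is available as JSON lines under the hypothetical file name code_dump.jsonl, which is not part of the schema above:

import json
from collections import Counter

# Tally the license column while sanity-checking the declared length bounds.
licenses = Counter()
with open("code_dump.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        licenses[row["license"]] += 1            # one of the 15 license classes
        assert 5 <= len(row["repo_name"]) <= 100  # matches the declared bounds

print(licenses.most_common())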
tinloaf/home-assistant
homeassistant/components/microsoft_face.py
4
10344
""" Support for Microsoft face recognition. For more details about this component, please refer to the documentation at https://home-assistant.io/components/microsoft_face/ """ import asyncio import json import logging import aiohttp from aiohttp.hdrs import CONTENT_TYPE import async_timeout import voluptuous as vol from homeassistant.const import CONF_API_KEY, CONF_TIMEOUT, ATTR_NAME from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import slugify _LOGGER = logging.getLogger(__name__) ATTR_CAMERA_ENTITY = 'camera_entity' ATTR_GROUP = 'group' ATTR_PERSON = 'person' CONF_AZURE_REGION = 'azure_region' DATA_MICROSOFT_FACE = 'microsoft_face' DEFAULT_TIMEOUT = 10 DEPENDENCIES = ['camera'] DOMAIN = 'microsoft_face' FACE_API_URL = "api.cognitive.microsoft.com/face/v1.0/{0}" SERVICE_CREATE_GROUP = 'create_group' SERVICE_CREATE_PERSON = 'create_person' SERVICE_DELETE_GROUP = 'delete_group' SERVICE_DELETE_PERSON = 'delete_person' SERVICE_FACE_PERSON = 'face_person' SERVICE_TRAIN_GROUP = 'train_group' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_AZURE_REGION, default="westus"): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, }), }, extra=vol.ALLOW_EXTRA) SCHEMA_GROUP_SERVICE = vol.Schema({ vol.Required(ATTR_NAME): cv.string, }) SCHEMA_PERSON_SERVICE = SCHEMA_GROUP_SERVICE.extend({ vol.Required(ATTR_GROUP): cv.slugify, }) SCHEMA_FACE_SERVICE = vol.Schema({ vol.Required(ATTR_PERSON): cv.string, vol.Required(ATTR_GROUP): cv.slugify, vol.Required(ATTR_CAMERA_ENTITY): cv.entity_id, }) SCHEMA_TRAIN_SERVICE = vol.Schema({ vol.Required(ATTR_GROUP): cv.slugify, }) async def async_setup(hass, config): """Set up Microsoft Face.""" entities = {} face = MicrosoftFace( hass, config[DOMAIN].get(CONF_AZURE_REGION), config[DOMAIN].get(CONF_API_KEY), config[DOMAIN].get(CONF_TIMEOUT), entities ) try: # read exists group/person from cloud and create entities await face.update_store() except HomeAssistantError as err: _LOGGER.error("Can't load data from face api: %s", err) return False hass.data[DATA_MICROSOFT_FACE] = face async def async_create_group(service): """Create a new person group.""" name = service.data[ATTR_NAME] g_id = slugify(name) try: await face.call_api( 'put', "persongroups/{0}".format(g_id), {'name': name}) face.store[g_id] = {} entities[g_id] = MicrosoftFaceGroupEntity(hass, face, g_id, name) await entities[g_id].async_update_ha_state() except HomeAssistantError as err: _LOGGER.error("Can't create group '%s' with error: %s", g_id, err) hass.services.async_register( DOMAIN, SERVICE_CREATE_GROUP, async_create_group, schema=SCHEMA_GROUP_SERVICE) async def async_delete_group(service): """Delete a person group.""" g_id = slugify(service.data[ATTR_NAME]) try: await face.call_api('delete', "persongroups/{0}".format(g_id)) face.store.pop(g_id) entity = entities.pop(g_id) hass.states.async_remove(entity.entity_id) except HomeAssistantError as err: _LOGGER.error("Can't delete group '%s' with error: %s", g_id, err) hass.services.async_register( DOMAIN, SERVICE_DELETE_GROUP, async_delete_group, schema=SCHEMA_GROUP_SERVICE) async def async_train_group(service): """Train a person group.""" g_id = service.data[ATTR_GROUP] try: await face.call_api( 'post', "persongroups/{0}/train".format(g_id)) except HomeAssistantError as 
err: _LOGGER.error("Can't train group '%s' with error: %s", g_id, err) hass.services.async_register( DOMAIN, SERVICE_TRAIN_GROUP, async_train_group, schema=SCHEMA_TRAIN_SERVICE) async def async_create_person(service): """Create a person in a group.""" name = service.data[ATTR_NAME] g_id = service.data[ATTR_GROUP] try: user_data = await face.call_api( 'post', "persongroups/{0}/persons".format(g_id), {'name': name} ) face.store[g_id][name] = user_data['personId'] await entities[g_id].async_update_ha_state() except HomeAssistantError as err: _LOGGER.error("Can't create person '%s' with error: %s", name, err) hass.services.async_register( DOMAIN, SERVICE_CREATE_PERSON, async_create_person, schema=SCHEMA_PERSON_SERVICE) async def async_delete_person(service): """Delete a person in a group.""" name = service.data[ATTR_NAME] g_id = service.data[ATTR_GROUP] p_id = face.store[g_id].get(name) try: await face.call_api( 'delete', "persongroups/{0}/persons/{1}".format(g_id, p_id)) face.store[g_id].pop(name) await entities[g_id].async_update_ha_state() except HomeAssistantError as err: _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err) hass.services.async_register( DOMAIN, SERVICE_DELETE_PERSON, async_delete_person, schema=SCHEMA_PERSON_SERVICE) async def async_face_person(service): """Add a new face picture to a person.""" g_id = service.data[ATTR_GROUP] p_id = face.store[g_id].get(service.data[ATTR_PERSON]) camera_entity = service.data[ATTR_CAMERA_ENTITY] camera = hass.components.camera try: image = await camera.async_get_image(hass, camera_entity) await face.call_api( 'post', "persongroups/{0}/persons/{1}/persistedFaces".format( g_id, p_id), image.content, binary=True ) except HomeAssistantError as err: _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err) hass.services.async_register( DOMAIN, SERVICE_FACE_PERSON, async_face_person, schema=SCHEMA_FACE_SERVICE) return True class MicrosoftFaceGroupEntity(Entity): """Person-Group state/data Entity.""" def __init__(self, hass, api, g_id, name): """Initialize person/group entity.""" self.hass = hass self._api = api self._id = g_id self._name = name @property def name(self): """Return the name of the entity.""" return self._name @property def entity_id(self): """Return entity id.""" return "{0}.{1}".format(DOMAIN, self._id) @property def state(self): """Return the state of the entity.""" return len(self._api.store[self._id]) @property def should_poll(self): """Return True if entity has to be polled for state.""" return False @property def device_state_attributes(self): """Return device specific state attributes.""" attr = {} for name, p_id in self._api.store[self._id].items(): attr[name] = p_id return attr class MicrosoftFace: """Microsoft Face api for HomeAssistant.""" def __init__(self, hass, server_loc, api_key, timeout, entities): """Initialize Microsoft Face api.""" self.hass = hass self.websession = async_get_clientsession(hass) self.timeout = timeout self._api_key = api_key self._server_url = "https://{0}.{1}".format(server_loc, FACE_API_URL) self._store = {} self._entities = entities @property def store(self): """Store group/person data and IDs.""" return self._store async def update_store(self): """Load all group/person data into local store.""" groups = await self.call_api('get', 'persongroups') tasks = [] for group in groups: g_id = group['personGroupId'] self._store[g_id] = {} self._entities[g_id] = MicrosoftFaceGroupEntity( self.hass, self, g_id, group['name']) persons = await self.call_api( 'get', 
"persongroups/{0}/persons".format(g_id)) for person in persons: self._store[g_id][person['name']] = person['personId'] tasks.append(self._entities[g_id].async_update_ha_state()) if tasks: await asyncio.wait(tasks, loop=self.hass.loop) async def call_api(self, method, function, data=None, binary=False, params=None): """Make an api call.""" headers = {"Ocp-Apim-Subscription-Key": self._api_key} url = self._server_url.format(function) payload = None if binary: headers[CONTENT_TYPE] = "application/octet-stream" payload = data else: headers[CONTENT_TYPE] = "application/json" if data is not None: payload = json.dumps(data).encode() else: payload = None try: with async_timeout.timeout(self.timeout, loop=self.hass.loop): response = await getattr(self.websession, method)( url, data=payload, headers=headers, params=params) answer = await response.json() _LOGGER.debug("Read from microsoft face api: %s", answer) if response.status < 300: return answer _LOGGER.warning("Error %d microsoft face api %s", response.status, response.url) raise HomeAssistantError(answer['error']['message']) except aiohttp.ClientError: _LOGGER.warning("Can't connect to microsoft face api") except asyncio.TimeoutError: _LOGGER.warning("Timeout from microsoft face api %s", response.url) raise HomeAssistantError("Network error on microsoft face api.")
apache-2.0
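The component above validates every service call with voluptuous schemas (SCHEMA_GROUP_SERVICE and friends). A stand-alone sketch of that validation pattern, with a plain str validator standing in for Home Assistant's cv.string helper:

import voluptuous as vol

# Mirrors SCHEMA_GROUP_SERVICE: one required 'name' key that must be a string.
schema = vol.Schema({vol.Required('name'): str})

print(schema({'name': 'family'}))  # passes and returns the validated dict
try:
    schema({})                     # missing required key
except vol.Invalid as err:
    print(err)                     # "required key not provided @ data['name']"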
FNCS/ns-3.26
examples/wireless/mixed-wireless.py
59
17198
# /* # * This program is free software; you can redistribute it and/or modify # * it under the terms of the GNU General Public License version 2 as # * published by the Free Software Foundation; # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # * # */ # # This ns-3 example demonstrates the use of helper functions to ease # the construction of simulation scenarios. # # The simulation topology consists of a mixed wired and wireless # scenario in which a hierarchical mobility model is used. # # The simulation layout consists of N backbone routers interconnected # by an ad hoc wifi network. # Each backbone router also has a local 802.11 network and is connected # to a local LAN. An additional set of(K-1) nodes are connected to # this backbone. Finally, a local LAN is connected to each router # on the backbone, with L-1 additional hosts. # # The nodes are populated with TCP/IP stacks, and OLSR unicast routing # on the backbone. An example UDP transfer is shown. The simulator # be configured to output tcpdumps or traces from different nodes. # # # +--------------------------------------------------------+ # | | # | 802.11 ad hoc, ns-2 mobility | # | | # +--------------------------------------------------------+ # | o o o(N backbone routers) | # +--------+ +--------+ # wired LAN | mobile | wired LAN | mobile | # -----------| router | -----------| router | # --------- --------- # | | # +----------------+ +----------------+ # | 802.11 | | 802.11 | # | net | | net | # | K-1 hosts | | K-1 hosts | # +----------------+ +----------------+ # import ns.applications import ns.core import ns.csma import ns.internet import ns.mobility import ns.network import ns.olsr import ns.wifi # # # # This function will be used below as a trace sink # # # static void # CourseChangeCallback(std.string path, Ptr<const MobilityModel> model) # { # Vector position = model.GetPosition(); # std.cout << "CourseChange " << path << " x=" << position.x << ", y=" << position.y << ", z=" << position.z << std.endl; # } def main(argv): # # First, we initialize a few local variables that control some # simulation parameters. # cmd = ns.core.CommandLine() cmd.backboneNodes = 10 cmd.infraNodes = 2 cmd.lanNodes = 2 cmd.stopTime = 20 # # Simulation defaults are typically set next, before command line # arguments are parsed. # ns.core.Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.StringValue("1472")) ns.core.Config.SetDefault("ns3::OnOffApplication::DataRate", ns.core.StringValue("100kb/s")) # # For convenience, we add the local variables to the command line argument # system so that they can be overridden with flags such as # "--backboneNodes=20" # cmd.AddValue("backboneNodes", "number of backbone nodes") cmd.AddValue("infraNodes", "number of leaf nodes") cmd.AddValue("lanNodes", "number of LAN nodes") cmd.AddValue("stopTime", "simulation stop time(seconds)") # # The system global variables and the local values added to the argument # system can be overridden by command line arguments by using this call. 
# cmd.Parse(argv) backboneNodes = int(cmd.backboneNodes) infraNodes = int(cmd.infraNodes) lanNodes = int(cmd.lanNodes) stopTime = int(cmd.stopTime) if (stopTime < 10): print "Use a simulation stop time >= 10 seconds" exit(1) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # # # Construct the backbone # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # # Create a container to manage the nodes of the adhoc(backbone) network. # Later we'll create the rest of the nodes we'll need. # backbone = ns.network.NodeContainer() backbone.Create(backboneNodes) # # Create the backbone wifi net devices and install them into the nodes in # our container # wifi = ns.wifi.WifiHelper() mac = ns.wifi.WifiMacHelper() mac.SetType("ns3::AdhocWifiMac") wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "DataMode", ns.core.StringValue("OfdmRate54Mbps")) wifiPhy = ns.wifi.YansWifiPhyHelper.Default() wifiChannel = ns.wifi.YansWifiChannelHelper.Default() wifiPhy.SetChannel(wifiChannel.Create()) backboneDevices = wifi.Install(wifiPhy, mac, backbone) # # Add the IPv4 protocol stack to the nodes in our container # print "Enabling OLSR routing on all backbone nodes" internet = ns.internet.InternetStackHelper() olsr = ns.olsr.OlsrHelper() internet.SetRoutingHelper(olsr); # has effect on the next Install () internet.Install(backbone); # re-initialize for non-olsr routing. # internet.Reset() # # Assign IPv4 addresses to the device drivers(actually to the associated # IPv4 interfaces) we just created. # ipAddrs = ns.internet.Ipv4AddressHelper() ipAddrs.SetBase(ns.network.Ipv4Address("192.168.0.0"), ns.network.Ipv4Mask("255.255.255.0")) ipAddrs.Assign(backboneDevices) # # The ad-hoc network nodes need a mobility model so we aggregate one to # each of the nodes we just finished building. # mobility = ns.mobility.MobilityHelper() mobility.SetPositionAllocator("ns3::GridPositionAllocator", "MinX", ns.core.DoubleValue(20.0), "MinY", ns.core.DoubleValue(20.0), "DeltaX", ns.core.DoubleValue(20.0), "DeltaY", ns.core.DoubleValue(20.0), "GridWidth", ns.core.UintegerValue(5), "LayoutType", ns.core.StringValue("RowFirst")) mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel", "Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(-500, 500, -500, 500)), "Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=2]"), "Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.2]")) mobility.Install(backbone) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # # # Construct the LANs # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # Reset the address base-- all of the CSMA networks will be in # the "172.16 address space ipAddrs.SetBase(ns.network.Ipv4Address("172.16.0.0"), ns.network.Ipv4Mask("255.255.255.0")) for i in range(backboneNodes): print "Configuring local area network for backbone node ", i # # Create a container to manage the nodes of the LAN. We need # two containers here; one with all of the new nodes, and one # with all of the nodes including new and existing nodes # newLanNodes = ns.network.NodeContainer() newLanNodes.Create(lanNodes - 1) # Now, create the container with all nodes on this link lan = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), newLanNodes) # # Create the CSMA net devices and install them into the nodes in our # collection. 
# csma = ns.csma.CsmaHelper() csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000))) csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2))) lanDevices = csma.Install(lan) # # Add the IPv4 protocol stack to the new LAN nodes # internet.Install(newLanNodes) # # Assign IPv4 addresses to the device drivers(actually to the # associated IPv4 interfaces) we just created. # ipAddrs.Assign(lanDevices) # # Assign a new network prefix for the next LAN, according to the # network mask initialized above # ipAddrs.NewNetwork() # # The new LAN nodes need a mobility model so we aggregate one # to each of the nodes we just finished building. # mobilityLan = ns.mobility.MobilityHelper() positionAlloc = ns.mobility.ListPositionAllocator() for j in range(newLanNodes.GetN()): positionAlloc.Add(ns.core.Vector(0.0, (j*10 + 10), 0.0)) mobilityLan.SetPositionAllocator(positionAlloc) mobilityLan.PushReferenceMobilityModel(backbone.Get(i)) mobilityLan.SetMobilityModel("ns3::ConstantPositionMobilityModel") mobilityLan.Install(newLanNodes); # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # # # Construct the mobile networks # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # Reset the address base-- all of the 802.11 networks will be in # the "10.0" address space ipAddrs.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0")) for i in range(backboneNodes): print "Configuring wireless network for backbone node ", i # # Create a container to manage the nodes of the LAN. We need # two containers here; one with all of the new nodes, and one # with all of the nodes including new and existing nodes # stas = ns.network.NodeContainer() stas.Create(infraNodes - 1) # Now, create the container with all nodes on this link infra = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), stas) # # Create another ad hoc network and devices # ssid = ns.wifi.Ssid('wifi-infra' + str(i)) wifiInfra = ns.wifi.WifiHelper.Default() wifiPhy.SetChannel(wifiChannel.Create()) wifiInfra.SetRemoteStationManager('ns3::ArfWifiManager') macInfra = ns.wifi.WifiMacHelper(); macInfra.SetType("ns3::StaWifiMac", "Ssid", ns.wifi.SsidValue(ssid), "ActiveProbing", ns.core.BooleanValue(False)) # setup stas staDevices = wifiInfra.Install(wifiPhy, macInfra, stas) # setup ap. macInfra.SetType("ns3::ApWifiMac", "Ssid", ns.wifi.SsidValue(ssid), "BeaconGeneration", ns.core.BooleanValue(True), "BeaconInterval", ns.core.TimeValue(ns.core.Seconds(2.5))) apDevices = wifiInfra.Install(wifiPhy, macInfra, backbone.Get(i)) # Collect all of these new devices infraDevices = ns.network.NetDeviceContainer(apDevices, staDevices) # Add the IPv4 protocol stack to the nodes in our container # internet.Install(stas) # # Assign IPv4 addresses to the device drivers(actually to the associated # IPv4 interfaces) we just created. # ipAddrs.Assign(infraDevices) # # Assign a new network prefix for each mobile network, according to # the network mask initialized above # ipAddrs.NewNetwork() # # The new wireless nodes need a mobility model so we aggregate one # to each of the nodes we just finished building. 
# subnetAlloc = ns.mobility.ListPositionAllocator() for j in range(infra.GetN()): subnetAlloc.Add(ns.core.Vector(0.0, j, 0.0)) mobility.PushReferenceMobilityModel(backbone.Get(i)) mobility.SetPositionAllocator(subnetAlloc) mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel", "Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(-10, 10, -10, 10)), "Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=3]"), "Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.4]")) mobility.Install(stas) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # # # Application configuration # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # Create the OnOff application to send UDP datagrams of size # 210 bytes at a rate of 448 Kb/s, between two nodes print "Create Applications." port = 9 # Discard port(RFC 863) appSource = ns.network.NodeList.GetNode(backboneNodes) lastNodeIndex = backboneNodes + backboneNodes*(lanNodes - 1) + backboneNodes*(infraNodes - 1) - 1 appSink = ns.network.NodeList.GetNode(lastNodeIndex) # Let's fetch the IP address of the last node, which is on Ipv4Interface 1 remoteAddr = appSink.GetObject(ns.internet.Ipv4.GetTypeId()).GetAddress(1,0).GetLocal() onoff = ns.applications.OnOffHelper("ns3::UdpSocketFactory", ns.network.Address(ns.network.InetSocketAddress(remoteAddr, port))) apps = onoff.Install(ns.network.NodeContainer(appSource)) apps.Start(ns.core.Seconds(3)) apps.Stop(ns.core.Seconds(stopTime - 1)) # Create a packet sink to receive these packets sink = ns.applications.PacketSinkHelper("ns3::UdpSocketFactory", ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port)) apps = sink.Install(ns.network.NodeContainer(appSink)) apps.Start(ns.core.Seconds(3)) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / # # # Tracing configuration # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # / print "Configure Tracing." csma = ns.csma.CsmaHelper() # # Let's set up some ns-2-like ascii traces, using another helper class # ascii = ns.network.AsciiTraceHelper(); stream = ascii.CreateFileStream("mixed-wireless.tr"); wifiPhy.EnableAsciiAll(stream); csma.EnableAsciiAll(stream); internet.EnableAsciiIpv4All(stream); # Csma captures in non-promiscuous mode csma.EnablePcapAll("mixed-wireless", False) # Let's do a pcap trace on the backbone devices wifiPhy.EnablePcap("mixed-wireless", backboneDevices) wifiPhy.EnablePcap("mixed-wireless", appSink.GetId(), 0) # #ifdef ENABLE_FOR_TRACING_EXAMPLE # Config.Connect("/NodeList/*/$MobilityModel/CourseChange", # MakeCallback(&CourseChangeCallback)) # #endif # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Run simulation # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # print "Run Simulation." ns.core.Simulator.Stop(ns.core.Seconds(stopTime)) ns.core.Simulator.Run() ns.core.Simulator.Destroy() if __name__ == '__main__': import sys main(sys.argv)
gpl-2.0
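The ns-3 example above rotates subnets by calling Ipv4AddressHelper.NewNetwork() after each Assign(). A minimal sketch of just that pattern, using only calls that appear in the example; it assumes the ns-3 Python bindings are installed:

import ns.internet
import ns.network

# Same SetBase/NewNetwork pattern the example uses to give each LAN its own
# /24 under 172.16.0.0; NewNetwork() advances the base to the next subnet.
ipAddrs = ns.internet.Ipv4AddressHelper()
ipAddrs.SetBase(ns.network.Ipv4Address("172.16.0.0"),
                ns.network.Ipv4Mask("255.255.255.0"))
ipAddrs.NewNetwork()  # the next Assign() would allocate from 172.16.1.0/24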
Monika319/EWEF-1
Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W2.py
1
1312
# -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font', family="Consolas")

files = ["real_zad5_05f_p2.txt"]
for NazwaPliku in files:
    print NazwaPliku
    Plik = open(NazwaPliku)
    #print DeltaT
    Dane = Plik.readlines()#[4:]
    DeltaT = float(Dane[2].split()[3].replace(",", "."))
    #M = len(Dane[4].split())/2
    M = 2
    Dane = Dane[5:]
    Plik.close()
    print M
    Ys = [np.zeros(len(Dane)) for i in range(M)]
    for m in range(M):
        for i in range(len(Dane)):
            try:
                Ys[m][i] = float(Dane[i].split()[2 + 3*m].replace(",", "."))
            except:
                print m, i, 2 + 3*m, len(Dane[i].split()), Dane[i].split()
            #print i, Y[i]
    X = np.zeros_like(Ys[0])
    for i in range(len(X)):
        X[i] = i*DeltaT
    for y in Ys:
        print max(y) - min(y)
    Opis = u"Układ szeregowy\nPołowa częstotliwości rezonansowej"  # "Series circuit / half the resonant frequency"
    Nazwa = u"Z5W2"
    plt.title(u"Przebieg napięciowy\n" + Opis)  # "Voltage waveform"
    plt.xlabel(u"Czas t [s]")                   # "Time t [s]"
    plt.ylabel(u"Napięcie [V]")                 # "Voltage [V]"
    plt.plot(X, Ys[0], label=u"Wejście")        # "Input"
    plt.plot(X, Ys[1], label=u"Wyjście")        # "Output"
    plt.grid()
    plt.legend(loc="best")
    plt.savefig(Nazwa + ".png", bbox_inches='tight')
    plt.show()
gpl-2.0
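The oscilloscope dump uses the European comma as decimal separator, which is why the script above rewrites "," to "." before every float() call. The normalisation in isolation:

# MultiSim's European-locale dumps write 3,1415 rather than 3.1415; float()
# only accepts the dot form, so each token is rewritten before parsing.
value = float("3,1415".replace(",", "."))
print(value)  # 3.1415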
seanyoncraic/linuxRetroPie
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import sys
import struct

i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        break
    elif len(buf) != 4:
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
gpl-2.0
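cxacru-cf.py decodes the file as consecutive little-endian u32 words with struct.unpack("<I", ...). The same round trip in isolation, with struct.pack standing in for a real cxacru-cf.bin:

import struct

# Pack four little-endian u32 values the way cxacru-cf.bin stores them,
# then decode them back in 4-byte steps, printing index=value like the script.
blob = struct.pack("<4I", 1, 2, 300, 70000)
for i in range(0, len(blob), 4):
    (value,) = struct.unpack("<I", blob[i:i + 4])
    print("{0:x}={1}".format(i // 4, value))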
catapult-project/catapult
telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend.py
3
21781
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import absolute_import import logging import re import socket import sys import six from py_utils import exc_util from py_utils import retry_util from telemetry.core import exceptions from telemetry import decorators from telemetry.internal.backends import browser_backend from telemetry.internal.backends.chrome_inspector import devtools_http from telemetry.internal.backends.chrome_inspector import inspector_backend from telemetry.internal.backends.chrome_inspector import inspector_websocket from telemetry.internal.backends.chrome_inspector import memory_backend from telemetry.internal.backends.chrome_inspector import system_info_backend from telemetry.internal.backends.chrome_inspector import tracing_backend from telemetry.internal.backends.chrome_inspector import window_manager_backend from telemetry.internal.platform.tracing_agent import ( chrome_tracing_devtools_manager) class TabNotFoundError(exceptions.Error): pass class UnsupportedVersionError(exceptions.Error): pass # Only versions of Chrome from M58 and above are supported. Older versions # did not support many of the modern features currently in use by Telemetry. MIN_SUPPORTED_BRANCH_NUMBER = 3029 # The first WebSocket connections or calls against a newly-started # browser, specifically in Debug builds, can take a long time. Give # them 60s to complete instead of the default 10s used in many places # in this file. _FIRST_CALL_TIMEOUT = 60 # These are possible exceptions raised when the DevTools agent is not ready # to accept incomming connections. _DEVTOOLS_CONNECTION_ERRORS = ( devtools_http.DevToolsClientConnectionError, inspector_websocket.WebSocketException, socket.error) def GetDevToolsBackEndIfReady(devtools_port, app_backend, browser_target=None, enable_tracing=True): client = _DevToolsClientBackend(app_backend) try: client.Connect(devtools_port, browser_target, enable_tracing) logging.info('DevTools agent ready at %s', client) except _DEVTOOLS_CONNECTION_ERRORS as exc: logging.info('DevTools agent at %s not ready yet: %s', client, exc) client = None return client class FuchsiaBrowserTargetNotFoundException(Exception): pass class _DevToolsClientBackend(object): """An object that communicates with Chrome's devtools. This class owns a map of InspectorBackends. It is responsible for creating and destroying them. """ def __init__(self, app_backend): """Create an object able to connect with the DevTools agent. Args: app_backend: The app that contains the DevTools agent. """ self._app_backend = app_backend self._browser_target = None self._forwarder = None self._devtools_http = None self._browser_websocket = None self._created = False self._local_port = None self._remote_port = None # Other backends. 
self._tracing_backend = None self._memory_backend = None self._system_info_backend = None self._wm_backend = None self._devtools_context_map_backend = _DevToolsContextMapBackend(self) def __str__(self): s = self.browser_target_url if self.local_port != self.remote_port: s = '%s (remote=%s)' % (s, self.remote_port) return s @property def local_port(self): return self._local_port @property def remote_port(self): return self._remote_port @property def browser_target_url(self): # For Fuchsia browsers, we get the browser_target through a JSON request if self.platform_backend.GetOSName() == 'fuchsia': resp = self.GetVersion() if 'webSocketDebuggerUrl' in resp: return resp['webSocketDebuggerUrl'] else: raise FuchsiaBrowserTargetNotFoundException( 'Could not get the browser target.') return 'ws://127.0.0.1:%i%s' % (self._local_port, self._browser_target) @property def app_backend(self): return self._app_backend @property def platform_backend(self): return self._app_backend.platform_backend @property def supports_overriding_memory_pressure_notifications(self): return ( isinstance(self.app_backend, browser_backend.BrowserBackend) and self.app_backend.supports_overriding_memory_pressure_notifications) @property def is_tracing_running(self): return self._tracing_backend.is_tracing_running @property def has_tracing_client(self): return self._tracing_backend != None def Connect(self, devtools_port, browser_target, enable_tracing=True): try: self._Connect(devtools_port, browser_target, enable_tracing) except: self.Close() # Close any connections made if failed to connect to all. raise @retry_util.RetryOnException(devtools_http.DevToolsClientUrlError, retries=3) def _WaitForConnection(self, retries=None): del retries self._devtools_http.Request('') def _SetUpPortForwarding(self, devtools_port): self._forwarder = self.platform_backend.forwarder_factory.Create( local_port=None, # Forwarder will choose an available port. remote_port=devtools_port, reverse=True) self._local_port = self._forwarder._local_port self._remote_port = self._forwarder._remote_port self._devtools_http = devtools_http.DevToolsHttp(self.local_port) # For Fuchsia, wait until port forwarding has started working. if self.platform_backend.GetOSName() == 'fuchsia': self._WaitForConnection() def _Connect(self, devtools_port, browser_target, enable_tracing): """Attempt to connect to the DevTools client. Args: devtools_port: The devtools_port uniquely identifies the DevTools agent. browser_target: An optional string to override the default path used to establish a websocket connection with the browser inspector. enable_tracing: Defines if a tracing_client is created. Raises: Any of _DEVTOOLS_CONNECTION_ERRORS if failed to establish the connection. """ self._browser_target = browser_target or '/devtools/browser' self._SetUpPortForwarding(devtools_port) # If the agent is not alive and ready, trying to get the branch number will # raise a devtools_http.DevToolsClientConnectionError. branch_number = self.GetChromeBranchNumber() if branch_number < MIN_SUPPORTED_BRANCH_NUMBER: raise UnsupportedVersionError( 'Chrome branch number %d is no longer supported' % branch_number) # Ensure that the inspector websocket is ready. This may raise a # inspector_websocket.WebSocketException or socket.error if not ready. 
self._browser_websocket = inspector_websocket.InspectorWebsocket() self._browser_websocket.Connect(self.browser_target_url, timeout=10) chrome_tracing_devtools_manager.RegisterDevToolsClient(self) # If there is a trace_config it means that Telemetry has already started # Chrome tracing via a startup config. The TracingBackend also needs needs # this config to initialize itself correctly. if enable_tracing: trace_config = ( self.platform_backend.tracing_controller_backend.GetChromeTraceConfig()) self._tracing_backend = tracing_backend.TracingBackend( self._browser_websocket, trace_config) @exc_util.BestEffort def Close(self): if self._tracing_backend is not None: self._tracing_backend.Close() self._tracing_backend = None if self._memory_backend is not None: self._memory_backend.Close() self._memory_backend = None if self._system_info_backend is not None: self._system_info_backend.Close() self._system_info_backend = None if self._wm_backend is not None: self._wm_backend.Close() self._wm_backend = None if self._devtools_context_map_backend is not None: self._devtools_context_map_backend.Clear() self._devtools_context_map_backend = None # Close the DevTools connections last (in case the backends above still # need to interact with them while closing). if self._browser_websocket is not None: self._browser_websocket.Disconnect() self._browser_websocket = None if self._devtools_http is not None: self._devtools_http.Disconnect() self._devtools_http = None if self._forwarder is not None: self._forwarder.Close() self._forwarder = None def CloseBrowser(self): """Close the browser instance.""" request = { 'method': 'Browser.close', } self._browser_websocket.SyncRequest(request, timeout=60) def IsAlive(self): """Whether the DevTools server is available and connectable.""" if self._devtools_http is None: return False try: self._devtools_http.Request('') except devtools_http.DevToolsClientConnectionError: return False else: return True @decorators.Cache def GetVersion(self): """Return the version dict as provided by the DevTools agent.""" return self._devtools_http.RequestJson('version') def GetChromeBranchNumber(self): # Detect version information. resp = self.GetVersion() if 'Protocol-Version' in resp: if 'Browser' in resp: branch_number_match = re.search(r'.+/\d+\.\d+\.(\d+)\.\d+', resp['Browser']) if not branch_number_match and 'User-Agent' in resp: branch_number_match = re.search( r'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari', resp['User-Agent']) if branch_number_match: branch_number = int(branch_number_match.group(1)) if branch_number: return branch_number # Branch number can't be determined, so fail any branch number checks. return 0 def _ListInspectableContexts(self): return self._devtools_http.RequestJson('') def RequestNewTab(self, timeout, in_new_window=False, url=None): """Creates a new tab, either in new window or current window. Returns: A dict of a parsed JSON object as returned by DevTools. Example: If an error is present, the dict will contain an 'error' key. If no error is present, the result is present in the 'result' key: { "result": { "targetId": "id-string" # This is the ID for the tab. } } """ request = { 'method': 'Target.createTarget', 'params': { 'url': url if url else 'about:blank', 'newWindow': in_new_window } } return self._browser_websocket.SyncRequest(request, timeout) def CloseTab(self, tab_id, timeout): """Closes the tab with the given id. 
Raises: devtools_http.DevToolsClientConnectionError TabNotFoundError """ try: return self._devtools_http.Request( 'close/%s' % tab_id, timeout=timeout) except devtools_http.DevToolsClientUrlError: error = TabNotFoundError( 'Unable to close tab, tab id not found: %s' % tab_id) six.reraise(error, None, sys.exc_info()[2]) def ActivateTab(self, tab_id, timeout): """Activates the tab with the given id. Raises: devtools_http.DevToolsClientConnectionError TabNotFoundError """ try: return self._devtools_http.Request( 'activate/%s' % tab_id, timeout=timeout) except devtools_http.DevToolsClientUrlError: error = TabNotFoundError( 'Unable to activate tab, tab id not found: %s' % tab_id) six.reraise(error, None, sys.exc_info()[2]) def GetUrl(self, tab_id): """Returns the URL of the tab with |tab_id|, as reported by devtools. Raises: devtools_http.DevToolsClientConnectionError """ for c in self._ListInspectableContexts(): if c['id'] == tab_id: return c['url'] return None def IsInspectable(self, tab_id): """Whether the tab with |tab_id| is inspectable, as reported by devtools. Raises: devtools_http.DevToolsClientConnectionError """ contexts = self._ListInspectableContexts() return tab_id in [c['id'] for c in contexts] def GetUpdatedInspectableContexts(self): """Returns an updated instance of _DevToolsContextMapBackend.""" contexts = self._ListInspectableContexts() self._devtools_context_map_backend._Update(contexts) return self._devtools_context_map_backend def _CreateWindowManagerBackendIfNeeded(self): if not self._wm_backend: self._wm_backend = window_manager_backend.WindowManagerBackend( self._browser_websocket) def _CreateMemoryBackendIfNeeded(self): assert self.supports_overriding_memory_pressure_notifications if not self._memory_backend: self._memory_backend = memory_backend.MemoryBackend( self._browser_websocket) def _CreateSystemInfoBackendIfNeeded(self): if not self._system_info_backend: self._system_info_backend = system_info_backend.SystemInfoBackend( self.browser_target_url) def StartChromeTracing(self, trace_config, transfer_mode=None, timeout=20): """ Args: trace_config: An tracing_config.TracingConfig instance. transfer_mode: Defaults to using 'ReturnAsStream' transfer mode for Chrome tracing. Can be set to 'ReportEvents'. timeout: Time waited for websocket to receive a response. """ if not self._tracing_backend: return assert trace_config and trace_config.enable_chrome_trace return self._tracing_backend.StartTracing( trace_config.chrome_trace_config, transfer_mode, timeout) def RecordChromeClockSyncMarker(self, sync_id): assert self.is_tracing_running, 'Tracing must be running to clock sync.' self._tracing_backend.RecordClockSyncMarker(sync_id) def StopChromeTracing(self): if not self._tracing_backend: return assert self.is_tracing_running try: backend = self.FirstTabBackend() if backend is not None: backend.AddTimelineMarker('first-renderer-thread') backend.AddTimelineMarker(backend.id) else: logging.warning('No page inspector backend found.') finally: self._tracing_backend.StopTracing() def _IterInspectorBackends(self, types): """Iterate over inspector backends from this client. Note: The devtools client might list contexts which, howerver, do not yet have a live DevTools instance to connect to (e.g. background tabs which may have been discarded or not yet created). In such case this method will hang and eventually timeout when trying to create an inspector backend to communicate with such contexts. 
""" context_map = self.GetUpdatedInspectableContexts() for context in context_map.contexts: if context['type'] in types: yield context_map.GetInspectorBackend(context['id']) def FirstTabBackend(self): """Obtain the inspector backend for the firstly created tab.""" return next(self._IterInspectorBackends(['page']), None) def CollectChromeTracingData(self, trace_data_builder, timeout=120): if not self._tracing_backend: return self._tracing_backend.CollectTraceData(trace_data_builder, timeout) # This call may be made early during browser bringup and may cause the # GPU process to launch, which takes a long time in Debug builds and # has been seen to frequently exceed the default 10s timeout used # throughout this file. Use a larger timeout by default. Callers # typically do not override this. def GetSystemInfo(self, timeout=_FIRST_CALL_TIMEOUT): self._CreateSystemInfoBackendIfNeeded() return self._system_info_backend.GetSystemInfo(timeout) def DumpMemory(self, timeout=None, detail_level=None): """Dumps memory. Args: timeout: seconds to wait between websocket responses. detail_level: Level of detail in memory dump. One of ['detailed', 'light', 'background']. Defaults to 'detailed'. Returns: GUID of the generated dump if successful, None otherwise. Raises: TracingTimeoutException: If more than |timeout| seconds has passed since the last time any data is received. TracingUnrecoverableException: If there is a websocket error. TracingUnexpectedResponseException: If the response contains an error or does not contain the expected result. """ if not self._tracing_backend: return None return self._tracing_backend.DumpMemory( timeout=timeout, detail_level=detail_level) def SetMemoryPressureNotificationsSuppressed(self, suppressed, timeout=30): """Enable/disable suppressing memory pressure notifications. Args: suppressed: If true, memory pressure notifications will be suppressed. timeout: The timeout in seconds. Raises: MemoryTimeoutException: If more than |timeout| seconds has passed since the last time any data is received. MemoryUnrecoverableException: If there is a websocket error. MemoryUnexpectedResponseException: If the response contains an error or does not contain the expected result. """ self._CreateMemoryBackendIfNeeded() return self._memory_backend.SetMemoryPressureNotificationsSuppressed( suppressed, timeout) def SimulateMemoryPressureNotification(self, pressure_level, timeout=30): """Simulate a memory pressure notification. Args: pressure level: The memory pressure level of the notification ('moderate' or 'critical'). timeout: The timeout in seconds. Raises: MemoryTimeoutException: If more than |timeout| seconds has passed since the last time any data is received. MemoryUnrecoverableException: If there is a websocket error. MemoryUnexpectedResponseException: If the response contains an error or does not contain the expected result. """ self._CreateMemoryBackendIfNeeded() return self._memory_backend.SimulateMemoryPressureNotification( pressure_level, timeout) @property def window_manager_backend(self): """Return the window manager backend. This should be called by a CrOS backend only. 
""" self._CreateWindowManagerBackendIfNeeded() return self._wm_backend def ExecuteBrowserCommand(self, command_id, timeout): request = { 'method': 'Browser.executeBrowserCommand', 'params': { 'commandId': command_id, } } self._browser_websocket.SyncRequest(request, timeout) def SetDownloadBehavior(self, behavior, downloadPath, timeout): request = { 'method': 'Browser.setDownloadBehavior', 'params': { 'behavior': behavior, 'downloadPath': downloadPath, } } self._browser_websocket.SyncRequest(request, timeout) def GetWindowForTarget(self, target_id): request = { 'method': 'Browser.getWindowForTarget', 'params': { 'targetId': target_id } } return self._browser_websocket.SyncRequest(request, timeout=30) def SetWindowBounds(self, window_id, bounds): request = { 'method': 'Browser.setWindowBounds', 'params': { 'windowId': window_id, 'bounds': bounds } } self._browser_websocket.SyncRequest(request, timeout=30) class _DevToolsContextMapBackend(object): def __init__(self, devtools_client): self._devtools_client = devtools_client self._contexts = None self._inspector_backends_dict = {} @property def contexts(self): """The most up to date contexts data. Returned in the order returned by devtools agent.""" return self._contexts def GetContextInfo(self, context_id): for context in self._contexts: if context['id'] == context_id: return context raise KeyError('Cannot find a context with id=%s' % context_id) def GetInspectorBackend(self, context_id): """Gets an InspectorBackend instance for the given context_id. This lazily creates InspectorBackend for the context_id if it does not exist yet. Otherwise, it will return the cached instance.""" if context_id in self._inspector_backends_dict: return self._inspector_backends_dict[context_id] for context in self._contexts: if context['id'] == context_id: new_backend = inspector_backend.InspectorBackend( self._devtools_client, context) self._inspector_backends_dict[context_id] = new_backend return new_backend raise KeyError('Cannot find a context with id=%s' % context_id) def _Update(self, contexts): # Remove InspectorBackend that is not in the current inspectable # contexts list. context_ids = [context['id'] for context in contexts] for context_id in list(self._inspector_backends_dict.keys()): if context_id not in context_ids: backend = self._inspector_backends_dict[context_id] backend.Disconnect() del self._inspector_backends_dict[context_id] valid_contexts = [] for context in contexts: # If the context does not have webSocketDebuggerUrl, skip it. # If an InspectorBackend is already created for the tab, # webSocketDebuggerUrl will be missing, and this is expected. context_id = context['id'] if context_id not in self._inspector_backends_dict: if 'webSocketDebuggerUrl' not in context: logging.debug('webSocketDebuggerUrl missing, removing %s', context_id) continue valid_contexts.append(context) self._contexts = valid_contexts def Clear(self): for backend in self._inspector_backends_dict.values(): backend.Disconnect() self._inspector_backends_dict = {} self._contexts = None
bsd-3-clause
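_WaitForConnection above relies on telemetry's retry_util.RetryOnException decorator; its unused retries=None parameter exists so the decorator can override the count at call time. A generic sketch of that retry-on-exception pattern in plain Python; telemetry's real implementation is more featureful:

import functools

def retry_on_exception(exc_type, retries=3):
    """Retry the wrapped callable up to `retries` extra times on exc_type."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries + 1):
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    if attempt == retries:
                        raise  # out of retries; surface the last error
        return wrapper
    return decorator

@retry_on_exception(ConnectionError, retries=3)
def ping():
    ...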
v-zhongz/azure-linux-extensions
VMBackup/main/Utils/WAAgentUtil.py
11
2528
# Wrapper module for waagent
#
# waagent is not written as a module. This wrapper module is created
# to use the waagent code as a module.
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#

import imp
import os
import os.path

#
# The following code will search and load waagent code and expose
# it as a submodule of current module
#
def searchWAAgent():
    agentPath = '/usr/sbin/waagent'
    if(os.path.isfile(agentPath)):
        return agentPath
    user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
    for user_path in user_paths:
        agentPath = os.path.join(user_path, 'waagent')
        if(os.path.isfile(agentPath)):
            return agentPath
    return None

agentPath = searchWAAgent()
if(agentPath):
    waagent = imp.load_source('waagent', agentPath)
else:
    raise Exception("Can't load waagent.")

if not hasattr(waagent, "AddExtensionEvent"):
    """
    If AddExtensionEvent is not defined, provide a dummy impl.
    """
    def _AddExtensionEvent(*args, **kwargs):
        pass
    waagent.AddExtensionEvent = _AddExtensionEvent

if not hasattr(waagent, "WALAEventOperation"):
    class _WALAEventOperation:
        HeartBeat = "HeartBeat"
        Provision = "Provision"
        Install = "Install"
        UnInstall = "UnInstall"  # attribute name typo ("UnIsntall") fixed
        Disable = "Disable"
        Enable = "Enable"
        Download = "Download"
        Upgrade = "Upgrade"
        Update = "Update"
    waagent.WALAEventOperation = _WALAEventOperation

__ExtensionName__ = None
def InitExtensionEventLog(name):
    global __ExtensionName__  # without this, the assignment only bound a local
    __ExtensionName__ = name

def AddExtensionEvent(name=None,
                      op=waagent.WALAEventOperation.Enable,
                      isSuccess=False,
                      message=None):
    if name is None:
        # look the name up at call time; a default argument value would be
        # frozen to None at import, before InitExtensionEventLog runs
        name = __ExtensionName__
    if name is not None:
        waagent.AddExtensionEvent(name=name, op=op, isSuccess=isSuccess,
                                  message=message)
apache-2.0
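The wrapper above imports /usr/sbin/waagent, a script without a .py suffix, via imp.load_source. The same trick in isolation; note imp is deprecated in Python 3 and removed in 3.12, matching the Python 2.7 era the header requires:

import imp
import os
import tempfile

# Write a throwaway extension-less "script", then import it as a module.
with tempfile.NamedTemporaryFile("w", suffix="", delete=False) as fh:
    fh.write("GREETING = 'hello'\n")
    path = fh.name

mod = imp.load_source("demo_module", path)
print(mod.GREETING)  # hello
os.unlink(path)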
ibinti/intellij-community
python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/topology.py
311
2226
""" This module houses the GEOS ctypes prototype functions for the topological operations on geometries. """ __all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull', 'geos_difference', 'geos_envelope', 'geos_intersection', 'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify', 'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate'] from ctypes import c_char_p, c_double, c_int from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string from django.contrib.gis.geos.prototypes.geom import geos_char_p from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc def topology(func, *args): "For GEOS unary topology functions." argtypes = [GEOM_PTR] if args: argtypes += args func.argtypes = argtypes func.restype = GEOM_PTR func.errcheck = check_geom return func ### Topology Routines ### geos_boundary = topology(GEOSFunc('GEOSBoundary')) geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int) geos_centroid = topology(GEOSFunc('GEOSGetCentroid')) geos_convexhull = topology(GEOSFunc('GEOSConvexHull')) geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR) geos_envelope = topology(GEOSFunc('GEOSEnvelope')) geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR) geos_linemerge = topology(GEOSFunc('GEOSLineMerge')) geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface')) geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double) geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double) geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR) geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR) # GEOSRelate returns a string, not a geometry. geos_relate = GEOSFunc('GEOSRelate') geos_relate.argtypes = [GEOM_PTR, GEOM_PTR] geos_relate.restype = geos_char_p geos_relate.errcheck = check_string # Routines only in GEOS 3.1+ if GEOS_PREPARE: geos_cascaded_union = GEOSFunc('GEOSUnionCascaded') geos_cascaded_union.argtypes = [GEOM_PTR] geos_cascaded_union.restype = GEOM_PTR __all__.append('geos_cascaded_union')
apache-2.0
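Each prototype above is configured with the ctypes argtypes/restype pair plus an errcheck hook. The same discipline applied to a function everyone has, libc's strlen:

import ctypes
import ctypes.util

# find_library("c") resolves libc on Linux/macOS; declaring the signature
# lets ctypes convert arguments and the return value correctly.
libc = ctypes.CDLL(ctypes.util.find_library("c"))
libc.strlen.argtypes = [ctypes.c_char_p]
libc.strlen.restype = ctypes.c_size_t

print(libc.strlen(b"geos"))  # 4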
googlecartographer/cartographer
docs/source/conf.py
5
9092
# -*- coding: utf-8 -*-
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Cartographer documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 8 10:41:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
from datetime import datetime

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Cartographer'
copyright = u'{year} The Cartographer Authors'.format(year=datetime.now().year)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Cartographerdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Cartographer.tex', u'Cartographer Documentation',
     u'The Cartographer Authors', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cartographer', u'Cartographer Documentation',
     [u'The Cartographer Authors'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Cartographer', u'Cartographer Documentation',
     u'The Cartographer Authors', 'Cartographer',
     'Cartographer is a system that provides real-time simultaneous '
     'localization and mapping (SLAM) in 2D and 3D across multiple platforms '
     'and sensor configurations.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
apache-2.0
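The conf.py above customizes only a few outputs for the Cartographer docs (the default HTML theme, one LaTeX manual, one man page, one Texinfo entry); everything commented out stays at the Sphinx defaults. As a hedged sketch, the same configuration can be exercised programmatically; the docs/ source directory and _build output paths below are assumptions for illustration, not taken from the repository:

# Sketch: drive an HTML build of these docs from Python instead of sphinx-build.
# "docs" and "docs/_build" are assumed paths.
from sphinx.application import Sphinx

app = Sphinx(
    srcdir="docs",                      # directory holding conf.py and index.rst (assumed)
    confdir="docs",                     # the conf.py shown above lives here
    outdir="docs/_build/html",          # rendered HTML target
    doctreedir="docs/_build/doctrees",  # pickled doctree cache
    buildername="html",                 # picks up html_theme = 'default' from conf.py
)
app.build()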
partofthething/home-assistant
tests/components/apple_tv/conftest.py
8
3388
"""Fixtures for component.""" from unittest.mock import patch from pyatv import conf, net import pytest from .common import MockPairingHandler, create_conf @pytest.fixture(autouse=True, name="mock_scan") def mock_scan_fixture(): """Mock pyatv.scan.""" with patch("homeassistant.components.apple_tv.config_flow.scan") as mock_scan: async def _scan(loop, timeout=5, identifier=None, protocol=None, hosts=None): if not mock_scan.hosts: mock_scan.hosts = hosts return mock_scan.result mock_scan.result = [] mock_scan.hosts = None mock_scan.side_effect = _scan yield mock_scan @pytest.fixture(name="dmap_pin") def dmap_pin_fixture(): """Mock pyatv.scan.""" with patch("homeassistant.components.apple_tv.config_flow.randrange") as mock_pin: mock_pin.side_effect = lambda start, stop: 1111 yield mock_pin @pytest.fixture def pairing(): """Mock pyatv.scan.""" with patch("homeassistant.components.apple_tv.config_flow.pair") as mock_pair: async def _pair(config, protocol, loop, session=None, **kwargs): handler = MockPairingHandler( await net.create_session(session), config.get_service(protocol) ) handler.always_fail = mock_pair.always_fail return handler mock_pair.always_fail = False mock_pair.side_effect = _pair yield mock_pair @pytest.fixture def pairing_mock(): """Mock pyatv.scan.""" with patch("homeassistant.components.apple_tv.config_flow.pair") as mock_pair: async def _pair(config, protocol, loop, session=None, **kwargs): return mock_pair async def _begin(): pass async def _close(): pass mock_pair.close.side_effect = _close mock_pair.begin.side_effect = _begin mock_pair.pin = lambda pin: None mock_pair.side_effect = _pair yield mock_pair @pytest.fixture def full_device(mock_scan, dmap_pin): """Mock pyatv.scan.""" mock_scan.result.append( create_conf( "127.0.0.1", "MRP Device", conf.MrpService("mrpid", 5555), conf.DmapService("dmapid", None, port=6666), conf.AirPlayService("airplayid", port=7777), ) ) yield mock_scan @pytest.fixture def mrp_device(mock_scan): """Mock pyatv.scan.""" mock_scan.result.append( create_conf("127.0.0.1", "MRP Device", conf.MrpService("mrpid", 5555)) ) yield mock_scan @pytest.fixture def dmap_device(mock_scan): """Mock pyatv.scan.""" mock_scan.result.append( create_conf( "127.0.0.1", "DMAP Device", conf.DmapService("dmapid", None, port=6666), ) ) yield mock_scan @pytest.fixture def dmap_device_with_credentials(mock_scan): """Mock pyatv.scan.""" mock_scan.result.append( create_conf( "127.0.0.1", "DMAP Device", conf.DmapService("dmapid", "dummy_creds", port=6666), ) ) yield mock_scan @pytest.fixture def airplay_device(mock_scan): """Mock pyatv.scan.""" mock_scan.result.append( create_conf( "127.0.0.1", "AirPlay Device", conf.AirPlayService("airplayid", port=7777) ) ) yield mock_scan
mit
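For context, these fixtures are meant to be consumed by the apple_tv config-flow tests that sit next to this conftest. A hedged sketch of such a test follows; the flow step and result keys follow general Home Assistant test conventions and are assumptions here, not code from this repository:

# Illustrative test using the fixtures above; not part of the repository.
from homeassistant import config_entries

async def test_user_flow_sees_scanned_device(hass, mrp_device):
    """mock_scan (pulled in by mrp_device) returns one MRP device."""
    result = await hass.config_entries.flow.async_init(
        "apple_tv", context={"source": config_entries.SOURCE_USER}
    )
    # The first step of a user-initiated flow is a form.
    assert result["type"] == "form"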
fentas/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py
118
7503
# Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import logging import unittest2 as unittest from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.executive_mock import MockExecutive2 from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.common.system.systemhost_mock import MockSystemHost from webkitpy.port import Port from webkitpy.port.server_process_mock import MockServerProcess from webkitpy.port.xvfbdriver import XvfbDriver from webkitpy.tool.mocktool import MockOptions _log = logging.getLogger(__name__) class XvfbDriverTest(unittest.TestCase): def make_driver(self, worker_number=0, xorg_running=False, executive=None): port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release')) port._config.build_directory = lambda configuration: "/mock-build" port._server_process_constructor = MockServerProcess if xorg_running: port._executive._running_pids['Xorg'] = 108 driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True) driver._startup_delay_secs = 0 return driver def cleanup_driver(self, driver): # Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion, # killing the Xvfb process if present. Thus, this method should only be called from tests that do not # intend to test the behavior of XvfbDriver.stop. 
driver._xvfb_process = None def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False): OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs) self.assertTrue(driver._server_process.started) self.assertEqual(driver._server_process.env["DISPLAY"], expected_display) def test_start_no_pixel_tests(self): driver = self.make_driver() expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n" self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0") self.cleanup_driver(driver) def test_start_pixel_tests(self): driver = self.make_driver() expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n" self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True) self.cleanup_driver(driver) def test_start_arbitrary_worker_number(self): driver = self.make_driver(worker_number=17) expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n" self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True) self.cleanup_driver(driver) def test_next_free_display(self): output = "Xorg /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp" executive = MockExecutive2(output) driver = self.make_driver(executive=executive) self.assertEqual(driver._next_free_display(), 2) self.cleanup_driver(driver) output = "X /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb" executive = MockExecutive2(output) driver = self.make_driver(executive=executive) self.assertEqual(driver._next_free_display(), 1) self.cleanup_driver(driver) output = "Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp" executive = MockExecutive2(output) driver = self.make_driver(executive=executive) self.assertEqual(driver._next_free_display(), 1) self.cleanup_driver(driver) output = "Xvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :3 -screen 0 800x600x24 -nolisten tcp" executive = MockExecutive2(output) driver = self.make_driver(executive=executive) self.assertEqual(driver._next_free_display(), 2) self.cleanup_driver(driver) def test_start_next_worker(self): driver = self.make_driver() driver._next_free_display = lambda: 0 expected_logs = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n" self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True) self.cleanup_driver(driver) driver = self.make_driver() driver._next_free_display = lambda: 3 expected_logs = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n" self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":3", pixel_tests=True) self.cleanup_driver(driver) def test_stop(self): filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'}) port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release')) port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x)) driver = XvfbDriver(port, worker_number=0, 
pixel_tests=True) class FakeXvfbProcess(object): pid = 1234 driver._xvfb_process = FakeXvfbProcess() driver._lock_file = '/tmp/.X42-lock' expected_logs = "MOCK kill_process pid: 1234\n" OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs) self.assertIsNone(driver._xvfb_process) self.assertFalse(port._filesystem.exists(driver._lock_file))
bsd-3-clause
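The test_next_free_display cases above fully specify the parsing contract: scan `ps -eo comm,command` output for X, Xorg, or Xvfb processes, collect the display numbers they hold, and return the lowest unused one. A standalone sketch of that logic, reconstructed from the test expectations rather than copied from XvfbDriver:

import re

def next_free_display(ps_output):
    """Return the lowest display number not claimed by an X/Xorg/Xvfb process."""
    in_use = set()
    for line in ps_output.splitlines():
        # Match the command name, then the first ":N" display argument.
        match = re.match(r"(?:Xorg|Xvfb|X)\b.*?\s:(\d+)", line)
        if match:
            in_use.add(int(match.group(1)))
    display = 0
    while display in in_use:
        display += 1
    return display

# Mirrors the fixtures in test_next_free_display above:
assert next_free_display("Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp") == 1
assert next_free_display("Xvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp\n"
                         "Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp\n"
                         "Xvfb Xvfb :3 -screen 0 800x600x24 -nolisten tcp") == 2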
valkjsaaa/sl4a
python/src/Lib/contextlib.py
62
4136
"""Utilities for with-statement contexts. See PEP 343.""" import sys from functools import wraps __all__ = ["contextmanager", "nested", "closing"] class GeneratorContextManager(object): """Helper for @contextmanager decorator.""" def __init__(self, gen): self.gen = gen def __enter__(self): try: return self.gen.next() except StopIteration: raise RuntimeError("generator didn't yield") def __exit__(self, type, value, traceback): if type is None: try: self.gen.next() except StopIteration: return else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = type() try: self.gen.throw(type, value, traceback) raise RuntimeError("generator didn't stop after throw()") except StopIteration, exc: # Suppress the exception *unless* it's the same exception that # was passed to throw(). This prevents a StopIteration # raised inside the "with" statement from being suppressed return exc is not value except: # only re-raise if it's *not* the exception that was # passed to throw(), because __exit__() must not raise # an exception unless __exit__() itself failed. But throw() # has to raise the exception to signal propagation, so this # fixes the impedance mismatch between the throw() protocol # and the __exit__() protocol. # if sys.exc_info()[1] is not value: raise def contextmanager(func): """@contextmanager decorator. Typical usage: @contextmanager def some_generator(<arguments>): <setup> try: yield <value> finally: <cleanup> This makes this: with some_generator(<arguments>) as <variable>: <body> equivalent to this: <setup> try: <variable> = <value> <body> finally: <cleanup> """ @wraps(func) def helper(*args, **kwds): return GeneratorContextManager(func(*args, **kwds)) return helper @contextmanager def nested(*managers): """Support multiple context managers in a single with-statement. Code like this: with nested(A, B, C) as (X, Y, Z): <body> is equivalent to this: with A as X: with B as Y: with C as Z: <body> """ exits = [] vars = [] exc = (None, None, None) try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) yield vars except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if exc != (None, None, None): # Don't rely on sys.exc_info() still containing # the right information. Another exception may # have been raised and caught by an exit method raise exc[0], exc[1], exc[2] class closing(object): """Context to automatically close something at the end of a block. Code like this: with closing(<module>.open(<arguments>)) as f: <block> is equivalent to this: f = <module>.open(<arguments>) try: <block> finally: f.close() """ def __init__(self, thing): self.thing = thing def __enter__(self): return self.thing def __exit__(self, *exc_info): self.thing.close()
apache-2.0
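Since this is the Python 2 version of contextlib, a short usage sketch of its three exports may help; the tag names and URL are illustrative only:

# Python 2 usage sketch for contextmanager, nested and closing.
from contextlib import contextmanager, nested, closing
import urllib

@contextmanager
def tag(name):
    print "<%s>" % name        # <setup> runs up to the yield
    yield
    print "</%s>" % name       # <cleanup> runs when the with-block exits

with tag("p"):
    print "hello"

# nested() predates the multi-manager with-statement added in Python 2.7:
with nested(tag("div"), tag("span")):
    print "body"

# closing() adapts anything with a .close() method:
with closing(urllib.urlopen("http://www.python.org")) as page:
    print page.read(32)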
AbrahmAB/sugar
src/jarabe/controlpanel/gui.py
2
21394
# Copyright (C) 2008 One Laptop Per Child # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import logging from gettext import gettext as _ from gi.repository import GObject from gi.repository import Gtk from gi.repository import Gdk from gi.repository import GdkX11 from sugar3.graphics.icon import Icon from sugar3.graphics import style from sugar3.graphics.alert import Alert, TimeoutAlert from jarabe.model.session import get_session_manager from jarabe.controlpanel.toolbar import MainToolbar from jarabe.controlpanel.toolbar import SectionToolbar from jarabe import config from jarabe.model import shell _logger = logging.getLogger('ControlPanel') class ControlPanel(Gtk.Window): __gtype_name__ = 'SugarControlPanel' def __init__(self, window_xid=0): self.parent_window_xid = window_xid Gtk.Window.__init__(self) self._calculate_max_columns() self.set_border_width(style.LINE_WIDTH) self.set_position(Gtk.WindowPosition.CENTER_ALWAYS) self.set_decorated(False) self.set_resizable(False) self.set_modal(True) self.set_can_focus(True) self.connect('key-press-event', self.__key_press_event_cb) self._toolbar = None self._canvas = None self._table = None self._scrolledwindow = None self._separator = None self._section_view = None self._section_toolbar = None self._main_toolbar = None self._vbox = Gtk.VBox() self._hbox = Gtk.HBox() self._vbox.pack_start(self._hbox, True, True, 0) self._hbox.show() self._main_view = Gtk.EventBox() self._hbox.pack_start(self._main_view, True, True, 0) self._main_view.modify_bg(Gtk.StateType.NORMAL, style.COLOR_BLACK.get_gdk_color()) self._main_view.show() self.add(self._vbox) self._vbox.show() self.connect('realize', self.__realize_cb) self._options = self._get_options() self._current_option = None self._setup_main() self._setup_section() self._show_main_view() Gdk.Screen.get_default().connect( 'size-changed', self.__size_changed_cb) self._busy_count = 0 self._selected = [] def __realize_cb(self, widget): self.set_type_hint(Gdk.WindowTypeHint.DIALOG) window = self.get_window() window.set_accept_focus(True) if self.parent_window_xid > 0: display = Gdk.Display.get_default() parent = GdkX11.X11Window.foreign_new_for_display( display, self.parent_window_xid) window.set_transient_for(parent) # the modal windows counter is updated to disable hot keys - SL#4601 shell.get_model().push_modal() def __size_changed_cb(self, event): self._calculate_max_columns() def busy(self): if self._busy_count == 0: self._old_cursor = self.get_window().get_cursor() self._set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH)) self._busy_count += 1 def unbusy(self): self._busy_count -= 1 if self._busy_count == 0: self._set_cursor(self._old_cursor) def _set_cursor(self, cursor): self.get_window().set_cursor(cursor) Gdk.flush() def add_alert(self, alert): self._vbox.pack_start(alert, False, False, 0) self._vbox.reorder_child(alert, 2) def remove_alert(self, alert): self._vbox.remove(alert) def grab_focus(self): # overwrite 
grab focus in order to grab focus on the view self._main_view.get_child().grab_focus() def _calculate_max_columns(self): self._max_columns = int(0.285 * (float(Gdk.Screen.width()) / style.GRID_CELL_SIZE - 3)) offset = style.GRID_CELL_SIZE width = Gdk.Screen.width() - offset * 2 height = Gdk.Screen.height() - offset * 2 self.set_size_request(width, height) if hasattr(self, '_table'): for child in self._table.get_children(): child.destroy() self._setup_options() def _set_canvas(self, canvas): if self._canvas in self._main_view: self._main_view.remove(self._canvas) if canvas: self._main_view.add(canvas) self._canvas = canvas def _set_toolbar(self, toolbar): if self._toolbar: self._vbox.remove(self._toolbar) self._vbox.pack_start(toolbar, False, False, 0) self._vbox.reorder_child(toolbar, 0) self._toolbar = toolbar if not self._separator: self._separator = Gtk.HSeparator() self._vbox.pack_start(self._separator, False, False, 0) self._vbox.reorder_child(self._separator, 1) self._separator.show() def _setup_main(self): self._main_toolbar = MainToolbar() self._table = Gtk.Table() self._table.set_col_spacings(style.GRID_CELL_SIZE) self._table.set_row_spacings(style.GRID_CELL_SIZE) self._table.set_border_width(style.GRID_CELL_SIZE) self._scrolledwindow = Gtk.ScrolledWindow() self._scrolledwindow.set_can_focus(False) self._scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) self._scrolledwindow.add_with_viewport(self._table) child = self._scrolledwindow.get_child() child.modify_bg( Gtk.StateType.NORMAL, style.COLOR_BLACK.get_gdk_color()) self._setup_options() self._main_toolbar.connect('stop-clicked', self.__stop_clicked_cb) self._main_toolbar.connect('search-changed', self.__search_changed_cb) def _setup_options(self): # If the screen width only supports two columns, start # placing from the second row. if self._max_columns == 2: row = 1 column = 0 else: # About Me and About my computer are hardcoded below to use the # first two slots so we need to leave them free. 
row = 0 column = 2 options = self._options.keys() options.sort() for option in options: sectionicon = _SectionIcon(icon_name=self._options[option]['icon'], title=self._options[option]['title'], xo_color=self._options[option]['color'], pixel_size=style.GRID_CELL_SIZE) sectionicon.connect('button_press_event', self.__select_option_cb, option) sectionicon.show() if option == 'aboutme': self._table.attach(sectionicon, 0, 1, 0, 1) elif option == 'aboutcomputer': self._table.attach(sectionicon, 1, 2, 0, 1) else: self._table.attach(sectionicon, column, column + 1, row, row + 1) column += 1 if column == self._max_columns: column = 0 row += 1 self._options[option]['button'] = sectionicon def _show_main_view(self): if self._section_view is not None: self._section_view.destroy() self._section_view = None self._set_toolbar(self._main_toolbar) self._main_toolbar.show() self._set_canvas(self._scrolledwindow) self._main_view.modify_bg(Gtk.StateType.NORMAL, style.COLOR_BLACK.get_gdk_color()) self._table.show() self._scrolledwindow.show() entry = self._main_toolbar.get_entry() entry.set_text('') entry.connect('icon-press', self.__clear_icon_pressed_cb) self.grab_focus() def __key_press_event_cb(self, window, event): if event.keyval == Gdk.KEY_Return: if len(self._selected) == 1: self.show_section_view(self._selected[0]) return True if event.keyval == Gdk.KEY_Escape: if self._toolbar == self._main_toolbar: self.__stop_clicked_cb(None) self.destroy() else: self.__cancel_clicked_cb(None) return True # if the user clicked out of the window - fix SL #3188 if not self.is_active(): self.present() entry = self._main_toolbar.get_entry() if not entry.has_focus(): entry.grab_focus() return False def __clear_icon_pressed_cb(self, entry, icon_pos, event): self.grab_focus() def _update(self, query): self._selected = [] for option in self._options: found = False for key in self._options[option]['keywords']: if query.lower() in key.lower(): self._options[option]['button'].set_sensitive(True) self._selected.append(option) found = True break if not found: self._options[option]['button'].set_sensitive(False) def _setup_section(self): self._section_toolbar = SectionToolbar() self._section_toolbar.connect('cancel-clicked', self.__cancel_clicked_cb) self._section_toolbar.connect('accept-clicked', self.__accept_clicked_cb) def show_section_view(self, option): self._set_toolbar(self._section_toolbar) icon = self._section_toolbar.get_icon() icon.set_from_icon_name(self._options[option]['icon'], Gtk.IconSize.LARGE_TOOLBAR) icon.props.xo_color = self._options[option]['color'] title = self._section_toolbar.get_title() title.set_text(self._options[option]['title']) self._section_toolbar.show() self._current_option = option mod = __import__('.'.join(('cpsection', option, 'view')), globals(), locals(), ['view']) view_class = getattr(mod, self._options[option]['view'], None) mod = __import__('.'.join(('cpsection', option, 'model')), globals(), locals(), ['model']) model = ModelWrapper(mod) try: self.busy() self._section_view = view_class(model, self._options[option]['alerts']) self._set_canvas(self._section_view) self._section_view.show() finally: self.unbusy() self._section_view.connect('notify::is-valid', self.__valid_section_cb) self._section_view.connect('notify::is-cancellable', self.__cancellable_section_cb) self._section_view.connect('request-close', self.__close_request_cb) self._section_view.connect('add-alert', self.__create_restart_alert_cb) self._section_view.connect('set-toolbar-sensitivity', 
self.__set_toolbar_sensitivity_cb) self._main_view.modify_bg(Gtk.StateType.NORMAL, style.COLOR_WHITE.get_gdk_color()) def set_section_view_auto_close(self): """Automatically close the control panel if there is "nothing to do" """ self._section_view.auto_close = True def _get_options(self): """Get the available option information from the extensions """ options = {} path = os.path.join(config.ext_path, 'cpsection') folder = os.listdir(path) for item in folder: if os.path.isdir(os.path.join(path, item)) and \ os.path.exists(os.path.join(path, item, '__init__.py')): try: mod = __import__('.'.join(('cpsection', item)), globals(), locals(), [item]) view_class = getattr(mod, 'CLASS', None) if view_class is not None: options[item] = {} options[item]['alerts'] = [] options[item]['view'] = view_class options[item]['icon'] = getattr(mod, 'ICON', item) options[item]['title'] = getattr(mod, 'TITLE', item) options[item]['color'] = getattr(mod, 'COLOR', None) keywords = getattr(mod, 'KEYWORDS', []) keywords.append(options[item]['title'].lower()) if item not in keywords: keywords.append(item) options[item]['keywords'] = keywords else: _logger.debug('no CLASS attribute in %r', item) except Exception: logging.exception('Exception while loading extension:') return options def __cancel_clicked_cb(self, widget): self._section_view.undo() self._options[self._current_option]['alerts'] = [] self._section_toolbar.accept_button.set_sensitive(True) self._show_main_view() def __accept_clicked_cb(self, widget): if hasattr(self._section_view, "apply"): self._section_view.apply() if self._section_view.needs_restart: self.__set_toolbar_sensitivity_cb(False) if self._section_view.show_restart_alert: self.__create_restart_alert_cb() else: self._show_main_view() def __set_toolbar_sensitivity_cb(self, value=True, widget=None, event=None): self._section_toolbar.accept_button.set_sensitive(value) self._section_toolbar.cancel_button.set_sensitive(value) def __create_restart_alert_cb(self, widget=None, event=None): alert = Alert() alert.props.title = _('Warning') alert.props.msg = self._section_view.restart_msg if self._section_view.props.is_cancellable: icon = Icon(icon_name='dialog-cancel') alert.add_button(Gtk.ResponseType.CANCEL, _('Cancel changes'), icon) icon.show() if self._section_view.props.is_deferrable: icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.ACCEPT, _('Later'), icon) icon.show() icon = Icon(icon_name='system-restart') alert.add_button(Gtk.ResponseType.APPLY, _('Restart now'), icon) icon.show() self.add_alert(alert) alert.connect('response', self.__response_cb) alert.show() def __response_cb(self, alert, response_id): self.remove_alert(alert) self._section_toolbar.accept_button.set_sensitive(True) self._section_toolbar.cancel_button.set_sensitive(True) if response_id is Gtk.ResponseType.CANCEL: self._section_view.undo() self._section_view.setup() self._options[self._current_option]['alerts'] = [] elif response_id is Gtk.ResponseType.ACCEPT: self._options[self._current_option]['alerts'] = \ self._section_view.restart_alerts self._show_main_view() elif response_id is Gtk.ResponseType.APPLY: self.busy() self._section_toolbar.accept_button.set_sensitive(False) self._section_toolbar.cancel_button.set_sensitive(False) get_session_manager().logout() GObject.timeout_add_seconds(4, self.__quit_timeout_cb) def __quit_timeout_cb(self): self.unbusy() alert = TimeoutAlert(30) alert.props.title = _('An activity is not responding.') alert.props.msg = _('You may lose unsaved work if you continue.') 
alert.connect('response', self.__quit_accept_cb) self.add_alert(alert) alert.show() def __quit_accept_cb(self, alert, response_id): self.remove_alert(alert) if response_id is Gtk.ResponseType.CANCEL: get_session_manager().cancel_shutdown() self._section_toolbar.accept_button.set_sensitive(True) self._section_toolbar.cancel_button.set_sensitive(True) else: self.busy() get_session_manager().shutdown_completed() def __select_option_cb(self, button, event, option): self.show_section_view(option) def __search_changed_cb(self, maintoolbar, query): self._update(query) def __stop_clicked_cb(self, widget): shell.get_model().pop_modal() self.destroy() def __close_request_cb(self, widget, event=None): self.destroy() def __valid_section_cb(self, section_view, pspec): section_is_valid = section_view.props.is_valid self._section_toolbar.accept_button.set_sensitive(section_is_valid) def __cancellable_section_cb(self, section_view, pspec): cancellable = section_view.props.is_cancellable self._section_toolbar.cancel_button.set_sensitive(cancellable) class ModelWrapper(object): def __init__(self, module): self._module = module self._options = {} self._setup() def _setup(self): methods = dir(self._module) for method in methods: if method.startswith('get_') and method[4:] != 'color': try: self._options[method[4:]] = getattr(self._module, method)() except Exception: self._options[method[4:]] = None def __getattr__(self, name): return getattr(self._module, name) def undo(self): for key in self._options.keys(): method = getattr(self._module, 'set_' + key, None) if method and self._options[key] is not None: try: method(self._options[key]) except Exception as detail: _logger.debug('Error undo option: %s', detail) if hasattr(ControlPanel, 'set_css_name'): ControlPanel.set_css_name('controlpanel') class _SectionIcon(Gtk.EventBox): __gtype_name__ = 'SugarSectionIcon' __gproperties__ = { 'icon-name': (str, None, None, None, GObject.PARAM_READWRITE), 'pixel-size': (object, None, None, GObject.PARAM_READWRITE), 'xo-color': (object, None, None, GObject.PARAM_READWRITE), 'title': (str, None, None, None, GObject.PARAM_READWRITE), } def __init__(self, **kwargs): self._icon_name = None self._pixel_size = style.GRID_CELL_SIZE self._xo_color = None self._title = 'No Title' Gtk.EventBox.__init__(self, **kwargs) self._vbox = Gtk.VBox() self._icon = Icon(icon_name=self._icon_name, pixel_size=self._pixel_size, xo_color=self._xo_color) self._vbox.pack_start(self._icon, expand=False, fill=False, padding=0) self._label = Gtk.Label(label=self._title) self._label.modify_fg(Gtk.StateType.NORMAL, style.COLOR_WHITE.get_gdk_color()) self._vbox.pack_start(self._label, expand=False, fill=False, padding=0) self._vbox.set_spacing(style.DEFAULT_SPACING) self.set_visible_window(False) self.set_app_paintable(True) self.set_events(Gdk.EventMask.BUTTON_PRESS_MASK) self.add(self._vbox) self._vbox.show() self._label.show() self._icon.show() def get_icon(self): return self._icon def do_set_property(self, pspec, value): if pspec.name == 'icon-name': if self._icon_name != value: self._icon_name = value elif pspec.name == 'pixel-size': if self._pixel_size != value: self._pixel_size = value elif pspec.name == 'xo-color': if self._xo_color != value: self._xo_color = value elif pspec.name == 'title': if self._title != value: self._title = value def do_get_property(self, pspec): if pspec.name == 'icon-name': return self._icon_name elif pspec.name == 'pixel-size': return self._pixel_size elif pspec.name == 'xo-color': return self._xo_color elif pspec.name == 
'title': return self._title
gpl-3.0
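The _get_options loader above discovers sections by importing every cpsection/<name>/__init__.py under config.ext_path and reading module-level attributes from it. A minimal sketch of such an extension module; the section name and values are illustrative, but the attribute names (CLASS, ICON, TITLE, COLOR, KEYWORDS) are exactly the ones the loader reads:

# Sketch of <ext_path>/cpsection/datetime/__init__.py as _get_options() expects it.
CLASS = 'TimeZone'                 # view class name looked up in cpsection.datetime.view
ICON = 'module-datetime'           # falls back to the section name when absent
TITLE = 'Date & Time'              # label shown under the section icon
COLOR = None                       # an XoColor instance, or None for the default
KEYWORDS = ['timezone', 'clock']   # search terms; title and section name are appended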
pegasus-isi/pegasus
test/core/010-runtime-clustering/cluster.py
1
8643
#!/usr/bin/env python3
import os
import argparse
import configparser
import logging
import sys
import subprocess
from datetime import datetime
from pathlib import Path

from Pegasus.api import *

logging.basicConfig(level=logging.DEBUG)


def parse_args(args=sys.argv[1:]):
    parser = argparse.ArgumentParser(description="Runtime Cluster Test Workflow")

    parser.add_argument(
        "pegasus_keg_path",
        help="abs path to pegasus-keg install (e.g '/usr/bin/pegasus-keg')",
        metavar="PEGASUS_KEG_PATH",
    )

    parser.add_argument(
        "config_dir",
        help="name of test config dir (e.g. 'runtime-condorio', 'runtime-nonsharedfs')",
    )

    return parser.parse_args(args)


def write_sc(top_dir: Path, run_id: str):
    # get pegasus version
    cp = subprocess.run(
        ["pegasus-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if cp.returncode != 0:
        raise RuntimeError(
            "unable to call pegasus-version: {}".format(cp.stderr.decode().strip())
        )

    REMOTE_PEGASUS_HOME = "/lizard/scratch-90-days/bamboo/installs/pegasus-{}".format(
        cp.stdout.decode().strip()
    )

    sc = SiteCatalog()

    # --- cartman-data site ----------------------------------------------------
    cartman_data = Site(name="cartman-data", arch=Arch.X86_64, os_type=OS.LINUX)
    cartman_data.add_directories(
        Directory(
            Directory.SHARED_SCRATCH, top_dir / "staging-site/scratch",
        ).add_file_servers(
            FileServer(
                "gsiftp://bamboo.isi.edu" + str(top_dir / "staging-site/scratch"),
                Operation.ALL,
            )
        )
    )
    cartman_data.add_env(PEGASUS_HOME=REMOTE_PEGASUS_HOME)
    sc.add_sites(cartman_data)

    # --- condorpool site ------------------------------------------------------
    condorpool = Site(name="condorpool", arch=Arch.X86_64, os_type=OS.LINUX)
    condorpool.add_condor_profile(universe="vanilla")
    condorpool.add_pegasus_profile(style="condor")
    sc.add_sites(condorpool)

    # --- sharedfs site --------------------------------------------------------
    sharedfs = Site(name="sharedfs", arch=Arch.X86_64, os_type=OS.LINUX)
    sharedfs_dir1 = Directory(
        Directory.SHARED_STORAGE,
        Path("/lizard/scratch-90-days")
        / os.getenv("USER")
        / "storage/black-diamond-output"
        / run_id,
    )
    sharedfs_dir1.add_file_servers(
        FileServer(
            "file://"
            + str(
                Path("/lizard/scratch-90-days")
                / os.getenv("USER")
                / "storage/black-diamond-output"
                / run_id
            ),
            Operation.ALL,
        )
    )
    sharedfs.add_directories(sharedfs_dir1)

    sharedfs_dir2 = Directory(
        Directory.SHARED_SCRATCH,
        Path("/lizard/scratch-90-days") / os.getenv("USER") / "scratch" / run_id,
    )
    sharedfs_dir2.add_file_servers(
        FileServer(
            "file://"
            + str(
                Path("/lizard/scratch-90-days") / os.getenv("USER") / "scratch" / run_id
            ),
            Operation.ALL,
        )
    )
    sharedfs.add_directories(sharedfs_dir2)
    sharedfs.add_env(PEGASUS_HOME=REMOTE_PEGASUS_HOME)
    sharedfs.add_condor_profile(
        should_transfer_files="Yes",
        universe="vanilla",
        when_to_transfer_output="ON_EXIT",
    )
    sharedfs.add_pegasus_profile(style="condor")
    sc.add_sites(sharedfs)

    # --- local site -----------------------------------------------------------
    local_site_url = config.get("all", "local_site_url", fallback="")

    local = Site(name="local", arch=Arch.X86_64, os_type=OS.LINUX)
    local_dir1 = Directory(Directory.SHARED_STORAGE, top_dir / "outputs")
    local_dir1.add_file_servers(
        FileServer(local_site_url + str(top_dir / "outputs"), Operation.ALL)
    )
    local.add_directories(local_dir1)

    local_dir2 = Directory(Directory.SHARED_SCRATCH, top_dir / "work")
    local_dir2.add_file_servers(
        FileServer(local_site_url + str(top_dir / "work"), Operation.ALL)
    )
    local.add_directories(local_dir2)
    sc.add_sites(local)

    # write
    sc.write()


def write_rc(config: configparser.ConfigParser):
    input_file = config.get("all", "input_file")
    if input_file == "":
        input_file = Path("f.a")
    else:
        # is a directory such as '/lizard/scratch-90-days'
        input_dir = Path(input_file) / os.getenv("USER") / "inputs"
        input_dir.mkdir(parents=True, exist_ok=True)
        input_file = input_dir / "f.a"

    with input_file.open("w") as f:
        f.write("This is sample input to KEG")

    rc = ReplicaCatalog()
    rc.add_replica(
        site=config.get("all", "file_site"), lfn="f.a", pfn=input_file.resolve()
    )
    rc.write()


def write_tc(config: configparser.ConfigParser, pegasus_keg_path: str):
    tc = TransformationCatalog()
    for i in range(1, 3):
        sleep = Transformation(
            namespace="cluster",
            name="level{}".format(i),
            version="1.0",
            site=config.get("all", "executable_site"),
            pfn=config.get("all", "executable_url") + pegasus_keg_path,
            is_stageable=True,
            os_type=OS.LINUX,
            arch=Arch.X86_64,
        )
        sleep.add_pegasus_profile(
            clusters_size=config.get("all", "clusters_size"),
            clusters_max_runtime=config.get("all", "clusters_maxruntime"),
        )

        tc.add_transformations(sleep)

    tc.write()


if __name__ == "__main__":
    args = parse_args()
    TOP_DIR = Path().cwd().resolve()
    RUN_ID = datetime.now().strftime("%Y%m%d_%H%M%S")

    # --- validate test config dir ---------------------------------------------
    config_dir = Path(__file__).parent / args.config_dir
    if not config_dir.is_dir():
        raise ValueError(
            "config_dir: {} is not a directory or does not exist".format(config_dir)
        )

    config_file = config_dir / "test.config"
    if not config_file.is_file():
        raise ValueError(
            "{} does not contain required file: {}".format(config_dir, config_file)
        )

    # --- general test config ----------------------------------------------------
    config = configparser.ConfigParser(
        {
            "input_file": "",
            "workflow_name": "horizontal-clustering-test",
            "clusters_size": "3",
            "clusters_maxruntime": "7",
        }
    )
    config.read(str(config_file))

    # --- catalogs ---------------------------------------------------------------
    write_sc(TOP_DIR, RUN_ID)
    write_rc(config)
    write_tc(config, args.pegasus_keg_path)

    # --- workflow ---------------------------------------------------------------
    wf = Workflow(config.get("all", "workflow_name"))
    input_file = File("f.a")

    # create 4 lvl1 jobs
    for i in range(4):
        job = (
            Job(namespace="cluster", transformation="level1", version="1.0")
            .add_args("-a", "level1", "-T", i + 1, "-i", input_file)
            .add_inputs(input_file)
            .add_profiles(Namespace.PEGASUS, key="job.runtime", value=i + 1)
        )
        wf.add_jobs(job)

        # for each lvl1 job, create 4 lvl2 children
        for j in range(4):
            child = (
                Job(namespace="cluster", transformation="level2", version="1.0")
                .add_args("-a", "level2", "-T", ((j + 1) * 2))
                .add_profiles(Namespace.PEGASUS, key="runtime", value=((j + 1) * 2))
            )

            wf.add_jobs(child)
            wf.add_dependency(job=job, children=[child])

    # plan and run
    execution_site = config.get("all", "execution_site", fallback="local")
    staging_site = config.get("all", "staging_site", fallback="local")
    output_site = config.get("all", "output_site", fallback="local")

    top_pegasusrc = Path(__file__).parent / "pegasusrc"
    pegasusrc = config_dir / "pegasusrc"

    # include anything in __file__/pegasusrc in ./config_dir/pegasusrc
    with top_pegasusrc.open("r") as top_cfg, pegasusrc.open("a") as cfg:
        cfg.write(top_cfg.read())

    try:
        wf.plan(
            conf=str(pegasusrc),
            sites=[execution_site],
            staging_sites={execution_site: staging_site},
            output_sites=[output_site],
            dir="work/submit",
            cleanup="leaf",
            cluster=["horizontal"],
            verbose=3,
            submit=True,
        ).wait().analyze().statistics()
    except PegasusClientError as e:
        print(e)
        print(e.result.stdout)
apache-2.0
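The script pulls everything test-specific from the [all] section of test.config, layered over the four defaults supplied to ConfigParser. A sketch of how those defaults and fallbacks interact; the values below are assumptions, though the key names all appear in config.get("all", ...) calls in the script:

# Sketch: the ConfigParser defaults in cluster.py layered under a test.config.
import configparser

config = configparser.ConfigParser(
    {
        "input_file": "",
        "workflow_name": "horizontal-clustering-test",
        "clusters_size": "3",
        "clusters_maxruntime": "7",
    }
)
config.read_string("""
[all]
file_site = local
executable_site = local
executable_url = file://
execution_site = condorpool
staging_site = cartman-data
output_site = local
""")

# Keys missing from the file fall back to the constructor defaults ...
assert config.get("all", "clusters_size") == "3"
# ... and fallback= covers keys absent from both, as the script does for sites:
assert config.get("all", "execution_site", fallback="local") == "condorpool"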
bramd/django-phonenumber-field
setup.py
1
1568
from setuptools import setup, find_packages from phonenumber_field import __version__ setup( name="django-phonenumber-field", version=__version__, url='http://github.com/stefanfoulis/django-phonenumber-field', license='BSD', platforms=['OS Independent'], description="An international phone number field for django models.", install_requires=[ 'phonenumbers>=7.0.2', 'babel', ], long_description=open('README.rst').read(), author='Stefan Foulis', author_email='stefan.foulis@gmail.com', maintainer='Stefan Foulis', maintainer_email='stefan.foulis@gmail.com', packages=find_packages(), package_data = { 'phonenumber_field': [ 'locale/*/LC_MESSAGES/*', ], }, include_package_data=True, zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Internet :: WWW/HTTP', ] )
mit
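The package this setup.py distributes exposes its field through phonenumber_field.modelfields. A minimal hedged sketch of a model using it; the model itself is illustrative:

# Illustrative Django model using the packaged field.
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField

class Contact(models.Model):
    name = models.CharField(max_length=100)
    # Validation and formatting come from the phonenumbers>=7.0.2
    # dependency declared in install_requires above.
    phone = PhoneNumberField(blank=True)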
quang-ha/lammps
tools/moltemplate/moltemplate/remove_duplicate_atoms.py
11
1467
#!/usr/bin/env python

"""
Get rid of lines containing duplicate copies of the same atom in the "Atoms"
section of a LAMMPS data file.  Duplicate lines which occur later are
preserved and the earlier lines are erased.  The file is read from sys.stdin.
This program does not parse the entire data file: the text from the "Atoms"
section of the LAMMPS file must be extracted in advance before it is sent to
this program.
"""

import sys


def main():
    in_stream = sys.stdin
    f = None
    fname = None
    if len(sys.argv) == 2:
        fname = sys.argv[1]
        f = open(fname, 'r')
        in_stream = f

    atom_ids_in_use = set([])

    lines = in_stream.readlines()

    # Start at the end of the file and read backwards.
    # If duplicate lines exist, eliminate the ones that occur earlier in the file.
    i = len(lines)
    while i > 0:
        i -= 1
        line_orig = lines[i]
        line = line_orig.rstrip('\n')
        if '#' in line_orig:
            ic = line.find('#')
            line = line_orig[:ic]

        tokens = line.strip().split()
        if len(tokens) > 0:
            atom_id = tokens[0]
            if atom_id in atom_ids_in_use:
                del lines[i]
            else:
                atom_ids_in_use.add(atom_id)
        else:
            del lines[i]

    for line in lines:
        sys.stdout.write(line)

    if f is not None:
        f.close()

    return


if __name__ == '__main__':
    main()
gpl-2.0
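A quick demonstration of the rule the script implements ("the last occurrence of an atom id wins"); the three-line input is made up:

# Demonstration (illustrative input): atom id 1 appears twice, and only
# its later line survives, matching the docstring above.
#
#   $ printf '1 1 1 0.0 0.0 0.0\n2 1 1 1.0 0.0 0.0\n1 1 1 9.9 9.9 9.9\n' \
#         | python remove_duplicate_atoms.py
#   2 1 1 1.0 0.0 0.0
#   1 1 1 9.9 9.9 9.9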
hydroshare/hydroshare_temp
hs_party/models/group_association.py
1
2255
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User, Group
from django.db import models
from mezzanine.pages.models import Page, RichText, Displayable
from mezzanine.core.fields import FileField, RichTextField
from mezzanine.core.models import Ownable
from mezzanine.generic.models import Keyword, Orderable
from hs_core.models import AbstractResource
from datetime import date
from uuid import uuid4
from django.db.models.signals import post_save, pre_save, post_init
from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.urlresolvers import reverse

from .party import Party
from .party_types import PartyEmailModel, PartyGeolocation, PartyPhoneModel, PartyLocationModel
from .activities import ActivitiesModel
from .person import Person
from .organization import Organization

__author__ = 'valentin'


class GroupAssociation(ActivitiesModel):
    # object to handle a person being in one or more organizations
    #organization = models.ForeignKey(Organization)
    uniqueCode = models.CharField(max_length=64, default=lambda: str(uuid4()),
                                  verbose_name="A unique code for the record",
                                  help_text="A unique code for the record")
    group = models.ForeignKey(Group)
    #person = models.ForeignKey(Person)
    person = models.ForeignKey(Person)
    beginDate = models.DateField(null=True, blank=True,
                                 verbose_name="Begin date of association. Empty if not known.")
    endDate = models.DateField(null=True, blank=True,
                               verbose_name="End date of association. Empty if still with group.")
    positionName = models.CharField(verbose_name="Position. Empty if not known.",
                                    blank=True, max_length=100)

    def __unicode__(self):
        if self.beginDate:
            if self.endDate:
                date_range = u' [%s, %s]' % (self.beginDate, self.endDate)
            else:
                date_range = u' [%s]' % (self.beginDate,)
        else:
            date_range = u''
        if self.positionName:
            title = u', ' + self.positionName
        else:
            title = u''
        return u'%s (%s%s%s)' % (self.person.name, self.group.name, title, date_range)

    class Meta:
        app_label = 'hs_party'
bsd-3-clause
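A hedged sketch of recording a membership with this model; the Person and Group instances are assumed to exist already, and the helper function is illustrative:

# Illustrative helper around the GroupAssociation model above.
from datetime import date

from hs_party.models.group_association import GroupAssociation

def add_member(person, group, position=""):
    # One row per membership spell; endDate stays NULL while it is current.
    return GroupAssociation.objects.create(
        person=person,
        group=group,
        beginDate=date.today(),
        positionName=position,
    )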
stewartsmith/bzr
bzrlib/index.py
2
80106
# Copyright (C) 2007-2011 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Indexing facilities.""" from __future__ import absolute_import __all__ = [ 'CombinedGraphIndex', 'GraphIndex', 'GraphIndexBuilder', 'GraphIndexPrefixAdapter', 'InMemoryGraphIndex', ] from bisect import bisect_right from cStringIO import StringIO import re import sys from bzrlib.lazy_import import lazy_import lazy_import(globals(), """ from bzrlib import ( bisect_multi, revision as _mod_revision, trace, ) """) from bzrlib import ( debug, errors, ) from bzrlib.static_tuple import StaticTuple _HEADER_READV = (0, 200) _OPTION_KEY_ELEMENTS = "key_elements=" _OPTION_LEN = "len=" _OPTION_NODE_REFS = "node_ref_lists=" _SIGNATURE = "Bazaar Graph Index 1\n" _whitespace_re = re.compile('[\t\n\x0b\x0c\r\x00 ]') _newline_null_re = re.compile('[\n\0]') def _has_key_from_parent_map(self, key): """Check if this index has one key. If it's possible to check for multiple keys at once through calling get_parent_map that should be faster. """ return (key in self.get_parent_map([key])) def _missing_keys_from_parent_map(self, keys): return set(keys) - set(self.get_parent_map(keys)) class GraphIndexBuilder(object): """A builder that can build a GraphIndex. The resulting graph has the structure:: _SIGNATURE OPTIONS NODES NEWLINE _SIGNATURE := 'Bazaar Graph Index 1' NEWLINE OPTIONS := 'node_ref_lists=' DIGITS NEWLINE NODES := NODE* NODE := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE KEY := Not-whitespace-utf8 ABSENT := 'a' REFERENCES := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1} REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)? REFERENCE := DIGITS ; digits is the byte offset in the index of the ; referenced key. VALUE := no-newline-no-null-bytes """ def __init__(self, reference_lists=0, key_elements=1): """Create a GraphIndex builder. :param reference_lists: The number of node references lists for each entry. :param key_elements: The number of bytestrings in each key. """ self.reference_lists = reference_lists # A dict of {key: (absent, ref_lists, value)} self._nodes = {} # Keys that are referenced but not actually present in this index self._absent_keys = set() self._nodes_by_key = None self._key_length = key_elements self._optimize_for_size = False self._combine_backing_indices = True def _check_key(self, key): """Raise BadIndexKey if key is not a valid key for this index.""" if type(key) not in (tuple, StaticTuple): raise errors.BadIndexKey(key) if self._key_length != len(key): raise errors.BadIndexKey(key) for element in key: if not element or _whitespace_re.search(element) is not None: raise errors.BadIndexKey(element) def _external_references(self): """Return references that are not present in this index. """ keys = set() refs = set() # TODO: JAM 2008-11-21 This makes an assumption about how the reference # lists are used. 
It is currently correct for pack-0.92 through # 1.9, which use the node references (3rd column) second # reference list as the compression parent. Perhaps this should # be moved into something higher up the stack, since it # makes assumptions about how the index is used. if self.reference_lists > 1: for node in self.iter_all_entries(): keys.add(node[1]) refs.update(node[3][1]) return refs - keys else: # If reference_lists == 0 there can be no external references, and # if reference_lists == 1, then there isn't a place to store the # compression parent return set() def _get_nodes_by_key(self): if self._nodes_by_key is None: nodes_by_key = {} if self.reference_lists: for key, (absent, references, value) in self._nodes.iteritems(): if absent: continue key_dict = nodes_by_key for subkey in key[:-1]: key_dict = key_dict.setdefault(subkey, {}) key_dict[key[-1]] = key, value, references else: for key, (absent, references, value) in self._nodes.iteritems(): if absent: continue key_dict = nodes_by_key for subkey in key[:-1]: key_dict = key_dict.setdefault(subkey, {}) key_dict[key[-1]] = key, value self._nodes_by_key = nodes_by_key return self._nodes_by_key def _update_nodes_by_key(self, key, value, node_refs): """Update the _nodes_by_key dict with a new key. For a key of (foo, bar, baz) create _nodes_by_key[foo][bar][baz] = key_value """ if self._nodes_by_key is None: return key_dict = self._nodes_by_key if self.reference_lists: key_value = StaticTuple(key, value, node_refs) else: key_value = StaticTuple(key, value) for subkey in key[:-1]: key_dict = key_dict.setdefault(subkey, {}) key_dict[key[-1]] = key_value def _check_key_ref_value(self, key, references, value): """Check that 'key' and 'references' are all valid. :param key: A key tuple. Must conform to the key interface (be a tuple, be of the right length, not have any whitespace or nulls in any key element.) :param references: An iterable of reference lists. Something like [[(ref, key)], [(ref, key), (other, key)]] :param value: The value associate with this key. Must not contain newlines or null characters. :return: (node_refs, absent_references) * node_refs: basically a packed form of 'references' where all iterables are tuples * absent_references: reference keys that are not in self._nodes. This may contain duplicates if the same key is referenced in multiple lists. """ as_st = StaticTuple.from_sequence self._check_key(key) if _newline_null_re.search(value) is not None: raise errors.BadIndexValue(value) if len(references) != self.reference_lists: raise errors.BadIndexValue(references) node_refs = [] absent_references = [] for reference_list in references: for reference in reference_list: # If reference *is* in self._nodes, then we know it has already # been checked. if reference not in self._nodes: self._check_key(reference) absent_references.append(reference) reference_list = as_st([as_st(ref).intern() for ref in reference_list]) node_refs.append(reference_list) return as_st(node_refs), absent_references def add_node(self, key, value, references=()): """Add a node to the index. :param key: The key. keys are non-empty tuples containing as many whitespace-free utf8 bytestrings as the key length defined for this index. :param references: An iterable of iterables of keys. Each is a reference to another key. :param value: The value to associate with the key. It may be any bytes as long as it does not contain \\0 or \\n. 
""" (node_refs, absent_references) = self._check_key_ref_value(key, references, value) if key in self._nodes and self._nodes[key][0] != 'a': raise errors.BadIndexDuplicateKey(key, self) for reference in absent_references: # There may be duplicates, but I don't think it is worth worrying # about self._nodes[reference] = ('a', (), '') self._absent_keys.update(absent_references) self._absent_keys.discard(key) self._nodes[key] = ('', node_refs, value) if self._nodes_by_key is not None and self._key_length > 1: self._update_nodes_by_key(key, value, node_refs) def clear_cache(self): """See GraphIndex.clear_cache() This is a no-op, but we need the api to conform to a generic 'Index' abstraction. """ def finish(self): """Finish the index. :returns: cStringIO holding the full context of the index as it should be written to disk. """ lines = [_SIGNATURE] lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n') lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n') key_count = len(self._nodes) - len(self._absent_keys) lines.append(_OPTION_LEN + str(key_count) + '\n') prefix_length = sum(len(x) for x in lines) # references are byte offsets. To avoid having to do nasty # polynomial work to resolve offsets (references to later in the # file cannot be determined until all the inbetween references have # been calculated too) we pad the offsets with 0's to make them be # of consistent length. Using binary offsets would break the trivial # file parsing. # to calculate the width of zero's needed we do three passes: # one to gather all the non-reference data and the number of references. # one to pad all the data with reference-length and determine entry # addresses. # One to serialise. # forward sorted by key. In future we may consider topological sorting, # at the cost of table scans for direct lookup, or a second index for # direct lookup nodes = sorted(self._nodes.items()) # if we do not prepass, we don't know how long it will be up front. expected_bytes = None # we only need to pre-pass if we have reference lists at all. if self.reference_lists: key_offset_info = [] non_ref_bytes = prefix_length total_references = 0 # TODO use simple multiplication for the constants in this loop. for key, (absent, references, value) in nodes: # record the offset known *so far* for this key: # the non reference bytes to date, and the total references to # date - saves reaccumulating on the second pass key_offset_info.append((key, non_ref_bytes, total_references)) # key is literal, value is literal, there are 3 null's, 1 NL # key is variable length tuple, \x00 between elements non_ref_bytes += sum(len(element) for element in key) if self._key_length > 1: non_ref_bytes += self._key_length - 1 # value is literal bytes, there are 3 null's, 1 NL. non_ref_bytes += len(value) + 3 + 1 # one byte for absent if set. if absent: non_ref_bytes += 1 elif self.reference_lists: # (ref_lists -1) tabs non_ref_bytes += self.reference_lists - 1 # (ref-1 cr's per ref_list) for ref_list in references: # how many references across the whole file? total_references += len(ref_list) # accrue reference separators if ref_list: non_ref_bytes += len(ref_list) - 1 # how many digits are needed to represent the total byte count? digits = 1 possible_total_bytes = non_ref_bytes + total_references*digits while 10 ** digits < possible_total_bytes: digits += 1 possible_total_bytes = non_ref_bytes + total_references*digits expected_bytes = possible_total_bytes + 1 # terminating newline # resolve key addresses. 
key_addresses = {} for key, non_ref_bytes, total_references in key_offset_info: key_addresses[key] = non_ref_bytes + total_references*digits # serialise format_string = '%%0%sd' % digits for key, (absent, references, value) in nodes: flattened_references = [] for ref_list in references: ref_addresses = [] for reference in ref_list: ref_addresses.append(format_string % key_addresses[reference]) flattened_references.append('\r'.join(ref_addresses)) string_key = '\x00'.join(key) lines.append("%s\x00%s\x00%s\x00%s\n" % (string_key, absent, '\t'.join(flattened_references), value)) lines.append('\n') result = StringIO(''.join(lines)) if expected_bytes and len(result.getvalue()) != expected_bytes: raise errors.BzrError('Failed index creation. Internal error:' ' mismatched output length and expected length: %d %d' % (len(result.getvalue()), expected_bytes)) return result def set_optimize(self, for_size=None, combine_backing_indices=None): """Change how the builder tries to optimize the result. :param for_size: Tell the builder to try and make the index as small as possible. :param combine_backing_indices: If the builder spills to disk to save memory, should the on-disk indices be combined. Set to True if you are going to be probing the index, but to False if you are not. (If you are not querying, then the time spent combining is wasted.) :return: None """ # GraphIndexBuilder itself doesn't pay attention to the flag yet, but # other builders do. if for_size is not None: self._optimize_for_size = for_size if combine_backing_indices is not None: self._combine_backing_indices = combine_backing_indices def find_ancestry(self, keys, ref_list_num): """See CombinedGraphIndex.find_ancestry()""" pending = set(keys) parent_map = {} missing_keys = set() while pending: next_pending = set() for _, key, value, ref_lists in self.iter_entries(pending): parent_keys = ref_lists[ref_list_num] parent_map[key] = parent_keys next_pending.update([p for p in parent_keys if p not in parent_map]) missing_keys.update(pending.difference(parent_map)) pending = next_pending return parent_map, missing_keys class GraphIndex(object): """An index for data with embedded graphs. The index maps keys to a list of key reference lists, and a value. Each node has the same number of key reference lists. Each key reference list can be empty or an arbitrary length. The value is an opaque NULL terminated string without any newlines. The storage of the index is hidden in the interface: keys and key references are always tuples of bytestrings, never the internal representation (e.g. dictionary offsets). It is presumed that the index will not be mutated - it is static data. Successive iter_all_entries calls will read the entire index each time. Additionally, iter_entries calls will read the index linearly until the desired keys are found. XXX: This must be fixed before the index is suitable for production use. :XXX """ def __init__(self, transport, name, size, unlimited_cache=False, offset=0): """Open an index called name on transport. :param transport: A bzrlib.transport.Transport. :param name: A path to provide to transport API calls. :param size: The size of the index in bytes. This is used for bisection logic to perform partial index reads. While the size could be obtained by statting the file this introduced an additional round trip as well as requiring stat'able transports, both of which are avoided by having it supplied. If size is None, then bisection support will be disabled and accessing the index will just stream all the data. 
:param offset: Instead of starting the index data at offset 0, start it at an arbitrary offset. """ self._transport = transport self._name = name # Becomes a dict of key:(value, reference-list-byte-locations) used by # the bisection interface to store parsed but not resolved keys. self._bisect_nodes = None # Becomes a dict of key:(value, reference-list-keys) which are ready to # be returned directly to callers. self._nodes = None # a sorted list of slice-addresses for the parsed bytes of the file. # e.g. (0,1) would mean that byte 0 is parsed. self._parsed_byte_map = [] # a sorted list of keys matching each slice address for parsed bytes # e.g. (None, 'foo@bar') would mean that the first byte contained no # key, and the end byte of the slice is the of the data for 'foo@bar' self._parsed_key_map = [] self._key_count = None self._keys_by_offset = None self._nodes_by_key = None self._size = size # The number of bytes we've read so far in trying to process this file self._bytes_read = 0 self._base_offset = offset def __eq__(self, other): """Equal when self and other were created with the same parameters.""" return ( type(self) == type(other) and self._transport == other._transport and self._name == other._name and self._size == other._size) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._transport.abspath(self._name)) def _buffer_all(self, stream=None): """Buffer all the index data. Mutates self._nodes and self.keys_by_offset. """ if self._nodes is not None: # We already did this return if 'index' in debug.debug_flags: trace.mutter('Reading entire index %s', self._transport.abspath(self._name)) if stream is None: stream = self._transport.get(self._name) if self._base_offset != 0: # This is wasteful, but it is better than dealing with # adjusting all the offsets, etc. stream = StringIO(stream.read()[self._base_offset:]) self._read_prefix(stream) self._expected_elements = 3 + self._key_length line_count = 0 # raw data keyed by offset self._keys_by_offset = {} # ready-to-return key:value or key:value, node_ref_lists self._nodes = {} self._nodes_by_key = None trailers = 0 pos = stream.tell() lines = stream.read().split('\n') # GZ 2009-09-20: Should really use a try/finally block to ensure close stream.close() del lines[-1] _, _, _, trailers = self._parse_lines(lines, pos) for key, absent, references, value in self._keys_by_offset.itervalues(): if absent: continue # resolve references: if self.node_ref_lists: node_value = (value, self._resolve_references(references)) else: node_value = value self._nodes[key] = node_value # cache the keys for quick set intersections if trailers != 1: # there must be one line - the empty trailer line. raise errors.BadIndexData(self) def clear_cache(self): """Clear out any cached/memoized values. This can be called at any time, but generally it is used when we have extracted some information, but don't expect to be requesting any more from this index. """ def external_references(self, ref_list_num): """Return references that are not present in this index. 
""" self._buffer_all() if ref_list_num + 1 > self.node_ref_lists: raise ValueError('No ref list %d, index has %d ref lists' % (ref_list_num, self.node_ref_lists)) refs = set() nodes = self._nodes for key, (value, ref_lists) in nodes.iteritems(): ref_list = ref_lists[ref_list_num] refs.update([ref for ref in ref_list if ref not in nodes]) return refs def _get_nodes_by_key(self): if self._nodes_by_key is None: nodes_by_key = {} if self.node_ref_lists: for key, (value, references) in self._nodes.iteritems(): key_dict = nodes_by_key for subkey in key[:-1]: key_dict = key_dict.setdefault(subkey, {}) key_dict[key[-1]] = key, value, references else: for key, value in self._nodes.iteritems(): key_dict = nodes_by_key for subkey in key[:-1]: key_dict = key_dict.setdefault(subkey, {}) key_dict[key[-1]] = key, value self._nodes_by_key = nodes_by_key return self._nodes_by_key def iter_all_entries(self): """Iterate over all keys within the index. :return: An iterable of (index, key, value) or (index, key, value, reference_lists). The former tuple is used when there are no reference lists in the index, making the API compatible with simple key:value index types. There is no defined order for the result iteration - it will be in the most efficient order for the index. """ if 'evil' in debug.debug_flags: trace.mutter_callsite(3, "iter_all_entries scales with size of history.") if self._nodes is None: self._buffer_all() if self.node_ref_lists: for key, (value, node_ref_lists) in self._nodes.iteritems(): yield self, key, value, node_ref_lists else: for key, value in self._nodes.iteritems(): yield self, key, value def _read_prefix(self, stream): signature = stream.read(len(self._signature())) if not signature == self._signature(): raise errors.BadIndexFormatSignature(self._name, GraphIndex) options_line = stream.readline() if not options_line.startswith(_OPTION_NODE_REFS): raise errors.BadIndexOptions(self) try: self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):-1]) except ValueError: raise errors.BadIndexOptions(self) options_line = stream.readline() if not options_line.startswith(_OPTION_KEY_ELEMENTS): raise errors.BadIndexOptions(self) try: self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):-1]) except ValueError: raise errors.BadIndexOptions(self) options_line = stream.readline() if not options_line.startswith(_OPTION_LEN): raise errors.BadIndexOptions(self) try: self._key_count = int(options_line[len(_OPTION_LEN):-1]) except ValueError: raise errors.BadIndexOptions(self) def _resolve_references(self, references): """Return the resolved key references for references. References are resolved by looking up the location of the key in the _keys_by_offset map and substituting the key name, preserving ordering. :param references: An iterable of iterables of key locations. e.g. [[123, 456], [123]] :return: A tuple of tuples of keys. """ node_refs = [] for ref_list in references: node_refs.append(tuple([self._keys_by_offset[ref][0] for ref in ref_list])) return tuple(node_refs) def _find_index(self, range_map, key): """Helper for the _parsed_*_index calls. Given a range map - [(start, end), ...], finds the index of the range in the map for key if it is in the map, and if it is not there, the immediately preceeding range in the map. 
""" result = bisect_right(range_map, key) - 1 if result + 1 < len(range_map): # check the border condition, it may be in result + 1 if range_map[result + 1][0] == key[0]: return result + 1 return result def _parsed_byte_index(self, offset): """Return the index of the entry immediately before offset. e.g. if the parsed map has regions 0,10 and 11,12 parsed, meaning that there is one unparsed byte (the 11th, addressed as[10]). then: asking for 0 will return 0 asking for 10 will return 0 asking for 11 will return 1 asking for 12 will return 1 """ key = (offset, 0) return self._find_index(self._parsed_byte_map, key) def _parsed_key_index(self, key): """Return the index of the entry immediately before key. e.g. if the parsed map has regions (None, 'a') and ('b','c') parsed, meaning that keys from None to 'a' inclusive, and 'b' to 'c' inclusive have been parsed, then: asking for '' will return 0 asking for 'a' will return 0 asking for 'b' will return 1 asking for 'e' will return 1 """ search_key = (key, None) return self._find_index(self._parsed_key_map, search_key) def _is_parsed(self, offset): """Returns True if offset has been parsed.""" index = self._parsed_byte_index(offset) if index == len(self._parsed_byte_map): return offset < self._parsed_byte_map[index - 1][1] start, end = self._parsed_byte_map[index] return offset >= start and offset < end def _iter_entries_from_total_buffer(self, keys): """Iterate over keys when the entire index is parsed.""" # Note: See the note in BTreeBuilder.iter_entries for why we don't use # .intersection() here nodes = self._nodes keys = [key for key in keys if key in nodes] if self.node_ref_lists: for key in keys: value, node_refs = nodes[key] yield self, key, value, node_refs else: for key in keys: yield self, key, nodes[key] def iter_entries(self, keys): """Iterate over keys within the index. :param keys: An iterable providing the keys to be retrieved. :return: An iterable as per iter_all_entries, but restricted to the keys supplied. No additional keys will be returned, and every key supplied that is in the index will be returned. """ keys = set(keys) if not keys: return [] if self._size is None and self._nodes is None: self._buffer_all() # We fit about 20 keys per minimum-read (4K), so if we are looking for # more than 1/20th of the index its likely (assuming homogenous key # spread) that we'll read the entire index. If we're going to do that, # buffer the whole thing. A better analysis might take key spread into # account - but B+Tree indices are better anyway. # We could look at all data read, and use a threshold there, which will # trigger on ancestry walks, but that is not yet fully mapped out. if self._nodes is None and len(keys) * 20 > self.key_count(): self._buffer_all() if self._nodes is not None: return self._iter_entries_from_total_buffer(keys) else: return (result[1] for result in bisect_multi.bisect_multi_bytes( self._lookup_keys_via_location, self._size, keys)) def iter_entries_prefix(self, keys): """Iterate over keys within the index using prefix matching. Prefix matching is applied within the tuple of a key, not to within the bytestring of each key element. e.g. if you have the keys ('foo', 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then only the former key is returned. WARNING: Note that this method currently causes a full index parse unconditionally (which is reasonably appropriate as it is a means for thunking many small indices into one larger one and still supplies iter_all_entries at the thunk layer). 
:param keys: An iterable providing the key prefixes to be retrieved. Each key prefix takes the form of a tuple the length of a key, but with the last N elements 'None' rather than a regular bytestring. The first element cannot be 'None'. :return: An iterable as per iter_all_entries, but restricted to the keys with a matching prefix to those supplied. No additional keys will be returned, and every match that is in the index will be returned. """ keys = set(keys) if not keys: return # load data - also finds key lengths if self._nodes is None: self._buffer_all() if self._key_length == 1: for key in keys: # sanity check if key[0] is None: raise errors.BadIndexKey(key) if len(key) != self._key_length: raise errors.BadIndexKey(key) if self.node_ref_lists: value, node_refs = self._nodes[key] yield self, key, value, node_refs else: yield self, key, self._nodes[key] return nodes_by_key = self._get_nodes_by_key() for key in keys: # sanity check if key[0] is None: raise errors.BadIndexKey(key) if len(key) != self._key_length: raise errors.BadIndexKey(key) # find what it refers to: key_dict = nodes_by_key elements = list(key) # find the subdict whose contents should be returned. try: while len(elements) and elements[0] is not None: key_dict = key_dict[elements[0]] elements.pop(0) except KeyError: # a non-existent lookup. continue if len(elements): dicts = [key_dict] while dicts: key_dict = dicts.pop(-1) # can't be empty or would not exist item, value = key_dict.iteritems().next() if type(value) == dict: # push keys dicts.extend(key_dict.itervalues()) else: # yield keys for value in key_dict.itervalues(): # each value is the key:value:node refs tuple # ready to yield. yield (self, ) + value else: # the last thing looked up was a terminal element yield (self, ) + key_dict def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys): """See BTreeIndex._find_ancestors.""" # The api can be implemented as a trivial overlay on top of # iter_entries, it is not an efficient implementation, but it at least # gets the job done. found_keys = set() search_keys = set() for index, key, value, refs in self.iter_entries(keys): parent_keys = refs[ref_list_num] found_keys.add(key) parent_map[key] = parent_keys search_keys.update(parent_keys) # Figure out what, if anything, was missing missing_keys.update(set(keys).difference(found_keys)) search_keys = search_keys.difference(parent_map) return search_keys def key_count(self): """Return an estimate of the number of keys in this index. For GraphIndex the estimate is exact. """ if self._key_count is None: self._read_and_parse([_HEADER_READV]) return self._key_count def _lookup_keys_via_location(self, location_keys): """Public interface for implementing bisection. If _buffer_all has been called, then all the data for the index is in memory, and this method should not be called, as it uses a separate cache because it cannot pre-resolve all indices, which buffer_all does for performance. :param location_keys: A list of location(byte offset), key tuples. :return: A list of (location_key, result) tuples as expected by bzrlib.bisect_multi.bisect_multi_bytes. """ # Possible improvements: # - only bisect lookup each key once # - sort the keys first, and use that to reduce the bisection window # ----- # this progresses in three parts: # read data # parse it # attempt to answer the question from the now in memory data. # build the readv request # for each location, ask for 800 bytes - much more than rows we've seen # anywhere.
readv_ranges = [] for location, key in location_keys: # can we answer from cache? if self._bisect_nodes and key in self._bisect_nodes: # We have the key parsed. continue index = self._parsed_key_index(key) if (len(self._parsed_key_map) and self._parsed_key_map[index][0] <= key and (self._parsed_key_map[index][1] >= key or # end of the file has been parsed self._parsed_byte_map[index][1] == self._size)): # the key has been parsed, so no lookup is needed even if it's # not present. continue # - if we have examined this part of the file already - yes index = self._parsed_byte_index(location) if (len(self._parsed_byte_map) and self._parsed_byte_map[index][0] <= location and self._parsed_byte_map[index][1] > location): # the byte region has been parsed, so no read is needed. continue length = 800 if location + length > self._size: length = self._size - location # TODO: trim out parsed locations. if length > 0: readv_ranges.append((location, length)) # read the header if needed if self._bisect_nodes is None: readv_ranges.append(_HEADER_READV) self._read_and_parse(readv_ranges) result = [] if self._nodes is not None: # _read_and_parse triggered a _buffer_all because we requested the # whole data range for location, key in location_keys: if key not in self._nodes: # not present result.append(((location, key), False)) elif self.node_ref_lists: value, refs = self._nodes[key] result.append(((location, key), (self, key, value, refs))) else: result.append(((location, key), (self, key, self._nodes[key]))) return result # generate results: # - figure out <, >, missing, present # - result present references so we can return them. # keys that we cannot answer until we resolve references pending_references = [] pending_locations = set() for location, key in location_keys: # can we answer from cache? if key in self._bisect_nodes: # the key has been parsed, so no lookup is needed if self.node_ref_lists: # the references may not have been all parsed. value, refs = self._bisect_nodes[key] wanted_locations = [] for ref_list in refs: for ref in ref_list: if ref not in self._keys_by_offset: wanted_locations.append(ref) if wanted_locations: pending_locations.update(wanted_locations) pending_references.append((location, key)) continue result.append(((location, key), (self, key, value, self._resolve_references(refs)))) else: result.append(((location, key), (self, key, self._bisect_nodes[key]))) continue else: # has the region the key should be in, been parsed? index = self._parsed_key_index(key) if (self._parsed_key_map[index][0] <= key and (self._parsed_key_map[index][1] >= key or # end of the file has been parsed self._parsed_byte_map[index][1] == self._size)): result.append(((location, key), False)) continue # no, is the key above or below the probed location: # get the range of the probed & parsed location index = self._parsed_byte_index(location) # if the key is below the start of the range, it's below if key < self._parsed_key_map[index][0]: direction = -1 else: direction = +1 result.append(((location, key), direction)) readv_ranges = [] # lookup data to resolve references for location in pending_locations: length = 800 if location + length > self._size: length = self._size - location # TODO: trim out parsed locations (e.g.
if the 800 is into the # parsed region trim it, and don't use the adjust_for_latency # facility) if length > 0: readv_ranges.append((location, length)) self._read_and_parse(readv_ranges) if self._nodes is not None: # The _read_and_parse triggered a _buffer_all, grab the data and # return it for location, key in pending_references: value, refs = self._nodes[key] result.append(((location, key), (self, key, value, refs))) return result for location, key in pending_references: # answer key references we had to look-up-late. value, refs = self._bisect_nodes[key] result.append(((location, key), (self, key, value, self._resolve_references(refs)))) return result def _parse_header_from_bytes(self, bytes): """Parse the header from a region of bytes. :param bytes: The data to parse. :return: An offset, data tuple such as readv yields, for the unparsed data. (which may have length 0). """ signature = bytes[0:len(self._signature())] if not signature == self._signature(): raise errors.BadIndexFormatSignature(self._name, GraphIndex) lines = bytes[len(self._signature()):].splitlines() options_line = lines[0] if not options_line.startswith(_OPTION_NODE_REFS): raise errors.BadIndexOptions(self) try: self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):]) except ValueError: raise errors.BadIndexOptions(self) options_line = lines[1] if not options_line.startswith(_OPTION_KEY_ELEMENTS): raise errors.BadIndexOptions(self) try: self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):]) except ValueError: raise errors.BadIndexOptions(self) options_line = lines[2] if not options_line.startswith(_OPTION_LEN): raise errors.BadIndexOptions(self) try: self._key_count = int(options_line[len(_OPTION_LEN):]) except ValueError: raise errors.BadIndexOptions(self) # calculate the bytes we have processed header_end = (len(signature) + len(lines[0]) + len(lines[1]) + len(lines[2]) + 3) self._parsed_bytes(0, None, header_end, None) # setup parsing state self._expected_elements = 3 + self._key_length # raw data keyed by offset self._keys_by_offset = {} # keys with the value and node references self._bisect_nodes = {} return header_end, bytes[header_end:] def _parse_region(self, offset, data): """Parse node data returned from a readv operation. :param offset: The byte offset the data starts at. :param data: The data to parse. """ # trim the data. # end first: end = offset + len(data) high_parsed = offset while True: # Trivial test - if the current index's end is within the # low-matching parsed range, we're done. index = self._parsed_byte_index(high_parsed) if end < self._parsed_byte_map[index][1]: return # print "[%d:%d]" % (offset, end), \ # self._parsed_byte_map[index:index + 2] high_parsed, last_segment = self._parse_segment( offset, data, end, index) if last_segment: return def _parse_segment(self, offset, data, end, index): """Parse one segment of data. :param offset: Where 'data' begins in the file. :param data: Some data to parse a segment of. :param end: Where data ends :param index: The current index into the parsed bytes map. :return: high_parsed_byte, last_segment. high_parsed_byte is the location of the highest parsed byte in this segment, last_segment is True if the parsed segment is the last possible one in the data block. """ # default is to use all data trim_end = None # accommodate overlap with data before this.
if offset < self._parsed_byte_map[index][1]: # overlaps the lower parsed region # skip the parsed data trim_start = self._parsed_byte_map[index][1] - offset # don't trim the start for \n start_adjacent = True elif offset == self._parsed_byte_map[index][1]: # abuts the lower parsed region # use all data trim_start = None # do not trim anything start_adjacent = True else: # does not overlap the lower parsed region # use all data trim_start = None # but trim the leading \n start_adjacent = False if end == self._size: # lines up to the end of all data: # use it all trim_end = None # do not strip to the last \n end_adjacent = True last_segment = True elif index + 1 == len(self._parsed_byte_map): # at the end of the parsed data # use it all trim_end = None # but strip to the last \n end_adjacent = False last_segment = True elif end == self._parsed_byte_map[index + 1][0]: # buts up against the next parsed region # use it all trim_end = None # do not strip to the last \n end_adjacent = True last_segment = True elif end > self._parsed_byte_map[index + 1][0]: # overlaps into the next parsed region # only consider the unparsed data trim_end = self._parsed_byte_map[index + 1][0] - offset # do not strip to the last \n as we know its an entire record end_adjacent = True last_segment = end < self._parsed_byte_map[index + 1][1] else: # does not overlap into the next region # use it all trim_end = None # but strip to the last \n end_adjacent = False last_segment = True # now find bytes to discard if needed if not start_adjacent: # work around python bug in rfind if trim_start is None: trim_start = data.find('\n') + 1 else: trim_start = data.find('\n', trim_start) + 1 if not (trim_start != 0): raise AssertionError('no \n was present') # print 'removing start', offset, trim_start, repr(data[:trim_start]) if not end_adjacent: # work around python bug in rfind if trim_end is None: trim_end = data.rfind('\n') + 1 else: trim_end = data.rfind('\n', None, trim_end) + 1 if not (trim_end != 0): raise AssertionError('no \n was present') # print 'removing end', offset, trim_end, repr(data[trim_end:]) # adjust offset and data to the parseable data. trimmed_data = data[trim_start:trim_end] if not (trimmed_data): raise AssertionError('read unneeded data [%d:%d] from [%d:%d]' % (trim_start, trim_end, offset, offset + len(data))) if trim_start: offset += trim_start # print "parsing", repr(trimmed_data) # splitlines mangles the \r delimiters.. don't use it. lines = trimmed_data.split('\n') del lines[-1] pos = offset first_key, last_key, nodes, _ = self._parse_lines(lines, pos) for key, value in nodes: self._bisect_nodes[key] = value self._parsed_bytes(offset, first_key, offset + len(trimmed_data), last_key) return offset + len(trimmed_data), last_segment def _parse_lines(self, lines, pos): key = None first_key = None trailers = 0 nodes = [] for line in lines: if line == '': # must be at the end if self._size: if not (self._size == pos + 1): raise AssertionError("%s %s" % (self._size, pos)) trailers += 1 continue elements = line.split('\0') if len(elements) != self._expected_elements: raise errors.BadIndexData(self) # keys are tuples. Each element is a string that may occur many # times, so we intern them to save space. 
AB, RC, 200807 key = tuple([intern(element) for element in elements[:self._key_length]]) if first_key is None: first_key = key absent, references, value = elements[-3:] ref_lists = [] for ref_string in references.split('\t'): ref_lists.append(tuple([ int(ref) for ref in ref_string.split('\r') if ref ])) ref_lists = tuple(ref_lists) self._keys_by_offset[pos] = (key, absent, ref_lists, value) pos += len(line) + 1 # +1 for the \n if absent: continue if self.node_ref_lists: node_value = (value, ref_lists) else: node_value = value nodes.append((key, node_value)) # print "parsed ", key return first_key, key, nodes, trailers def _parsed_bytes(self, start, start_key, end, end_key): """Mark the bytes from start to end as parsed. Calling self._parsed_bytes(1,2) will mark one byte (the one at offset 1) as parsed. :param start: The start of the parsed region. :param end: The end of the parsed region. """ index = self._parsed_byte_index(start) new_value = (start, end) new_key = (start_key, end_key) if index == -1: # first range parsed is always the beginning. self._parsed_byte_map.insert(index, new_value) self._parsed_key_map.insert(index, new_key) return # four cases: # new region # extend lower region # extend higher region # combine two regions if (index + 1 < len(self._parsed_byte_map) and self._parsed_byte_map[index][1] == start and self._parsed_byte_map[index + 1][0] == end): # combine two regions self._parsed_byte_map[index] = (self._parsed_byte_map[index][0], self._parsed_byte_map[index + 1][1]) self._parsed_key_map[index] = (self._parsed_key_map[index][0], self._parsed_key_map[index + 1][1]) del self._parsed_byte_map[index + 1] del self._parsed_key_map[index + 1] elif self._parsed_byte_map[index][1] == start: # extend the lower entry self._parsed_byte_map[index] = ( self._parsed_byte_map[index][0], end) self._parsed_key_map[index] = ( self._parsed_key_map[index][0], end_key) elif (index + 1 < len(self._parsed_byte_map) and self._parsed_byte_map[index + 1][0] == end): # extend the higher entry self._parsed_byte_map[index + 1] = ( start, self._parsed_byte_map[index + 1][1]) self._parsed_key_map[index + 1] = ( start_key, self._parsed_key_map[index + 1][1]) else: # new entry self._parsed_byte_map.insert(index + 1, new_value) self._parsed_key_map.insert(index + 1, new_key) def _read_and_parse(self, readv_ranges): """Read the ranges and parse the resulting data. :param readv_ranges: A prepared readv range list. """ if not readv_ranges: return if self._nodes is None and self._bytes_read * 2 >= self._size: # We've already read more than 50% of the file and we are about to # request more data, just _buffer_all() and be done self._buffer_all() return base_offset = self._base_offset if base_offset != 0: # Rewrite the ranges for the offset readv_ranges = [(start+base_offset, size) for start, size in readv_ranges] readv_data = self._transport.readv(self._name, readv_ranges, True, self._size + self._base_offset) # parse for offset, data in readv_data: offset -= base_offset self._bytes_read += len(data) if offset < 0: # transport.readv() expanded to extra data which isn't part of # this index data = data[-offset:] offset = 0 if offset == 0 and len(data) == self._size: # We read the whole range, most likely because the # Transport upcast our readv ranges into one long request # for enough total data to grab the whole index. 
self._buffer_all(StringIO(data)) return if self._bisect_nodes is None: # this must be the start if not (offset == 0): raise AssertionError() offset, data = self._parse_header_from_bytes(data) # print readv_ranges, "[%d:%d]" % (offset, offset + len(data)) self._parse_region(offset, data) def _signature(self): """The file signature for this index type.""" return _SIGNATURE def validate(self): """Validate that everything in the index can be accessed.""" # iter_all validates completely at the moment, so just do that. for node in self.iter_all_entries(): pass class CombinedGraphIndex(object): """A GraphIndex made up from smaller GraphIndices. The backing indices must implement GraphIndex, and are presumed to be static data. Queries against the combined index will be made against the first index, and then the second and so on. The order of indices can thus influence performance significantly. For example, if one index is on local disk and a second on a remote server, the local disk index should be before the other in the index list. Also, queries tend to need results from the same indices as previous queries. So the indices will be reordered after every query to put the indices that had the result(s) of that query first (while otherwise preserving the relative ordering). """ def __init__(self, indices, reload_func=None): """Create a CombinedGraphIndex backed by indices. :param indices: An ordered list of indices to query for data. :param reload_func: A function to call if we find we are missing an index. Should have the form reload_func() => True/False to indicate if reloading actually changed anything. """ self._indices = indices self._reload_func = reload_func # Sibling indices are other CombinedGraphIndex that we should call # _move_to_front_by_name on when we auto-reorder ourself. self._sibling_indices = [] # A list of names that corresponds to the instances in self._indices, # so _index_names[0] is always the name for _indices[0], etc. Sibling # indices must all use the same set of names as each other. self._index_names = [None] * len(self._indices) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join(map(repr, self._indices))) def clear_cache(self): """See GraphIndex.clear_cache()""" for index in self._indices: index.clear_cache() def get_parent_map(self, keys): """See graph.StackedParentsProvider.get_parent_map""" search_keys = set(keys) if _mod_revision.NULL_REVISION in search_keys: search_keys.discard(_mod_revision.NULL_REVISION) found_parents = {_mod_revision.NULL_REVISION:[]} else: found_parents = {} for index, key, value, refs in self.iter_entries(search_keys): parents = refs[0] if not parents: parents = (_mod_revision.NULL_REVISION,) found_parents[key] = parents return found_parents has_key = _has_key_from_parent_map def insert_index(self, pos, index, name=None): """Insert a new index in the list of indices to query. :param pos: The position to insert the index. :param index: The index to insert. :param name: a name for this index, e.g. a pack name. These names can be used to reflect index reorderings to related CombinedGraphIndex instances that use the same names. (see set_sibling_indices) """ self._indices.insert(pos, index) self._index_names.insert(pos, name) def iter_all_entries(self): """Iterate over all keys within the index Duplicate keys across child indices are presumed to have the same value and are only reported once. :return: An iterable of (index, key, reference_lists, value). 
There is no defined order for the result iteration - it will be in the most efficient order for the index. """ seen_keys = set() while True: try: for index in self._indices: for node in index.iter_all_entries(): if node[1] not in seen_keys: yield node seen_keys.add(node[1]) return except errors.NoSuchFile: self._reload_or_raise() def iter_entries(self, keys): """Iterate over keys within the index. Duplicate keys across child indices are presumed to have the same value and are only reported once. :param keys: An iterable providing the keys to be retrieved. :return: An iterable of (index, key, reference_lists, value). There is no defined order for the result iteration - it will be in the most efficient order for the index. """ keys = set(keys) hit_indices = [] while True: try: for index in self._indices: if not keys: break index_hit = False for node in index.iter_entries(keys): keys.remove(node[1]) yield node index_hit = True if index_hit: hit_indices.append(index) break except errors.NoSuchFile: self._reload_or_raise() self._move_to_front(hit_indices) def iter_entries_prefix(self, keys): """Iterate over keys within the index using prefix matching. Duplicate keys across child indices are presumed to have the same value and are only reported once. Prefix matching is applied within the tuple of a key, not to within the bytestring of each key element. e.g. if you have the keys ('foo', 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then only the former key is returned. :param keys: An iterable providing the key prefixes to be retrieved. Each key prefix takes the form of a tuple the length of a key, but with the last N elements 'None' rather than a regular bytestring. The first element cannot be 'None'. :return: An iterable as per iter_all_entries, but restricted to the keys with a matching prefix to those supplied. No additional keys will be returned, and every match that is in the index will be returned. """ keys = set(keys) if not keys: return seen_keys = set() hit_indices = [] while True: try: for index in self._indices: index_hit = False for node in index.iter_entries_prefix(keys): if node[1] in seen_keys: continue seen_keys.add(node[1]) yield node index_hit = True if index_hit: hit_indices.append(index) break except errors.NoSuchFile: self._reload_or_raise() self._move_to_front(hit_indices) def _move_to_front(self, hit_indices): """Rearrange self._indices so that hit_indices are first. Order is maintained as much as possible, e.g. the first unhit index will be the first index in _indices after the hit_indices, and the hit_indices will be present in exactly the order they are passed to _move_to_front. _move_to_front propagates to all objects in self._sibling_indices by calling _move_to_front_by_name. """ if self._indices[:len(hit_indices)] == hit_indices: # The 'hit_indices' are already at the front (and in the same # order), no need to re-order return hit_names = self._move_to_front_by_index(hit_indices) for sibling_idx in self._sibling_indices: sibling_idx._move_to_front_by_name(hit_names) def _move_to_front_by_index(self, hit_indices): """Core logic for _move_to_front. Returns a list of names corresponding to the hit_indices param. 
""" indices_info = zip(self._index_names, self._indices) if 'index' in debug.debug_flags: trace.mutter('CombinedGraphIndex reordering: currently %r, ' 'promoting %r', indices_info, hit_indices) hit_names = [] unhit_names = [] new_hit_indices = [] unhit_indices = [] for offset, (name, idx) in enumerate(indices_info): if idx in hit_indices: hit_names.append(name) new_hit_indices.append(idx) if len(new_hit_indices) == len(hit_indices): # We've found all of the hit entries, everything else is # unhit unhit_names.extend(self._index_names[offset+1:]) unhit_indices.extend(self._indices[offset+1:]) break else: unhit_names.append(name) unhit_indices.append(idx) self._indices = new_hit_indices + unhit_indices self._index_names = hit_names + unhit_names if 'index' in debug.debug_flags: trace.mutter('CombinedGraphIndex reordered: %r', self._indices) return hit_names def _move_to_front_by_name(self, hit_names): """Moves indices named by 'hit_names' to front of the search order, as described in _move_to_front. """ # Translate names to index instances, and then call # _move_to_front_by_index. indices_info = zip(self._index_names, self._indices) hit_indices = [] for name, idx in indices_info: if name in hit_names: hit_indices.append(idx) self._move_to_front_by_index(hit_indices) def find_ancestry(self, keys, ref_list_num): """Find the complete ancestry for the given set of keys. Note that this is a whole-ancestry request, so it should be used sparingly. :param keys: An iterable of keys to look for :param ref_list_num: The reference list which references the parents we care about. :return: (parent_map, missing_keys) """ # XXX: make this call _move_to_front? missing_keys = set() parent_map = {} keys_to_lookup = set(keys) generation = 0 while keys_to_lookup: # keys that *all* indexes claim are missing, stop searching them generation += 1 all_index_missing = None # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss' # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup), # len(parent_map), # len(missing_keys)) for index_idx, index in enumerate(self._indices): # TODO: we should probably be doing something with # 'missing_keys' since we've already determined that # those revisions have not been found anywhere index_missing_keys = set() # Find all of the ancestry we can from this index # keep looking until the search_keys set is empty, which means # things we didn't find should be in index_missing_keys search_keys = keys_to_lookup sub_generation = 0 # print ' \t%2d\t\t%4d\t%5d\t%5d' % ( # index_idx, len(search_keys), # len(parent_map), len(index_missing_keys)) while search_keys: sub_generation += 1 # TODO: ref_list_num should really be a parameter, since # CombinedGraphIndex does not know what the ref lists # mean. 
search_keys = index._find_ancestors(search_keys, ref_list_num, parent_map, index_missing_keys) # print ' \t \t%2d\t%4d\t%5d\t%5d' % ( # sub_generation, len(search_keys), # len(parent_map), len(index_missing_keys)) # Now set whatever was missing to be searched in the next index keys_to_lookup = index_missing_keys if all_index_missing is None: all_index_missing = set(index_missing_keys) else: all_index_missing.intersection_update(index_missing_keys) if not keys_to_lookup: break if all_index_missing is None: # There were no indexes, so all search keys are 'missing' missing_keys.update(keys_to_lookup) keys_to_lookup = None else: missing_keys.update(all_index_missing) keys_to_lookup.difference_update(all_index_missing) return parent_map, missing_keys def key_count(self): """Return an estimate of the number of keys in this index. For CombinedGraphIndex this is approximated by the sum of the keys of the child indices. As child indices may have duplicate keys this can have a maximum error of the number of child indices * largest number of keys in any index. """ while True: try: return sum((index.key_count() for index in self._indices), 0) except errors.NoSuchFile: self._reload_or_raise() missing_keys = _missing_keys_from_parent_map def _reload_or_raise(self): """We just got a NoSuchFile exception. Try to reload the indices, if it fails, just raise the current exception. """ if self._reload_func is None: raise exc_type, exc_value, exc_traceback = sys.exc_info() trace.mutter('Trying to reload after getting exception: %s', exc_value) if not self._reload_func(): # We tried to reload, but nothing changed, so we fail anyway trace.mutter('_reload_func indicated nothing has changed.' ' Raising original exception.') raise exc_type, exc_value, exc_traceback def set_sibling_indices(self, sibling_combined_graph_indices): """Set the CombinedGraphIndex objects to reorder after reordering self. """ self._sibling_indices = sibling_combined_graph_indices def validate(self): """Validate that everything in the index can be accessed.""" while True: try: for index in self._indices: index.validate() return except errors.NoSuchFile: self._reload_or_raise() class InMemoryGraphIndex(GraphIndexBuilder): """A GraphIndex which operates entirely out of memory and is mutable. This is designed to allow the accumulation of GraphIndex entries during a single write operation, where the accumulated entries need to be immediately available - for example via a CombinedGraphIndex. """ def add_nodes(self, nodes): """Add nodes to the index. :param nodes: An iterable of (key, node_refs, value) entries to add. """ if self.reference_lists: for (key, value, node_refs) in nodes: self.add_node(key, value, node_refs) else: for (key, value) in nodes: self.add_node(key, value) def iter_all_entries(self): """Iterate over all keys within the index :return: An iterable of (index, key, reference_lists, value). There is no defined order for the result iteration - it will be in the most efficient order for the index (in this case dictionary hash order). """ if 'evil' in debug.debug_flags: trace.mutter_callsite(3, "iter_all_entries scales with size of history.") if self.reference_lists: for key, (absent, references, value) in self._nodes.iteritems(): if not absent: yield self, key, value, references else: for key, (absent, references, value) in self._nodes.iteritems(): if not absent: yield self, key, value def iter_entries(self, keys): """Iterate over keys within the index. :param keys: An iterable providing the keys to be retrieved. 
:return: An iterable of (index, key, value, reference_lists). There is no defined order for the result iteration - it will be in the most efficient order for the index (keys iteration order in this case). """ # Note: See BTreeBuilder.iter_entries for an explanation of why we # aren't using set().intersection() here nodes = self._nodes keys = [key for key in keys if key in nodes] if self.reference_lists: for key in keys: node = nodes[key] if not node[0]: yield self, key, node[2], node[1] else: for key in keys: node = nodes[key] if not node[0]: yield self, key, node[2] def iter_entries_prefix(self, keys): """Iterate over keys within the index using prefix matching. Prefix matching is applied within the tuple of a key, not to within the bytestring of each key element. e.g. if you have the keys ('foo', 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then only the former key is returned. :param keys: An iterable providing the key prefixes to be retrieved. Each key prefix takes the form of a tuple the length of a key, but with the last N elements 'None' rather than a regular bytestring. The first element cannot be 'None'. :return: An iterable as per iter_all_entries, but restricted to the keys with a matching prefix to those supplied. No additional keys will be returned, and every match that is in the index will be returned. """ # XXX: Too much duplication with the GraphIndex class; consider finding # a good place to pull out the actual common logic. keys = set(keys) if not keys: return if self._key_length == 1: for key in keys: # sanity check if key[0] is None: raise errors.BadIndexKey(key) if len(key) != self._key_length: raise errors.BadIndexKey(key) node = self._nodes[key] if node[0]: continue if self.reference_lists: yield self, key, node[2], node[1] else: yield self, key, node[2] return nodes_by_key = self._get_nodes_by_key() for key in keys: # sanity check if key[0] is None: raise errors.BadIndexKey(key) if len(key) != self._key_length: raise errors.BadIndexKey(key) # find what it refers to: key_dict = nodes_by_key elements = list(key) # find the subdict to return try: while len(elements) and elements[0] is not None: key_dict = key_dict[elements[0]] elements.pop(0) except KeyError: # a non-existent lookup. continue if len(elements): dicts = [key_dict] while dicts: key_dict = dicts.pop(-1) # can't be empty or would not exist item, value = key_dict.iteritems().next() if type(value) == dict: # push keys dicts.extend(key_dict.itervalues()) else: # yield keys for value in key_dict.itervalues(): yield (self, ) + value else: yield (self, ) + key_dict def key_count(self): """Return an estimate of the number of keys in this index. For InMemoryGraphIndex the estimate is exact. """ return len(self._nodes) - len(self._absent_keys) def validate(self): """In-memory indexes have no known corruption at the moment.""" class GraphIndexPrefixAdapter(object): """An adapter between GraphIndex with different key lengths. Queries against this will emit queries against the adapted Graph with the prefix added, queries for all items use iter_entries_prefix. The returned nodes will have their keys and node references adjusted to remove the prefix. Finally, an add_nodes_callback can be supplied - when called the nodes and references being added will have prefix prepended.
""" def __init__(self, adapted, prefix, missing_key_length, add_nodes_callback=None): """Construct an adapter against adapted with prefix.""" self.adapted = adapted self.prefix_key = prefix + (None,)*missing_key_length self.prefix = prefix self.prefix_len = len(prefix) self.add_nodes_callback = add_nodes_callback def add_nodes(self, nodes): """Add nodes to the index. :param nodes: An iterable of (key, node_refs, value) entries to add. """ # save nodes in case its an iterator nodes = tuple(nodes) translated_nodes = [] try: # Add prefix_key to each reference node_refs is a tuple of tuples, # so split it apart, and add prefix_key to the internal reference for (key, value, node_refs) in nodes: adjusted_references = ( tuple(tuple(self.prefix + ref_node for ref_node in ref_list) for ref_list in node_refs)) translated_nodes.append((self.prefix + key, value, adjusted_references)) except ValueError: # XXX: TODO add an explicit interface for getting the reference list # status, to handle this bit of user-friendliness in the API more # explicitly. for (key, value) in nodes: translated_nodes.append((self.prefix + key, value)) self.add_nodes_callback(translated_nodes) def add_node(self, key, value, references=()): """Add a node to the index. :param key: The key. keys are non-empty tuples containing as many whitespace-free utf8 bytestrings as the key length defined for this index. :param references: An iterable of iterables of keys. Each is a reference to another key. :param value: The value to associate with the key. It may be any bytes as long as it does not contain \0 or \n. """ self.add_nodes(((key, value, references), )) def _strip_prefix(self, an_iter): """Strip prefix data from nodes and return it.""" for node in an_iter: # cross checks if node[1][:self.prefix_len] != self.prefix: raise errors.BadIndexData(self) for ref_list in node[3]: for ref_node in ref_list: if ref_node[:self.prefix_len] != self.prefix: raise errors.BadIndexData(self) yield node[0], node[1][self.prefix_len:], node[2], ( tuple(tuple(ref_node[self.prefix_len:] for ref_node in ref_list) for ref_list in node[3])) def iter_all_entries(self): """Iterate over all keys within the index iter_all_entries is implemented against the adapted index using iter_entries_prefix. :return: An iterable of (index, key, reference_lists, value). There is no defined order for the result iteration - it will be in the most efficient order for the index (in this case dictionary hash order). """ return self._strip_prefix(self.adapted.iter_entries_prefix([self.prefix_key])) def iter_entries(self, keys): """Iterate over keys within the index. :param keys: An iterable providing the keys to be retrieved. :return: An iterable of (index, key, value, reference_lists). There is no defined order for the result iteration - it will be in the most efficient order for the index (keys iteration order in this case). """ return self._strip_prefix(self.adapted.iter_entries( self.prefix + key for key in keys)) def iter_entries_prefix(self, keys): """Iterate over keys within the index using prefix matching. Prefix matching is applied within the tuple of a key, not to within the bytestring of each key element. e.g. if you have the keys ('foo', 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then only the former key is returned. :param keys: An iterable providing the key prefixes to be retrieved. Each key prefix takes the form of a tuple the length of a key, but with the last N elements 'None' rather than a regular bytestring. 
The first element cannot be 'None'. :return: An iterable as per iter_all_entries, but restricted to the keys with a matching prefix to those supplied. No additional keys will be returned, and every match that is in the index will be returned. """ return self._strip_prefix(self.adapted.iter_entries_prefix( self.prefix + key for key in keys)) def key_count(self): """Return an estimate of the number of keys in this index. For GraphIndexPrefixAdapter this is relatively expensive - key iteration with the prefix is done. """ return len(list(self.iter_all_entries())) def validate(self): """Call the adapted's validate.""" self.adapted.validate()
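# Illustrative sketch (editor's addition, not part of the original module):
# two standalone helpers reproducing logic documented above, runnable with
# the stdlib only. `region_index` mirrors GraphIndex._find_index as used by
# _parsed_byte_index; `move_to_front` mirrors the stable reordering done by
# CombinedGraphIndex._move_to_front_by_index. Both names are hypothetical.
from bisect import bisect_right

def region_index(parsed_byte_map, offset):
    """Index of the parsed (start, end) range holding offset, or of the
    immediately preceding range when offset is not yet parsed."""
    result = bisect_right(parsed_byte_map, (offset, 0)) - 1
    if result + 1 < len(parsed_byte_map):
        # border condition - offset may be the start of the next region
        if parsed_byte_map[result + 1][0] == offset:
            return result + 1
    return result

def move_to_front(indices, hit_indices):
    """Promote hit_indices (in the order given) while preserving the
    relative order of the unhit indices."""
    unhit = [idx for idx in indices if idx not in hit_indices]
    return list(hit_indices) + unhit

if __name__ == '__main__':
    # Regions 0..10 and 11..12 parsed; byte 10 is the single unparsed byte,
    # matching the worked example in the _parsed_byte_index docstring.
    pmap = [(0, 10), (11, 12)]
    assert [region_index(pmap, o) for o in (0, 10, 11, 12)] == [0, 0, 1, 1]
    # An index that answered a query moves to the front; the rest keep order.
    assert move_to_front(['a', 'b', 'c'], ['c']) == ['c', 'a', 'b']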
gpl-2.0
scalable-networks/ext
gnuradio-3.7.0.1/gr-trellis/examples/python/test_turbo_equalization1.py
13
5480
#!/usr/bin/env python from gnuradio import gr from gnuradio import trellis, digital, filter, blocks from gnuradio import eng_notation import math import sys import random import fsm_utils try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) def make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,type): metrics_in = trellis.metrics_f(fi.O(),dimensionality,tot_constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for inner SISO scale = blocks.multiply_const_ff(1.0/N0) gnd = blocks.vector_source_f([0],True); inter=[] deinter=[] siso_in=[] siso_out=[] # generate all blocks for it in range(IT): inter.append( trellis.permutation(interleaver.K(),interleaver.INTER(),fi.I(),gr.sizeof_float) ) siso_in.append( trellis.siso_f(fi,K,0,-1,True,False,type) ) deinter.append( trellis.permutation(interleaver.K(),interleaver.DEINTER(),fi.I(),gr.sizeof_float) ) if it < IT-1: siso_out.append( trellis.siso_f(fo,K,0,-1,False,True,type) ) else: siso_out.append( trellis.viterbi_s(fo,K,0,-1) ) # no soft outputs needed # connect first stage tb.connect (gnd,inter[0]) tb.connect (metrics_in,scale) tb.connect (scale,(siso_in[0],1)) # connect the rest for it in range(IT): if it < IT-1: tb.connect (scale,(siso_in[it+1],1)) tb.connect (siso_in[it],deinter[it],(siso_out[it],1)) tb.connect (gnd,(siso_out[it],0)) tb.connect (siso_out[it],inter[it+1]) tb.connect (inter[it],(siso_in[it],0)) else: tb.connect (siso_in[it],deinter[it],siso_out[it]) tb.connect (inter[it],(siso_in[it],0)) return (metrics_in,siso_out[IT-1]) def run_test (fo,fi,interleaver,Kb,bitspersymbol,K,channel,modulation,dimensionality,tot_constellation,Es,N0,IT,seed): tb = gr.top_block () L = len(channel) # TX # this for loop is TOO slow in python!!!
packet = [0]*(K) random.seed(seed) for i in range(len(packet)): packet[i] = random.randint(0, 2**bitspersymbol - 1) # random symbols src = blocks.vector_source_s(packet,False) enc_out = trellis.encoder_ss(fo,0) # initial state = 0 inter = trellis.permutation(interleaver.K(),interleaver.INTER(),1,gr.sizeof_short) mod = digital.chunks_to_symbols_sf(modulation[1],modulation[0]) # CHANNEL isi = filter.fir_filter_fff(1,channel) add = blocks.add_ff() noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0/2),seed) # RX (head,tail) = make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,trellis.TRELLIS_MIN_SUM) dst = blocks.vector_sink_s(); tb.connect (src,enc_out,inter,mod) tb.connect (mod,isi,(add,0)) tb.connect (noise,(add,1)) tb.connect (add,head) tb.connect (tail,dst) tb.run() data = dst.data() ntotal = len(data) nright=0 for i in range(ntotal): if packet[i]==data[i]: nright=nright+1 #else: #print "Error in ", i return (ntotal,ntotal-nright) def main(args): nargs = len (args) if nargs == 3: fname_out=args[0] esn0_db=float(args[1]) rep=int(args[2]) else: sys.stderr.write ('usage: test_turbo_equalization1.py fsm_name_out Es/No_db repetitions\n') sys.exit (1) # system parameters Kb=64*16 # packet size in bits (multiple of 16) modulation = fsm_utils.pam4 # see fsm_utils.py for available predefined modulations channel = fsm_utils.c_channel # see fsm_utils.py for available predefined test channels fo=trellis.fsm(fname_out) # get the outer FSM specification from a file fi=trellis.fsm(len(modulation[1]),len(channel)) # generate the FSM automatically if fo.O() != fi.I(): sys.stderr.write ('Incompatible cardinality between outer and inner FSM.\n') sys.exit (1) bitspersymbol = int(round(math.log(fo.I())/math.log(2))) # bits per FSM input symbol K=Kb/bitspersymbol # packet size in trellis steps interleaver=trellis.interleaver(K,666) # construct a random interleaver tot_channel = fsm_utils.make_isi_lookup(modulation,channel,True) # generate the lookup table (normalize energy to 1) dimensionality = tot_channel[0] tot_constellation = tot_channel[1] if len(tot_constellation)/dimensionality != fi.O(): sys.stderr.write ('Incompatible FSM output cardinality and lookup table size.\n') sys.exit (1) N0=pow(10.0,-esn0_db/10.0); # noise variance IT = 3 # number of turbo iterations tot_s=0 # total number of transmitted shorts terr_s=0 # total number of shorts in error terr_p=0 # total number of packets in error for i in range(rep): (s,e)=run_test(fo,fi,interleaver,Kb,bitspersymbol,K,channel,modulation,dimensionality,tot_constellation,1,N0,IT,-long(666+i)) # run experiment with different seed to get different noise realizations tot_s=tot_s+s terr_s=terr_s+e terr_p=terr_p+(e!=0) # count this packet as errored only if this run had errors if ((i+1)%10==0) : # display progress print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s) # estimate of the (short or bit) error rate print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s) if __name__ == '__main__': main (sys.argv[1:])
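# Illustrative sketch (editor's addition, not part of the original example):
# the script derives the noise variance from the Es/N0 operating point
# (symbol energy is normalized to 1 by make_isi_lookup) and hands sqrt(N0/2)
# to the Gaussian noise source as the per-dimension standard deviation. The
# helper below reproduces just that arithmetic; the name `noise_sigma` is a
# hypothetical addition for illustration.
def noise_sigma(esn0_db, es=1.0):
    """Per-dimension noise standard deviation for a given Es/N0 in dB."""
    n0 = es / pow(10.0, esn0_db / 10.0)  # matches N0=pow(10.0,-esn0_db/10.0) when es == 1
    return math.sqrt(n0 / 2.0)
# e.g. noise_sigma(10.0) == sqrt(0.05) ~= 0.2236, the sigma handed to
# analog.noise_source_f when running at Es/N0 = 10 dB.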
gpl-2.0
astagi/django-cms
cms/test_utils/project/placeholderapp/migrations_django/0001_initial.py
66
4526
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import cms.models.fields import cms.test_utils.project.placeholderapp.models class Migration(migrations.Migration): dependencies = [ ('cms', '0002_auto_20140816_1918'), ] operations = [ migrations.CreateModel( name='DynamicPlaceholderSlotExample', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('char_1', models.CharField(max_length=255, verbose_name='char_1')), ('char_2', models.CharField(max_length=255, verbose_name='char_2')), ('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.test_utils.project.placeholderapp.models.dynamic_placeholder_1, related_name='dynamic_pl_1', editable=False)), ('placeholder_2', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.test_utils.project.placeholderapp.models.dynamic_placeholder_2, related_name='dynamic_pl_2', editable=False)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Example1', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('char_1', models.CharField(max_length=255, verbose_name='char_1')), ('char_2', models.CharField(max_length=255, verbose_name='char_2')), ('char_3', models.CharField(max_length=255, verbose_name='char_3')), ('char_4', models.CharField(max_length=255, verbose_name='char_4')), ('date_field', models.DateField(null=True)), ('placeholder', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder', editable=False)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='MultilingualExample1', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_1', editable=False)), ], options={ 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='MultilingualExample1Translation', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('char_1', models.CharField(max_length=255, verbose_name='char_1')), ('char_2', models.CharField(max_length=255, verbose_name='char_2')), ('language_code', models.CharField(db_index=True, max_length=15)), ('master', models.ForeignKey(null=True, to='placeholderapp.MultilingualExample1', related_name='translations', editable=False)), ], options={ 'db_table': 'placeholderapp_multilingualexample1_translation', }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='multilingualexample1translation', unique_together=set([('language_code', 'master')]), ), migrations.CreateModel( name='TwoPlaceholderExample', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('char_1', models.CharField(max_length=255, verbose_name='char_1')), ('char_2', models.CharField(max_length=255, verbose_name='char_2')), ('char_3', models.CharField(max_length=255, verbose_name='char_3')), ('char_4', models.CharField(max_length=255, verbose_name='char_4')), ('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_1', related_name='p1', editable=False)), ('placeholder_2', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_2', related_name='p2', editable=False)), ], options={ }, 
bases=(models.Model,), ), ]
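# Illustrative sketch (editor's addition, not part of the original
# migration): DynamicPlaceholderSlotExample above stores callables
# (dynamic_placeholder_1/2) as the PlaceholderField slotname, so the slot
# name is computed per model instance instead of being fixed at class
# definition time. The model below shows the pattern as it would appear in
# the app's models.py; `slot_from_char_1` and `DynamicSlotSketch` are
# hypothetical stand-ins, not the project's actual definitions.
from cms.models.fields import PlaceholderField

def slot_from_char_1(instance):
    # Derive the placeholder slot name from a per-instance field.
    return instance.char_1

class DynamicSlotSketch(models.Model):
    char_1 = models.CharField(max_length=255)
    placeholder = PlaceholderField(slotname=slot_from_char_1, null=True,
                                   related_name='dynamic_sketch')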
bsd-3-clause
lhupfeldt/multiconf
test/invalid_values_test.py
1
19200
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT # All rights reserved. This work is under a BSD license, see LICENSE.TXT. import sys import os.path from pytest import raises from multiconf import mc_config, ConfigItem, ConfigException, MC_REQUIRED from multiconf.envs import EnvFactory from .utils.utils import config_error, next_line_num, replace_ids, lines_in, start_file_line from .utils.messages import already_printed_msg, config_error_mc_required_expected, mc_required_expected from .utils.messages import config_error_never_received_value_expected from .utils.tstclasses import ItemWithAA from .utils.invalid_values_classes import McRequiredInInitL1, McRequiredInInitL3 minor_version = sys.version_info[1] _utils = os.path.join(os.path.dirname(__file__), 'utils') ef1_prod_pp = EnvFactory() pp1 = ef1_prod_pp.Env('pp') prod1 = ef1_prod_pp.Env('prod') def ce(line_num, *lines): return config_error(__file__, line_num, *lines) _attribute_mc_required_expected = mc_required_expected.format(attr='aa', env=prod1) _mc_required_one_error_expected_ex = """There was 1 error when defining item: { "__class__": "ItemWithAA #as: 'ItemWithAA', id: 0000, not-frozen", "env": { "__class__": "Env", "name": "%(env_name)s" }, "aa": "MC_REQUIRED" }""" + already_printed_msg def test_attribute_mc_required_env(capsys): errorline = [None] with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAA() as cr: errorline[0] = next_line_num() cr.setattr('aa', prod=MC_REQUIRED, pp="hello") _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=prod1), start_file_line(__file__, errorline[0]), '^ConfigError: ' + _attribute_mc_required_expected, ) assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod') def test_attribute_mc_required_mc_force_env(capsys): errorline = [None] with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAA() as cr: errorline[0] = next_line_num() cr.setattr('aa', default=MC_REQUIRED, mc_force=True) _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_mc_required_expected.format(attr='aa', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='pp') def test_attribute_mc_required_default(capsys): errorline = [None] with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAA() as cr: errorline[0] = next_line_num() cr.setattr('aa', default=MC_REQUIRED, pp="hello") _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=prod1), start_file_line(__file__, errorline[0]), '^ConfigError: ' + _attribute_mc_required_expected, ) assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod') def test_attribute_mc_required_default_resolved_with_default_value_in_mc_init(capsys): class ItemWithAAMcInitResolve(ItemWithAA): def mc_init(self): super().mc_init() self.aa = 'Hi' @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAAMcInitResolve() as cr: cr.setattr('aa', default=MC_REQUIRED, pp="hello") cfg = config(pp1) assert cfg.ItemWithAAMcInitResolve.aa == 'hello' cfg = config(prod1) assert cfg.ItemWithAAMcInitResolve.aa == 'Hi' def 
test_attribute_mc_required_default_resolved_with_default_env_specific_value_in_mc_init(capsys): class ItemWithAAMcInitResolve(ItemWithAA): def mc_init(self): super().mc_init() self.setattr('aa', prod='Hi') @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAAMcInitResolve() as cr: cr.setattr('aa', default=MC_REQUIRED, pp="hello") cfg = config(pp1) assert cfg.ItemWithAAMcInitResolve.aa == 'hello' cfg = config(prod1) assert cfg.ItemWithAAMcInitResolve.aa == 'Hi' def test_attribute_mc_required_init(capsys): errorline = [None] with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAA(aa=MC_REQUIRED) as ci: errorline[0] = next_line_num() ci.setattr('aa', pp="hello") _sout, serr = capsys.readouterr() print(serr) print("errorline[0]", errorline[0]) assert serr == ce(errorline[0], _attribute_mc_required_expected) assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod') def test_attribute_mc_required_in_with(capsys): errorline = [None] with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAA() as cr: errorline[0] = next_line_num() cr.setattr('aa', prod="hi", pp=MC_REQUIRED) _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=pp1), start_file_line(__file__, errorline[0]), '^ConfigError: ' + mc_required_expected.format(attr='aa', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='pp') def test_attribute_mc_required_in_with_default_all_overridden(): @mc_config(ef1_prod_pp, load_now=True) def config(root): with ItemWithAA() as cr: # TODO: This should actually not be allowed, it does not make sense! 
cr.setattr('aa', default=MC_REQUIRED, pp="hello", prod="hi") cr = config(prod1).ItemWithAA assert cr.aa == "hi" def test_attribute_mc_required_init_args_all_overridden(): class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() self.aa = aa @mc_config(ef1_prod_pp, load_now=True) def config1(root): with ConfigItem() as cr: Requires(aa=3) cr = config1(prod1).ConfigItem assert cr.Requires.aa == 3 @mc_config(ef1_prod_pp, load_now=True) def config2(root): with ConfigItem() as cr: with Requires() as rq: rq.aa = 3 cr = config2(prod1).ConfigItem assert cr.Requires.aa == 3 def test_attribute_mc_required_args_all_overridden_in_mc_init(): class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() self.aa = aa def mc_init(self): self.aa = 7 @mc_config(ef1_prod_pp, load_now=True) def config(root): Requires() cr = config(prod1) assert cr.Requires.aa == 7 def test_attribute_mc_required_args_partial_set_in_init_overridden_in_mc_init(): class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() # Partial assignment is allowed in init self.setattr('aa', prod=aa) self.setattr('b', default=MC_REQUIRED, prod=2) def mc_init(self): self.aa = 7 self.b = 7 @mc_config(ef1_prod_pp, load_now=True) def config(root): Requires() cr = config(prod1) assert cr.Requires.aa == 7 assert cr.Requires.b == 2 cr = config(pp1) assert cr.Requires.aa == 7 assert cr.Requires.b == 7 def test_attribute_mc_required_args_partial_set_in_init_overridden_in_with(): class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() # Partial assignment is allowed in init self.setattr('aa', prod=aa) self.setattr('b', default=MC_REQUIRED, prod=2) @mc_config(ef1_prod_pp, load_now=True) def config(root): with Requires() as rq: rq.aa = 8 rq.setattr('b', pp=8) cr = config(prod1) assert cr.Requires.aa == 8 assert cr.Requires.b == 2 cr = config(pp1) assert cr.Requires.aa == 8 assert cr.Requires.b == 8 def test_attribute_mc_required_args_set_in_init_overridden_in_with(): class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() self.aa = aa @mc_config(ef1_prod_pp, load_now=True) def config(root): with Requires() as rq: rq.aa = 7 cr = config(prod1) assert cr.Requires.aa == 7 cr = config(pp1) assert cr.Requires.aa == 7 _attribute_mc_required_requires_expected_ex = """There was 1 error when defining item: { "__class__": "Requires #as: 'Requires', id: 0000, not-frozen", "env": { "__class__": "Env", "name": "pp" }, "aa": "MC_REQUIRED" }""" + already_printed_msg def test_attribute_mc_required_init_args_missing_env_value(capsys): errorline = [None] class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() self.aa = aa with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): with Requires() as rq: errorline[0] = next_line_num() rq.setattr('aa', prod='hi') _sout, serr = capsys.readouterr() print(_sout) assert serr == ce(errorline[0], mc_required_expected.format(attr='aa', env=pp1)) assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_requires_expected_ex _attribute_mc_required_required_init_arg_missing_with_expected_ex = """There was 1 error when defining item: {{ "__class__": "{0} #as: '{0}', id: 0000, not-frozen", "env": {{ "__class__": "Env", "name": "pp" }}, "aa": "MC_REQUIRED" }}""" + already_printed_msg def test_attribute_mc_required_init_args_missing_with(capsys): errorline = [None] # If the error occures on the last object, and that is not under a with 
statement, then the line will be the @mc_config with raises(ConfigException) as exinfo: errorline[0] = next_line_num() + (1 if minor_version > 7 else 0) @mc_config(ef1_prod_pp, load_now=True) def config(root): McRequiredInInitL1() _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=pp1), '^File "{}/invalid_values_classes.py", line 8'.format(_utils), mc_required_expected.format(attr='aa', env=pp1), ) exp = _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1') got = replace_ids(str(exinfo.value), False) assert got == exp with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config0(root): with McRequiredInInitL1(): errorline[0] = next_line_num() pass _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=pp1), '^File "{}/invalid_values_classes.py", line 8'.format(_utils), mc_required_expected.format(attr='aa', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1') # If the error occures on the last object, and that is not under a with statement, then the line will be the @mc_config with raises(ConfigException) as exinfo: errorline[0] = next_line_num() + (1 if minor_version > 7 else 0) @mc_config(ef1_prod_pp, load_now=True) def config1(root): McRequiredInInitL3() _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=pp1), '^File "{}/invalid_values_classes.py", line 8'.format(_utils), mc_required_expected.format(attr='aa', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL3') with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config2(root): with McRequiredInInitL3(): errorline[0] = next_line_num() pass _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_never_received_value_expected.format(env=pp1), '^File "{}/invalid_values_classes.py", line 8'.format(_utils), mc_required_expected.format(attr='aa', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL3') def test_attribute_mc_required_init_args_missing_previous_item(capsys): errorline = [None] with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(root): errorline[0] = next_line_num() McRequiredInInitL1() McRequiredInInitL3() _sout, serr = capsys.readouterr() assert lines_in( serr, config_error_never_received_value_expected.format(env=pp1), '^File "{}/invalid_values_classes.py", line 8'.format(_utils), mc_required_expected.format(attr='aa', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1') def test_attribute_mc_required_init_assign_all_overridden(): class Requires(ConfigItem): def __init__(self, aa=MC_REQUIRED): super().__init__() self.aa = aa @mc_config(ef1_prod_pp, load_now=True) def config(root): Requires(aa=3) cr = config(prod1) assert cr.Requires.aa == 3 @mc_config(ef1_prod_pp, load_now=True) def config(_): with Requires() as rq: rq.aa = 3 cr = config(prod1) 
assert cr.Requires.aa == 3 _attribute_mc_required_env_in_init_expected_ex = """There were %(num_errors)s errors when defining item: { "__class__": "MyRoot #as: 'MyRoot', id: 0000, not-frozen", "env": { "__class__": "Env", "name": "pp" }, "aa": "MC_REQUIRED", "bb": "MC_REQUIRED" }""" + already_printed_msg def test_attribute_setattr_mc_required_force_in_init(capsys): errorline = [None] class MyRoot(ConfigItem): def __init__(self): super().__init__() errorline[0] = next_line_num() self.setattr('aa', default=MC_REQUIRED, mc_force=True) self.setattr('bb', default=MC_REQUIRED, mc_force=True) with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(_): MyRoot() _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_mc_required_expected.format(attr='aa', env=pp1), config_error_mc_required_expected.format(attr='bb', env=pp1), ) assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_env_in_init_expected_ex % dict(num_errors=2) def test_multiple_attributes_mc_required_init_not_set(capsys): errorline = [None] class ItemWithAAABBCC(ConfigItem): def __init__(self): super().__init__() self.aa = MC_REQUIRED self.bb = MC_REQUIRED self.cc = MC_REQUIRED with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(_): with ConfigItem() as cr: errorline[0] = next_line_num() ItemWithAAABBCC() _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorline[0]), config_error_mc_required_expected.format(attr='aa', env=pp1), config_error_mc_required_expected.format(attr='bb', env=pp1), config_error_mc_required_expected.format(attr='cc', env=pp1), ) def test_multiple_attributes_mc_required_mc_init_not_set(capsys): errorlines = [None, None] class ItemWithAAABBCC(ConfigItem): def __init__(self): super().__init__() self.aa = MC_REQUIRED self.bb = MC_REQUIRED self.cc = MC_REQUIRED def mc_init(self): super().__init__() errorlines[0] = next_line_num() self.setattr('aa', default=MC_REQUIRED) self.setattr('bb', default=MC_REQUIRED, pp='Hello') errorlines[1] = next_line_num() self.cc = MC_REQUIRED with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(_): with ConfigItem() as cr: ItemWithAAABBCC() _sout, serr = capsys.readouterr() assert lines_in( serr, start_file_line(__file__, errorlines[0]), config_error_mc_required_expected.format(attr='aa', env=pp1), start_file_line(__file__, errorlines[1]), config_error_mc_required_expected.format(attr='cc', env=pp1), ) _multiple_attributes_mc_required_env_expected_ex = """There %(ww)s %(num_errors)s %(err)s when defining item: { "__class__": "MyRoot #as: 'MyRoot', id: 0000, not-frozen", "env": { "__class__": "Env", "name": "pp" }, "aa": "hello", "bb": "MC_REQUIRED" }""" + already_printed_msg def test_multiple_attributes_mc_required_env(capsys): errorline = [None] class MyRoot(ConfigItem): def __init__(self): super().__init__() self.aa = MC_REQUIRED self.bb = MC_REQUIRED with raises(ConfigException) as exinfo: @mc_config(ef1_prod_pp, load_now=True) def config(_): with MyRoot() as cr: errorline[0] = next_line_num() cr.setattr('aa', prod=MC_REQUIRED, pp="hello") cr.setattr('bb', prod=1, pp=MC_REQUIRED) _sout, serr = capsys.readouterr() #assert ce(errorline[0], mc_required_expected.format(attr='aa', env=prod1)) in serr assert ce(errorline[0] + 1, mc_required_expected.format(attr='bb', env=pp1)) in serr assert replace_ids(str(exinfo.value), False) == 
_multiple_attributes_mc_required_env_expected_ex % dict(ww='was', num_errors=1, err='error')
bsd-3-clause
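A minimal sketch of the MC_REQUIRED pattern the tests above exercise (assumes the multiconf package and the same two-env setup; the item class, attribute, and values below are illustrative, not from the test suite):

# Every env must receive a real value before the config freezes; leaving one
# env at MC_REQUIRED raises the ConfigException the tests assert on.
from multiconf import mc_config, ConfigItem, MC_REQUIRED
from multiconf.envs import EnvFactory

ef = EnvFactory()
pp = ef.Env('pp')
prod = ef.Env('prod')

class DbConfig(ConfigItem):  # hypothetical item for illustration
    def __init__(self, host=MC_REQUIRED):
        super().__init__()
        self.host = host

@mc_config(ef, load_now=True)
def config(root):
    with DbConfig() as db:
        # Give both envs a value; dropping either kwarg reproduces the
        # "attribute mc_required" errors tested above.
        db.setattr('host', pp='db.pp.example', prod='db.prod.example')

print(config(prod).DbConfig.host)  # -> 'db.prod.example'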
mshafiq9/django
tests/gis_tests/geos_tests/test_mutable_list.py
173
14846
# Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com # All rights reserved. # # Modified from original contribution by Aryeh Leib Taurog, which was # released under the New BSD license. import unittest from django.contrib.gis.geos.mutable_list import ListMixin from django.utils import six class UserListA(ListMixin): _mytype = tuple def __init__(self, i_list, *args, **kwargs): self._list = self._mytype(i_list) super(UserListA, self).__init__(*args, **kwargs) def __len__(self): return len(self._list) def __str__(self): return str(self._list) def __repr__(self): return repr(self._list) def _set_list(self, length, items): # this would work: # self._list = self._mytype(items) # but then we wouldn't be testing length parameter itemList = ['x'] * length for i, v in enumerate(items): itemList[i] = v self._list = self._mytype(itemList) def _get_single_external(self, index): return self._list[index] class UserListB(UserListA): _mytype = list def _set_single(self, index, value): self._list[index] = value def nextRange(length): nextRange.start += 100 return range(nextRange.start, nextRange.start + length) nextRange.start = 0 class ListMixinTest(unittest.TestCase): """ Tests base class ListMixin by comparing a list clone which is a ListMixin subclass with a real Python list. """ limit = 3 listType = UserListA def lists_of_len(self, length=None): if length is None: length = self.limit pl = list(range(length)) return pl, self.listType(pl) def limits_plus(self, b): return range(-self.limit - b, self.limit + b) def step_range(self): return list(range(-1 - self.limit, 0)) + list(range(1, 1 + self.limit)) def test01_getslice(self): 'Slice retrieval' pl, ul = self.lists_of_len() for i in self.limits_plus(1): self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i)) self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i)) for j in self.limits_plus(1): self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i, j)) for k in self.step_range(): self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i, j, k)) for k in self.step_range(): self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i, k)) self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i, k)) for k in self.step_range(): self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k)) def test02_setslice(self): 'Slice assignment' def setfcn(x, i, j, k, L): x[i:j:k] = range(L) pl, ul = self.lists_of_len() for slen in range(self.limit + 1): ssl = nextRange(slen) ul[:] = ssl pl[:] = ssl self.assertEqual(pl, ul[:], 'set slice [:]') for i in self.limits_plus(1): ssl = nextRange(slen) ul[i:] = ssl pl[i:] = ssl self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i)) ssl = nextRange(slen) ul[:i] = ssl pl[:i] = ssl self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i)) for j in self.limits_plus(1): ssl = nextRange(slen) ul[i:j] = ssl pl[i:j] = ssl self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j)) for k in self.step_range(): ssl = nextRange(len(ul[i:j:k])) ul[i:j:k] = ssl pl[i:j:k] = ssl self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k)) sliceLen = len(ul[i:j:k]) self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1) if sliceLen > 2: self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1) for k in self.step_range(): ssl = nextRange(len(ul[i::k])) ul[i::k] = ssl pl[i::k] = ssl self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k)) ssl = nextRange(len(ul[:i:k])) ul[:i:k] = ssl pl[:i:k] = ssl self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k)) for k in self.step_range(): ssl = nextRange(len(ul[::k])) ul[::k] 
= ssl pl[::k] = ssl self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k)) def test03_delslice(self): 'Delete slice' for Len in range(self.limit): pl, ul = self.lists_of_len(Len) del pl[:] del ul[:] self.assertEqual(pl[:], ul[:], 'del slice [:]') for i in range(-Len - 1, Len + 1): pl, ul = self.lists_of_len(Len) del pl[i:] del ul[i:] self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i)) pl, ul = self.lists_of_len(Len) del pl[:i] del ul[:i] self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i)) for j in range(-Len - 1, Len + 1): pl, ul = self.lists_of_len(Len) del pl[i:j] del ul[i:j] self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i, j)) for k in list(range(-Len - 1, 0)) + list(range(1, Len)): pl, ul = self.lists_of_len(Len) del pl[i:j:k] del ul[i:j:k] self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i, j, k)) for k in list(range(-Len - 1, 0)) + list(range(1, Len)): pl, ul = self.lists_of_len(Len) del pl[:i:k] del ul[:i:k] self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i, k)) pl, ul = self.lists_of_len(Len) del pl[i::k] del ul[i::k] self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i, k)) for k in list(range(-Len - 1, 0)) + list(range(1, Len)): pl, ul = self.lists_of_len(Len) del pl[::k] del ul[::k] self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k)) def test04_get_set_del_single(self): 'Get/set/delete single item' pl, ul = self.lists_of_len() for i in self.limits_plus(0): self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i) for i in self.limits_plus(0): pl, ul = self.lists_of_len() pl[i] = 100 ul[i] = 100 self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i) for i in self.limits_plus(0): pl, ul = self.lists_of_len() del pl[i] del ul[i] self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i) def test05_out_of_range_exceptions(self): 'Out of range exceptions' def setfcn(x, i): x[i] = 20 def getfcn(x, i): return x[i] def delfcn(x, i): del x[i] pl, ul = self.lists_of_len() for i in (-1 - self.limit, self.limit): self.assertRaises(IndexError, setfcn, ul, i) # 'set index %d' % i) self.assertRaises(IndexError, getfcn, ul, i) # 'get index %d' % i) self.assertRaises(IndexError, delfcn, ul, i) # 'del index %d' % i) def test06_list_methods(self): 'List methods' pl, ul = self.lists_of_len() pl.append(40) ul.append(40) self.assertEqual(pl[:], ul[:], 'append') pl.extend(range(50, 55)) ul.extend(range(50, 55)) self.assertEqual(pl[:], ul[:], 'extend') pl.reverse() ul.reverse() self.assertEqual(pl[:], ul[:], 'reverse') for i in self.limits_plus(1): pl, ul = self.lists_of_len() pl.insert(i, 50) ul.insert(i, 50) self.assertEqual(pl[:], ul[:], 'insert at %d' % i) for i in self.limits_plus(0): pl, ul = self.lists_of_len() self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i) self.assertEqual(pl[:], ul[:], 'after pop at %d' % i) pl, ul = self.lists_of_len() self.assertEqual(pl.pop(), ul.pop(i), 'popped value') self.assertEqual(pl[:], ul[:], 'after pop') pl, ul = self.lists_of_len() def popfcn(x, i): x.pop(i) self.assertRaises(IndexError, popfcn, ul, self.limit) self.assertRaises(IndexError, popfcn, ul, -1 - self.limit) pl, ul = self.lists_of_len() for val in range(self.limit): self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val) for val in self.limits_plus(2): self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val) for val in range(self.limit): pl, ul = self.lists_of_len() pl.remove(val) ul.remove(val) self.assertEqual(pl[:], ul[:], 'after remove val %d' % val) def indexfcn(x, v): return x.index(v) def removefcn(x, v): return 
x.remove(v) self.assertRaises(ValueError, indexfcn, ul, 40) self.assertRaises(ValueError, removefcn, ul, 40) def test07_allowed_types(self): 'Type-restricted list' pl, ul = self.lists_of_len() ul._allowed = six.integer_types ul[1] = 50 ul[:2] = [60, 70, 80] def setfcn(x, i, v): x[i] = v self.assertRaises(TypeError, setfcn, ul, 2, 'hello') self.assertRaises(TypeError, setfcn, ul, slice(0, 3, 2), ('hello', 'goodbye')) def test08_min_length(self): 'Length limits' pl, ul = self.lists_of_len() ul._minlength = 1 def delfcn(x, i): del x[:i] def setfcn(x, i): x[:i] = [] for i in range(self.limit - ul._minlength + 1, self.limit + 1): self.assertRaises(ValueError, delfcn, ul, i) self.assertRaises(ValueError, setfcn, ul, i) del ul[:ul._minlength] ul._maxlength = 4 for i in range(0, ul._maxlength - len(ul)): ul.append(i) self.assertRaises(ValueError, ul.append, 10) def test09_iterable_check(self): 'Error on assigning non-iterable to slice' pl, ul = self.lists_of_len(self.limit + 1) def setfcn(x, i, v): x[i] = v self.assertRaises(TypeError, setfcn, ul, slice(0, 3, 2), 2) def test10_checkindex(self): 'Index check' pl, ul = self.lists_of_len() for i in self.limits_plus(0): if i < 0: self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)') else: self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)') for i in (-self.limit - 1, self.limit): self.assertRaises(IndexError, ul._checkindex, i) def test_11_sorting(self): 'Sorting' pl, ul = self.lists_of_len() pl.insert(0, pl.pop()) ul.insert(0, ul.pop()) pl.sort() ul.sort() self.assertEqual(pl[:], ul[:], 'sort') mid = pl[len(pl) // 2] pl.sort(key=lambda x: (mid - x) ** 2) ul.sort(key=lambda x: (mid - x) ** 2) self.assertEqual(pl[:], ul[:], 'sort w/ key') pl.insert(0, pl.pop()) ul.insert(0, ul.pop()) pl.sort(reverse=True) ul.sort(reverse=True) self.assertEqual(pl[:], ul[:], 'sort w/ reverse') mid = pl[len(pl) // 2] pl.sort(key=lambda x: (mid - x) ** 2) ul.sort(key=lambda x: (mid - x) ** 2) self.assertEqual(pl[:], ul[:], 'sort w/ key') def test_12_arithmetic(self): 'Arithmetic' pl, ul = self.lists_of_len() al = list(range(10, 14)) self.assertEqual(list(pl + al), list(ul + al), 'add') self.assertEqual(type(ul), type(ul + al), 'type of add result') self.assertEqual(list(al + pl), list(al + ul), 'radd') self.assertEqual(type(al), type(al + ul), 'type of radd result') objid = id(ul) pl += al ul += al self.assertEqual(pl[:], ul[:], 'in-place add') self.assertEqual(objid, id(ul), 'in-place add id') for n in (-1, 0, 1, 3): pl, ul = self.lists_of_len() self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n) self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n) self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n) self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n) objid = id(ul) pl *= n ul *= n self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n) self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n) pl, ul = self.lists_of_len() self.assertEqual(pl, ul, 'cmp for equal') self.assertNotEqual(ul, pl + [2], 'cmp for not equal') self.assertGreaterEqual(pl, ul, 'cmp for gte self') self.assertLessEqual(pl, ul, 'cmp for lte self') self.assertGreaterEqual(ul, pl, 'cmp for self gte') self.assertLessEqual(ul, pl, 'cmp for self lte') self.assertGreater(pl + [5], ul, 'cmp') self.assertGreaterEqual(pl + [5], ul, 'cmp') self.assertLess(pl, ul + [2], 'cmp') self.assertLessEqual(pl, ul + [2], 'cmp') self.assertGreater(ul + [5], pl, 'cmp') self.assertGreaterEqual(ul + [5], pl, 'cmp') 
self.assertLess(ul, pl + [2], 'cmp') self.assertLessEqual(ul, pl + [2], 'cmp') # Also works with a custom IndexError ul_longer = ul + [2] ul_longer._IndexError = TypeError ul._IndexError = TypeError self.assertNotEqual(ul_longer, pl) self.assertGreater(ul_longer, ul) pl[1] = 20 self.assertGreater(pl, ul, 'cmp for gt self') self.assertLess(ul, pl, 'cmp for self lt') pl[1] = -20 self.assertLess(pl, ul, 'cmp for lt self') self.assertGreater(ul, pl, 'cmp for gt self') class ListMixinTestSingle(ListMixinTest): listType = UserListB
bsd-3-clause
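A minimal ListMixin subclass, modeled directly on UserListA above (assumes a Django checkout providing django.contrib.gis.geos.mutable_list; the class name is illustrative). It shows the small set of hooks the mixin needs to provide full list behavior over immutable storage:

from django.contrib.gis.geos.mutable_list import ListMixin

class TupleList(ListMixin):
    _mytype = tuple  # storage is immutable; ListMixin rebuilds it on change

    def __init__(self, items):
        self._list = self._mytype(items)
        super(TupleList, self).__init__()

    def __len__(self):
        return len(self._list)

    def _get_single_external(self, index):
        return self._list[index]

    def _set_list(self, length, items):
        self._list = self._mytype(items)

tl = TupleList(range(3))
tl.append(99)  # list API works even though the backing store is a tuple
assert list(tl) == [0, 1, 2, 99]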
theheros/kbengine
kbe/src/lib/python/Lib/test/test_raise.py
54
10232
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Tests for the raise statement.""" from test import support import sys import types import unittest def get_tb(): try: raise OSError() except: return sys.exc_info()[2] class Context: def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): return True class TestRaise(unittest.TestCase): def test_invalid_reraise(self): try: raise except RuntimeError as e: self.assertIn("No active exception", str(e)) else: self.fail("No exception raised") def test_reraise(self): try: try: raise IndexError() except IndexError as e: exc1 = e raise except IndexError as exc2: self.assertTrue(exc1 is exc2) else: self.fail("No exception raised") def test_except_reraise(self): def reraise(): try: raise TypeError("foo") except: try: raise KeyError("caught") except KeyError: pass raise self.assertRaises(TypeError, reraise) def test_finally_reraise(self): def reraise(): try: raise TypeError("foo") except: try: raise KeyError("caught") finally: raise self.assertRaises(KeyError, reraise) def test_nested_reraise(self): def nested_reraise(): raise def reraise(): try: raise TypeError("foo") except: nested_reraise() self.assertRaises(TypeError, reraise) def test_with_reraise1(self): def reraise(): try: raise TypeError("foo") except: with Context(): pass raise self.assertRaises(TypeError, reraise) def test_with_reraise2(self): def reraise(): try: raise TypeError("foo") except: with Context(): raise KeyError("caught") raise self.assertRaises(TypeError, reraise) def test_yield_reraise(self): def reraise(): try: raise TypeError("foo") except: yield 1 raise g = reraise() next(g) self.assertRaises(TypeError, lambda: next(g)) self.assertRaises(StopIteration, lambda: next(g)) def test_erroneous_exception(self): class MyException(Exception): def __init__(self): raise RuntimeError() try: raise MyException except RuntimeError: pass else: self.fail("No exception raised") def test_new_returns_invalid_instance(self): # See issue #11627. 
class MyException(Exception): def __new__(cls, *args): return object() with self.assertRaises(TypeError): raise MyException class TestCause(unittest.TestCase): def test_invalid_cause(self): try: raise IndexError from 5 except TypeError as e: self.assertIn("exception cause", str(e)) else: self.fail("No exception raised") def test_class_cause(self): try: raise IndexError from KeyError except IndexError as e: self.assertIsInstance(e.__cause__, KeyError) else: self.fail("No exception raised") def test_instance_cause(self): cause = KeyError() try: raise IndexError from cause except IndexError as e: self.assertTrue(e.__cause__ is cause) else: self.fail("No exception raised") def test_erroneous_cause(self): class MyException(Exception): def __init__(self): raise RuntimeError() try: raise IndexError from MyException except RuntimeError: pass else: self.fail("No exception raised") class TestTraceback(unittest.TestCase): def test_sets_traceback(self): try: raise IndexError() except IndexError as e: self.assertIsInstance(e.__traceback__, types.TracebackType) else: self.fail("No exception raised") def test_accepts_traceback(self): tb = get_tb() try: raise IndexError().with_traceback(tb) except IndexError as e: self.assertNotEqual(e.__traceback__, tb) self.assertEqual(e.__traceback__.tb_next, tb) else: self.fail("No exception raised") class TestContext(unittest.TestCase): def test_instance_context_instance_raise(self): context = IndexError() try: try: raise context except: raise OSError() except OSError as e: self.assertEqual(e.__context__, context) else: self.fail("No exception raised") def test_class_context_instance_raise(self): context = IndexError try: try: raise context except: raise OSError() except OSError as e: self.assertNotEqual(e.__context__, context) self.assertIsInstance(e.__context__, context) else: self.fail("No exception raised") def test_class_context_class_raise(self): context = IndexError try: try: raise context except: raise OSError except OSError as e: self.assertNotEqual(e.__context__, context) self.assertIsInstance(e.__context__, context) else: self.fail("No exception raised") def test_c_exception_context(self): try: try: 1/0 except: raise OSError except OSError as e: self.assertIsInstance(e.__context__, ZeroDivisionError) else: self.fail("No exception raised") def test_c_exception_raise(self): try: try: 1/0 except: xyzzy except NameError as e: self.assertIsInstance(e.__context__, ZeroDivisionError) else: self.fail("No exception raised") def test_noraise_finally(self): try: try: pass finally: raise OSError except OSError as e: self.assertTrue(e.__context__ is None) else: self.fail("No exception raised") def test_raise_finally(self): try: try: 1/0 finally: raise OSError except OSError as e: self.assertIsInstance(e.__context__, ZeroDivisionError) else: self.fail("No exception raised") def test_context_manager(self): class ContextManager: def __enter__(self): pass def __exit__(self, t, v, tb): xyzzy try: with ContextManager(): 1/0 except NameError as e: self.assertIsInstance(e.__context__, ZeroDivisionError) else: self.fail("No exception raised") def test_cycle_broken(self): # Self-cycles (when re-raising a caught exception) are broken try: try: 1/0 except ZeroDivisionError as e: raise e except ZeroDivisionError as e: self.assertTrue(e.__context__ is None, e.__context__) def test_reraise_cycle_broken(self): # Non-trivial context cycles (through re-raising a previous exception) # are broken too. 
try: try: xyzzy except NameError as a: try: 1/0 except ZeroDivisionError: raise a except NameError as e: self.assertTrue(e.__context__.__context__ is None) def test_3118(self): # deleting the generator caused the __context__ to be cleared def gen(): try: yield 1 finally: pass def f(): g = gen() next(g) try: try: raise ValueError except: del g raise KeyError except Exception as e: self.assertIsInstance(e.__context__, ValueError) f() def test_3611(self): # A re-raised exception in a __del__ caused the __context__ # to be cleared class C: def __del__(self): try: 1/0 except: raise def f(): x = C() try: try: x.x except AttributeError: del x raise TypeError except Exception as e: self.assertNotEqual(e.__context__, None) self.assertIsInstance(e.__context__, AttributeError) with support.captured_output("stderr"): f() class TestRemovedFunctionality(unittest.TestCase): def test_tuples(self): try: raise (IndexError, KeyError) # This should be a tuple! except TypeError: pass else: self.fail("No exception raised") def test_strings(self): try: raise "foo" except TypeError: pass else: self.fail("No exception raised") def test_main(): support.run_unittest(__name__) if __name__ == "__main__": unittest.main()
lgpl-3.0
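A worked example of the chaining semantics TestCause and TestContext assert on (plain CPython 3 behavior, no dependencies): raising inside an except block with "from" sets __cause__ explicitly, and __context__ is recorded implicitly as well.

try:
    try:
        raise KeyError('missing')
    except KeyError as exc:
        raise OSError('wrapped') from exc   # sets __cause__ explicitly
except OSError as err:
    assert isinstance(err.__cause__, KeyError)    # explicit cause
    assert isinstance(err.__context__, KeyError)  # implicit context, too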
embeddedarm/android_external_chromium_org
build/android/tombstones.py
28
5953
#!/usr/bin/env python # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # # Find the most recent tombstone file(s) on all connected devices # and prints their stacks. # # Assumes tombstone file was created with current symbols. import datetime import logging import multiprocessing import os import subprocess import sys import optparse from pylib import android_commands def _ListTombstones(adb): """List the tombstone files on the device. Args: adb: An instance of AndroidCommands. Yields: Tuples of (tombstone filename, date time of file on device). """ lines = adb.RunShellCommand('TZ=UTC su -c ls -a -l /data/tombstones') for line in lines: if 'tombstone' in line and not 'No such file or directory' in line: details = line.split() t = datetime.datetime.strptime(details[-3] + ' ' + details[-2], '%Y-%m-%d %H:%M') yield details[-1], t def _GetDeviceDateTime(adb): """Determine the date time on the device. Args: adb: An instance of AndroidCommands. Returns: A datetime instance. """ device_now_string = adb.RunShellCommand('TZ=UTC date') return datetime.datetime.strptime( device_now_string[0], '%a %b %d %H:%M:%S %Z %Y') def _GetTombstoneData(adb, tombstone_file): """Retrieve the tombstone data from the device Args: tombstone_file: the tombstone to retrieve Returns: A list of lines """ return adb.GetProtectedFileContents('/data/tombstones/' + tombstone_file) def _EraseTombstone(adb, tombstone_file): """Deletes a tombstone from the device. Args: tombstone_file: the tombstone to delete. """ return adb.RunShellCommandWithSU('rm /data/tombstones/' + tombstone_file) def _ResolveSymbols(tombstone_data, include_stack): """Run the stack tool for given tombstone input. Args: tombstone_data: a list of strings of tombstone data. include_stack: boolean whether to include stack data in output. Yields: A string for each line of resolved stack output. """ stack_tool = os.path.join(os.path.dirname(__file__), '..', '..', 'third_party', 'android_platform', 'development', 'scripts', 'stack') proc = subprocess.Popen(stack_tool, stdin=subprocess.PIPE, stdout=subprocess.PIPE) output = proc.communicate(input='\n'.join(tombstone_data))[0] for line in output.split('\n'): if not include_stack and 'Stack Data:' in line: break yield line def _ResolveTombstone(tombstone): lines = [] lines += [tombstone['file'] + ' created on ' + str(tombstone['time']) + ', about this long ago: ' + (str(tombstone['device_now'] - tombstone['time']) + ' Device: ' + tombstone['serial'])] print '\n'.join(lines) print 'Resolving...' lines += _ResolveSymbols(tombstone['data'], tombstone['stack']) return lines def _ResolveTombstones(jobs, tombstones): """Resolve a list of tombstones. Args: jobs: the number of jobs to use with multiprocess. tombstones: a list of tombstones. """ if not tombstones: print 'No device attached? Or no tombstones?' return if len(tombstones) == 1: data = _ResolveTombstone(tombstones[0]) else: pool = multiprocessing.Pool(processes=jobs) data = pool.map(_ResolveTombstone, tombstones) data = ['\n'.join(d) for d in data] print '\n'.join(data) def _GetTombstonesForDevice(adb, options): """Returns a list of tombstones on a given adb connection. Args: adb: An instance of Androidcommands. options: command line arguments from OptParse """ ret = [] all_tombstones = list(_ListTombstones(adb)) if not all_tombstones: print 'No device attached? Or no tombstones?' 
return ret # Sort the tombstones in date order, descending all_tombstones.sort(cmp=lambda a, b: cmp(b[1], a[1])) # Only resolve the most recent unless --all-tombstones given. tombstones = all_tombstones if options.all_tombstones else [all_tombstones[0]] device_now = _GetDeviceDateTime(adb) for tombstone_file, tombstone_time in tombstones: ret += [{'serial': adb.Adb().GetSerialNumber(), 'device_now': device_now, 'time': tombstone_time, 'file': tombstone_file, 'stack': options.stack, 'data': _GetTombstoneData(adb, tombstone_file)}] # Erase all the tombstones if desired. if options.wipe_tombstones: for tombstone_file, _ in all_tombstones: _EraseTombstone(adb, tombstone_file) return ret def main(): parser = optparse.OptionParser() parser.add_option('--device', help='The serial number of the device. If not specified ' 'will use all devices.') parser.add_option('-a', '--all-tombstones', action='store_true', help="""Resolve symbols for all tombstones, rather than just the most recent""") parser.add_option('-s', '--stack', action='store_true', help='Also include symbols for stack data') parser.add_option('-w', '--wipe-tombstones', action='store_true', help='Erase all tombstones from device after processing') parser.add_option('-j', '--jobs', type='int', default=4, help='Number of jobs to use when processing multiple ' 'crash stacks.') options, args = parser.parse_args() if options.device: devices = [options.device] else: devices = android_commands.GetAttachedDevices() tombstones = [] for device in devices: adb = android_commands.AndroidCommands(device) tombstones += _GetTombstonesForDevice(adb, options) _ResolveTombstones(options.jobs, tombstones) if __name__ == '__main__': sys.exit(main())
bsd-3-clause
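Note that the script's date ordering, all_tombstones.sort(cmp=lambda a, b: cmp(b[1], a[1])), only runs on Python 2 (both the cmp keyword and the cmp builtin are gone in Python 3). A Python 3 equivalent over the (filename, datetime) tuples that _ListTombstones yields would be (sample data below is hypothetical):

import datetime

t1 = datetime.datetime(2013, 5, 1, 12, 0)
t2 = datetime.datetime(2013, 5, 2, 9, 30)
all_tombstones = [('tombstone_00', t1), ('tombstone_01', t2)]
all_tombstones.sort(key=lambda entry: entry[1], reverse=True)  # newest first
assert all_tombstones[0][0] == 'tombstone_01'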
datalogics-robb/scons
src/engine/SCons/Tool/tar.py
2
2320
"""SCons.Tool.tar Tool-specific initialization for tar. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import SCons.Action import SCons.Builder import SCons.Defaults import SCons.Node.FS import SCons.Util tars = ['tar', 'gtar'] TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR') TarBuilder = SCons.Builder.Builder(action = TarAction, source_factory = SCons.Node.FS.Entry, source_scanner = SCons.Defaults.DirScanner, suffix = '$TARSUFFIX', multi = 1) def generate(env): """Add Builders and construction variables for tar to an Environment.""" try: bld = env['BUILDERS']['Tar'] except KeyError: bld = TarBuilder env['BUILDERS']['Tar'] = bld env['TAR'] = env.Detect(tars) or 'gtar' env['TARFLAGS'] = SCons.Util.CLVar('-c') env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES' env['TARSUFFIX'] = '.tar' def exists(env): return env.Detect(tars)
mit
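A sketch of driving the Tar builder this tool registers, as it would appear in an SConstruct (assumes SCons is installed; targets and flags are illustrative):

env = Environment(tools=['tar'])
env.Tar('dist/sources.tar', ['src', 'README'])
# TARFLAGS defaults to '-c'; adding 'z' and a matching suffix would
# produce a gzipped archive instead:
env.Append(TARFLAGS='-z')
env.Replace(TARSUFFIX='.tar.gz')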
GoogleCloudPlatform/DataflowTemplates
v2/common/src/test/resources/PythonTextTransformerTest/transform.py
1
2207
""" Copyright (C) 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """ A good transform function. @param {string} inJson @return {string} outJson """ import copy import json import sys import traceback def transform(event): """ Return a Dict or List of Dict Objects. Return None to discard """ event['new_key'] = 'new_value' # event = event return event def _handle_result(input_data): event_id = copy.deepcopy(input_data['id']) event = copy.deepcopy(input_data['event']) try: transformed_event = transform(event) if isinstance(transformed_event, list): for row in transformed_event: payload = json.dumps({'status': 'SUCCESS', 'id': event_id, 'event': row, 'error_message': None}) print(payload) else: payload = json.dumps({'status': 'SUCCESS', 'id': event_id, 'event': transformed_event, 'error_message': None}) print(payload) except Exception as e: stack_trace = traceback.format_exc() payload = json.dumps({'status': 'FAILED', 'id': event_id, 'event': event, 'error_message': stack_trace}) print(payload) if __name__ == '__main__': # TODO: How do we handle the case where there are no messages file_name = sys.argv[1] data = [] with open(file_name, "r") as data_file: for line in data_file: data.append(json.loads(line)) if isinstance(data, list): for event in data: _handle_result(event) else: event = data _handle_result(event) exit()
apache-2.0
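An illustrative round trip through _handle_result's contract: one JSON object per input line carrying 'id' and 'event', and one status payload per line on stdout (the id and event values below are made up):

import json

line = json.dumps({'id': 'msg-1', 'event': {'user': 'ada'}})
record = json.loads(line)
# transform() above adds new_key, so the SUCCESS payload printed for this
# record has this shape:
expected = {'status': 'SUCCESS', 'id': 'msg-1',
            'event': {'user': 'ada', 'new_key': 'new_value'},
            'error_message': None}
print(json.dumps(expected))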
zifeishan/deepdive
examples/tutorial_example/step3-more-data/experiment-reports/v00001/code/udf/ext_has_spouse_features.py
60
1304
#! /usr/bin/env python

import sys
import ddlib     # DeepDive python utility

ARR_DELIM = '~^~'

# For each input tuple
for row in sys.stdin:
  parts = row.strip().split('\t')
  if len(parts) != 6:
    print >>sys.stderr, 'Failed to parse row:', row
    continue

  # Get all fields from a row
  words = parts[0].split(ARR_DELIM)
  relation_id = parts[1]
  p1_start, p1_length, p2_start, p2_length = [int(x) for x in parts[2:]]

  # Unpack input into tuples.
  span1 = ddlib.Span(begin_word_id=p1_start, length=p1_length)
  span2 = ddlib.Span(begin_word_id=p2_start, length=p2_length)

  # Features for this pair come in here
  features = set()

  # Feature 1: Bag of words between the two phrases
  words_between = ddlib.tokens_between_spans(words, span1, span2)
  for word in words_between.elements:
    features.add("word_between=" + word)

  # Feature 2: Number of words between the two phrases
  features.add("num_words_between=%s" % len(words_between.elements))

  # Feature 3: Does the last word (last name) match?
  last_word_left = ddlib.materialize_span(words, span1)[-1]
  last_word_right = ddlib.materialize_span(words, span2)[-1]
  if (last_word_left == last_word_right):
    features.add("potential_last_name_match")

  for feature in features:
    print str(relation_id) + '\t' + feature
apache-2.0
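An illustrative input row for the extractor above: six tab-separated fields, with the word array joined by the '~^~' delimiter (tokens, id, and spans below are made up):

ARR_DELIM = '~^~'
words = ['Barack', 'Obama', 'married', 'Michelle', 'Obama']
row = '\t'.join([
    ARR_DELIM.join(words),  # parts[0]: the sentence tokens
    '42',                   # parts[1]: relation_id
    '0', '2',               # span 1: start word id, length
    '3', '2',               # span 2: start word id, length
])
print(row)  # feed lines like this to the script on stdin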
JT5D/Alfred-Popclip-Sublime
Sublime Text 2/Python PEP8 Autoformat/libs/lib2to3/fixes/fix_intern.py
7
1362
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for intern().

intern(s) -> sys.intern(s)"""

# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import


class FixIntern(fixer_base.BaseFix):

    PATTERN = """
    power< 'intern'
           trailer< lpar='('
                    ( not(arglist | argument<any '=' any>) obj=any
                      | obj=arglist<(not argument<any '=' any>) any ','> )
                    rpar=')' >
           after=any*
    >
    """

    def transform(self, node, results):
        syms = self.syms
        obj = results["obj"].clone()
        if obj.type == syms.arglist:
            newarglist = obj.clone()
        else:
            newarglist = pytree.Node(syms.arglist, [obj.clone()])
        after = results["after"]
        if after:
            after = [n.clone() for n in after]
        new = pytree.Node(syms.power,
                          Attr(Name(u"sys"), Name(u"intern")) +
                          [pytree.Node(syms.trailer,
                                       [results["lpar"].clone(),
                                        newarglist,
                                        results["rpar"].clone()])] + after)
        new.prefix = node.prefix
        touch_import(None, u'sys', node)
        return new
gpl-2.0
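The shape of the rewrite this fixer performs, as a before/after sketch (2to3 edits the source text; this is not a call into lib2to3):

# Before (Python 2):
#     intern(s)
# After (what the fixer emits, plus a sys import if one is missing):
import sys
s = sys.intern('example')  # the Python 3 spelling of the same call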
mrjefftang/psutil
docs/conf.py
16
7822
# -*- coding: utf-8 -*- # # psutil documentation build configuration file, created by # sphinx-quickstart. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import datetime import os PROJECT_NAME = "psutil" AUTHOR = "Giampaolo Rodola'" THIS_YEAR = str(datetime.datetime.now().year) HERE = os.path.abspath(os.path.dirname(__file__)) def get_version(): INIT = os.path.abspath(os.path.join(HERE, '../psutil/__init__.py')) with open(INIT, 'r') as f: for line in f: if line.startswith('__version__'): ret = eval(line.strip().split(' = ')[1]) assert ret.count('.') == 2, ret for num in ret.split('.'): assert num.isdigit(), ret return ret else: raise ValueError("couldn't find version string") VERSION = get_version() # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_template'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = PROJECT_NAME copyright = '2009-%s, %s' % (THIS_YEAR, AUTHOR) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = VERSION # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True autodoc_docstring_signature = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
html_theme = 'pydoctheme' html_theme_options = {'collapsiblesidebar': True} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_themes"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "{project} {version} documentation".format(**locals()) # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = 'logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { 'index': 'indexsidebar.html', '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = { # 'index': 'indexcontent.html', # } # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = '%s-doc' % PROJECT_NAME # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % PROJECT_NAME, '%s documentation' % PROJECT_NAME, AUTHOR), ] # The name of an image file (relative to this directory) to place at # the top of the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Additional stuff for the LaTeX preamble. 
# latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', PROJECT_NAME, '%s documentation' % PROJECT_NAME, [AUTHOR], 1) ] # If true, show URL addresses after external links. # man_show_urls = False
bsd-3-clause
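The get_version() idea above, reduced to a sketch: scrape __version__ out of a package's __init__.py without importing the package (the helper name and path handling are illustrative; this variant avoids eval by stripping quotes):

def scrape_version(init_path):
    with open(init_path) as f:
        for line in f:
            if line.startswith('__version__'):
                # e.g. __version__ = "3.0.1"  ->  "3.0.1"
                return line.split('=', 1)[1].strip().strip('\'"')
    raise ValueError("couldn't find version string")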
ychen820/microblog
y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/auth/management/__init__.py
126
2854
""" Creates permissions for all installed apps that need permissions. """ from django.contrib.auth import models as auth_app from django.db.models import get_models, signals def _get_permission_codename(action, opts): return u'%s_%s' % (action, opts.object_name.lower()) def _get_all_permissions(opts): "Returns (codename, name) for all permissions in the given opts." perms = [] for action in ('add', 'change', 'delete'): perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw))) return perms + list(opts.permissions) def create_permissions(app, created_models, verbosity, **kwargs): from django.contrib.contenttypes.models import ContentType app_models = get_models(app) # This will hold the permissions we're looking for as # (content_type, (codename, name)) searched_perms = list() # The codenames and ctypes that should exist. ctypes = set() for klass in app_models: ctype = ContentType.objects.get_for_model(klass) ctypes.add(ctype) for perm in _get_all_permissions(klass._meta): searched_perms.append((ctype, perm)) # Find all the Permissions that have a context_type for a model we're # looking for. We don't need to check for codenames since we already have # a list of the ones we're going to create. all_perms = set(auth_app.Permission.objects.filter( content_type__in=ctypes, ).values_list( "content_type", "codename" )) for ctype, (codename, name) in searched_perms: # If the permissions exists, move on. if (ctype.pk, codename) in all_perms: continue p = auth_app.Permission.objects.create( codename=codename, name=name, content_type=ctype ) if verbosity >= 2: print "Adding permission '%s'" % p def create_superuser(app, created_models, verbosity, **kwargs): from django.core.management import call_command if auth_app.User in created_models and kwargs.get('interactive', True): msg = ("\nYou just installed Django's auth system, which means you " "don't have any superusers defined.\nWould you like to create one " "now? (yes/no): ") confirm = raw_input(msg) while 1: if confirm not in ('yes', 'no'): confirm = raw_input('Please enter either "yes" or "no": ') continue if confirm == 'yes': call_command("createsuperuser", interactive=True) break signals.post_syncdb.connect(create_permissions, dispatch_uid = "django.contrib.auth.management.create_permissions") signals.post_syncdb.connect(create_superuser, sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
bsd-3-clause
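What the helpers above produce for a model, as a sketch: for a model whose object_name is Article, _get_all_permissions() yields one (codename, name) pair per default action (plain Python, no Django required to see the shape):

for action in ('add', 'change', 'delete'):
    codename = '%s_%s' % (action, 'article')   # _get_permission_codename
    name = 'Can %s %s' % (action, 'article')
    print(codename, '->', name)
# add_article, change_article, delete_article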
gimite/personfinder
app/vendors/xlrd/compdoc.py
27
21226
# -*- coding: cp1252 -*- ## # Implements the minimal functionality required # to extract a "Workbook" or "Book" stream (as one big string) # from an OLE2 Compound Document file. # <p>Copyright � 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p> # <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p> ## # No part of the content of this file was derived from the works of David Giffin. # 2008-11-04 SJM Avoid assertion error when -1 used instead of -2 for first_SID of empty SCSS [Frank Hoffsuemmer] # 2007-09-08 SJM Warning message if sector sizes are extremely large. # 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted. # 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms. from __future__ import print_function import sys from struct import unpack from .timemachine import * import array ## # Magic cookie that should appear in the first 8 bytes of the file. SIGNATURE = b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1" EOCSID = -2 FREESID = -1 SATSID = -3 MSATSID = -4 EVILSID = -5 class CompDocError(Exception): pass class DirNode(object): def __init__(self, DID, dent, DEBUG=0, logfile=sys.stdout): # dent is the 128-byte directory entry self.DID = DID self.logfile = logfile (cbufsize, self.etype, self.colour, self.left_DID, self.right_DID, self.root_DID) = \ unpack('<HBBiii', dent[64:80]) (self.first_SID, self.tot_size) = \ unpack('<ii', dent[116:124]) if cbufsize == 0: self.name = UNICODE_LITERAL('') else: self.name = unicode(dent[0:cbufsize-2], 'utf_16_le') # omit the trailing U+0000 self.children = [] # filled in later self.parent = -1 # indicates orphan; fixed up later self.tsinfo = unpack('<IIII', dent[100:116]) if DEBUG: self.dump(DEBUG) def dump(self, DEBUG=1): fprintf( self.logfile, "DID=%d name=%r etype=%d DIDs(left=%d right=%d root=%d parent=%d kids=%r) first_SID=%d tot_size=%d\n", self.DID, self.name, self.etype, self.left_DID, self.right_DID, self.root_DID, self.parent, self.children, self.first_SID, self.tot_size ) if DEBUG == 2: # cre_lo, cre_hi, mod_lo, mod_hi = tsinfo print("timestamp info", self.tsinfo, file=self.logfile) def _build_family_tree(dirlist, parent_DID, child_DID): if child_DID < 0: return _build_family_tree(dirlist, parent_DID, dirlist[child_DID].left_DID) dirlist[parent_DID].children.append(child_DID) dirlist[child_DID].parent = parent_DID _build_family_tree(dirlist, parent_DID, dirlist[child_DID].right_DID) if dirlist[child_DID].etype == 1: # storage _build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID) ## # Compound document handler. # @param mem The raw contents of the file, as a string, or as an mmap.mmap() object. The # only operation it needs to support is slicing. class CompDoc(object): def __init__(self, mem, logfile=sys.stdout, DEBUG=0): self.logfile = logfile self.DEBUG = DEBUG if mem[0:8] != SIGNATURE: raise CompDocError('Not an OLE2 compound document') if mem[28:30] != b'\xFE\xFF': raise CompDocError('Expected "little-endian" marker, found %r' % mem[28:30]) revision, version = unpack('<HH', mem[24:28]) if DEBUG: print("\nCompDoc format: version=0x%04x revision=0x%04x" % (version, revision), file=logfile) self.mem = mem ssz, sssz = unpack('<HH', mem[30:34]) if ssz > 20: # allows for 2**20 bytes i.e. 1MB print("WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." 
                % ssz, file=logfile)
            ssz = 9
        if sssz > ssz:
            print("WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." \
                % sssz, file=logfile)
            sssz = 6
        self.sec_size = sec_size = 1 << ssz
        self.short_sec_size = 1 << sssz
        if self.sec_size != 512 or self.short_sec_size != 64:
            print("@@@@ sec_size=%d short_sec_size=%d" % (self.sec_size, self.short_sec_size), file=logfile)
        (
            SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream,
            SSAT_first_sec_sid, SSAT_tot_secs,
            MSATX_first_sec_sid, MSATX_tot_secs,
        # ) = unpack('<ii4xiiiii', mem[44:76])
        ) = unpack('<iiiiiiii', mem[44:76])
        mem_data_len = len(mem) - 512
        mem_data_secs, left_over = divmod(mem_data_len, sec_size)
        if left_over:
            #### raise CompDocError("Not a whole number of sectors")
            mem_data_secs += 1
            print("WARNING *** file size (%d) not 512 + multiple of sector size (%d)" \
                % (len(mem), sec_size), file=logfile)
        self.mem_data_secs = mem_data_secs # use for checking later
        self.mem_data_len = mem_data_len
        seen = self.seen = array.array('B', [0]) * mem_data_secs
        if DEBUG:
            print('sec sizes', ssz, sssz, sec_size, self.short_sec_size, file=logfile)
            print("mem data: %d bytes == %d sectors" % (mem_data_len, mem_data_secs), file=logfile)
            print("SAT_tot_secs=%d, dir_first_sec_sid=%d, min_size_std_stream=%d" \
                % (SAT_tot_secs, self.dir_first_sec_sid, self.min_size_std_stream,), file=logfile)
            print("SSAT_first_sec_sid=%d, SSAT_tot_secs=%d" % (SSAT_first_sec_sid, SSAT_tot_secs,), file=logfile)
            print("MSATX_first_sec_sid=%d, MSATX_tot_secs=%d" % (MSATX_first_sec_sid, MSATX_tot_secs,), file=logfile)
        nent = sec_size // 4 # number of SID entries in a sector
        fmt = "<%di" % nent
        trunc_warned = 0
        #
        # === build the MSAT ===
        #
        MSAT = list(unpack('<109i', mem[76:512]))
        SAT_sectors_reqd = (mem_data_secs + nent - 1) // nent
        expected_MSATX_sectors = max(0, (SAT_sectors_reqd - 109 + nent - 2) // (nent - 1))
        actual_MSATX_sectors = 0
        if MSATX_tot_secs == 0 and MSATX_first_sec_sid in (EOCSID, FREESID, 0):
            # Strictly, if there is no MSAT extension, then MSATX_first_sec_sid
            # should be set to EOCSID ... FREESID and 0 have been met in the wild.
            pass # Presuming no extension
        else:
            sid = MSATX_first_sec_sid
            while sid not in (EOCSID, FREESID, MSATSID):
                # Above should be only EOCSID according to MS & OOo docs
                # but Excel doesn't complain about FREESID. Zero is a valid
                # sector number, not a sentinel.
                if DEBUG > 1:
                    print('MSATX: sid=%d (0x%08X)' % (sid, sid), file=logfile)
                if sid >= mem_data_secs:
                    msg = "MSAT extension: accessing sector %d but only %d in file" % (sid, mem_data_secs)
                    if DEBUG > 1:
                        print(msg, file=logfile)
                        break
                    raise CompDocError(msg)
                elif sid < 0:
                    raise CompDocError("MSAT extension: invalid sector id: %d" % sid)
                if seen[sid]:
                    raise CompDocError("MSAT corruption: seen[%d] == %d" % (sid, seen[sid]))
                seen[sid] = 1
                actual_MSATX_sectors += 1
                if DEBUG and actual_MSATX_sectors > expected_MSATX_sectors:
                    print("[1]===>>>", mem_data_secs, nent, SAT_sectors_reqd,
                        expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
                offset = 512 + sec_size * sid
                MSAT.extend(unpack(fmt, mem[offset:offset+sec_size]))
                sid = MSAT.pop() # last sector id is sid of next sector in the chain
            if DEBUG and actual_MSATX_sectors != expected_MSATX_sectors:
                print("[2]===>>>", mem_data_secs, nent, SAT_sectors_reqd,
                    expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
        if DEBUG:
            print("MSAT: len =", len(MSAT), file=logfile)
            dump_list(MSAT, 10, logfile)
        #
        # === build the SAT ===
        #
        self.SAT = []
        actual_SAT_sectors = 0
        dump_again = 0
        for msidx in xrange(len(MSAT)):
            msid = MSAT[msidx]
            if msid in (FREESID, EOCSID):
                # Specification: the MSAT array may be padded with trailing FREESID entries.
                # Toleration: a FREESID or EOCSID entry anywhere in the MSAT array will be ignored.
                continue
            if msid >= mem_data_secs:
                if not trunc_warned:
                    print("WARNING *** File is truncated, or OLE2 MSAT is corrupt!!", file=logfile)
                    print("INFO: Trying to access sector %d but only %d available" \
                        % (msid, mem_data_secs), file=logfile)
                    trunc_warned = 1
                MSAT[msidx] = EVILSID
                dump_again = 1
                continue
            elif msid < -2:
                raise CompDocError("MSAT: invalid sector id: %d" % msid)
            if seen[msid]:
                raise CompDocError("MSAT extension corruption: seen[%d] == %d" % (msid, seen[msid]))
            seen[msid] = 2
            actual_SAT_sectors += 1
            if DEBUG and actual_SAT_sectors > SAT_sectors_reqd:
                print("[3]===>>>", mem_data_secs, nent, SAT_sectors_reqd,
                    expected_MSATX_sectors, actual_MSATX_sectors, actual_SAT_sectors, msid, file=logfile)
            offset = 512 + sec_size * msid
            self.SAT.extend(unpack(fmt, mem[offset:offset+sec_size]))
        if DEBUG:
            print("SAT: len =", len(self.SAT), file=logfile)
            dump_list(self.SAT, 10, logfile)
            # print >> logfile, "SAT ",
            # for i, s in enumerate(self.SAT):
            #     print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + sec_size * i, s)
            #     print >> logfile, "%d:%d " % (i, s),
            print(file=logfile)
        if DEBUG and dump_again:
            print("MSAT: len =", len(MSAT), file=logfile)
            dump_list(MSAT, 10, logfile)
            for satx in xrange(mem_data_secs, len(self.SAT)):
                self.SAT[satx] = EVILSID
            print("SAT: len =", len(self.SAT), file=logfile)
            dump_list(self.SAT, 10, logfile)
        #
        # === build the directory ===
        #
        dbytes = self._get_stream(
            self.mem, 512, self.SAT, self.sec_size, self.dir_first_sec_sid,
            name="directory", seen_id=3)
        dirlist = []
        did = -1
        for pos in xrange(0, len(dbytes), 128):
            did += 1
            dirlist.append(DirNode(did, dbytes[pos:pos+128], 0, logfile))
        self.dirlist = dirlist
        _build_family_tree(dirlist, 0, dirlist[0].root_DID) # and stand well back ...
        if DEBUG:
            for d in dirlist:
                d.dump(DEBUG)
        #
        # === get the SSCS ===
        #
        sscs_dir = self.dirlist[0]
        assert sscs_dir.etype == 5 # root entry
        if sscs_dir.first_SID < 0 or sscs_dir.tot_size == 0:
            # Problem reported by Frank Hoffsuemmer: some software was
            # writing -1 instead of -2 (EOCSID) for the first_SID
            # when the SCCS was empty. Not having EOCSID caused assertion
            # failure in _get_stream.
            # Solution: avoid calling _get_stream in any case when the
            # SCSS appears to be empty.
            self.SSCS = ""
        else:
            self.SSCS = self._get_stream(
                self.mem, 512, self.SAT, sec_size, sscs_dir.first_SID,
                sscs_dir.tot_size, name="SSCS", seen_id=4)
        # if DEBUG: print >> logfile, "SSCS", repr(self.SSCS)
        #
        # === build the SSAT ===
        #
        self.SSAT = []
        if SSAT_tot_secs > 0 and sscs_dir.tot_size == 0:
            print("WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero", file=logfile)
        if sscs_dir.tot_size > 0:
            sid = SSAT_first_sec_sid
            nsecs = SSAT_tot_secs
            while sid >= 0 and nsecs > 0:
                if seen[sid]:
                    raise CompDocError("SSAT corruption: seen[%d] == %d" % (sid, seen[sid]))
                seen[sid] = 5
                nsecs -= 1
                start_pos = 512 + sid * sec_size
                news = list(unpack(fmt, mem[start_pos:start_pos+sec_size]))
                self.SSAT.extend(news)
                sid = self.SAT[sid]
            if DEBUG:
                print("SSAT last sid %d; remaining sectors %d" % (sid, nsecs), file=logfile)
            assert nsecs == 0 and sid == EOCSID
        if DEBUG:
            print("SSAT", file=logfile)
            dump_list(self.SSAT, 10, logfile)
        if DEBUG:
            print("seen", file=logfile)
            dump_list(seen, 20, logfile)

    def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name='', seen_id=None):
        # print >> self.logfile, "_get_stream", base, sec_size, start_sid, size
        sectors = []
        s = start_sid
        if size is None:
            # nothing to check against
            while s >= 0:
                if seen_id is not None:
                    if self.seen[s]:
                        raise CompDocError("%s corruption: seen[%d] == %d" % (name, s, self.seen[s]))
                    self.seen[s] = seen_id
                start_pos = base + s * sec_size
                sectors.append(mem[start_pos:start_pos+sec_size])
                try:
                    s = sat[s]
                except IndexError:
                    raise CompDocError(
                        "OLE2 stream %r: sector allocation table invalid entry (%d)" % (name, s)
                        )
            assert s == EOCSID
        else:
            todo = size
            while s >= 0:
                if seen_id is not None:
                    if self.seen[s]:
                        raise CompDocError("%s corruption: seen[%d] == %d" % (name, s, self.seen[s]))
                    self.seen[s] = seen_id
                start_pos = base + s * sec_size
                grab = sec_size
                if grab > todo:
                    grab = todo
                todo -= grab
                sectors.append(mem[start_pos:start_pos+grab])
                try:
                    s = sat[s]
                except IndexError:
                    raise CompDocError(
                        "OLE2 stream %r: sector allocation table invalid entry (%d)" % (name, s)
                        )
            assert s == EOCSID
            if todo != 0:
                fprintf(self.logfile,
                    "WARNING *** OLE2 stream %r: expected size %d, actual size %d\n",
                    name, size, size - todo)
        return b''.join(sectors)

    def _dir_search(self, path, storage_DID=0):
        # Return matching DirNode instance, or None
        head = path[0]
        tail = path[1:]
        dl = self.dirlist
        for child in dl[storage_DID].children:
            if dl[child].name.lower() == head.lower():
                et = dl[child].etype
                if et == 2:
                    return dl[child]
                if et == 1:
                    if not tail:
                        raise CompDocError("Requested component is a 'storage'")
                    return self._dir_search(tail, child)
                dl[child].dump(1)
                raise CompDocError("Requested stream is not a 'user stream'")
        return None

    ##
    # Interrogate the compound document's directory; return the stream as a string if found, otherwise
    # return None.
    # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.

    def get_named_stream(self, qname):
        d = self._dir_search(qname.split("/"))
        if d is None:
            return None
        if d.tot_size >= self.min_size_std_stream:
            return self._get_stream(
                self.mem, 512, self.SAT, self.sec_size, d.first_SID,
                d.tot_size, name=qname, seen_id=d.DID+6)
        else:
            return self._get_stream(
                self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
                d.tot_size, name=qname + " (from SSCS)", seen_id=None)

    ##
    # Interrogate the compound document's directory.
    # If the named stream is not found, (None, 0, 0) will be returned.
    # If the named stream is found and is contiguous within the original byte sequence ("mem")
    # used when the document was opened,
    # then (mem, offset_to_start_of_stream, length_of_stream) is returned.
    # Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned.
    # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.

    def locate_named_stream(self, qname):
        d = self._dir_search(qname.split("/"))
        if d is None:
            return (None, 0, 0)
        if d.tot_size > self.mem_data_len:
            raise CompDocError("%r stream length (%d bytes) > file data size (%d bytes)"
                % (qname, d.tot_size, self.mem_data_len))
        if d.tot_size >= self.min_size_std_stream:
            result = self._locate_stream(
                self.mem, 512, self.SAT, self.sec_size, d.first_SID,
                d.tot_size, qname, d.DID+6)
            if self.DEBUG:
                print("\nseen", file=self.logfile)
                dump_list(self.seen, 20, self.logfile)
            return result
        else:
            return (
                self._get_stream(
                    self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
                    d.tot_size, qname + " (from SSCS)", None),
                0,
                d.tot_size
                )

    def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_size, qname, seen_id):
        # print >> self.logfile, "_locate_stream", base, sec_size, start_sid, expected_stream_size
        s = start_sid
        if s < 0:
            raise CompDocError("_locate_stream: start_sid (%d) is -ve" % start_sid)
        p = -99 # dummy previous SID
        start_pos = -9999
        end_pos = -8888
        slices = []
        tot_found = 0
        found_limit = (expected_stream_size + sec_size - 1) // sec_size
        while s >= 0:
            if self.seen[s]:
                print("_locate_stream(%s): seen" % qname, file=self.logfile)
                dump_list(self.seen, 20, self.logfile)
                raise CompDocError("%s corruption: seen[%d] == %d" % (qname, s, self.seen[s]))
            self.seen[s] = seen_id
            tot_found += 1
            if tot_found > found_limit:
                raise CompDocError(
                    "%s: size exceeds expected %d bytes; corrupt?"
                    % (qname, found_limit * sec_size)
                    ) # Note: expected size rounded up to higher sector
            if s == p+1:
                # contiguous sectors
                end_pos += sec_size
            else:
                # start new slice
                if p >= 0:
                    # not first time
                    slices.append((start_pos, end_pos))
                start_pos = base + s * sec_size
                end_pos = start_pos + sec_size
            p = s
            s = sat[s]
        assert s == EOCSID
        assert tot_found == found_limit
        # print >> self.logfile, "_locate_stream(%s): seen" % qname; dump_list(self.seen, 20, self.logfile)
        if not slices:
            # The stream is contiguous ... just what we like!
            return (mem, start_pos, expected_stream_size)
        slices.append((start_pos, end_pos))
        # print >> self.logfile, "+++>>> %d fragments" % len(slices)
        return (b''.join([mem[start_pos:end_pos] for start_pos, end_pos in slices]),
            0, expected_stream_size)

# ==========================================================================================

def x_dump_line(alist, stride, f, dpos, equal=0):
    print("%5d%s" % (dpos, " ="[equal]), end=' ', file=f)
    for value in alist[dpos:dpos + stride]:
        print(str(value), end=' ', file=f)
    print(file=f)

def dump_list(alist, stride, f=sys.stdout):
    def _dump_line(dpos, equal=0):
        print("%5d%s" % (dpos, " ="[equal]), end=' ', file=f)
        for value in alist[dpos:dpos + stride]:
            print(str(value), end=' ', file=f)
        print(file=f)
    pos = None
    oldpos = None
    for pos in xrange(0, len(alist), stride):
        if oldpos is None:
            _dump_line(pos)
            oldpos = pos
        elif alist[pos:pos+stride] != alist[oldpos:oldpos+stride]:
            if pos - oldpos > stride:
                _dump_line(pos - stride, equal=1)
            _dump_line(pos)
            oldpos = pos
    if oldpos is not None and pos is not None and pos != oldpos:
        _dump_line(pos, equal=1)
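# --- Illustrative usage sketch (editor's addition, not part of xlrd) ---
# A hedged example of how CompDoc is typically driven; xlrd itself does this
# internally via open_workbook(). The command-line file name is hypothetical,
# and the keyword signature CompDoc(mem, logfile=...) is assumed from the
# constructor above.
if __name__ == '__main__':
    mem = open(sys.argv[1], 'rb').read()       # e.g. some_workbook.xls
    cd = CompDoc(mem, logfile=sys.stdout)      # parses header, MSAT/SAT/SSAT, directory
    biff = cd.get_named_stream(u'Workbook')    # raw BIFF bytes, or None if absent
    print('Workbook stream: %d bytes' % len(biff or b''))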
apache-2.0
CallaJun/hackprince
indico/numpy/distutils/tests/test_misc_util.py
69
3104
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function

from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, \
    gpaths, get_shared_lib_extension
from os.path import join, sep, dirname

ajoin = lambda *paths: join(*((sep,)+paths))

class TestAppendpath(TestCase):

    def test_1(self):
        assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
        assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
        assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
        assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))

    def test_2(self):
        assert_equal(appendpath('prefix/sub', 'name'),
                     join('prefix', 'sub', 'name'))
        assert_equal(appendpath('prefix/sub', 'sup/name'),
                     join('prefix', 'sub', 'sup', 'name'))
        assert_equal(appendpath('/prefix/sub', '/prefix/name'),
                     ajoin('prefix', 'sub', 'name'))

    def test_3(self):
        assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
                     ajoin('prefix', 'sub', 'sup', 'name'))
        assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
                     ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
        assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
                     ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))

class TestMinrelpath(TestCase):

    def test_1(self):
        n = lambda path: path.replace('/', sep)
        assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
        assert_equal(minrelpath('..'), '..')
        assert_equal(minrelpath(n('aa/..')), '')
        assert_equal(minrelpath(n('aa/../bb')), 'bb')
        assert_equal(minrelpath(n('aa/bb/..')), 'aa')
        assert_equal(minrelpath(n('aa/bb/../..')), '')
        assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
        assert_equal(minrelpath(n('.././..')), n('../..'))
        assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))

class TestGpaths(TestCase):

    def test_gpaths(self):
        local_path = minrelpath(join(dirname(__file__), '..'))
        ls = gpaths('command/*.py', local_path)
        assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
        f = gpaths('system_info.py', local_path)
        assert_(join(local_path, 'system_info.py') == f[0], repr(f))

class TestSharedExtension(TestCase):

    def test_get_shared_lib_extension(self):
        import sys
        ext = get_shared_lib_extension(is_python_ext=False)
        if sys.platform.startswith('linux'):
            assert_equal(ext, '.so')
        elif sys.platform.startswith('gnukfreebsd'):
            assert_equal(ext, '.so')
        elif sys.platform.startswith('darwin'):
            assert_equal(ext, '.dylib')
        elif sys.platform.startswith('win'):
            assert_equal(ext, '.dll')
        # just check for no crash
        assert_(get_shared_lib_extension(is_python_ext=True))

if __name__ == "__main__":
    run_module_suite()
lgpl-3.0
loco-odoo/localizacion_co
openerp/addons/email_template/wizard/__init__.py
446
1130
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2009 Sharoon Thomas
#    Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

import email_template_preview
import mail_compose_message

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Distrotech/pycairo
examples/cairo_snippets/snippets_pdf.py
9
1523
#!/usr/bin/env python
"""Python version of cairo-demo/cairo_snippets/cairo_snippets_pdf.c
create a file for each example rather than one large file for all examples
"""
from __future__ import division, print_function
from math import pi as M_PI  # used by many snippets
import sys

import cairo

if not cairo.HAS_PDF_SURFACE:
    raise SystemExit('cairo was not compiled with PDF support')

from snippets import snip_list, snippet_normalize

width_in_inches, height_in_inches = 2, 2
width_in_points, height_in_points = width_in_inches * 72, height_in_inches * 72
width, height = width_in_points, height_in_points  # used by snippet_normalize()


def do_snippet(snippet):
    if verbose_mode:
        print('processing %s' % snippet)

    filename = 'snippets/%s.pdf' % snippet
    surface = cairo.PDFSurface(filename, width_in_points, height_in_points)
    cr = cairo.Context(surface)

    cr.save()
    try:
        fName = 'snippets/%s.py' % snippet
        code = open(fName).read()
        exec(code, globals(), locals())
    except:
        exc_type, exc_value = sys.exc_info()[:2]
        print(exc_type, exc_value, file=sys.stderr)
    else:
        cr.restore()
        cr.show_page()
        surface.finish()

    if verbose_mode:
        print()


if __name__ == '__main__':
    verbose_mode = True
    if len(sys.argv) > 1 and sys.argv[1] == '-s':
        verbose_mode = False
        del sys.argv[1]

    if len(sys.argv) > 1:
        # do specified snippets
        snippet_list = sys.argv[1:]
    else:
        # do all snippets
        snippet_list = snip_list

    for s in snippet_list:
        do_snippet(s)
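# Editor's note (descriptive comments, inferred from the __main__ block above):
#   python snippets_pdf.py              -> render every snippet, verbosely
#   python snippets_pdf.py -s           -> render every snippet, silently
#   python snippets_pdf.py -s arc clip  -> render only the named snippets
# Each snippet 'name' is exec'd from snippets/name.py and written to
# snippets/name.pdf at 2x2 inches (144x144 points).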
gpl-3.0
cosmoharrigan/pylearn2
pylearn2/costs/gated_autoencoder.py
39
5793
""" Definitions of the cost for the gated-autoencoder. """ from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin from pylearn2.space import VectorSpace class SymmetricCost(DefaultDataSpecsMixin, Cost): """ Summary (Class representing the symmetric cost). Subclasses can define the type of data they will use. Mean reconstruction error is used for real valued data and cross-Entropy loss is used for binary. See Also -------- "Gradient-based learning of higher-order image features" """ @staticmethod def cost(x, y, rx, ry): """ Symmetric reconstruction cost. Parameters ---------- x : tensor_like Theano symbolic representing the first input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. y : tensor_like Theano symbolic representing the seconde input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. rx : tensor_like Reconstruction of the first minibatch by the model. ry: tensor_like Reconstruction of the second minibatch by the model. Returns ------- Cost: theano_like expression Representation of the cost """ raise NotImplementedError def expr(self, model, data, *args, **kwargs): """ Returns a theano expression for the cost function. Returns a symbolic expression for a cost function applied to the minibatch of data. Optionally, may return None. This represents that the cost function is intractable but may be optimized via the get_gradients method. Parameters ---------- model : a pylearn2 Model instance data : a batch in cost.get_data_specs() form kwargs : dict Optional extra arguments. Not used by the base class. """ self.get_data_specs(model)[0].validate(data) x, y = data input_space = model.get_input_space() if not isinstance(input_space.components[0], VectorSpace): conv = input_space.components[0] vec = VectorSpace(conv.get_total_dimension()) x = conv.format_as(x, vec) if not isinstance(input_space.components[1], VectorSpace): conv = input_space.components[1] vec = VectorSpace(conv.get_total_dimension()) y = conv.format_as(y, vec) rx, ry = model.reconstructXY((x, y)) return self.cost(x, y, rx, ry) class SymmetricMSRE(SymmetricCost): """ Summary (Symmetric cost for real valued data). See Also -------- "Gradient-based learning of higher-order image features" """ @staticmethod def cost(x, y, rx, ry): """ Summary (Definition of the cost). Mean squared reconstruction error. Parameters ---------- x : tensor_like Theano symbolic representing the first input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. y : tensor_like Theano symbolic representing the seconde input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. rx : tensor_like Reconstruction of the first minibatch by the model. ry: tensor_like Reconstruction of the second minibatch by the model. Returns ------- Cost: theano_like expression Representation of the cost Notes ----- Symmetric reconstruction cost as defined by Memisevic in: "Gradient-based learning of higher-order image features". This function only works with real valued data. """ return ( ((0.5*((x - rx)**2)) + (0.5*((y - ry)**2)))).sum(axis=1).mean() class NormalizedSymmetricMSRE(SymmetricCost): """ Summary (Normalized Symmetric cost for real valued data). Notes ----- Value used to observe the percentage of reconstruction. 
""" @staticmethod def cost(x, y, rx, ry): """ Summary (Definition of the cost). Normalized Mean squared reconstruction error. Values between 0 and 1. Parameters ---------- x : tensor_like Theano symbolic representing the first input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. y : tensor_like Theano symbolic representing the seconde input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. rx : tensor_like Reconstruction of the first minibatch by the model. ry: tensor_like Reconstruction of the second minibatch by the model. Returns ------- Cost: theano_like expression Representation of the cost Notes ----- Do not use this function to train, only to monitor the average percentage of reconstruction achieved when training on real valued data. """ num = (((0.5*((x - rx)**2)) + (0.5*((y - ry)**2)))).sum(axis=1).mean() den = ((0.5*(x.norm(2, 1)**2)) + (0.5*(y.norm(2, 1)**2))).mean() return num/den
bsd-3-clause
jaysonsantos/servo
tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_server.py
299
1320
import os
import unittest
import urllib2
import json

import wptserve
from base import TestUsingServer, doc_root


class TestFileHandler(TestUsingServer):
    def test_not_handled(self):
        with self.assertRaises(urllib2.HTTPError) as cm:
            resp = self.request("/not_existing")

        self.assertEquals(cm.exception.code, 404)


class TestRewriter(TestUsingServer):
    def test_rewrite(self):
        @wptserve.handlers.handler
        def handler(request, response):
            return request.request_path

        route = ("GET", "/test/rewritten", handler)
        self.server.rewriter.register("GET", "/test/original", route[1])
        self.server.router.register(*route)
        resp = self.request("/test/original")
        self.assertEquals(200, resp.getcode())
        self.assertEquals("/test/rewritten", resp.read())


class TestRequestHandler(TestUsingServer):
    def test_exception(self):
        @wptserve.handlers.handler
        def handler(request, response):
            raise Exception

        route = ("GET", "/test/raises", handler)
        self.server.router.register(*route)
        with self.assertRaises(urllib2.HTTPError) as cm:
            resp = self.request("/test/raises")

        self.assertEquals(cm.exception.code, 500)


if __name__ == "__main__":
    unittest.main()
mpl-2.0
Rudloff/youtube-dl
youtube_dl/extractor/expotv.py
4
2907
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
)


class ExpoTVIE(InfoExtractor):
    _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
    _TEST = {
        'url': 'http://www.expotv.com/videos/reviews/3/40/NYX-Butter-lipstick/667916',
        'md5': 'fe1d728c3a813ff78f595bc8b7a707a8',
        'info_dict': {
            'id': '667916',
            'ext': 'mp4',
            'title': 'NYX Butter Lipstick Little Susie',
            'description': 'Goes on like butter, but looks better!',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Stephanie S.',
            'upload_date': '20150520',
            'view_count': int,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        player_key = self._search_regex(
            r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
        config = self._download_json(
            'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
            video_id, 'Downloading video configuration')

        formats = []
        for fcfg in config['sources']:
            media_url = fcfg.get('file')
            if not media_url:
                continue
            if fcfg.get('type') == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls'))
            else:
                formats.append({
                    'url': media_url,
                    'height': int_or_none(fcfg.get('height')),
                    'format_id': fcfg.get('label'),
                    'ext': self._search_regex(
                        r'filename=.*\.([a-z0-9_A-Z]+)&', media_url,
                        'file extension', default=None) or fcfg.get('type'),
                })
        self._sort_formats(formats)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = config.get('image')
        view_count = int_or_none(self._search_regex(
            r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
        uploader = self._search_regex(
            r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage,
            'uploader', fatal=False)
        upload_date = unified_strdate(self._search_regex(
            r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage,
            'upload date', fatal=False), day_first=False)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'view_count': view_count,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
        }
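# Editor's sketch (not part of youtube-dl): a quick check that the
# _VALID_URL pattern captures the numeric id that _match_id() relies on,
# using the URL from _TEST. The relative imports above mean this module is
# normally imported as part of the youtube_dl package rather than run
# directly, so treat this mainly as documentation of the regex's behaviour.
if __name__ == '__main__':
    import re
    _m = re.match(r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])',
                  'http://www.expotv.com/videos/reviews/3/40/NYX-Butter-lipstick/667916')
    assert _m is not None and _m.group('id') == '667916'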
unlicense
thinksabin/lazy-devops
S3 bucket Maker/IdentityAccessManagement.py
1
2418
__author__ = 'gambit'

import boto
from boto.iam.connection import IAMConnection


class IdentityAccessManagement():
    admin_access_key = "XXXXXXXXXXXXXXXXXXXXXXX"
    admin_secret_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

    def create_user(self, s3_user):
        connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
        user = connect.get_all_users()
        users = user['list_users_response']['list_users_result']['users']
        for user in users:
            if s3_user in user['user_name']:
                return False
        connect.create_user(s3_user)
        return True

    def access_key(self, s3_user):
        connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
        key = connect.create_access_key(s3_user)
        access_key = key['create_access_key_response'][u'create_access_key_result'][u'access_key'][u'access_key_id']
        secret_key = key['create_access_key_response'][u'create_access_key_result'][u'access_key'][u'secret_access_key']
        return s3_user, access_key, secret_key

    def attach_policy(self, S3_User, bucket_name):
        # Grant list access to all buckets and full access to the named bucket.
        policy = '''{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": ["s3:ListAllMyBuckets"],
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::*"
                },
                {
                    "Action": "s3:*",
                    "Effect": "Allow",
                    "Resource": [
                        "arn:aws:s3:::%s*",
                        "arn:aws:s3:::%s*/*"
                    ]
                }
            ]
        }''' % (bucket_name, bucket_name)
        print policy

        # Attach policy to access the s3 bucket
        connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
        connect.put_user_policy(S3_User, bucket_name, policy)

    def create_s3_bucket(self, bucket_name):
        s3 = boto.connect_s3(self.admin_access_key, self.admin_secret_key)
        # Check every existing bucket before creating; the original code
        # returned after inspecting only the first bucket in the list.
        for bucket in s3.get_all_buckets():
            if bucket_name in bucket.name:
                return False
        s3.create_bucket(bucket_name)
        return True
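# Editor's illustrative driver (not part of the original module): the
# intended flow is create_user -> create_s3_bucket -> attach_policy ->
# access_key. The user and bucket names below are hypothetical.
if __name__ == '__main__':
    iam = IdentityAccessManagement()
    if iam.create_user('demo-user') and iam.create_s3_bucket('demo-bucket'):
        iam.attach_policy('demo-user', 'demo-bucket')
        credentials = iam.access_key('demo-user')  # (user, access_key, secret_key)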
apache-2.0
mahak/cinder
cinder/tests/unit/volume/drivers/test_kioxia.py
2
40143
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock from oslo_utils.secretutils import md5 from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.kioxia import entities from cinder.volume.drivers.kioxia import kumoscale as kioxia from cinder.volume.drivers.kioxia import rest_client VOL_BACKEND_NAME = 'kioxia_kumoscale_1' VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2' VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba' VOL_SIZE = 10 VOL_PROTOCOL = 'NVMeoF' SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77' CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da' CONN_HOST_NAME = 'devstack' CONN_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \ 'beaae2de-3a97-4be1-a739-6ac4bc5bf138' success_prov_response = entities.ProvisionerResponse(None, None, "Success", "Success") fail_prov_response = entities.ProvisionerResponse(None, None, "Failure", "Failure") prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1') prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2') prov_location1 = entities.Location(VOL_UUID, prov_backend1) prov_location2 = entities.Location(VOL_UUID, prov_backend2) prov_volume = entities.VolumeProv(VOL_UUID, None, None, None, None, None, None, None, None, None, None, True, None, [prov_location1, prov_location2]) prov_volumes_response = entities.ProvisionerResponse([prov_volume]) no_entities_prov_response = entities.ProvisionerResponse([], None, "Success") class KioxiaVolumeTestCase(test.TestCase): @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') @mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale') def setUp(self, mock_kumoscale, mock_get_info): mock_get_info.return_value = success_prov_response mock_kumoscale.return_value = \ rest_client.KioxiaProvisioner(['1.2.3.4'], 'cert', 'token') super(KioxiaVolumeTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_backend_name = VOL_BACKEND_NAME self.cfg.url = 'dummyURL' self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE' self.cfg.cafile = 'dummy' self.cfg.num_replicas = 1 self.cfg.block_size = 512 self.cfg.max_iops_per_gb = 1000 self.cfg.desired_iops_per_gb = 1000 self.cfg.max_bw_per_gb = 1000 self.cfg.desired_bw_per_gb = 1000 self.cfg.same_rack_allowed = False self.cfg.max_replica_down_time = 5 self.cfg.span_allowed = True self.cfg.vol_reserved_space_percentage = 20 self.cfg.provisioning_type = 'THIN' self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg) self.driver.configuration.get = lambda *args, **kwargs: {} self.driver.num_replicas = 2 self.expected_stats = { 'volume_backend_name': VOL_BACKEND_NAME, 'vendor_name': 'KIOXIA', 'driver_version': self.driver.VERSION, 'storage_protocol': 'NVMeOF', 'consistencygroup_support': False, 'thin_provisioning_support': True, 'multiattach': False, 'total_capacity_gb': 1000, 'free_capacity_gb': 600 } @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') def test_get_kumoscale(self, mock_get_info): 
mock_get_info.return_value = success_prov_response result = self.driver._get_kumoscale('https://1.2.3.4:8090', 'token', 'cert') self.assertEqual(result.mgmt_ips, ['1.2.3.4']) self.assertEqual(result.port, '8090') self.assertEqual(result.token, 'token') @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_success(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.return_value = success_prov_response result = self.driver.create_volume(testvol) args, kwargs = mock_create_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name'][:27]) self.assertEqual(mock_call.capacity, testvol['size']) self.assertEqual(mock_call.uuid, testvol['id']) self.assertEqual(mock_call.protocol, VOL_PROTOCOL) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_failure(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_exception(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_success(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.return_value = success_prov_response result = self.driver.delete_volume(testvol) mock_delete_volume.assert_any_call(testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_failure(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_exception(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target1 = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target1]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) result = self.driver.initialize_connection(testvol, testconn) mock_host_probe.assert_any_call(testconn['nqn'], testconn['uuid'], testconn['host'], 'Agent', 'cinder-driver-0.1', 30) 
mock_publish.assert_any_call(testconn['uuid'], testvol['id']) mock_get_volumes_by_uuid.assert_any_call(testvol['id']) mock_get_targets.assert_any_call(testconn['uuid'], testvol['id']) mock_get_backend_by_id.assert_any_call('dummy-pid-1') expected_replica = {'portals': [('1.2.3.4', '4420', 'TCP')], 'target_nqn': 'target.nqn', 'vol_uuid': testvol['id']} expected_data = { 'vol_uuid': testvol['id'], 'alias': testvol['name'], 'writable': True, 'volume_replicas': [expected_replica] } expected_result = { 'driver_volume_type': 'nvmeof', 'data': expected_data } self.assertDictEqual(result, expected_result) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_host_probe_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = fail_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_host_probe_exception( self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.side_effect = Exception() mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_publish_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = 
PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = fail_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_publish_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.side_effect = Exception() mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_volumes_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = fail_prov_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_volumes(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) 
prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = no_entities_prov_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_volumes_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.side_effect = Exception() mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_targets_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = fail_prov_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_targets(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = no_entities_prov_response mock_get_backend_by_id.return_value 
= \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_targets_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.side_effect = Exception() mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_backend_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_backend(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = no_entities_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') 
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_backend_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.return_value = success_prov_response result = self.driver.terminate_connection(testvol, testconn) mock_unpublish.assert_any_call(testconn['uuid'], testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection_unpublish_failure(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection_unpublish_exception(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats(self, mock_get_tenants): tenant = TenantEntity(1000, 400) mock_get_tenants.return_value = entities.ProvisionerResponse([tenant]) result = self.driver.get_volume_stats(True) mock_get_tenants.assert_any_call() self.assertDictEqual(result, self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_tenants_failure(self, mock_get_tenants): mock_get_tenants.return_value = fail_prov_response self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_no_tenants(self, mock_get_tenants): mock_get_tenants.return_value = no_entities_prov_response self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_tenants_exception(self, mock_get_tenants): mock_get_tenants.side_effect = Exception() self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_success(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.return_value = success_prov_response result = self.driver.create_snapshot(testsnap) args, kwargs 
= mock_create_snapshot.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testsnap['name']) self.assertEqual(mock_call.volumeID, testsnap['volume_id']) self.assertEqual(mock_call.snapshotID, testsnap['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_failure(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_exception(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_success(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.return_value = success_prov_response result = self.driver.delete_snapshot(testsnap) mock_delete_snapshot.assert_any_call(testsnap['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_failure(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_exception(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_success(self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.return_value = success_prov_response result = self.driver.create_volume_from_snapshot(testvol, testsnap) args, kwargs = mock_create_snapshot_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name']) self.assertEqual(mock_call.volumeID, testsnap['volume_id']) self.assertEqual(mock_call.snapshotID, testsnap['id']) self.assertEqual(mock_call.protocol, VOL_PROTOCOL) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_failure(self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_exception( self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_success(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.return_value = success_prov_response new_size = VOL_SIZE + 2 result = self.driver.extend_volume(testvol, new_size) 
mock_expand_volume.assert_any_call(new_size, testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_failure(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.return_value = fail_prov_response new_size = VOL_SIZE + 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, testvol, new_size) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_exception(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.side_effect = Exception() new_size = VOL_SIZE + 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, testvol, new_size) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_success(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.return_value = success_prov_response result = self.driver.create_cloned_volume(testvol, testvol) args, kwargs = mock_clone_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name']) self.assertEqual(mock_call.capacity, testvol['size']) self.assertEqual(mock_call.volumeId, testvol['id']) self.assertEqual(mock_call.sourceVolumeId, testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_failure(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, testvol, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_exception(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, testvol, testvol) def test_convert_host_name(self): name = 'ks-node3-000c2960a794-000c2960a797' result = self.driver._convert_host_name(name) expected = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() self.assertEqual(result, expected) def test_create_export(self): result = self.driver.create_export(None, None, None) self.assertIsNone(result) def test_ensure_export(self): result = self.driver.ensure_export(None, None) self.assertIsNone(result) def test_remove_export(self): result = self.driver.remove_export(None, None) self.assertIsNone(result) def test_check_for_setup_error(self): result = self.driver.check_for_setup_error() self.assertIsNone(result) def _stub_volume(*args, **kwargs): volume = {'id': kwargs.get('id', VOL_UUID), 'name': kwargs.get('name', VOL_NAME), 'project_id': "test-project", 'display_name': kwargs.get('display_name', VOL_NAME), 'size': kwargs.get('size', VOL_SIZE), 'provider_location': kwargs.get('provider_location', None), 'volume_type_id': kwargs.get('volume_type_id', None)} return volume def _stub_connector(*args, **kwargs): connector = {'uuid': kwargs.get('uuid', CONN_UUID), 'nqn': kwargs.get('nqn', CONN_NQN), 'host': kwargs.get('host', CONN_HOST_NAME)} return connector def _stub_snapshot(*args, **kwargs): volume = {'id': kwargs.get('id', SNAP_UUID), 'name': kwargs.get('name', 'snap2000'), 'volume_id': kwargs.get('id', VOL_UUID)} return volume class TenantEntity: def __init__(self, capacity, consumed): self.tenantId = '0' self.capacity = capacity self.consumedCapacity = consumed class TargetEntity: def __init__(self, name, backend): self.targetName = name self.backend = 
backend class BackendEntity: def __init__(self, portals): self.portals = portals class PortalEntity: def __init__(self, ip, port, transport): self.ip = ip self.port = port self.transport = transport if __name__ == '__main__': unittest.main()
apache-2.0
c-o-m-m-a-n-d-e-r/CouchPotatoServer
libs/caper/result.py
81
5904
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

from logr import Logr


GROUP_MATCHES = ['identifier']


class CaperNode(object):
    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type parent: CaperNode
        :type match: CaptureMatch
        """

        #: :type: caper.objects.CaperClosure
        self.closure = closure

        #: :type: CaperNode
        self.parent = parent

        #: :type: CaptureMatch
        self.match = match

        #: :type: list of CaptureGroup
        self.finished_groups = []

    def next(self):
        raise NotImplementedError()

    def captured(self):
        cur = self

        if cur.match:
            yield cur.match.tag, cur.match.result

        while cur.parent:
            cur = cur.parent

            if cur.match:
                yield cur.match.tag, cur.match.result


class CaperRootNode(CaperNode):
    def __init__(self, closure):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperRootNode, self).__init__(closure)

    def next(self):
        return self.closure


class CaperClosureNode(CaperNode):
    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperClosureNode, self).__init__(closure, parent, match)

    def next(self):
        if not self.closure:
            return None

        if self.match:
            # Jump to next closure if we have a match
            return self.closure.right
        elif len(self.closure.fragments) > 0:
            # Otherwise parse the fragments
            return self.closure.fragments[0]

        return None

    def __str__(self):
        return "<CaperClosureNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()


class CaperFragmentNode(CaperNode):
    def __init__(self, closure, fragments, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type fragments: list of caper.objects.CaperFragment
        """
        super(CaperFragmentNode, self).__init__(closure, parent, match)

        #: :type: caper.objects.CaperFragment or list of caper.objects.CaperFragment
        self.fragments = fragments

    def next(self):
        if len(self.fragments) > 0 and self.fragments[-1] and self.fragments[-1].right:
            return self.fragments[-1].right

        if self.closure.right:
            return self.closure.right

        return None

    def __str__(self):
        return "<CaperFragmentNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()


class CaperResult(object):
    def __init__(self):
        #: :type: list of CaperNode
        self.heads = []

        self.chains = []

    def build(self):
        max_matched = 0

        for head in self.heads:
            for chain in self.combine_chain(head):
                if chain.num_matched > max_matched:
                    max_matched = chain.num_matched

                self.chains.append(chain)

        for chain in self.chains:
            chain.weights.append(chain.num_matched / float(max_matched or chain.num_matched or 1))
            chain.finish()

        self.chains.sort(key=lambda chain: chain.weight, reverse=True)

        for chain in self.chains:
            Logr.debug("chain weight: %.02f", chain.weight)
            Logr.debug("\tInfo: %s", chain.info)
            Logr.debug("\tWeights: %s", chain.weights)
            Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched)

    def combine_chain(self, subject, chain=None):
        nodes = subject if type(subject) is list else [subject]

        if chain is None:
            chain = CaperResultChain()

        result = []

        for x, node in enumerate(nodes):
            node_chain = chain if x == len(nodes) - 1 else chain.copy()

            if not node.parent:
                result.append(node_chain)
                continue

            node_chain.update(node)
            result.extend(self.combine_chain(node.parent, node_chain))

        return result


class CaperResultChain(object):
    def __init__(self):
        #: :type: float
        self.weight = None
        self.info = {}
        self.num_matched = 0

        self.weights = []

    def update(self, subject):
        """
        :type subject: CaperFragmentNode
        """
        if not subject.match or not subject.match.success:
            return

        # TODO this should support closure nodes
        if type(subject) is CaperFragmentNode:
            self.num_matched += len(subject.fragments) if subject.fragments is not None else 0

        self.weights.append(subject.match.weight)

        if subject.match:
            if subject.match.tag not in self.info:
                self.info[subject.match.tag] = []

            self.info[subject.match.tag].insert(0, subject.match.result)

    def finish(self):
        self.weight = sum(self.weights) / len(self.weights)

    def copy(self):
        chain = CaperResultChain()
        chain.weight = self.weight

        chain.info = copy.deepcopy(self.info)
        chain.num_matched = self.num_matched
        chain.weights = copy.copy(self.weights)

        return chain
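
# A minimal usage sketch of CaperResultChain scoring, assuming only the
# classes defined above. `StubMatch` is a hypothetical stand-in for the real
# CaptureMatch type from caper.objects; it carries just the attributes that
# CaperResultChain.update() actually reads.
if __name__ == '__main__':
    from collections import namedtuple

    StubMatch = namedtuple('StubMatch', ['success', 'weight', 'tag', 'result'])

    chain = CaperResultChain()

    # Two matched fragment nodes: one identifier match, one video-tag match.
    for tag, result, weight in [('identifier', {'season': 1}, 1.0),
                                ('video', {'resolution': '720p'}, 0.8)]:
        node = CaperFragmentNode(closure=None, fragments=[object()],
                                 match=StubMatch(True, weight, tag, result))
        chain.update(node)

    chain.finish()
    print(chain.info)    # {'identifier': [{'season': 1}], 'video': [{'resolution': '720p'}]}
    print(chain.weight)  # mean of the match weights -> 0.9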
gpl-3.0
ToBeReplaced/ansible-modules-extras
notification/hall.py
142
3619
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Billy Kimble <basslines@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
    - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <basslines@gmail.com>
options:
  room_token:
    description:
      - "Room token provided to you by setting up the Ansible room integration on U(https://hall.com)"
    required: true
  msg:
    description:
      - The message you wish to deliver as a notification
    required: true
  title:
    description:
      - The title of the message
    required: true
  picture:
    description:
      - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
    required: false
"""

EXAMPLES = """
- name: Send Hall notification
  local_action:
    module: hall
    room_token: <hall room integration token>
    title: Nginx
    msg: Created virtual host file on {{ inventory_hostname }}

- name: Send Hall notification if EC2 servers were created.
  when: ec2.instances|length > 0
  local_action:
    module: hall
    room_token: <hall room integration token>
    title: Server Creation
    msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
  with_items: ec2.instances
"""

HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'


def send_request_to_hall(module, room_token, payload):
    headers = {'Content-Type': 'application/json'}
    payload = module.jsonify(payload)
    api_endpoint = HALL_API_ENDPOINT % (room_token)
    response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
    if info['status'] != 200:
        secure_url = HALL_API_ENDPOINT % ('[redacted]')
        module.fail_json(msg="failed to send %s to %s: %s" % (payload, secure_url, info['msg']))


def main():
    module = AnsibleModule(
        argument_spec=dict(
            room_token=dict(type='str', required=True),
            msg=dict(type='str', required=True),
            title=dict(type='str', required=True),
            picture=dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
        )
    )

    room_token = module.params['room_token']
    message = module.params['msg']
    title = module.params['title']
    picture = module.params['picture']
    payload = {'title': title, 'message': message, 'picture': picture}
    send_request_to_hall(module, room_token, payload)
    module.exit_json(msg="OK")


from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

main()
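
# --- Illustrative sketch of the request this module builds. Everything below
# is a hypothetical example, not part of the module's API; the token and field
# values are made up. Shown as comments because the module calls main() above
# and is only meant to execute under Ansible.
#
#     import json
#     room_token = 'abc123'                                   # hypothetical
#     payload = json.dumps({'title': 'Nginx',
#                           'message': 'Created virtual host file on web01',
#                           'picture': 'http://example.com/icon.png'})
#     api_endpoint = HALL_API_ENDPOINT % room_token
#     # POSTed with Content-Type: application/json to
#     # https://hall.com/api/1/services/generic/abc123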
gpl-3.0
misdoro/python-ase
ase/calculators/jacapo/utils/bader.py
2
6745
from __future__ import print_function

import os, tempfile, shutil
from subprocess import Popen

from ase.io import write
from ase.units import Bohr


class Bader:
    '''class for running bader analysis and extracting data from it.

    The class runs bader, extracts the charge density and outputs it
    to a cube file. Then you call different functions of the class to
    extract the charges, volumes, etc...

    ACF.dat contains the coordinates of each atom, the charge
    associated with it according to Bader partitioning, percentage of
    the whole according to Bader partitioning and the minimum distance
    to the surface. This distance should be compared to maximum
    cut-off radius for the core region if pseudo potentials have been
    used.

    BCF.dat contains the coordinates of each Bader maxima, the charge
    within that volume, the nearest atom and the distance to that
    atom.

    AtomVolumes.dat contains the number of each volume that has been
    assigned to each atom. These numbers correspond to the number of
    the BvAtxxxx.dat files.

    The options for the executable are::

        bader [ -c bader | voronoi ]
              [ -n bader | voronoi ]
              [ -b neargrid | ongrid ]
              [ -r refine_edge_iterations ]
              [ -ref reference_charge ]
              [ -p all_atom | all_bader ]
              [ -p sel_atom | sel_bader ] [volume list]
              [ -p atom_index | bader_index ]
              [ -i cube | chgcar ]
              [ -h ] [ -v ]
              chargefile

    References:

    G. Henkelman, A. Arnaldsson, and H. Jonsson,
    A fast and robust algorithm for Bader decomposition of charge density,
    Comput. Mater. Sci. 36, 354-360 (2006).

    E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman,
    An improved grid-based algorithm for Bader charge allocation,
    J. Comp. Chem. 28, 899-908 (2007).

    W. Tang, E. Sanville, and G. Henkelman,
    A grid-based Bader analysis algorithm without lattice bias,
    J. Phys.: Condens. Matter 21, 084204 (2009).
    '''

    def __init__(self, atoms):
        '''atoms is an Atoms object with an attached Jacapo calculator.'''
        self.atoms = atoms

        # get density and write cube file
        calc = atoms.get_calculator()
        ncfile = calc.get_nc()
        base, ext = os.path.splitext(ncfile)

        x, y, z, density = calc.get_charge_density()
        cubefile = base + '_charge_density.cube'
        self.densityfile = cubefile

        if not os.path.exists(cubefile):
            write(cubefile, atoms, data=density * Bohr ** 3)

        # cmd to run for bader analysis. check if output exists so we
        # don't run this too often.
        acf_file = base + '_ACF.dat'
        if not os.path.exists(acf_file):
            # mk tempdir
            tempdir = tempfile.mkdtemp()
            cwd = os.getcwd()

            abscubefile = os.path.abspath(cubefile)
            os.chdir(tempdir)

            cmd = 'bader %s' % abscubefile

            process = Popen(cmd, shell=True)
            status = process.wait()

            if status != 0:
                print(process)

            shutil.copy2('ACF.dat', os.path.join(cwd, acf_file))

            os.chdir(cwd)
            shutil.rmtree(tempdir)

        self.charges = []
        self.volumes = []

        # now parse the output
        f = open(acf_file, 'r')
        # skip 2 lines
        f.readline()
        f.readline()

        # fields per row: index, x, y, z, charge, min dist, volume
        for i, atom in enumerate(self.atoms):
            line = f.readline()
            fields = line.split()
            n = int(fields[0])
            x = float(fields[1])
            y = float(fields[2])
            z = float(fields[3])
            chg = float(fields[4])
            mindist = float(fields[5])
            vol = float(fields[6])

            self.charges.append(chg)
            self.volumes.append(vol)
        f.close()

    def get_bader_charges(self):
        return self.charges

    def get_bader_volumes(self):
        'return volumes in Ang**3'
        return [x * Bohr ** 3 for x in self.volumes]

    def write_atom_volume(self, atomlist):
        '''write bader atom volumes to cube files.

        atomlist = [0, 2]  # for example

        -p sel_atom Write the selected atomic volumes, read from the
        subsequent list of volumes.
        '''
        alist = ' '.join([str(x) for x in atomlist])
        cmd = 'bader -p sel_atom %s %s' % (alist, self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_bader_volume(self, atomlist):
        """write bader atom volumes to cube files.

        ::

          atomlist = [0, 2]  # for example

        -p sel_bader Write the selected Bader volumes, read from the
        subsequent list of volumes.
        """
        alist = ' '.join([str(x) for x in atomlist])
        cmd = 'bader -p sel_bader %s %s' % (alist, self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_atom_index(self):
        '''-p atom_index Write the atomic volume index to a charge
        density file.
        '''
        cmd = 'bader -p atom_index %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_bader_index(self):
        '''-p bader_index Write the Bader volume index to a charge
        density file.
        '''
        cmd = 'bader -p bader_index %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_all_atom(self):
        '''-p all_atom Combine all volumes associated with an atom and
        write to file. This is done for all atoms and written to files
        named BvAtxxxx.dat. The volumes associated with atoms are those
        for which the maximum in charge density within the volume is
        closest to the atom.
        '''
        cmd = 'bader -p all_atom %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_all_bader(self):
        '''-p all_bader Write all Bader volumes (containing charge
        above threshold of 0.0001) to a file. The charge distribution
        in each volume is written to a separate file, named
        Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
        file format, depending on the format of the initial charge
        density file. These files can be quite large, so this option
        should be used with caution.
        '''
        cmd = 'bader -p all_bader %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)


if __name__ == '__main__':
    from ase.calculators.jacapo import Jacapo

    atoms = Jacapo.read_atoms('ethylene.nc')
    b = Bader(atoms)
    print(b.get_bader_charges())
    print(b.get_bader_volumes())
    b.write_atom_volume([3, 4])
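
# A hypothetical standalone helper, not part of the original Bader API: it
# mirrors the ACF.dat parsing loop in Bader.__init__ above, to make the
# assumed column layout explicit.
def parse_acf_text(text, natoms):
    '''Parse ACF.dat-style text; return ([charges], [volumes]) for natoms atoms.

    Assumed columns, as read in Bader.__init__:
    index, x, y, z, charge, min dist, volume.
    '''
    lines = text.splitlines()[2:]  # skip the same two header lines
    charges, volumes = [], []
    for line in lines[:natoms]:
        fields = line.split()
        charges.append(float(fields[4]))  # column 5: Bader charge
        volumes.append(float(fields[6]))  # column 7: Bader volume
    return charges, volumes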
gpl-2.0
playm2mboy/edx-platform
lms/djangoapps/open_ended_grading/staff_grading_service.py
64
16269
""" This module provides views that proxy to the staff grading backend service. """ import json import logging from django.conf import settings from django.http import HttpResponse, Http404 from django.utils.translation import ugettext as _ from opaque_keys.edx.locations import SlashSeparatedCourseKey from xmodule.open_ended_grading_classes.grading_service_module import GradingService, GradingServiceError from courseware.access import has_access from edxmako.shortcuts import render_to_string from student.models import unique_id_for_user from open_ended_grading.utils import does_location_exist import dogstats_wrapper as dog_stats_api log = logging.getLogger(__name__) STAFF_ERROR_MESSAGE = _( u'Could not contact the external grading server. Please contact the ' u'development team at {email}.' ).format( email=u'<a href="mailto:{tech_support_email}>{tech_support_email}</a>'.format( tech_support_email=settings.TECH_SUPPORT_EMAIL ) ) MAX_ALLOWED_FEEDBACK_LENGTH = 5000 class MockStaffGradingService(object): """ A simple mockup of a staff grading service, testing. """ def __init__(self): self.cnt = 0 def get_next(self, course_id, location, grader_id): self.cnt += 1 return {'success': True, 'submission_id': self.cnt, 'submission': 'Test submission {cnt}'.format(cnt=self.cnt), 'num_graded': 3, 'min_for_ml': 5, 'num_pending': 4, 'prompt': 'This is a fake prompt', 'ml_error_info': 'ML info', 'max_score': 2 + self.cnt % 3, 'rubric': 'A rubric'} def get_problem_list(self, course_id, grader_id): self.cnt += 1 return { 'success': True, 'problem_list': [ json.dumps({ 'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10, }), json.dumps({ 'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10, }), ], } def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): return self.get_next(course_id, 'fake location', grader_id) class StaffGradingService(GradingService): """ Interface to staff grading backend. """ METRIC_NAME = 'edxapp.open_ended_grading.staff_grading_service' def __init__(self, config): config['render_template'] = render_to_string super(StaffGradingService, self).__init__(config) self.url = config['url'] + config['staff_grading'] self.login_url = self.url + '/login/' self.get_next_url = self.url + '/get_next_submission/' self.save_grade_url = self.url + '/save_grade/' self.get_problem_list_url = self.url + '/get_problem_list/' self.get_notifications_url = self.url + "/get_notifications/" def get_problem_list(self, course_id, grader_id): """ Get the list of problems for a given course. Args: course_id: course id that we want the problems of grader_id: who is grading this? The anonymous user_id of the grader. Returns: dict with the response from the service. (Deliberately not writing out the fields here--see the docs on the staff_grading view in the grading_controller repo) Raises: GradingServiceError: something went wrong with the connection. 
""" params = {'course_id': course_id.to_deprecated_string(), 'grader_id': grader_id} result = self.get(self.get_problem_list_url, params) tags = [u'course_id:{}'.format(course_id)] self._record_result('get_problem_list', result, tags) dog_stats_api.histogram( self._metric_name('get_problem_list.result.length'), len(result.get('problem_list', [])) ) return result def get_next(self, course_id, location, grader_id): """ Get the next thing to grade. Args: course_id: the course that this problem belongs to location: location of the problem that we are grading and would like the next submission for grader_id: who is grading this? The anonymous user_id of the grader. Returns: dict with the response from the service. (Deliberately not writing out the fields here--see the docs on the staff_grading view in the grading_controller repo) Raises: GradingServiceError: something went wrong with the connection. """ result = self._render_rubric( self.get( self.get_next_url, params={ 'location': location.to_deprecated_string(), 'grader_id': grader_id } ) ) tags = [u'course_id:{}'.format(course_id)] self._record_result('get_next', result, tags) return result def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): """ Save a score and feedback for a submission. Returns: dict with keys 'success': bool 'error': error msg, if something went wrong. Raises: GradingServiceError if there's a problem connecting. """ data = {'course_id': course_id.to_deprecated_string(), 'submission_id': submission_id, 'score': score, 'feedback': feedback, 'grader_id': grader_id, 'skipped': skipped, 'rubric_scores': rubric_scores, 'rubric_scores_complete': True, 'submission_flagged': submission_flagged} result = self._render_rubric(self.post(self.save_grade_url, data=data)) tags = [u'course_id:{}'.format(course_id)] self._record_result('save_grade', result, tags) return result def get_notifications(self, course_id): params = {'course_id': course_id.to_deprecated_string()} result = self.get(self.get_notifications_url, params) tags = [ u'course_id:{}'.format(course_id), u'staff_needs_to_grade:{}'.format(result.get('staff_needs_to_grade')) ] self._record_result('get_notifications', result, tags) return result # don't initialize until staff_grading_service() is called--means that just # importing this file doesn't create objects that may not have the right config _service = None def staff_grading_service(): """ Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True, returns a mock one, otherwise a real one. Caches the result, so changing the setting after the first call to this function will have no effect. """ global _service if _service is not None: return _service if settings.MOCK_STAFF_GRADING: _service = MockStaffGradingService() else: _service = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE) return _service def _err_response(msg): """ Return a HttpResponse with a json dump with success=False, and the given error message. """ return HttpResponse(json.dumps({'success': False, 'error': msg}), mimetype="application/json") def _check_access(user, course_id): """ Raise 404 if user doesn't have staff access to course_id """ if not has_access(user, 'staff', course_id): raise Http404 return def get_next(request, course_id): """ Get the next thing to grade for course_id and with the location specified in the request. 
Returns a json dict with the following keys: 'success': bool 'submission_id': a unique identifier for the submission, to be passed back with the grade. 'submission': the submission, rendered as read-only html for grading 'rubric': the rubric, also rendered as html. 'message': if there was no submission available, but nothing went wrong, there will be a message field. 'error': if success is False, will have an error message with more info. """ assert isinstance(course_id, basestring) course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) _check_access(request.user, course_key) required = set(['location']) if request.method != 'POST': raise Http404 actual = set(request.POST.keys()) missing = required - actual if len(missing) > 0: return _err_response('Missing required keys {0}'.format( ', '.join(missing))) grader_id = unique_id_for_user(request.user) p = request.POST location = course_key.make_usage_key_from_deprecated_string(p['location']) return HttpResponse(json.dumps(_get_next(course_key, grader_id, location)), mimetype="application/json") def get_problem_list(request, course_id): """ Get all the problems for the given course id Returns a json dict with the following keys: success: bool problem_list: a list containing json dicts with the following keys: each dict represents a different problem in the course location: the location of the problem problem_name: the name of the problem num_graded: the number of responses that have been graded num_pending: the number of responses that are sitting in the queue min_for_ml: the number of responses that need to be graded before the ml can be run 'error': if success is False, will have an error message with more info. """ assert isinstance(course_id, basestring) course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) _check_access(request.user, course_key) try: response = staff_grading_service().get_problem_list(course_key, unique_id_for_user(request.user)) # If 'problem_list' is in the response, then we got a list of problems from the ORA server. # If it is not, then ORA could not find any problems. if 'problem_list' in response: problem_list = response['problem_list'] else: problem_list = [] # Make an error messages to reflect that we could not find anything to grade. response['error'] = _( u'Cannot find any open response problems in this course. ' u'Have you submitted answers to any open response assessment questions? ' u'If not, please do so and return to this page.' ) valid_problem_list = [] for i in xrange(len(problem_list)): # Needed to ensure that the 'location' key can be accessed. try: problem_list[i] = json.loads(problem_list[i]) except Exception: pass if does_location_exist(course_key.make_usage_key_from_deprecated_string(problem_list[i]['location'])): valid_problem_list.append(problem_list[i]) response['problem_list'] = valid_problem_list response = json.dumps(response) return HttpResponse(response, mimetype="application/json") except GradingServiceError: #This is a dev_facing_error log.exception( "Error from staff grading service in open " "ended grading. 
server url: {0}".format(staff_grading_service().url) ) #This is a staff_facing_error return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE})) def _get_next(course_id, grader_id, location): """ Implementation of get_next (also called from save_grade) -- returns a json string """ try: return staff_grading_service().get_next(course_id, location, grader_id) except GradingServiceError: #This is a dev facing error log.exception( "Error from staff grading service in open " "ended grading. server url: {0}".format(staff_grading_service().url) ) #This is a staff_facing_error return json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE}) def save_grade(request, course_id): """ Save the grade and feedback for a submission, and, if all goes well, return the next thing to grade. Expects the following POST parameters: 'score': int 'feedback': string 'submission_id': int Returns the same thing as get_next, except that additional error messages are possible if something goes wrong with saving the grade. """ course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) _check_access(request.user, course_key) if request.method != 'POST': raise Http404 p = request.POST required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged']) skipped = 'skipped' in p #If the instructor has skipped grading the submission, then there will not be any rubric scores. #Only add in the rubric scores if the instructor has not skipped. if not skipped: required.add('rubric_scores[]') actual = set(p.keys()) missing = required - actual if len(missing) > 0: return _err_response('Missing required keys {0}'.format( ', '.join(missing))) success, message = check_feedback_length(p) if not success: return _err_response(message) grader_id = unique_id_for_user(request.user) location = course_key.make_usage_key_from_deprecated_string(p['location']) try: result = staff_grading_service().save_grade(course_key, grader_id, p['submission_id'], p['score'], p['feedback'], skipped, p.getlist('rubric_scores[]'), p['submission_flagged']) except GradingServiceError: #This is a dev_facing_error log.exception( "Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format( request, course_id)) #This is a staff_facing_error return _err_response(STAFF_ERROR_MESSAGE) except ValueError: #This is a dev_facing_error log.exception( "save_grade returned broken json in the staff grading interface in open ended grading: {0}".format( result_json)) #This is a staff_facing_error return _err_response(STAFF_ERROR_MESSAGE) if not result.get('success', False): #This is a dev_facing_error log.warning( 'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json)) return _err_response(STAFF_ERROR_MESSAGE) # Ok, save_grade seemed to work. Get the next submission to grade. return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)), mimetype="application/json") def check_feedback_length(data): feedback = data.get("feedback") if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH: return False, "Feedback is too long, Max length is {0} characters.".format( MAX_ALLOWED_FEEDBACK_LENGTH ) else: return True, ""
agpl-3.0
batxes/4c2vhic
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models29901.py
2
13923
import _surface import chimera try: import chimera.runCommand except: pass from VolumePath import markerset as ms try: from VolumePath import Marker_Set, Link new_marker_set=Marker_Set except: from VolumePath import volume_path_dialog d= volume_path_dialog(True) new_marker_set= d.new_marker_set marker_sets={} surf_sets={} if "particle_0 geometry" not in marker_sets: s=new_marker_set('particle_0 geometry') marker_sets["particle_0 geometry"]=s s= marker_sets["particle_0 geometry"] mark=s.place_marker((14969.5, 9355.22, 4884.95), (0.7, 0.7, 0.7), 507.685) if "particle_1 geometry" not in marker_sets: s=new_marker_set('particle_1 geometry') marker_sets["particle_1 geometry"]=s s= marker_sets["particle_1 geometry"] mark=s.place_marker((15857.7, 8984.62, 4617.48), (0.7, 0.7, 0.7), 479.978) if "particle_2 geometry" not in marker_sets: s=new_marker_set('particle_2 geometry') marker_sets["particle_2 geometry"]=s s= marker_sets["particle_2 geometry"] mark=s.place_marker((14048.8, 8305.7, 4500.15), (0.7, 0.7, 0.7), 681.834) if "particle_3 geometry" not in marker_sets: s=new_marker_set('particle_3 geometry') marker_sets["particle_3 geometry"]=s s= marker_sets["particle_3 geometry"] mark=s.place_marker((11860.6, 7514.14, 4365.92), (0.7, 0.7, 0.7), 522.532) if "particle_4 geometry" not in marker_sets: s=new_marker_set('particle_4 geometry') marker_sets["particle_4 geometry"]=s s= marker_sets["particle_4 geometry"] mark=s.place_marker((11168.8, 7253.91, 4353.22), (0, 1, 0), 751.925) if "particle_5 geometry" not in marker_sets: s=new_marker_set('particle_5 geometry') marker_sets["particle_5 geometry"]=s s= marker_sets["particle_5 geometry"] mark=s.place_marker((12710.6, 5841.18, 3924.41), (0.7, 0.7, 0.7), 437.001) if "particle_6 geometry" not in marker_sets: s=new_marker_set('particle_6 geometry') marker_sets["particle_6 geometry"]=s s= marker_sets["particle_6 geometry"] mark=s.place_marker((11163.9, 4708.08, 4115.8), (0.7, 0.7, 0.7), 710.767) if "particle_7 geometry" not in marker_sets: s=new_marker_set('particle_7 geometry') marker_sets["particle_7 geometry"]=s s= marker_sets["particle_7 geometry"] mark=s.place_marker((11220.3, 3116.23, 3445.88), (0.7, 0.7, 0.7), 762.077) if "particle_8 geometry" not in marker_sets: s=new_marker_set('particle_8 geometry') marker_sets["particle_8 geometry"]=s s= marker_sets["particle_8 geometry"] mark=s.place_marker((10024.2, 2278.2, 2811.32), (0.7, 0.7, 0.7), 726.799) if "particle_9 geometry" not in marker_sets: s=new_marker_set('particle_9 geometry') marker_sets["particle_9 geometry"]=s s= marker_sets["particle_9 geometry"] mark=s.place_marker((8565.6, 1179.39, 2598.93), (0.7, 0.7, 0.7), 885.508) if "particle_10 geometry" not in marker_sets: s=new_marker_set('particle_10 geometry') marker_sets["particle_10 geometry"]=s s= marker_sets["particle_10 geometry"] mark=s.place_marker((7241.9, 1843.25, 1632.03), (0.7, 0.7, 0.7), 778.489) if "particle_11 geometry" not in marker_sets: s=new_marker_set('particle_11 geometry') marker_sets["particle_11 geometry"]=s s= marker_sets["particle_11 geometry"] mark=s.place_marker((7011.72, 1012.49, -305.89), (0.7, 0.7, 0.7), 790.333) if "particle_12 geometry" not in marker_sets: s=new_marker_set('particle_12 geometry') marker_sets["particle_12 geometry"]=s s= marker_sets["particle_12 geometry"] mark=s.place_marker((6903.61, 98.5444, -2158.28), (0.7, 0.7, 0.7), 707.721) if "particle_13 geometry" not in marker_sets: s=new_marker_set('particle_13 geometry') marker_sets["particle_13 geometry"]=s s= marker_sets["particle_13 geometry"] 
mark=s.place_marker((8201.9, 1002.62, -1834.4), (0.7, 0.7, 0.7), 651.166) if "particle_14 geometry" not in marker_sets: s=new_marker_set('particle_14 geometry') marker_sets["particle_14 geometry"]=s s= marker_sets["particle_14 geometry"] mark=s.place_marker((7414.09, -129.02, -854.322), (0.7, 0.7, 0.7), 708.61) if "particle_15 geometry" not in marker_sets: s=new_marker_set('particle_15 geometry') marker_sets["particle_15 geometry"]=s s= marker_sets["particle_15 geometry"] mark=s.place_marker((7122.12, -299.946, 714.323), (0.7, 0.7, 0.7), 490.595) if "particle_16 geometry" not in marker_sets: s=new_marker_set('particle_16 geometry') marker_sets["particle_16 geometry"]=s s= marker_sets["particle_16 geometry"] mark=s.place_marker((7708.18, 230.276, 1947.9), (0.7, 0.7, 0.7), 591.565) if "particle_17 geometry" not in marker_sets: s=new_marker_set('particle_17 geometry') marker_sets["particle_17 geometry"]=s s= marker_sets["particle_17 geometry"] mark=s.place_marker((8140.74, 861.511, 3347.95), (0.7, 0.7, 0.7), 581.287) if "particle_18 geometry" not in marker_sets: s=new_marker_set('particle_18 geometry') marker_sets["particle_18 geometry"]=s s= marker_sets["particle_18 geometry"] mark=s.place_marker((9916.15, 691.375, 3641.12), (0.7, 0.7, 0.7), 789.529) if "particle_19 geometry" not in marker_sets: s=new_marker_set('particle_19 geometry') marker_sets["particle_19 geometry"]=s s= marker_sets["particle_19 geometry"] mark=s.place_marker((10028.7, 610.495, 5184.02), (0.7, 0.7, 0.7), 623.587) if "particle_20 geometry" not in marker_sets: s=new_marker_set('particle_20 geometry') marker_sets["particle_20 geometry"]=s s= marker_sets["particle_20 geometry"] mark=s.place_marker((9947.99, 101.655, 6937.75), (0.7, 0.7, 0.7), 1083.56) if "particle_21 geometry" not in marker_sets: s=new_marker_set('particle_21 geometry') marker_sets["particle_21 geometry"]=s s= marker_sets["particle_21 geometry"] mark=s.place_marker((10141.9, -1122.2, 8124.17), (0.7, 0.7, 0.7), 504.258) if "particle_22 geometry" not in marker_sets: s=new_marker_set('particle_22 geometry') marker_sets["particle_22 geometry"]=s s= marker_sets["particle_22 geometry"] mark=s.place_marker((9427.43, 62.891, 7740), (0.7, 0.7, 0.7), 805.519) if "particle_23 geometry" not in marker_sets: s=new_marker_set('particle_23 geometry') marker_sets["particle_23 geometry"]=s s= marker_sets["particle_23 geometry"] mark=s.place_marker((7792.4, 1113.02, 6928.08), (0.7, 0.7, 0.7), 631.708) if "particle_24 geometry" not in marker_sets: s=new_marker_set('particle_24 geometry') marker_sets["particle_24 geometry"]=s s= marker_sets["particle_24 geometry"] mark=s.place_marker((5839.87, 1426.25, 6096.11), (0.7, 0.7, 0.7), 805.942) if "particle_25 geometry" not in marker_sets: s=new_marker_set('particle_25 geometry') marker_sets["particle_25 geometry"]=s s= marker_sets["particle_25 geometry"] mark=s.place_marker((4866.74, 1500.33, 5695.34), (1, 0.7, 0), 672.697) if "particle_26 geometry" not in marker_sets: s=new_marker_set('particle_26 geometry') marker_sets["particle_26 geometry"]=s s= marker_sets["particle_26 geometry"] mark=s.place_marker((4653.24, 3936.53, 6840.6), (0.7, 0.7, 0.7), 797.863) if "particle_27 geometry" not in marker_sets: s=new_marker_set('particle_27 geometry') marker_sets["particle_27 geometry"]=s s= marker_sets["particle_27 geometry"] mark=s.place_marker((3660.54, 5240.38, 7746.49), (1, 0.7, 0), 735.682) if "particle_28 geometry" not in marker_sets: s=new_marker_set('particle_28 geometry') marker_sets["particle_28 geometry"]=s s= 
marker_sets["particle_28 geometry"] mark=s.place_marker((4273.05, 5438.27, 8823.37), (0.7, 0.7, 0.7), 602.14) if "particle_29 geometry" not in marker_sets: s=new_marker_set('particle_29 geometry') marker_sets["particle_29 geometry"]=s s= marker_sets["particle_29 geometry"] mark=s.place_marker((4960.15, 5382.51, 11055.5), (0.7, 0.7, 0.7), 954.796) if "particle_30 geometry" not in marker_sets: s=new_marker_set('particle_30 geometry') marker_sets["particle_30 geometry"]=s s= marker_sets["particle_30 geometry"] mark=s.place_marker((4538.04, 5433.08, 10570.7), (0.7, 0.7, 0.7), 1021.88) if "particle_31 geometry" not in marker_sets: s=new_marker_set('particle_31 geometry') marker_sets["particle_31 geometry"]=s s= marker_sets["particle_31 geometry"] mark=s.place_marker((4142.42, 6699.7, 10521.4), (0.7, 0.7, 0.7), 909.323) if "particle_32 geometry" not in marker_sets: s=new_marker_set('particle_32 geometry') marker_sets["particle_32 geometry"]=s s= marker_sets["particle_32 geometry"] mark=s.place_marker((3794.77, 8574.69, 11766.8), (0.7, 0.7, 0.7), 621.049) if "particle_33 geometry" not in marker_sets: s=new_marker_set('particle_33 geometry') marker_sets["particle_33 geometry"]=s s= marker_sets["particle_33 geometry"] mark=s.place_marker((4229.33, 9771.99, 11046.9), (0.7, 0.7, 0.7), 525.154) if "particle_34 geometry" not in marker_sets: s=new_marker_set('particle_34 geometry') marker_sets["particle_34 geometry"]=s s= marker_sets["particle_34 geometry"] mark=s.place_marker((5420.81, 10555.7, 10510.1), (0.7, 0.7, 0.7), 890.246) if "particle_35 geometry" not in marker_sets: s=new_marker_set('particle_35 geometry') marker_sets["particle_35 geometry"]=s s= marker_sets["particle_35 geometry"] mark=s.place_marker((6615.4, 11834.4, 10784.8), (0.7, 0.7, 0.7), 671.216) if "particle_36 geometry" not in marker_sets: s=new_marker_set('particle_36 geometry') marker_sets["particle_36 geometry"]=s s= marker_sets["particle_36 geometry"] mark=s.place_marker((8123.43, 12065.3, 11499), (0.7, 0.7, 0.7), 662.672) if "particle_37 geometry" not in marker_sets: s=new_marker_set('particle_37 geometry') marker_sets["particle_37 geometry"]=s s= marker_sets["particle_37 geometry"] mark=s.place_marker((8008.57, 10546.9, 12037.1), (0.7, 0.7, 0.7), 646.682) if "particle_38 geometry" not in marker_sets: s=new_marker_set('particle_38 geometry') marker_sets["particle_38 geometry"]=s s= marker_sets["particle_38 geometry"] mark=s.place_marker((6588.29, 10507.4, 12644.8), (0.7, 0.7, 0.7), 769.945) if "particle_39 geometry" not in marker_sets: s=new_marker_set('particle_39 geometry') marker_sets["particle_39 geometry"]=s s= marker_sets["particle_39 geometry"] mark=s.place_marker((5333.97, 9838.61, 11243.6), (0.7, 0.7, 0.7), 606.92) if "particle_40 geometry" not in marker_sets: s=new_marker_set('particle_40 geometry') marker_sets["particle_40 geometry"]=s s= marker_sets["particle_40 geometry"] mark=s.place_marker((4610.87, 10843.4, 11069.5), (0.7, 0.7, 0.7), 622.571) if "particle_41 geometry" not in marker_sets: s=new_marker_set('particle_41 geometry') marker_sets["particle_41 geometry"]=s s= marker_sets["particle_41 geometry"] mark=s.place_marker((5113.26, 9718.7, 10484.3), (0.7, 0.7, 0.7), 466.865) if "particle_42 geometry" not in marker_sets: s=new_marker_set('particle_42 geometry') marker_sets["particle_42 geometry"]=s s= marker_sets["particle_42 geometry"] mark=s.place_marker((5912.2, 10033, 10071.9), (0.7, 0.7, 0.7), 682.933) if "particle_43 geometry" not in marker_sets: s=new_marker_set('particle_43 geometry') 
marker_sets["particle_43 geometry"]=s s= marker_sets["particle_43 geometry"] mark=s.place_marker((5196.91, 9912.15, 10527.5), (0.7, 0.7, 0.7), 809.326) if "particle_44 geometry" not in marker_sets: s=new_marker_set('particle_44 geometry') marker_sets["particle_44 geometry"]=s s= marker_sets["particle_44 geometry"] mark=s.place_marker((4146.77, 8424.44, 10674.7), (0.7, 0.7, 0.7), 796.72) if "particle_45 geometry" not in marker_sets: s=new_marker_set('particle_45 geometry') marker_sets["particle_45 geometry"]=s s= marker_sets["particle_45 geometry"] mark=s.place_marker((3517.66, 6984.69, 8251.86), (0.7, 0.7, 0.7), 870.026) if "particle_46 geometry" not in marker_sets: s=new_marker_set('particle_46 geometry') marker_sets["particle_46 geometry"]=s s= marker_sets["particle_46 geometry"] mark=s.place_marker((2724.62, 7296.95, 6580.21), (0.7, 0.7, 0.7), 909.577) if "particle_47 geometry" not in marker_sets: s=new_marker_set('particle_47 geometry') marker_sets["particle_47 geometry"]=s s= marker_sets["particle_47 geometry"] mark=s.place_marker((2710.77, 7987.75, 5648.79), (0, 1, 0), 500.536) if "particle_48 geometry" not in marker_sets: s=new_marker_set('particle_48 geometry') marker_sets["particle_48 geometry"]=s s= marker_sets["particle_48 geometry"] mark=s.place_marker((1852.63, 9762.8, 5359.63), (0.7, 0.7, 0.7), 725.276) if "particle_49 geometry" not in marker_sets: s=new_marker_set('particle_49 geometry') marker_sets["particle_49 geometry"]=s s= marker_sets["particle_49 geometry"] mark=s.place_marker((41.5662, 11727.3, 5413.74), (0.7, 0.7, 0.7), 570.331) if "particle_50 geometry" not in marker_sets: s=new_marker_set('particle_50 geometry') marker_sets["particle_50 geometry"]=s s= marker_sets["particle_50 geometry"] mark=s.place_marker((412.316, 12008.6, 7020.04), (0.7, 0.7, 0.7), 492.203) if "particle_51 geometry" not in marker_sets: s=new_marker_set('particle_51 geometry') marker_sets["particle_51 geometry"]=s s= marker_sets["particle_51 geometry"] mark=s.place_marker((304.441, 9258.71, 7961.56), (0, 1, 0), 547.7) if "particle_52 geometry" not in marker_sets: s=new_marker_set('particle_52 geometry') marker_sets["particle_52 geometry"]=s s= marker_sets["particle_52 geometry"] mark=s.place_marker((1047.31, 9521.91, 7963.95), (0.7, 0.7, 0.7), 581.921) if "particle_53 geometry" not in marker_sets: s=new_marker_set('particle_53 geometry') marker_sets["particle_53 geometry"]=s s= marker_sets["particle_53 geometry"] mark=s.place_marker((1974.71, 10863.2, 8973.95), (0.7, 0.7, 0.7), 555.314) if "particle_54 geometry" not in marker_sets: s=new_marker_set('particle_54 geometry') marker_sets["particle_54 geometry"]=s s= marker_sets["particle_54 geometry"] mark=s.place_marker((3220.08, 11733.6, 9246.88), (0.7, 0.7, 0.7), 404.219) if "particle_55 geometry" not in marker_sets: s=new_marker_set('particle_55 geometry') marker_sets["particle_55 geometry"]=s s= marker_sets["particle_55 geometry"] mark=s.place_marker((4736.09, 11001.1, 8495.51), (0.7, 0.7, 0.7), 764.234) for k in surf_sets.keys(): chimera.openModels.add([surf_sets[k]])
gpl-3.0
aostapenko/manila
manila/scheduler/chance.py
2
2704
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Chance (Random) Scheduler implementation
"""

import random

from manila import exception
from manila.scheduler import driver

from oslo.config import cfg

CONF = cfg.CONF


class ChanceScheduler(driver.Scheduler):
    """Implements Scheduler as a random node selector."""

    def _filter_hosts(self, request_spec, hosts, **kwargs):
        """Filter a list of hosts based on request_spec."""
        filter_properties = kwargs.get('filter_properties', {})
        ignore_hosts = filter_properties.get('ignore_hosts', [])
        hosts = [host for host in hosts if host not in ignore_hosts]
        return hosts

    def _schedule(self, context, topic, request_spec, **kwargs):
        """Picks a host that is up at random."""
        elevated = context.elevated()

        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        hosts = self._filter_hosts(request_spec, hosts, **kwargs)
        if not hosts:
            msg = _("Could not find another host")
            raise exception.NoValidHost(reason=msg)

        return hosts[int(random.random() * len(hosts))]

    def schedule_create_share(self, context, request_spec, filter_properties):
        """Picks a host that is up at random."""
        topic = CONF.share_topic
        host = self._schedule(context, topic, request_spec,
                              filter_properties=filter_properties)
        share_id = request_spec['share_id']
        snapshot_id = request_spec['snapshot_id']

        updated_share = driver.share_update_db(context, share_id, host)
        self.share_rpcapi.create_share(context, updated_share, host,
                                       request_spec,
                                       filter_properties,
                                       snapshot_id)
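
if __name__ == '__main__':
    # A minimal sketch of the selection logic above using plain data; the
    # host names are illustrative and no manila services are involved.
    example_hosts = ['share-host-1', 'share-host-2', 'share-host-3']
    ignore_hosts = ['share-host-2']
    candidates = [h for h in example_hosts if h not in ignore_hosts]
    # Same pick expression as _schedule(): uniform over the remaining hosts.
    print(candidates[int(random.random() * len(candidates))])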
apache-2.0
wujuguang/sentry
src/sentry/migrations/0098_auto__add_user__chg_field_team_owner__chg_field_activity_user__chg_fie.py
36
28778
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models, connections class Migration(SchemaMigration): def forwards(self, orm): if 'auth_user' in connections['default'].introspection.table_names(): return self.create_auth(orm) def create_auth(self, orm): # Adding model 'User' db.create_table('auth_user', ( (u'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)), ('password', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)), ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)), ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)), ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)), ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)), ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), )) db.send_create_signal(u'auth', ['User']) # Adding M2M table for field groups on 'User' db.create_table('auth_user_groups', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)), ('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.group'], null=False)) )) db.create_unique('auth_user_groups', ['user_id', 'group_id']) # Adding M2M table for field user_permissions on 'User' db.create_table('auth_user_user_permissions', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)), ('permission', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.permission'], null=False)) )) db.create_unique('auth_user_user_permissions', ['user_id', 'permission_id']) def backwards(self, orm): pass models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': 
('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'sentry.accessgroup': { 'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'}, 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}) }, u'sentry.activity': { 'Meta': {'object_name': 'Activity'}, 'data': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}) }, u'sentry.alert': { 'Meta': {'object_name': 'Alert'}, 'data': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 
'message': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}), 'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, u'sentry.alertrelatedgroup': { 'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'}, 'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Alert']"}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, u'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, u'sentry.eventmapping': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}), u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}) }, u'sentry.group': { 'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'active_at': 
('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        u'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
        },
        u'sentry.groupcountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        u'sentry.grouptag': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'sentry.grouptagkey': {
            'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.lostpasswordhash': {
            'Meta': {'object_name': 'LostPasswordHash'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
        },
        u'sentry.option': {
            'Meta': {'object_name': 'Option'},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        u'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
        },
        u'sentry.project': {
            'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
        },
        u'sentry.projectcountbyminute': {
            'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
            'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
        },
        u'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        u'sentry.searchdocument': {
            'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        u'sentry.searchtoken': {
            'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
            'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
            'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'sentry.tagkey': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.tagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'sentry.team': {
            'Meta': {'object_name': 'Team'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
        },
        u'sentry.useroption': {
            'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        }
    }

    complete_apps = ['sentry']
bsd-3-clause
vanhonit/xmario_center
softwarecenter/ui/gtk3/widgets/description.py
4
47888
# Copyright (C) 2010 Matthew McGowan # # Authors: # Matthew McGowan # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from gi.repository import Gtk, Gdk from gi.repository import GObject from gi.repository import Pango from softwarecenter.utils import normalize_package_description from softwarecenter.ui.gtk3.drawing import color_to_hex from softwarecenter.ui.gtk3.utils import point_in _PS = Pango.SCALE class _SpecialCasePreParsers(object): def preparse(self, k, desc): if k is None: return desc func_name = '_%s_preparser' % k.lower().replace('-', '_') if not hasattr(self, func_name): return desc f = getattr(self, func_name) return f(desc) # special case pre-parsers def _skype_preparser(self, desc): return desc.replace('. *', '.\n*') def _texlive_fonts_extra_preparser(self, desc): return desc.replace(')\n', ').\n').replace('--\n', '--\n\n') class EventHelper(dict): # FIXME: workaround for broken event.copy() class ButtonEvent(object): def __init__(self, event): self.x = event.x self.y = event.y self.type = event.type self.button = event.button VALID_KEYS = ( 'event', 'layout', 'index', 'within-selection', 'drag-active', 'drag-context') def __init__(self): dict.__init__(self) self.new_press(None, None, None, False) def __setitem__(self, k, v): if k not in EventHelper.VALID_KEYS: raise KeyError('\"%s\" is not a valid key' % k) return False return dict.__setitem__(self, k, v) def new_press(self, event, layout, index, within_sel): if event is None: self['event'] = None else: # this should be simply event.copy() but that appears broken # currently(?) self['event'] = EventHelper.ButtonEvent(event) self['layout'] = layout self['index'] = index self['within-selection'] = within_sel self['drag-active'] = False self['drag-context'] = None class PangoLayoutProxy(object): """ Because i couldn't figure out how to inherit from pygi's Pango.Layout... 
""" def __init__(self, context): self._layout = Pango.Layout.new(context) def xy_to_index(self, x, y): return self._layout.xy_to_index(x, y) def index_to_pos(self, *args): return self._layout.index_to_pos(*args) # setter proxies def set_attributes(self, attrs): return self._layout.set_attributes(attrs) def set_markup(self, markup): return self._layout.set_markup(markup, -1) def set_font_description(self, font_desc): return self._layout.set_font_description(font_desc) def set_wrap(self, wrap_mode): return self._layout.set_wrap(wrap_mode) def set_width(self, width): return self._layout.set_width(width) # getter proxies def get_text(self): return self._layout.get_text() def get_pixel_extents(self): return self._layout.get_pixel_extents()[1] def get_cursor_pos(self, index): return self._layout.get_cursor_pos(index) def get_iter(self): return self._layout.get_iter() def get_extents(self): return self._layout.get_extents() class Layout(PangoLayoutProxy): def __init__(self, widget, text=""): PangoLayoutProxy.__init__(self, widget.get_pango_context()) self.widget = widget self.length = 0 self.indent = 0 self.vspacing = None self.is_bullet = False self.index = 0 self.allocation = Gdk.Rectangle() self._default_attrs = True self.set_markup(text) def __len__(self): return self.length def set_text(self, text): PangoLayoutProxy.set_markup(self, text) self.length = len(self.get_text()) def set_allocation(self, x, y, w, h): a = self.allocation a.x = x a.y = y a.width = w a.height = h def get_position(self): return self.allocation.x, self.allocation.y def cursor_up(self, cursor, target_x=-1): layout = self.widget.order[cursor.paragraph] pos = layout.index_to_pos(cursor.index) x, y = pos.x, pos.y if target_x >= 0: x = target_x y -= _PS * self.widget.line_height return layout.xy_to_index(x, y), (x, y) def cursor_down(self, cursor, target_x=-1): layout = self.widget.order[cursor.paragraph] pos = layout.index_to_pos(cursor.index) x, y = pos.x, pos.y if target_x >= 0: x = target_x y += _PS * self.widget.line_height return layout.xy_to_index(x, y), (x, y) def index_at(self, px, py): #wa = self.widget.get_allocation() x, y = self.get_position() # layout allocation (_, index, k) = self.xy_to_index((px - x) * _PS, (py - y) * _PS) return point_in(self.allocation, px, py), index + k def reset_attrs(self): #~ self.set_attributes(Pango.AttrList()) self.set_markup(self.get_text()) self._default_attrs = True def highlight(self, start, end, bg, fg): # FIXME: AttrBackground doesnt seem to be expose by gi yet?? #~ attrs = Pango.AttrList() #~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, start, #~ end)) #~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, start, #~ end)) #~ self.set_attributes(attrs) # XXX: workaround text = self.get_text() new_text = (text[:start] + '<span background="%s" foreground="%s">' % (bg, fg)) new_text += text[start:end] new_text += '</span>' + text[end:] self.set_markup(new_text) self._default_attrs = False def highlight_all(self, bg, fg): # FIXME: AttrBackground doesnt seem to be expose by gi yet?? #~ attrs = Pango.AttrList() #~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, 0, -1)) #~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, 0, -1)) #~ self.set_attributes(attrs) # XXX: workaround text = self.get_text() self.set_markup('<span background="%s" foreground="%s">%s</span>' % (bg, fg, text)) self._default_attrs = False class Cursor(object): WORD_TERMINATORS = (' ',) # empty space. suggestions recommended... 
def __init__(self, parent): self.parent = parent self.index = 0 self.paragraph = 0 def is_min(self, cursor): return self.get_position() <= cursor.get_position() def is_max(self, cursor): return self.get_position() >= cursor.get_position() def switch(self, cursor): this_pos = self.get_position() other_pos = cursor.get_position() self.set_position(*other_pos) cursor.set_position(*this_pos) def same_line(self, cursor): return self.get_current_line()[0] == cursor.get_current_line()[0] def get_current_line(self): keep_going = True i, it = self.index, self.parent.order[self.paragraph].get_iter() ln = 0 while keep_going: l = it.get_line() ls = l.start_index le = ls + l.length if i >= ls and i <= le: if not it.at_last_line(): le -= 1 return (self.paragraph, ln), (ls, le) ln += 1 keep_going = it.next_line() return None, None, None def get_current_word(self): keep_going = True layout = self.parent.order[self.paragraph] text = layout.get_text() i, it = self.index, layout.get_iter() start = 0 while keep_going: j = it.get_index() if j >= i and text[j] in self.WORD_TERMINATORS: return self.paragraph, (start, j) elif text[j] in self.WORD_TERMINATORS: start = j + 1 keep_going = it.next_char() return self.paragraph, (start, len(layout)) def set_position(self, paragraph, index): self.index = index self.paragraph = paragraph def get_position(self): return self.paragraph, self.index class PrimaryCursor(Cursor): def __init__(self, parent): Cursor.__init__(self, parent) def __repr__(self): return 'Cursor: ' + str((self.paragraph, self.index)) def get_rectangle(self, layout, a): if self.index < len(layout): pos = layout.get_cursor_pos(self.index)[1] else: pos = layout.get_cursor_pos(len(layout))[1] x = layout.allocation.x + pos.x / _PS y = layout.allocation.y + pos.y / _PS return x, y, 1, pos.height / _PS def draw(self, cr, layout, a): cr.set_source_rgb(0, 0, 0) cr.rectangle(*self.get_rectangle(layout, a)) cr.fill() def zero(self): self.index = 0 self.paragraph = 0 class SelectionCursor(Cursor): def __init__(self, cursor): Cursor.__init__(self, cursor.parent) self.cursor = cursor self.target_x = None self.target_x_indent = 0 self.restore_point = None def __repr__(self): return 'Selection: ' + str(self.get_range()) def __nonzero__(self): c = self.cursor return (self.paragraph, self.index) != (c.paragraph, c.index) @property def min(self): c = self.cursor return min((self.paragraph, self.index), (c.paragraph, c.index)) @property def max(self): c = self.cursor return max((self.paragraph, self.index), (c.paragraph, c.index)) def clear(self, key=None): self.index = self.cursor.index self.paragraph = self.cursor.paragraph self.restore_point = None if key not in (Gdk.KEY_uparrow, Gdk.KEY_downarrow): self.target_x = None self.target_x_indent = 0 def set_target_x(self, x, indent): self.target_x = x self.target_x_indent = indent def get_range(self): return self.min, self.max def within_selection(self, pos): l = list(self.get_range()) l.append(pos) l.sort() # sort the list, see if pos is in between the extents of the selection # range, if it is, pos is within the selection if pos in l: return l.index(pos) == 1 return False class TextBlock(Gtk.EventBox): PAINT_PRIMARY_CURSOR = False DEBUG_PAINT_BBOXES = False BULLET_POINT = u' \u2022 ' def __init__(self): Gtk.EventBox.__init__(self) self.set_visible_window(False) self.set_size_request(200, -1) self.set_can_focus(True) self.set_events(Gdk.EventMask.KEY_PRESS_MASK | Gdk.EventMask.ENTER_NOTIFY_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK | 
Gdk.EventMask.POINTER_MOTION_MASK) self._is_new = False self.order = [] self.cursor = cur = PrimaryCursor(self) self.selection = sel = SelectionCursor(self.cursor) self.clipboard = None #~ event_helper = EventHelper() self._update_cached_layouts() self._test_layout = self.create_pango_layout('') #self._xterm = Gdk.Cursor.new(Gdk.XTERM) # popup menu and menuitem's self.copy_menuitem = Gtk.ImageMenuItem.new_from_stock( Gtk.STOCK_COPY, None) self.select_all_menuitem = Gtk.ImageMenuItem.new_from_stock( Gtk.STOCK_SELECT_ALL, None) self.menu = Gtk.Menu() self.menu.attach_to_widget(self, None) self.menu.append(self.copy_menuitem) self.menu.append(self.select_all_menuitem) self.menu.show_all() self.copy_menuitem.connect('select', self._menu_do_copy, sel) self.select_all_menuitem.connect('select', self._menu_do_select_all, cur, sel) #~ Gtk.drag_source_set(self, Gdk.ModifierType.BUTTON1_MASK, #~ None, Gdk.DragAction.COPY) #~ Gtk.drag_source_add_text_targets(self) #~ self.connect('drag-begin', self._on_drag_begin) #~ self.connect('drag-data-get', self._on_drag_data_get, sel) event_helper = EventHelper() self.connect('button-press-event', self._on_press, event_helper, cur, sel) self.connect('button-release-event', self._on_release, event_helper, cur, sel) self.connect('motion-notify-event', self._on_motion, event_helper, cur, sel) self.connect('key-press-event', self._on_key_press, cur, sel) self.connect('key-release-event', self._on_key_release, cur, sel) self.connect('focus-in-event', self._on_focus_in) self.connect('focus-out-event', self._on_focus_out) self.connect("size-allocate", self.on_size_allocate) self.connect('style-updated', self._on_style_updated) def on_size_allocate(self, *args): allocation = self.get_allocation() width = allocation.width x = y = 0 for layout in self.order: layout.set_width(_PS * (width - layout.indent)) if layout.index > 0: y += (layout.vspacing or self.line_height) e = layout.get_pixel_extents() if self.get_direction() != Gtk.TextDirection.RTL: layout.set_allocation(e.x + layout.indent, y + e.y, width - layout.indent, e.height) else: layout.set_allocation(x + width - e.x - e.width - layout.indent - 1, y + e.y, width - layout.indent, e.height) y += e.y + e.height # overrides def do_get_request_mode(self): return Gtk.SizeRequestMode.HEIGHT_FOR_WIDTH def do_get_preferred_height_for_width(self, width): height = 0 layout = self._test_layout for l in self.order: layout.set_text(l.get_text(), -1) layout.set_width(_PS * (width - l.indent)) lh = layout.get_pixel_extents()[1].height height += lh + (l.vspacing or self.line_height) height = max(50, height) return height, height def do_draw(self, cr): self.render(self, cr) def _config_colors(self): context = self.get_style_context() context.save() context.add_class(Gtk.STYLE_CLASS_HIGHLIGHT) state = self.get_state_flags() if self.has_focus(): state |= Gtk.StateFlags.FOCUSED context.set_state(state) self._bg = color_to_hex(context.get_background_color(state)) self._fg = color_to_hex(context.get_color(state)) context.restore() def _on_style_updated(self, widget): self._config_colors() self._update_cached_layouts() # def _on_drag_begin(self, widgets, context, event_helper): # print 'drag: begin' def _on_drag_data_get(self, widget, context, selection, info, timestamp, sel): # print 'drag: get data' text = self.get_selected_text(sel) selection.set_text(text, -1) def _on_focus_in(self, widget, event): self._config_colors() def _on_focus_out(self, widget, event): self._config_colors() def _on_motion(self, widget, event, event_helper, 
cur, sel): if not (event.state == Gdk.ModifierType.BUTTON1_MASK): # or not self.has_focus(): return # check if we have moved enough to count as a drag press = event_helper['event'] # mvo: how can this be? if not press: return start_x, start_y = int(press.x), int(press.y) cur_x, cur_y = int(event.x), int(event.y) if (not event_helper['drag-active'] and self.drag_check_threshold(start_x, start_y, cur_x, cur_y)): event_helper['drag-active'] = True if not event_helper['drag-active']: return #~ if (event_helper['within-selection'] and #~ not event_helper['drag-context']): #~ target_list = Gtk.TargetList() #~ target_list.add_text_targets(80) #~ ctx = self.drag_begin(target_list, # target list #~ Gdk.DragAction.COPY, # action #~ 1, # initiating button #~ event) # event #~ #~ event_helper['drag-context'] = ctx #~ return for layout in self.order: point_in, index = layout.index_at(cur_x, cur_y) if point_in: cur.set_position(layout.index, index) self.queue_draw() break def _on_press(self, widget, event, event_helper, cur, sel): if sel and not self.has_focus(): self.grab_focus() return # spot the difference if not self.has_focus(): self.grab_focus() if event.button == 3: self._button3_action(cur, sel, event) return elif event.button != 1: return for layout in self.order: x, y = int(event.x), int(event.y) point_in, index = layout.index_at(x, y) if point_in: within_sel = False #~ within_sel = sel.within_selection((layout.index, index)) if not within_sel: cur.set_position(layout.index, index) sel.clear() #~ event_helper.new_press(event.copy(), layout, index, #~ within_sel) event_helper.new_press(event, layout, index, within_sel) break def _on_release(self, widget, event, event_helper, cur, sel): if not event_helper['event']: return # check if a drag occurred if event_helper['drag-active']: # if so, do not handle release return # else, handle release, do click cur.set_position(event_helper['layout'].index, event_helper['index']) sel.clear() press = event_helper['event'] if (press.type == Gdk.EventType._2BUTTON_PRESS): self._2click_select(cur, sel) elif (press.type == Gdk.EventType._3BUTTON_PRESS): self._3click_select(cur, sel) self.queue_draw() def _menu_do_copy(self, item, sel): self._copy_text(sel) def _menu_do_select_all(self, item, cur, sel): self._select_all(cur, sel) def _button3_action(self, cur, sel, event): start, end = sel.get_range() self.copy_menuitem.set_sensitive(True) self.select_all_menuitem.set_sensitive(True) if not sel: self.copy_menuitem.set_sensitive(False) elif start == (0, 0) and \ end == (len(self.order) - 1, len(self.order[-1])): self.select_all_menuitem.set_sensitive(False) self.menu.popup(None, # parent_menu_shell, None, # parent_menu_item, None, # GtkMenuPositionFunc func, None, # data, event.button, event.time) def _on_key_press(self, widget, event, cur, sel): kv = event.keyval s, i = cur.paragraph, cur.index handled_keys = True ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0 shift = (event.state & Gdk.ModifierType.SHIFT_MASK) > 0 if not self.PAINT_PRIMARY_CURSOR and \ kv in (Gdk.KEY_uparrow, Gdk.KEY_downarrow) and not sel: return False if kv == Gdk.KEY_Tab: handled_keys = False elif kv == Gdk.KEY_Left: if ctrl: self._select_left_word(cur, sel, s, i) else: self._select_left(cur, sel, s, i, shift) if shift: layout = self._get_cursor_layout() pos = layout.index_to_pos(cur.index) sel.set_target_x(pos.x, layout.indent) elif kv == Gdk.KEY_Right: if ctrl: self._select_right_word(cur, sel, s, i) else: self._select_right(cur, sel, s, i, shift) if shift: layout = 
self._get_cursor_layout() pos = layout.index_to_pos(cur.index) sel.set_target_x(pos.x, layout.indent) elif kv == Gdk.KEY_Up: if ctrl: if i == 0: if s > 0: cur.paragraph -= 1 cur.set_position(cur.paragraph, 0) elif sel and not shift: cur.set_position(*sel.min) else: self._select_up(cur, sel) elif kv == Gdk.KEY_Down: if ctrl: if i == len(self._get_layout(cur)): if s + 1 < len(self.order): cur.paragraph += 1 i = len(self._get_layout(cur)) cur.set_position(cur.paragraph, i) elif sel and not shift: cur.set_position(*sel.max) else: self._select_down(cur, sel) elif kv == Gdk.KEY_Home: if shift: self._select_home(cur, sel, self.order[cur.paragraph]) else: cur.set_position(0, 0) elif kv == Gdk.KEY_End: if shift: self._select_end(cur, sel, self.order[cur.paragraph]) else: cur.paragraph = len(self.order) - 1 cur.index = len(self._get_layout(cur)) else: handled_keys = False if not shift and handled_keys: sel.clear(kv) self.queue_draw() return handled_keys def _on_key_release(self, widget, event, cur, sel): ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0 if ctrl: if event.keyval == Gdk.KEY_a: self._select_all(cur, sel) elif event.keyval == Gdk.KEY_c: self._copy_text(sel) self.queue_draw() def _select_up(self, cur, sel): #~ if sel and not cur.is_min(sel) and cur.same_line(sel): #~ cur.switch(sel) s = cur.paragraph layout = self._get_layout(cur) if sel.target_x: x = sel.target_x if sel.target_x_indent: x += (sel.target_x_indent - layout.indent) * _PS (_, j, k), (x, y) = layout.cursor_up(cur, x) j += k else: (_, j, k), (x, y) = layout.cursor_up(cur) j += k sel.set_target_x(x, layout.indent) if (s, j) != cur.get_position(): cur.set_position(s, j) elif s > 0: cur.paragraph = s - 1 layout = self._get_layout(cur) if sel.target_x_indent: x += (sel.target_x_indent - layout.indent) * _PS y = layout.get_extents()[0].height (_, j, k) = layout.xy_to_index(x, y) cur.set_position(s - 1, j + k) else: return False return True def _select_down(self, cur, sel): #~ if sel and not cur.is_max(sel) and cur.same_line(sel): #~ cur.switch(sel) s = cur.paragraph layout = self._get_layout(cur) if sel.target_x: x = sel.target_x if sel.target_x_indent: x += (sel.target_x_indent - layout.indent) * _PS (_, j, k), (x, y) = layout.cursor_down(cur, x) j += k else: (_, j, k), (x, y) = layout.cursor_down(cur) j += k sel.set_target_x(x, layout.indent) if (s, j) != cur.get_position(): cur.set_position(s, j) elif s < len(self.order) - 1: cur.paragraph = s + 1 layout = self._get_layout(cur) if sel.target_x_indent: x += (sel.target_x_indent - layout.indent) * _PS y = 0 (_, j, k) = layout.xy_to_index(x, y) cur.set_position(s + 1, j + k) else: return False return True def _2click_select(self, cursor, sel): self._select_word(cursor, sel) def _3click_select(self, cursor, sel): # XXX: # _select_line seems to expose the following Pango issue: # (description.py:3892): Pango-CRITICAL **: # pango_layout_line_unref: assertion `private->ref_count > 0' # failed # ... 
which can result in a segfault #~ self._select_line(cursor, sel) self._select_all(cursor, sel) def _copy_text(self, sel): text = self.get_selected_text(sel) if not self.clipboard: display = Gdk.Display.get_default() selection = Gdk.Atom.intern("CLIPBOARD", False) self.clipboard = Gtk.Clipboard.get_for_display(display, selection) self.clipboard.clear() self.clipboard.set_text(text.strip(), -1) def _select_end(self, cur, sel, layout): if not cur.is_max(sel): cur.switch(sel) n, r, line = cur.get_current_line() cur_pos = cur.get_position() if cur_pos == (len(self.order) - 1, len(self.order[-1])): # abs end if sel.restore_point: # reinstate restore point cur.set_position(*sel.restore_point) else: # reselect the line end n, r, line = sel.get_current_line() cur.set_position(n[0], r[1]) elif cur_pos[1] == len(self.order[n[0]]): # para end # select abs end cur.set_position(len(self.order) - 1, len(self.order[-1])) elif cur_pos == (n[0], r[1]): # line end # select para end cur.set_position(n[0], len(self.order[n[0]])) else: # not at any end, within line somewhere # select line end if sel: sel.restore_point = cur_pos cur.set_position(n[0], r[1]) def _select_home(self, cur, sel, layout): if not cur.is_min(sel): cur.switch(sel) n, r, line = cur.get_current_line() cur_pos = cur.get_position() if cur_pos == (0, 0): # absolute home if sel.restore_point: cur.set_position(*sel.restore_point) else: n, r, line = sel.get_current_line() cur.set_position(n[0], r[0]) elif cur_pos[1] == 0: # para home cur.set_position(0, 0) elif cur_pos == (n[0], r[0]): # line home cur.set_position(n[0], 0) else: # not at any home, within line somewhere if sel: sel.restore_point = cur_pos cur.set_position(n[0], r[0]) def _select_left(self, cur, sel, s, i, shift): if not shift and not cur.is_min(sel): cur.switch(sel) return if i > 0: cur.set_position(s, i - 1) elif cur.paragraph > 0: cur.paragraph -= 1 cur.set_position(s - 1, len(self._get_layout(cur))) def _select_right(self, cur, sel, s, i, shift): if not shift and not cur.is_max(sel): cur.switch(sel) return if i < len(self._get_layout(cur)): cur.set_position(s, i + 1) elif s < len(self.order) - 1: cur.set_position(s + 1, 0) def _select_left_word(self, cur, sel, s, i): if i > 0: cur.index -= 1 elif s > 0: cur.paragraph -= 1 cur.index = len(self._get_layout(cur)) paragraph, word = cur.get_current_word() if not word: return cur.set_position(paragraph, max(0, word[0] - 1)) def _select_right_word(self, cur, sel, s, i): ll = len(self._get_layout(cur)) if i < ll: cur.index += 1 elif s + 1 < len(self.order): cur.paragraph += 1 cur.index = 0 paragraph, word = cur.get_current_word() if not word: return cur.set_position(paragraph, min(word[1] + 1, ll)) def _select_word(self, cursor, sel): paragraph, word = cursor.get_current_word() if word: cursor.set_position(paragraph, word[1] + 1) sel.set_position(paragraph, word[0]) if self.get_direction() == Gtk.TextDirection.RTL: cursor.switch(sel) def _select_line(self, cursor, sel): n, r = self.cursor.get_current_line() sel.set_position(n[0], r[0]) cursor.set_position(n[0], r[1]) if self.get_direction() == Gtk.TextDirection.RTL: cursor.switch(sel) def _select_all(self, cursor, sel): layout = self.order[-1] sel.set_position(0, 0) cursor.set_position(layout.index, len(layout)) if self.get_direction() == Gtk.TextDirection.RTL: cursor.switch(sel) def _selection_copy(self, layout, sel, new_para=True): i = layout.index start, end = sel.get_range() if new_para: text = '\n\n' else: text = '' if sel and i >= start[0] and i <= end[0]: if i == start[0]: if 
end[0] > i: return text + layout.get_text()[start[1]: len(layout)] else: return text + layout.get_text()[start[1]: end[1]] elif i == end[0]: if start[0] < i: return text + layout.get_text()[0: end[1]] else: return text + layout.get_text()[start[1]: end[1]] else: return text + layout.get_text() return '' def _new_layout(self, text=''): layout = Layout(self, text) layout.set_wrap(Pango.WrapMode.WORD_CHAR) return layout def _update_cached_layouts(self): self._bullet = self._new_layout() self._bullet.set_markup(self.BULLET_POINT) font_desc = Pango.FontDescription() font_desc.set_weight(Pango.Weight.BOLD) self._bullet.set_font_description(font_desc) e = self._bullet.get_pixel_extents() self.indent, self.line_height = e.width, e.height def _selection_highlight(self, layout, sel, bg, fg): i = layout.index start, end = sel.get_range() if sel and i >= start[0] and i <= end[0]: if i == start[0]: if end[0] > i: layout.highlight(start[1], len(layout), bg, fg) else: layout.highlight(start[1], end[1], bg, fg) elif i == end[0]: if start[0] < i: layout.highlight(0, end[1], bg, fg) else: layout.highlight(start[1], end[1], bg, fg) else: layout.highlight_all(bg, fg) elif not layout._default_attrs: layout.reset_attrs() def _paint_bullet_point(self, cr, x, y): # draw the layout Gtk.render_layout(self.get_style_context(), cr, # state x, # x coord y, # y coord self._bullet._layout) # a Pango.Layout() def _get_layout(self, cursor): return self.order[cursor.paragraph] def _get_cursor_layout(self): return self.order[self.cursor.paragraph] def _get_selection_layout(self): return self.order[self.selection.paragraph] def render(self, widget, cr): if not self.order: return a = self.get_allocation() for layout in self.order: lx, ly = layout.get_position() self._selection_highlight(layout, self.selection, self._bg, self._fg) if layout.is_bullet: if self.get_direction() != Gtk.TextDirection.RTL: indent = layout.indent - self.indent else: indent = a.width - layout.indent self._paint_bullet_point(cr, indent, ly) if self.DEBUG_PAINT_BBOXES: la = layout.allocation cr.rectangle(la.x, la.y, la.width, la.height) cr.set_source_rgb(1, 0, 0) cr.stroke() # draw the layout Gtk.render_layout(self.get_style_context(), cr, lx, # x coord ly, # y coord layout._layout) # a Pango.Layout() # draw the cursor if self.PAINT_PRIMARY_CURSOR and self.has_focus(): self.cursor.draw(cr, self._get_layout(self.cursor), a) def append_paragraph(self, p, vspacing=None): l = self._new_layout() l.index = len(self.order) l.vspacing = vspacing l.set_text(p) self.order.append(l) def append_bullet(self, point, indent_level, vspacing=None): l = self._new_layout() l.index = len(self.order) l.indent = self.indent * (indent_level + 1) l.vspacing = vspacing l.is_bullet = True l.set_text(point) self.order.append(l) def copy_clipboard(self): self._copy_text(self.selection) def get_selected_text(self, sel=None): text = '' if not sel: sel = self.selection for layout in self.order: text += self._selection_copy(layout, sel, (layout.index > 0)) return text def select_all(self): self._select_all(self.cursor, self.selection) self.queue_draw() def finished(self): self.queue_resize() def clear(self, key=None): self.cursor.zero() self.selection.clear(key) self.order = [] class AppDescription(Gtk.VBox): TYPE_PARAGRAPH = 0 TYPE_BULLET = 1 _preparser = _SpecialCasePreParsers() def __init__(self): Gtk.VBox.__init__(self) self.description = TextBlock() self.pack_start(self.description, False, False, 0) self._prev_type = None def _part_is_bullet(self, part): # normalize_description() 
ensures that we only have "* " bullets i = part.find("* ") return i > -1, i def _parse_desc(self, desc, pkgname): """ Attempt to maintain original fixed width layout, while reconstructing the description into text blocks (either paragraphs or bullets) which are line-wrap friendly. """ # pre-parse descrition if special case exists for the given pkgname desc = self._preparser.preparse(pkgname, desc) parts = normalize_package_description(desc).split('\n') for part in parts: if not part: continue is_bullet, indent = self._part_is_bullet(part) if is_bullet: self.append_bullet(part, indent) else: self.append_paragraph(part) self.description.finished() def clear(self): self.description.clear() def append_paragraph(self, p): vspacing = self.description.line_height self.description.append_paragraph(p.strip(), vspacing) self._prev_type = self.TYPE_PARAGRAPH def append_bullet(self, point, indent_level): if self._prev_type == self.TYPE_BULLET: vspacing = int(0.4 * self.description.line_height) else: vspacing = self.description.line_height self.description.append_bullet( point[indent_level + 2:], indent_level, vspacing) self._prev_type = self.TYPE_BULLET def set_description(self, raw_desc, pkgname): self.clear() if type(raw_desc) == str: encoded_desc = unicode(raw_desc, 'utf8').encode('utf8') else: encoded_desc = raw_desc.encode('utf8') self._text = GObject.markup_escape_text(encoded_desc) self._parse_desc(self._text, pkgname) self.show_all() # easy access to some TextBlock methods def copy_clipboard(self): return TextBlock.copy_clipboard(self.description) def get_selected_text(self): return TextBlock.get_selected_text(self.description) def select_all(self): return TextBlock.select_all(self.description) def get_test_description_window(): EXAMPLE0 = """p7zip is the Unix port of 7-Zip, a file archiver that \ archives with very high compression ratios. p7zip-full provides: - /usr/bin/7za a standalone version of the 7-zip tool that handles 7z archives (implementation of the LZMA compression algorithm) and some \ other formats. - /usr/bin/7z not only does it handle 7z but also ZIP, Zip64, CAB, RAR, \ ARJ, GZIP, BZIP2, TAR, CPIO, RPM, ISO and DEB archives. 7z compression is 30-50% \ better than ZIP compression. p7zip provides 7zr, a light version of 7za, and p7zip a gzip like wrapper \ around 7zr.""" EXAMPLE1 = """Transmageddon supports almost any format as its input and \ can generate a very large host of output files. The goal of the application \ was to help people to create the files they need to be able to play on their \ mobile devices and for people not hugely experienced with multimedia to \ generate a multimedia file without having to resort to command line tools \ with ungainly syntaxes. The currently supported codecs are: * Containers: - Ogg - Matroska - AVI - MPEG TS - flv - QuickTime - MPEG4 - 3GPP - MXT * Audio encoders: - Vorbis - FLAC - MP3 - AAC - AC3 - Speex - Celt * Video encoders: - Theora - Dirac - H264 - MPEG2 - MPEG4/DivX5 - xvid - DNxHD It also provide the support for the GStreamer's plugins auto-search.""" EXAMPLE2 = """File-roller is an archive manager for the GNOME \ environment. It allows you to: * Create and modify archives. * View the content of an archive. * View a file contained in an archive. * Extract files from the archive. 
File-roller supports the following formats: * Tar (.tar) archives, including those compressed with gzip (.tar.gz, .tgz), bzip (.tar.bz, .tbz), bzip2 (.tar.bz2, .tbz2), compress (.tar.Z, .taz), lzip (.tar.lz, .tlz), lzop (.tar.lzo, .tzo), lzma (.tar.lzma) and xz (.tar.xz) * Zip archives (.zip) * Jar archives (.jar, .ear, .war) * 7z archives (.7z) * iso9660 CD images (.iso) * Lha archives (.lzh) * Single files compressed with gzip (.gz), bzip (.bz), bzip2 (.bz2), compress (.Z), lzip (.lz), lzop (.lzo), lzma (.lzma) and xz (.xz) File-roller doesn't perform archive operations by itself, but relies on \ standard tools for this.""" EXAMPLE3 = """This package includes the following CTAN packages: Asana-Math -- A font to typeset maths in Xe(La)TeX. albertus -- allrunes -- Fonts and LaTeX package for almost all runes. antiqua -- the URW Antiqua Condensed Font. antp -- Antykwa Poltawskiego: a Type 1 family of Polish traditional type. antt -- Antykwa Torunska: a Type 1 family of a Polish traditional type. apl -- Fonts for typesetting APL programs. ar -- Capital A and capital R ligature for Apsect Ratio. archaic -- A collection of archaic fonts. arev -- Fonts and LaTeX support files for Arev Sans. ascii -- Support for IBM "standard ASCII" font. astro -- Astronomical (planetary) symbols. atqolive -- augie -- Calligraphic font for typesetting handwriting. auncial-new -- Artificial Uncial font and LaTeX support macros. aurical -- Calligraphic fonts for use with LaTeX in T1 encoding. barcodes -- Fonts for making barcodes. bayer -- Herbert Bayers Universal Font For Metafont. bbding -- A symbol (dingbat) font and LaTeX macros for its use. bbm -- "Blackboard-style" cm fonts. bbm-macros -- LaTeX support for "blackboard-style" cm fonts. bbold -- Sans serif blackboard bold. belleek -- Free replacement for basic MathTime fonts. bera -- Bera fonts. blacklettert1 -- T1-encoded versions of Haralambous old German fonts. boisik -- A font inspired by Baskerville design. bookhands -- A collection of book-hand fonts. braille -- Support for braille. brushscr -- A handwriting script font. calligra -- Calligraphic font. carolmin-ps -- Adobe Type 1 format of Carolingian Minuscule fonts. cherokee -- A font for the Cherokee script. clarendo -- cm-lgc -- Type 1 CM-based fonts for Latin, Greek and Cyrillic. cmbright -- Computer Modern Bright fonts. cmll -- Symbols for linear logic. cmpica -- A Computer Modern Pica variant. coronet -- courier-scaled -- Provides a scaled Courier font. cryst -- Font for graphical symbols used in crystallography. cyklop -- The Cyclop typeface. dancers -- Font for Conan Doyle's "The Dancing Men". dice -- A font for die faces. dictsym -- DictSym font and macro package dingbat -- Two dingbat symbol fonts. doublestroke -- Typeset mathematical double stroke symbols. dozenal -- Typeset documents using base twelve numbering (also called "dozenal") duerer -- Computer Duerer fonts. duerer-latex -- LaTeX support for the Duerer fonts. ean -- Macros for making EAN barcodes. ecc -- Sources for the European Concrete fonts. eco -- Oldstyle numerals using EC fonts. eiad -- Traditional style Irish fonts. eiad-ltx -- LaTeX support for the eiad font. elvish -- Fonts for typesetting Tolkien Elvish scripts. epigrafica -- A Greek and Latin font. epsdice -- A scalable dice "font". esvect -- Vector arrows. eulervm -- Euler virtual math fonts. euxm -- feyn -- A font for in-text Feynman diagrams. fge -- A font for Frege's Grundgesetze der Arithmetik. foekfont -- The title font of the Mads Fok magazine. 
fonetika -- Support for the danish "Dania" phonetic system. fourier -- Using Utopia fonts in LaTeX documents. fouriernc -- Use New Century Schoolbook text with Fourier maths fonts. frcursive -- French cursive hand fonts. garamond -- genealogy -- A compilation genealogy font. gfsartemisia -- A modern Greek font design. gfsbodoni -- A Greek and Latin font based on Bodoni. gfscomplutum -- A Greek font with a long history. gfsdidot -- A Greek font based on Didot's work. gfsneohellenic -- A Greek font in the Neo-Hellenic style. gfssolomos -- A Greek-alphabet font. gothic -- A collection of old German-style fonts. greenpoint -- The Green Point logo. groff -- grotesq -- the URW Grotesk Bold Font. hands -- Pointing hand font. hfbright -- The hfbright fonts. hfoldsty -- Old style numerals with EC fonts. ifsym -- A collection of symbols. inconsolata -- A monospaced font, with support files for use with TeX. initials -- Adobe Type 1 decorative initial fonts. iwona -- A two-element sans-serif font. junicode -- A TrueType font for mediaevalists. kixfont -- A font for KIX codes. knuthotherfonts -- kpfonts -- A complete set of fonts for text and mathematics. kurier -- A two-element sans-serif typeface. lettrgth -- lfb -- A Greek font with normal and bold variants. libertine -- Use the font Libertine with LaTeX. libris -- Libris ADF fonts, with LaTeX support. linearA -- Linear A script fonts. logic -- A font for electronic logic design. lxfonts -- Set of slide fonts based on CM. ly1 -- Support for LY1 LaTeX encoding. marigold -- mathabx -- Three series of mathematical symbols. mathdesign -- Mathematical fonts to fit with particular text fonts. mnsymbol -- Mathematical symbol font for Adobe MinionPro. nkarta -- A "new" version of the karta cartographic fonts. ocherokee -- LaTeX Support for the Cherokee language. ogham -- Fonts for typesetting Ogham script. oinuit -- LaTeX Support for the Inuktitut Language. optima -- orkhun -- A font for orkhun script. osmanian -- Osmanian font for writing Somali. pacioli -- Fonts designed by Fra Luca de Pacioli in 1497. pclnfss -- Font support for current PCL printers. phaistos -- Disk of Phaistos font. phonetic -- MetaFont Phonetic fonts, based on Computer Modern. pigpen -- A font for the pigpen (or masonic) cipher. psafm -- punk -- Donald Knuth's punk font. recycle -- A font providing the "recyclable" logo. sauter -- Wide range of design sizes for CM fonts. sauterfonts -- Use sauter fonts in LaTeX. semaphor -- Semaphore alphabet font. simpsons -- MetaFont source for Simpsons characters. skull -- A font to draw a skull. staves -- Typeset Icelandic staves and runic letters. tapir -- A simple geometrical font. tengwarscript -- LaTeX support for using Tengwar fonts. trajan -- Fonts from the Trajan column in Rome. umtypewriter -- Fonts to typeset with the xgreek package. univers -- universa -- Herbert Bayer's 'universal' font. venturisadf -- Venturis ADF fonts collection. wsuipa -- International Phonetic Alphabet fonts. yfonts -- Support for old German fonts. zefonts -- Virtual fonts to provide T1 encoding from existing fonts.""" EXAMPLE4 = """Arista is a simple multimedia transcoder, it focuses on \ being easy to use by making complex task of encoding for various devices \ simple. Users should pick an input and a target device, choose a file to save to and \ go. Features: * Presets for iPod, computer, DVD player, PSP, Playstation 3, and more. * Live preview to see encoded quality. * Automatically discover available DVD media and Video 4 Linux (v4l) devices. 
* Rip straight from DVD media easily (requires libdvdcss). * Rip straight from v4l devices. * Simple terminal client for scripting. * Automatic preset updating.""" def on_clicked(widget, desc_widget, descs): widget.position += 1 if widget.position >= len(descs): widget.position = 0 desc_widget.set_description(*descs[widget.position]) descs = ((EXAMPLE0, ''), (EXAMPLE1, ''), (EXAMPLE2, ''), (EXAMPLE3, 'texlive-fonts-extra'), (EXAMPLE4, '')) win = Gtk.Window() win.set_default_size(300, 400) win.set_has_resize_grip(True) vb = Gtk.VBox() win.add(vb) b = Gtk.Button('Next test description >>') b.position = 0 vb.pack_start(b, False, False, 0) scroll = Gtk.ScrolledWindow() vb.add(scroll) d = AppDescription() #~ d.description.DEBUG_PAINT_BBOXES = True d.set_description(EXAMPLE0, pkgname='') scroll.add_with_viewport(d) win.show_all() b.connect("clicked", on_clicked, d, descs) win.connect('destroy', lambda x: Gtk.main_quit()) return win if __name__ == '__main__': win = get_test_description_window() win.show_all() Gtk.main()
gpl-3.0
teddym6/qualitybots
src/appengine/handlers/machine_pool.py
26
5651
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Handler for assisting with the machine install process."""

# Disable 'Import not at top of file' lint error.
# pylint: disable-msg=C6204, C6205, W0611

import logging

from django.utils import simplejson

from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app

from common import ec2_manager
from common import enum
from handlers import base
from handlers import launch_tasks
from models import client_machine


INIT_START = '/init/start'
INSTALL_FAILED = '/init/install_failed'
INSTALL_SUCEEDED = '/init/install_succeeded'


class InitializationStart(base.BaseHandler):
  """Handler to acknowledge a machine starting initialization."""

  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def get(self):
    """Updates the status of a machine starting initialization."""
    instance_id = self.GetRequiredParameter('instance_id')

    instance = db.GqlQuery('SELECT * FROM ClientMachine WHERE client_id = :1',
                           instance_id).get()

    if not instance:
      logging.error('The given instance id "%s" does not match any machines.',
                    instance_id)
      self.error(500)
      return

    if instance.status != enum.MACHINE_STATUS.PROVISIONED:
      logging.error('The machine with instance id "%s" was in an unexpected '
                    'state for initialization: "%s"', instance_id,
                    enum.MACHINE_STATUS.LookupKey(instance.status))

    instance.status = enum.MACHINE_STATUS.INITIALIZING
    instance.put()

    self.response.out.write('Initialization acknowledged.')


class InstallFailed(base.BaseHandler):
  """Handler to deal with a machine that fails to properly setup and
  install."""

  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def post(self):
    """Updates the status of a machine that failed with initialization."""
    instance_id = self.GetRequiredParameter('instance_id')
    log = self.GetOptionalParameter('log', None)

    old_instance = db.GqlQuery(
        'SELECT * FROM ClientMachine WHERE client_id = :1', instance_id).get()

    if not old_instance:
      logging.error('The given instance id "%s" does not match any machines.',
                    instance_id)
      self.error(500)
      return

    if old_instance.status != enum.MACHINE_STATUS.INITIALIZING:
      logging.error('The machine with instance id "%s" was in an unexpected '
                    'state for initialization: "%s"', instance_id,
                    enum.MACHINE_STATUS.LookupKey(old_instance.status))

    old_instance.status = enum.MACHINE_STATUS.FAILED
    if log:
      old_instance.initialization_log = log
    old_instance.put()

    if old_instance.retry_count >= client_machine.MAX_RETRIES:
      logging.error('Reached the maximum number of retries for starting this '
                    'machine: %s.', str(old_instance.key()))
      logging.info('Terminating the failed instance.')
      deferred.defer(launch_tasks.TerminateFailedMachine, instance_id,
                     _countdown=launch_tasks.DEFAULT_COUNTDOWN,
                     _queue=launch_tasks.DEFAULT_QUEUE)
      self.error(500)
      return

    logging.info('Rebooting the failed instance.')
    deferred.defer(launch_tasks.RebootMachine, instance_id,
                   _countdown=launch_tasks.DEFAULT_COUNTDOWN,
                   _queue=launch_tasks.DEFAULT_QUEUE)

    self.response.out.write('Initialization failure acknowledged.')


class InstallSucceeded(base.BaseHandler):
  """Handler to deal with a machine that installs successfully."""

  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def post(self):
    """Updates the status of a machine that succeeded with initialization."""
    instance_id = self.GetRequiredParameter('instance_id')
    log = self.GetOptionalParameter('log', None)

    instance = db.GqlQuery('SELECT * FROM ClientMachine WHERE client_id = :1',
                           instance_id).get()

    if not instance:
      logging.error('The given instance id "%s" does not match any machines.',
                    instance_id)
      self.error(500)
      return

    if instance.status != enum.MACHINE_STATUS.INITIALIZING:
      logging.error('The machine with instance id "%s" was in an unexpected '
                    'state for initialization: "%s"', instance_id,
                    enum.MACHINE_STATUS.LookupKey(instance.status))

    instance.status = enum.MACHINE_STATUS.RUNNING
    if log:
      instance.initialization_log = log
    instance.put()

    self.response.out.write('Initialization success acknowledged.')


application = webapp.WSGIApplication(
    [(INIT_START, InitializationStart),
     (INSTALL_FAILED, InstallFailed),
     (INSTALL_SUCEEDED, InstallSucceeded)],
    debug=True)


def main():
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
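# --- Illustrative sketch, not part of the original handler module. ---
# It shows how a client machine might report its install outcome back to the
# endpoints registered above; the host name is a caller-supplied placeholder,
# and only the INSTALL_* paths come from this module.
import urllib
import urllib2


def report_install_result(host, instance_id, succeeded, log=None):
  """Posts this machine's install outcome to the machine_pool handlers."""
  if succeeded:
    path = INSTALL_SUCEEDED
  else:
    path = INSTALL_FAILED
  params = {'instance_id': instance_id}
  if log is not None:
    params['log'] = log
  # Supplying a data argument makes urllib2 issue a POST, matching the
  # post() methods of InstallFailed and InstallSucceeded.
  return urllib2.urlopen('http://%s%s' % (host, path),
                         urllib.urlencode(params))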
apache-2.0
Endika/django
django/middleware/clickjacking.py
284
1989
""" Clickjacking Protection Middleware. This module provides a middleware that implements protection against a malicious site loading resources from your site in a hidden frame. """ from django.conf import settings class XFrameOptionsMiddleware(object): """ Middleware that sets the X-Frame-Options HTTP header in HTTP responses. Does not set the header if it's already set or if the response contains a xframe_options_exempt value set to True. By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the response can only be loaded on a frame within the same site. To prevent the response from being loaded in a frame in any site, set X_FRAME_OPTIONS in your project's Django settings to 'DENY'. Note: older browsers will quietly ignore this header, thus other clickjacking protection techniques should be used if protection in those browsers is required. https://en.wikipedia.org/wiki/Clickjacking#Server_and_client """ def process_response(self, request, response): # Don't set it if it's already in the response if response.get('X-Frame-Options') is not None: return response # Don't set it if they used @xframe_options_exempt if getattr(response, 'xframe_options_exempt', False): return response response['X-Frame-Options'] = self.get_xframe_options_value(request, response) return response def get_xframe_options_value(self, request, response): """ Gets the value to set for the X_FRAME_OPTIONS header. By default this uses the value from the X_FRAME_OPTIONS Django settings. If not found in settings, defaults to 'SAMEORIGIN'. This method can be overridden if needed, allowing it to vary based on the request or response. """ return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
bsd-3-clause
aspiers/pacemaker
cts/CM_ais.py
15
5946
'''CTS: Cluster Testing System: AIS dependent modules...
'''

__copyright__ = '''
Copyright (C) 2007 Andrew Beekhof <andrew@suse.de>
'''

#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.

from cts.CTSvars import *
from cts.CM_lha import crm_lha
from cts.CTS import Process
from cts.patterns import PatternSelector

#######################################################################
#
# LinuxHA v2 dependent modules
#
#######################################################################


class crm_ais(crm_lha):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-ais"
        crm_lha.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

    def NodeUUID(self, node):
        return node

    def ais_components(self, extra={}):

        complist = []
        if not len(self.fullcomplist.keys()):
            for c in ["cib", "lrmd", "crmd", "attrd" ]:
                self.fullcomplist[c] = Process(
                    self, c,
                    pats = self.templates.get_component(self.name, c),
                    badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                    common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # pengine uses dc_pats instead of pats
            self.fullcomplist["pengine"] = Process(
                self, "pengine",
                dc_pats = self.templates.get_component(self.name, "pengine"),
                badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # stonith-ng's process name is different from its component name
            self.fullcomplist["stonith-ng"] = Process(
                self, "stonith-ng", process="stonithd",
                pats = self.templates.get_component(self.name, "stonith"),
                badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # add (or replace) any extra components passed in
            self.fullcomplist.update(extra)

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist


class crm_cs_v0(crm_ais):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running against version 0 of our plugin
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-plugin-v0"
        crm_ais.__init__(self, Environment, randseed=randseed, name=name)

    def Components(self):
        extra = {}
        extra["corosync"] = Process(
            self, "corosync",
            pats = self.templates.get_component(self.name, "corosync"),
            badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"),
            common_ignore = self.templates.get_component(self.name, "common-ignore")
        )
        return self.ais_components(extra=extra)


class crm_cs_v1(crm_cs_v0):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of version 1 of our plugin
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-plugin-v1"
        crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)


class crm_mcp(crm_cs_v0):
    '''
    The crm version 4 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of native corosync (no plugins)
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-mcp"
        crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)

        if self.Env["have_systemd"]:
            self.update({
                # When systemd is in use, we can look for this instead
                "Pat:We_stopped" : "%s.*Corosync Cluster Engine exiting normally",
            })


class crm_cman(crm_cs_v0):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-cman"
        crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
gpl-2.0
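The filtering loop at the end of ais_components() is the interesting part of the file above: it drops daemons being profiled by valgrind (which "killall -9" cannot stop) and drops the fencer when fencing is disabled. Below is a minimal, standalone sketch of that rule; the env dict, component names, and valgrind list are hypothetical stand-ins for what the real class reads from its CTS Environment.

# A self-contained sketch of the component-filtering rule in ais_components().
def filter_components(components, env):
    """Drop valgrind-profiled daemons and, when fencing is off, stonith-ng."""
    vgrind = env["valgrind-procs"].split()
    kept = []
    for name in components:
        if env["valgrind-tests"] and name in vgrind:
            continue  # being profiled by valgrind; leave it alone
        if name == "stonith-ng" and not env["DoFencing"]:
            continue  # fencing disabled for this run
        kept.append(name)
    return kept

if __name__ == "__main__":
    env = {"valgrind-procs": "crmd lrmd", "valgrind-tests": True, "DoFencing": False}
    print(filter_components(["cib", "lrmd", "crmd", "attrd", "pengine", "stonith-ng"], env))
    # -> ['cib', 'attrd', 'pengine']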
jlguardi/yowsup
yowsup/layers/protocol_media/protocolentities/builder_message_media_downloadable.py
17
1886
# from yowsup.layers.protocol_media import mediacipher
import tempfile
import os


class DownloadableMediaMessageBuilder(object):
    def __init__(self, downloadbleMediaMessageClass, jid, filepath):
        self.jid = jid
        self.filepath = filepath
        self.encryptedFilepath = None
        self.cls = downloadbleMediaMessageClass
        self.mediaKey = None
        self.attributes = {}
        self.mediaType = self.cls.__name__.split("DownloadableMediaMessageProtocolEntity")[0].lower()  # ugly?

    # def encrypt(self):
    #     fd, encpath = tempfile.mkstemp()
    #     mediaKey = os.urandom(112)
    #     keys = mediacipher.getDerivedKeys(mediaKey)
    #     out = mediacipher.encryptImage(self.filepath, keys)
    #     with open(encpath, 'w') as outF:  # was "encImagePath", an undefined name
    #         outF.write(out)
    #
    #     self.mediaKey = mediaKey
    #     self.encryptedFilepath = encpath

    # def decrypt(self):
    #     self.mediaKey = None
    #     self.encryptedFilePath = None

    def setEncryptionData(self, mediaKey, encryptedFilepath):
        self.mediaKey = mediaKey
        self.encryptedFilepath = encryptedFilepath

    def isEncrypted(self):
        return self.encryptedFilepath is not None

    def getFilepath(self):
        return self.encryptedFilepath or self.filepath

    def getOriginalFilepath(self):
        return self.filepath

    def set(self, key, val):
        self.attributes[key] = val

    def get(self, key, default=None):
        if key in self.attributes and self.attributes[key] is not None:
            return self.attributes[key]
        return default

    def getOrSet(self, key, func):
        if not self.get(key):
            self.set(key, func())

    def build(self, url=None, ip=None):
        if url:
            self.set("url", url)
        if ip:
            self.set("ip", ip)
        return self.cls.fromBuilder(self)
gpl-3.0
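The builder's getOrSet() gives callers cheap lazy attributes: the callable only runs when the key is still unset, so expensive work (hashing a file, probing its size) happens at most once. A hedged usage sketch follows; FakeImageDownloadableMediaMessageProtocolEntity is a hypothetical stand-in for a real yowsup protocol-entity class, whose only contract here is the fromBuilder() classmethod that build() calls.

class FakeImageDownloadableMediaMessageProtocolEntity(object):
    @classmethod
    def fromBuilder(cls, builder):
        # A real entity would construct itself from the builder's fields.
        return {"to": builder.jid, "path": builder.getFilepath(),
                "attrs": dict(builder.attributes)}

builder = DownloadableMediaMessageBuilder(
    FakeImageDownloadableMediaMessageProtocolEntity,
    "friend@s.whatsapp.net", "/tmp/photo.jpg")

# The second callable never runs: the key is already cached.
builder.getOrSet("filehash", lambda: "abc123")
builder.getOrSet("filehash", lambda: "never evaluated")

entity = builder.build(url="https://example.invalid/media/1")
print(entity["attrs"]["filehash"])  # -> abc123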
damorim/compilers-cin
2020_3/projeto2/antlr4-python3-runtime-4.7.2/src/antlr4/atn/ATNDeserializer.py
9
22186
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. # Use of this file is governed by the BSD 3-clause license that # can be found in the LICENSE.txt file in the project root. #/ from uuid import UUID from io import StringIO from typing import Callable from antlr4.Token import Token from antlr4.atn.ATN import ATN from antlr4.atn.ATNType import ATNType from antlr4.atn.ATNState import * from antlr4.atn.Transition import * from antlr4.atn.LexerAction import * from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions # This is the earliest supported serialized UUID. BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E") # This UUID indicates the serialized ATN contains two sets of # IntervalSets, where the second set's values are encoded as # 32-bit integers to support the full Unicode SMP range up to U+10FFFF. ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089") # This list contains all of the currently supported UUIDs, ordered by when # the feature first appeared in this branch. SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ] SERIALIZED_VERSION = 3 # This is the current serialized UUID. SERIALIZED_UUID = ADDED_UNICODE_SMP class ATNDeserializer (object): def __init__(self, options : ATNDeserializationOptions = None): if options is None: options = ATNDeserializationOptions.defaultOptions self.deserializationOptions = options # Determines if a particular serialized representation of an ATN supports # a particular feature, identified by the {@link UUID} used for serializing # the ATN at the time the feature was first introduced. # # @param feature The {@link UUID} marking the first time the feature was # supported in the serialized ATN. # @param actualUuid The {@link UUID} of the actual serialized ATN which is # currently being deserialized. # @return {@code true} if the {@code actualUuid} value represents a # serialized ATN at or after the feature identified by {@code feature} was # introduced; otherwise, {@code false}. def isFeatureSupported(self, feature : UUID , actualUuid : UUID ): idx1 = SUPPORTED_UUIDS.index(feature) if idx1<0: return False idx2 = SUPPORTED_UUIDS.index(actualUuid) return idx2 >= idx1 def deserialize(self, data : str): self.reset(data) self.checkVersion() self.checkUUID() atn = self.readATN() self.readStates(atn) self.readRules(atn) self.readModes(atn) sets = [] # First, read all sets with 16-bit Unicode code points <= U+FFFF. self.readSets(atn, sets, self.readInt) # Next, if the ATN was serialized with the Unicode SMP feature, # deserialize sets with 32-bit arguments <= U+10FFFF. 
if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid): self.readSets(atn, sets, self.readInt32) self.readEdges(atn, sets) self.readDecisions(atn) self.readLexerActions(atn) self.markPrecedenceDecisions(atn) self.verifyATN(atn) if self.deserializationOptions.generateRuleBypassTransitions \ and atn.grammarType == ATNType.PARSER: self.generateRuleBypassTransitions(atn) # re-verify after modification self.verifyATN(atn) return atn def reset(self, data:str): def adjust(c): v = ord(c) return v-2 if v>1 else v + 65533 temp = [ adjust(c) for c in data ] # don't adjust the first value since that's the version number temp[0] = ord(data[0]) self.data = temp self.pos = 0 def checkVersion(self): version = self.readInt() if version != SERIALIZED_VERSION: raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").") def checkUUID(self): uuid = self.readUUID() if not uuid in SUPPORTED_UUIDS: raise Exception("Could not deserialize ATN with UUID: " + str(uuid) + \ " (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID) self.uuid = uuid def readATN(self): idx = self.readInt() grammarType = ATNType.fromOrdinal(idx) maxTokenType = self.readInt() return ATN(grammarType, maxTokenType) def readStates(self, atn:ATN): loopBackStateNumbers = [] endStateNumbers = [] nstates = self.readInt() for i in range(0, nstates): stype = self.readInt() # ignore bad type of states if stype==ATNState.INVALID_TYPE: atn.addState(None) continue ruleIndex = self.readInt() if ruleIndex == 0xFFFF: ruleIndex = -1 s = self.stateFactory(stype, ruleIndex) if stype == ATNState.LOOP_END: # special case loopBackStateNumber = self.readInt() loopBackStateNumbers.append((s, loopBackStateNumber)) elif isinstance(s, BlockStartState): endStateNumber = self.readInt() endStateNumbers.append((s, endStateNumber)) atn.addState(s) # delay the assignment of loop back and end states until we know all the state instances have been initialized for pair in loopBackStateNumbers: pair[0].loopBackState = atn.states[pair[1]] for pair in endStateNumbers: pair[0].endState = atn.states[pair[1]] numNonGreedyStates = self.readInt() for i in range(0, numNonGreedyStates): stateNumber = self.readInt() atn.states[stateNumber].nonGreedy = True numPrecedenceStates = self.readInt() for i in range(0, numPrecedenceStates): stateNumber = self.readInt() atn.states[stateNumber].isPrecedenceRule = True def readRules(self, atn:ATN): nrules = self.readInt() if atn.grammarType == ATNType.LEXER: atn.ruleToTokenType = [0] * nrules atn.ruleToStartState = [0] * nrules for i in range(0, nrules): s = self.readInt() startState = atn.states[s] atn.ruleToStartState[i] = startState if atn.grammarType == ATNType.LEXER: tokenType = self.readInt() if tokenType == 0xFFFF: tokenType = Token.EOF atn.ruleToTokenType[i] = tokenType atn.ruleToStopState = [0] * nrules for state in atn.states: if not isinstance(state, RuleStopState): continue atn.ruleToStopState[state.ruleIndex] = state atn.ruleToStartState[state.ruleIndex].stopState = state def readModes(self, atn:ATN): nmodes = self.readInt() for i in range(0, nmodes): s = self.readInt() atn.modeToStartState.append(atn.states[s]) def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]): m = self.readInt() for i in range(0, m): iset = IntervalSet() sets.append(iset) n = self.readInt() containsEof = self.readInt() if containsEof!=0: iset.addOne(-1) for j in range(0, n): i1 = readUnicode() i2 = readUnicode() iset.addRange(range(i1, i2 + 1)) # range 
upper limit is exclusive def readEdges(self, atn:ATN, sets:list): nedges = self.readInt() for i in range(0, nedges): src = self.readInt() trg = self.readInt() ttype = self.readInt() arg1 = self.readInt() arg2 = self.readInt() arg3 = self.readInt() trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) srcState = atn.states[src] srcState.addTransition(trans) # edges for rule stop states can be derived, so they aren't serialized for state in atn.states: for i in range(0, len(state.transitions)): t = state.transitions[i] if not isinstance(t, RuleTransition): continue outermostPrecedenceReturn = -1 if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule: if t.precedence == 0: outermostPrecedenceReturn = t.target.ruleIndex trans = EpsilonTransition(t.followState, outermostPrecedenceReturn) atn.ruleToStopState[t.target.ruleIndex].addTransition(trans) for state in atn.states: if isinstance(state, BlockStartState): # we need to know the end state to set its start state if state.endState is None: raise Exception("IllegalState") # block end states can only be associated to a single block start state if state.endState.startState is not None: raise Exception("IllegalState") state.endState.startState = state if isinstance(state, PlusLoopbackState): for i in range(0, len(state.transitions)): target = state.transitions[i].target if isinstance(target, PlusBlockStartState): target.loopBackState = state elif isinstance(state, StarLoopbackState): for i in range(0, len(state.transitions)): target = state.transitions[i].target if isinstance(target, StarLoopEntryState): target.loopBackState = state def readDecisions(self, atn:ATN): ndecisions = self.readInt() for i in range(0, ndecisions): s = self.readInt() decState = atn.states[s] atn.decisionToState.append(decState) decState.decision = i def readLexerActions(self, atn:ATN): if atn.grammarType == ATNType.LEXER: count = self.readInt() atn.lexerActions = [ None ] * count for i in range(0, count): actionType = self.readInt() data1 = self.readInt() if data1 == 0xFFFF: data1 = -1 data2 = self.readInt() if data2 == 0xFFFF: data2 = -1 lexerAction = self.lexerActionFactory(actionType, data1, data2) atn.lexerActions[i] = lexerAction def generateRuleBypassTransitions(self, atn:ATN): count = len(atn.ruleToStartState) atn.ruleToTokenType = [ 0 ] * count for i in range(0, count): atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 for i in range(0, count): self.generateRuleBypassTransition(atn, i) def generateRuleBypassTransition(self, atn:ATN, idx:int): bypassStart = BasicBlockStartState() bypassStart.ruleIndex = idx atn.addState(bypassStart) bypassStop = BlockEndState() bypassStop.ruleIndex = idx atn.addState(bypassStop) bypassStart.endState = bypassStop atn.defineDecisionState(bypassStart) bypassStop.startState = bypassStart excludeTransition = None if atn.ruleToStartState[idx].isPrecedenceRule: # wrap from the beginning of the rule to the StarLoopEntryState endState = None for state in atn.states: if self.stateIsEndStateFor(state, idx): endState = state excludeTransition = state.loopBackState.transitions[0] break if excludeTransition is None: raise Exception("Couldn't identify final state of the precedence rule prefix section.") else: endState = atn.ruleToStopState[idx] # all non-excluded transitions that currently target end state need to target blockEnd instead for state in atn.states: for transition in state.transitions: if transition == excludeTransition: continue if transition.target == endState: transition.target = bypassStop # all transitions 
leaving the rule start state need to leave blockStart instead ruleToStartState = atn.ruleToStartState[idx] count = len(ruleToStartState.transitions) while count > 0: bypassStart.addTransition(ruleToStartState.transitions[count-1]) del ruleToStartState.transitions[-1] # link the new states atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart)) bypassStop.addTransition(EpsilonTransition(endState)) matchState = BasicState() atn.addState(matchState) matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx])) bypassStart.addTransition(EpsilonTransition(matchState)) def stateIsEndStateFor(self, state:ATNState, idx:int): if state.ruleIndex != idx: return None if not isinstance(state, StarLoopEntryState): return None maybeLoopEndState = state.transitions[len(state.transitions) - 1].target if not isinstance(maybeLoopEndState, LoopEndState): return None if maybeLoopEndState.epsilonOnlyTransitions and \ isinstance(maybeLoopEndState.transitions[0].target, RuleStopState): return state else: return None # # Analyze the {@link StarLoopEntryState} states in the specified ATN to set # the {@link StarLoopEntryState#isPrecedenceDecision} field to the # correct value. # # @param atn The ATN. # def markPrecedenceDecisions(self, atn:ATN): for state in atn.states: if not isinstance(state, StarLoopEntryState): continue # We analyze the ATN to determine if this ATN decision state is the # decision for the closure block that determines whether a # precedence rule should continue or complete. # if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule: maybeLoopEndState = state.transitions[len(state.transitions) - 1].target if isinstance(maybeLoopEndState, LoopEndState): if maybeLoopEndState.epsilonOnlyTransitions and \ isinstance(maybeLoopEndState.transitions[0].target, RuleStopState): state.isPrecedenceDecision = True def verifyATN(self, atn:ATN): if not self.deserializationOptions.verifyATN: return # verify assumptions for state in atn.states: if state is None: continue self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1) if isinstance(state, PlusBlockStartState): self.checkCondition(state.loopBackState is not None) if isinstance(state, StarLoopEntryState): self.checkCondition(state.loopBackState is not None) self.checkCondition(len(state.transitions) == 2) if isinstance(state.transitions[0].target, StarBlockStartState): self.checkCondition(isinstance(state.transitions[1].target, LoopEndState)) self.checkCondition(not state.nonGreedy) elif isinstance(state.transitions[0].target, LoopEndState): self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState)) self.checkCondition(state.nonGreedy) else: raise Exception("IllegalState") if isinstance(state, StarLoopbackState): self.checkCondition(len(state.transitions) == 1) self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState)) if isinstance(state, LoopEndState): self.checkCondition(state.loopBackState is not None) if isinstance(state, RuleStartState): self.checkCondition(state.stopState is not None) if isinstance(state, BlockStartState): self.checkCondition(state.endState is not None) if isinstance(state, BlockEndState): self.checkCondition(state.startState is not None) if isinstance(state, DecisionState): self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0) else: self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState)) def checkCondition(self, condition:bool, message=None): if not condition: if message is None: 
message = "IllegalState" raise Exception(message) def readInt(self): i = self.data[self.pos] self.pos += 1 return i def readInt32(self): low = self.readInt() high = self.readInt() return low | (high << 16) def readLong(self): low = self.readInt32() high = self.readInt32() return (low & 0x00000000FFFFFFFF) | (high << 32) def readUUID(self): low = self.readLong() high = self.readLong() allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64) return UUID(int=allBits) edgeFactories = [ lambda args : None, lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ RuleTransition(atn.states[arg1], arg2, arg3, target), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ PredicateTransition(target, arg1, arg2, arg3 != 0), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ ActionTransition(target, arg1, arg2, arg3 != 0), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ SetTransition(target, sets[arg1]), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ NotSetTransition(target, sets[arg1]), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ WildcardTransition(target), lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ PrecedencePredicateTransition(target, arg1) ] def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list): target = atn.states[trg] if type > len(self.edgeFactories) or self.edgeFactories[type] is None: raise Exception("The specified transition type: " + str(type) + " is not valid.") else: return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target) stateFactories = [ lambda : None, lambda : BasicState(), lambda : RuleStartState(), lambda : BasicBlockStartState(), lambda : PlusBlockStartState(), lambda : StarBlockStartState(), lambda : TokensStartState(), lambda : RuleStopState(), lambda : BlockEndState(), lambda : StarLoopbackState(), lambda : StarLoopEntryState(), lambda : PlusLoopbackState(), lambda : LoopEndState() ] def stateFactory(self, type:int, ruleIndex:int): if type> len(self.stateFactories) or self.stateFactories[type] is None: raise Exception("The specified state type " + str(type) + " is not valid.") else: s = self.stateFactories[type]() if s is not None: s.ruleIndex = ruleIndex return s CHANNEL = 0 #The type of a {@link LexerChannelAction} action. CUSTOM = 1 #The type of a {@link LexerCustomAction} action. MODE = 2 #The type of a {@link LexerModeAction} action. MORE = 3 #The type of a {@link LexerMoreAction} action. POP_MODE = 4 #The type of a {@link LexerPopModeAction} action. PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action. SKIP = 6 #The type of a {@link LexerSkipAction} action. TYPE = 7 #The type of a {@link LexerTypeAction} action. 
actionFactories = [ lambda data1, data2: LexerChannelAction(data1), lambda data1, data2: LexerCustomAction(data1, data2), lambda data1, data2: LexerModeAction(data1), lambda data1, data2: LexerMoreAction.INSTANCE, lambda data1, data2: LexerPopModeAction.INSTANCE, lambda data1, data2: LexerPushModeAction(data1), lambda data1, data2: LexerSkipAction.INSTANCE, lambda data1, data2: LexerTypeAction(data1) ] def lexerActionFactory(self, type:int, data1:int, data2:int): if type > len(self.actionFactories) or self.actionFactories[type] is None: raise Exception("The specified lexer action type " + str(type) + " is not valid.") else: return self.actionFactories[type](data1, data2)
mit
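The UUID machinery near the top of ATNDeserializer is a simple ordered-version check: a serialized ATN supports a feature if its UUID appears at or after the feature's UUID in SUPPORTED_UUIDS. Note that in this Python port, list.index() raises ValueError rather than returning -1, so the "if idx1<0" guard in isFeatureSupported() can never fire. A minimal sketch of the same check, with plain strings standing in for the UUID constants and the ValueError handled explicitly:

SUPPORTED_FORMATS = ["base", "unicode-smp"]  # ordered oldest to newest

def is_feature_supported(feature, actual):
    # Supported iff the actual format appeared at or after the release
    # that introduced the feature.
    try:
        return SUPPORTED_FORMATS.index(actual) >= SUPPORTED_FORMATS.index(feature)
    except ValueError:
        return False  # unknown format tag

print(is_feature_supported("unicode-smp", "base"))         # -> False
print(is_feature_supported("unicode-smp", "unicode-smp"))  # -> True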
ppiotr/Invenio
modules/miscutil/lib/upgrades/invenio_2012_11_27_new_selfcite_tables.py
24
1666
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

from invenio.dbquery import run_sql

depends_on = ['invenio_release_1_1_0']

def info():
    return "New selfcite tables"

def do_upgrade():
    run_sql("""
CREATE TABLE IF NOT EXISTS `rnkRECORDSCACHE` (
  `id_bibrec` int(10) unsigned NOT NULL,
  `authorid` bigint(10) NOT NULL,
  PRIMARY KEY (`id_bibrec`,`authorid`)
) ENGINE=MyISAM""")
    run_sql("""
CREATE TABLE IF NOT EXISTS `rnkEXTENDEDAUTHORS` (
  `id` int(10) unsigned NOT NULL,
  `authorid` bigint(10) NOT NULL,
  PRIMARY KEY (`id`,`authorid`)
) ENGINE=MyISAM""")
    run_sql("""
CREATE TABLE IF NOT EXISTS `rnkSELFCITES` (
  `id_bibrec` int(10) unsigned NOT NULL,
  `count` int(10) unsigned NOT NULL,
  `references` text NOT NULL,
  `last_updated` datetime NOT NULL,
  PRIMARY KEY (`id_bibrec`)
) ENGINE=MyISAM""")

def estimate():
    return 1
gpl-2.0
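The file above shows the full shape of an Invenio upgrade recipe: depends_on lists the recipes that must run first, info() returns a one-line summary, do_upgrade() applies the schema change, and estimate() reports a rough cost (apparently in seconds). A hedged skeleton for a follow-up recipe, following that shape; the index and recipe body are hypothetical, and only the depends_on entry (this module's own name) comes from the source:

from invenio.dbquery import run_sql

depends_on = ['invenio_2012_11_27_new_selfcite_tables']

def info():
    return "Example: add an index to the selfcite table"

def do_upgrade():
    # Hypothetical change for illustration only.
    run_sql("ALTER TABLE `rnkSELFCITES` ADD INDEX `last_updated` (`last_updated`)")

def estimate():
    return 1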
dvliman/jaikuengine
.google_appengine/lib/django-0.96/django/newforms/extras/widgets.py
32
2008
""" Extra HTML Widget classes """ from django.newforms.widgets import Widget, Select from django.utils.dates import MONTHS import datetime __all__ = ('SelectDateWidget',) class SelectDateWidget(Widget): """ A Widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ month_field = '%s_month' day_field = '%s_day' year_field = '%s_year' def __init__(self, attrs=None, years=None): # years is an optional list/tuple of years to use in the "year" select box. self.attrs = attrs or {} if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year+10) def render(self, name, value, attrs=None): try: value = datetime.date(*map(int, value.split('-'))) year_val, month_val, day_val = value.year, value.month, value.day except (AttributeError, TypeError, ValueError): year_val = month_val = day_val = None output = [] month_choices = MONTHS.items() month_choices.sort() select_html = Select(choices=month_choices).render(self.month_field % name, month_val) output.append(select_html) day_choices = [(i, i) for i in range(1, 32)] select_html = Select(choices=day_choices).render(self.day_field % name, day_val) output.append(select_html) year_choices = [(i, i) for i in self.years] select_html = Select(choices=year_choices).render(self.year_field % name, year_val) output.append(select_html) return u'\n'.join(output) def value_from_datadict(self, data, name): y, m, d = data.get(self.year_field % name), data.get(self.month_field % name), data.get(self.day_field % name) if y and m and d: return '%s-%s-%s' % (y, m, d) return None
apache-2.0
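The multi-element trick in SelectDateWidget is value_from_datadict(): render() splits one date into three named selects, and value_from_datadict() recombines the three POSTed values into a single 'Y-M-D' string. A small standalone sketch of that recombination step, isolated from Django; the field-name suffixes mirror the widget's month_field/day_field/year_field patterns:

def combine_date(data, name):
    y = data.get('%s_year' % name)
    m = data.get('%s_month' % name)
    d = data.get('%s_day' % name)
    if y and m and d:
        return '%s-%s-%s' % (y, m, d)
    return None

print(combine_date({'dob_year': '1999', 'dob_month': '7', 'dob_day': '4'}, 'dob'))
# -> 1999-7-4  (note: the widget does not zero-pad the components)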
alextruberg/custom_django
django/contrib/gis/db/backends/mysql/introspection.py
624
1426
from MySQLdb.constants import FIELD_TYPE

from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection

class MySQLIntrospection(DatabaseIntrospection):
    # Updating the data_types_reverse dictionary with the appropriate
    # type for Geometry fields.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        cursor = self.connection.cursor()
        # Defaults in case the geometry column is never found; without them
        # the return statement below would raise a NameError.
        field_type, field_params = None, {}
        try:
            # In order to get the specific geometry type of the field,
            # we introspect on the table definition using `DESCRIBE`.
            cursor.execute('DESCRIBE %s' % self.connection.ops.quote_name(table_name))
            # Increment over description info until we get to the geometry
            # column.
            for column, typ, null, key, default, extra in cursor.fetchall():
                if column == geo_col:
                    # Using OGRGeomType to convert from OGC name to Django field.
                    # MySQL does not support 3D or SRIDs, so the field params
                    # are empty.
                    field_type = OGRGeomType(typ).django
                    field_params = {}
                    break
        finally:
            cursor.close()

        return field_type, field_params
bsd-3-clause
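The introspection above boils down to scanning the rows DESCRIBE returns for one column and reading off its declared type. A standalone sketch of that scan; the rows are hard-coded here, whereas a real caller would take them from cursor.fetchall() after executing DESCRIBE:

def find_column_type(describe_rows, wanted):
    # Each DESCRIBE row is (column, type, null, key, default, extra).
    for column, typ, null, key, default, extra in describe_rows:
        if column == wanted:
            return typ
    return None

rows = [
    ("id", "int(11)", "NO", "PRI", None, "auto_increment"),
    ("geom", "point", "YES", "MUL", None, ""),
]
print(find_column_type(rows, "geom"))  # -> point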
desarrollosimagos/svidb
administrativo/perfil/models.py
1
12346
#!/usr/bin/python -u # -*- coding: utf-8 -*- from django.db import models from datetime import datetime from django.contrib.auth.models import User from mapas.models import * from actores.models import * class PerfilPublico(models.Model): user = models.OneToOneField(User,verbose_name='Usuario') persona = models.OneToOneField(Directorios) class Meta: db_table = u'perfilpublico' verbose_name_plural='Perfil Público' verbose_name='Perfil Público' unique_together=('user','persona') #app_label = 'Sistematizacion_de_modulos_publicos' def __unicode__(self): return u"%s" %(self.persona.nombre) class SeccionesPanelPublico(models.Model): panel = models.CharField(max_length=180,verbose_name='Modulo') descripcion = models.TextField() # modulos = models.ManyToManyField(ModulosPublicos,related_name='Modulos Principales',verbose_name='Modulos',blank=True) activo = models.BooleanField(verbose_name="Activo") is_admmin = models.BooleanField(verbose_name="Solo para Administradores") posicion = models.IntegerField(verbose_name="Posicion") class Meta: verbose_name_plural='Secciones del Panel Publico' verbose_name='Secciones del Panel Publico' def __unicode__(self): return u"%s" %(self.panel) class ModulosPublicos(models.Model): paneles = models.ForeignKey(SeccionesPanelPublico) modulo = models.CharField(max_length=180,verbose_name='Modulo') url = models.CharField(max_length=180,verbose_name='URL',blank=True,null=True) boton = models.ImageField(upload_to='modulos') # submodulos = models.ManyToManyField(SubModulosPublicos,related_name='Submodulos',verbose_name='Sub Modulos',blank=True) descripcion = models.TextField() is_admmin = models.BooleanField(verbose_name="Solo para Administradores") activo = models.BooleanField(verbose_name="Activo") posicion = models.IntegerField(verbose_name="Posicion") target = models.CharField(max_length=40,choices=(('_blank',u'Abre el documento vinculado en una nueva ventana o pestaña'),('_self',u'Abre el documento vinculado en el mismo marco que se ha hecho clic'),('_parent',u'Abre el documento vinculado en el marco padre'),('_top',u'Abre el documento vinculado en el pleno de la ventana')),verbose_name='Target del Vinculo') class Meta: verbose_name_plural='Módulos Públicos' verbose_name='Módulos Públicos' #app_label = 'Sistematizacion_de_modulos_publicos' def __unicode__(self): return u"%s - %s" %(self.paneles.panel, self.modulo) def logo(self): logo = "" if self.boton: esta = "<img src='" + self.boton.url +"' alt='Activo' height='150px'>" else: esta = "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen" return u"%s"%(esta) logo.allow_tags = True class SubModulosPublicos(models.Model): modulos = models.ForeignKey(ModulosPublicos) titulo = models.CharField(max_length=180,verbose_name='Modulo') url = models.CharField(max_length=180,verbose_name='URL',blank=True,null=True) boton = models.ImageField(upload_to='modulos') descripcion = models.TextField() is_admmin = models.BooleanField(verbose_name="Solo para Administradores") activo = models.BooleanField(verbose_name="Activo") posicion = models.IntegerField(verbose_name="Posicion") target = models.CharField(max_length=40,choices=(('_blank',u'Abre el documento vinculado en una nueva ventana o pestaña'),('_self',u'Abre el documento vinculado en el mismo marco que se ha hecho clic'),('_parent',u'Abre el documento vinculado en el marco padre'),('_top',u'Abre el documento vinculado en el pleno de la ventana')),verbose_name='Target del Vinculo') class Meta: verbose_name_plural='Sub Módulos Públicos' verbose_name='Sub Módulos 
Públicos' def __unicode__(self): return u"%s %s %s" %(self.modulos.paneles.panel, self.modulos.modulo,self.titulo) def logo(self): logo = "" if self.boton: esta = "<img src='" + self.boton.url +"' alt='Activo' height='150px'>" else: esta = "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen" return u"%s"%(esta) logo.allow_tags = True class PerfilModulos(models.Model): perfil = models.ForeignKey(PerfilPublico) modulos = models.ForeignKey(ModulosPublicos,verbose_name='Modulos') ver = models.BooleanField(verbose_name="Ver") add = models.BooleanField(verbose_name="Agregar") edit = models.BooleanField(verbose_name="Modificar") activo = models.BooleanField(verbose_name="Activo") class Meta: db_table = u'perfilmodulos' verbose_name_plural='Permisos Perfiles Módulos' unique_together=('perfil','modulos','activo') verbose_name='Permisos Perfiles Módulos' #app_label = 'Sistematizacion_de_modulos_publicos' def __unicode__(self): return u"%s %s" %(self.perfil.persona.nombre,self.modulos.modulo) class PerfilSubModulos(models.Model): perfil = models.ForeignKey(PerfilPublico) submodulos = models.ForeignKey(SubModulosPublicos,verbose_name='SubModulos') ver = models.BooleanField(verbose_name="Ver") add = models.BooleanField(verbose_name="Agregar") edit = models.BooleanField(verbose_name="Modificar") activo = models.BooleanField(verbose_name="Activo") class Meta: verbose_name_plural='Permisos Perfiles Sub Módulos' verbose_name='Permisos Perfil Sub Módulos' unique_together=('perfil','submodulos','activo') #app_label = 'Sistematizacion_de_modulos_publicos' def __unicode__(self): return u"%s %s" %(self.perfil.persona.nombre,self.submodulos.titulo) #class PerfilPaneles(models.Model): # perfil = models.ForeignKey(PerfilPublico) # modulos = models.ManyToManyField(SeccionesPanelPublico,verbose_name='Paneles') # class Meta: # verbose_name_plural='Perfil Paneles' # verbose_name='Perfil Paneles' # def __unicode__(self): # return u"%s %s" %(self.perfil.persona.nombre,self.perfil.persona.documentoidentidad) class TipoSolicitud(models.Model): tipo = models.CharField(max_length=180,verbose_name='Tipo') descripcion = models.TextField() class Meta: verbose_name_plural='Tipo de Solicitud' verbose_name='Tipo de Solicitud' def __unicode__(self): return u"%s" %(self.tipo) class SistemaSolicitudes(models.Model): remi = models.ForeignKey(Directorios,verbose_name='Remitente') tipoSolicitud = models.ForeignKey(TipoSolicitud,verbose_name='Tipo de Solicitud',blank=True, null = True) destino = models.ManyToManyField(Directorios, related_name='destinodirect',verbose_name='Destinatarios',blank=True, null = True) destinoinst = models.ManyToManyField(Actores, related_name='destinoactor',verbose_name='Destinatarios Instituciones',blank=True, null = True) asunto = models.CharField(max_length=120,blank=True,null=True) mensaje = models.TextField(blank=True,null=True) fecha = models.DateTimeField(default=datetime.now(),editable = False) fechainicio = models.DateTimeField(verbose_name='Fecha de Inicio',blank=True,null=True) fechaentrega = models.DateTimeField(verbose_name='Fecha de Entrega',blank=True,null=True) fechaculminacion = models.DateTimeField(verbose_name='Fecha de Culminación',blank=True,null=True) fechaprorroga = models.DateTimeField(verbose_name='Prorroga',blank=True,null=True) proyect = models.BooleanField(verbose_name='Es Proyectable?') estrucorg = models.TextField(verbose_name='Recursos', blank=True, null=True) personasinvol = models.ManyToManyField(Directorios, related_name='persoinvol',verbose_name='Personas 
Involucradas',blank=True, null = True) personasinvoltext = models.TextField(verbose_name='Personas Involucradas, no registradas', blank=True, null=True) instituinvol = models.ManyToManyField(Actores, related_name='instiinvol',verbose_name='Instituciones Involucradas',blank=True, null = True) instituinvoltext = models.TextField(verbose_name='Institutos Involucrados, no registrados', blank=True, null=True) especies = models.ManyToManyField(Taxon, related_name='tax',verbose_name='Especies Involucradas',blank=True, null = True) especiestext = models.TextField(verbose_name='Especies Involucradas, no registradas', blank=True, null=True) areas = models.ManyToManyField(Areas, related_name='ar',verbose_name='Areas Involucradas',blank=True, null = True) areastext = models.TextField(verbose_name='Areas Involucradas, no registradas', blank=True, null=True) datos = models.FileField(upload_to='solicitudes',verbose_name='Datos Adjuntos',blank=True,null=True) prioridad = models.IntegerField(choices=((0,'Urgente'),(1,'Normal'),(2,'Especial')),verbose_name='Prioridad',null=True,blank=True) estatu = models.IntegerField(choices=((0,'Abierto'),(1,'Cerrado'),(2,'Pausado')),verbose_name='Estatus',null=True,blank=True,db_column='estatu_id') class Meta: verbose_name_plural='Sistema de Solicitudes' #app_label = 'Datos_Transversales' verbose_name = 'Sistema de Solicitudes' def __unicode__(self): return u" %s %s"%(self.remi,self.estatu) # def VerEspecies(self): # try: # espe = Taxon.objects.get(detalletaxon=self) # except Taxon.DoesNotExist: # espe = None # return u"<a href='/manager/especies/taxon/%s'>Ver Taxon</a>"%(tax.id) # VerTaxon.allow_tags = True class Seguimiento(models.Model): solicitud = models.ForeignKey(SistemaSolicitudes,verbose_name='Solicitud',blank=True, null = True) persona = models.ForeignKey(Directorios,verbose_name='Persona',blank=True, null = True,editable = False) mensaje = models.TextField() fecha = models.DateTimeField(default=datetime.now(),editable = False) class Meta: verbose_name_plural='Seguimiento' verbose_name='Seguimiento' def __unicode__(self): return u"%s" %(self.solicitud) class validaciones(models.Model): usuario = models.ForeignKey(PerfilPublico,verbose_name='Usuario') codigo = models.CharField(max_length=120) estatu = models.IntegerField(choices=((0,'Validacion'),(1,'Recuperacion'),(2,'Eliminacion')),verbose_name='Tipo',null=True,blank=True) fecha = models.DateTimeField(default=datetime.now(),editable = False) estado = models.BooleanField(verbose_name="Activo") class Meta: verbose_name_plural='Validacion de Cuentas' #app_label = 'Datos_Transversales' verbose_name = 'Validacion de Cuentas' def __unicode__(self): return u" %s %s"%(self.usuario,self.estatu) class GruposPermisos(models.Model): nombre = models.CharField(max_length=120) estado = models.BooleanField(verbose_name="Activo") class Meta: verbose_name_plural='Grupos de Permisos de Perfil' verbose_name = 'Grupos de Permisos de Perfil' def __unicode__(self): return u" %s %s"%(self.nombre,self.estado) class DetalleGruposPermisos(models.Model): grupo = models.ForeignKey(GruposPermisos,verbose_name='Grupo') seccion = models.ForeignKey(SeccionesPanelPublico,verbose_name='Panel') modulo = ChainedForeignKey(ModulosPublicos,chained_field="seccion",chained_model_field="paneles",show_all=False,auto_choose=True,verbose_name='Modulo',null=True,blank=True) #modulo = models.ForeignKey(ModulosPublicos,verbose_name='Modulo') submodulo = 
ChainedForeignKey(SubModulosPublicos,chained_field="modulo",chained_model_field="modulos",show_all=False,auto_choose=True,verbose_name='SubModulo',null=True,blank=True) #submodulo = models.ForeignKey(SubModulosPublicos,verbose_name='SubModulo') estado = models.BooleanField(verbose_name="Activo") class Meta: verbose_name_plural='Detalle Grupos de Permisos de Perfil' verbose_name = 'Detalle Grupos de Permisos de Perfil' def __unicode__(self): return u" %s %s"%(self.grupo,self.estado)
gpl-3.0
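Two points worth noting about the models above. First, DetalleGruposPermisos uses ChainedForeignKey with no visible import; that field appears to come from the django-smart-selects package, whose import is not shown in this excerpt. Second, several models repeat the same logo()/allow_tags admin-thumbnail helper, a pre-Django-1.9 idiom (newer code would use format_html). A standalone sketch of that helper pattern, with FakeImage as a hypothetical stand-in for an ImageField file:

class FakeImage(object):
    def __init__(self, url):
        self.url = url

    def __bool__(self):
        return bool(self.url)
    __nonzero__ = __bool__  # Python 2 spelling, matching the file's era

def logo(boton):
    # The admin renders the returned HTML unescaped because the real
    # method sets logo.allow_tags = True.
    if boton:
        return "<img src='%s' alt='Activo' height='150px'>" % boton.url
    return "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen"

print(logo(FakeImage("/media/modulos/panel.png")))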
ocefpaf/compliance-checker
compliance_checker/tests/test_cf.py
2
105222
#!/usr/bin/env python # -*- coding: utf-8 -*- import copy import os import sqlite3 from itertools import chain from tempfile import gettempdir import numpy as np import pytest from netCDF4 import Dataset from compliance_checker import cfutil from compliance_checker.cf import ( CF1_6Check, CF1_7Check, dimless_vertical_coordinates_1_6, dimless_vertical_coordinates_1_7, ) from compliance_checker.cf.appendix_d import no_missing_terms from compliance_checker.cf.util import ( StandardNameTable, create_cached_data_dir, download_cf_standard_name_table, is_time_variable, is_vertical_coordinate, units_convertible, units_temporal, ) from compliance_checker.suite import CheckSuite from compliance_checker.tests import BaseTestCase from compliance_checker.tests.helpers import MockTimeSeries, MockVariable, MockRaggedArrayRepr from compliance_checker.tests.resources import STATIC_FILES def get_results(results): """ Returns a tuple of the value scored, possible, and a list of messages in the result set. """ out_of = 0 scored = 0 if isinstance(results, dict): results_list = results.values() else: results_list = results for r in results_list: if isinstance(r.value, tuple): out_of += r.value[1] scored += r.value[0] else: out_of += 1 scored += int(r.value) # Store the messages messages = [] for r in results_list: messages.extend(r.msgs) return scored, out_of, messages class TestCF1_6(BaseTestCase): def setUp(self): """Initialize a CF1_6Check object.""" self.cf = CF1_6Check() # -------------------------------------------------------------------------------- # Helper Methods # -------------------------------------------------------------------------------- def new_nc_file(self): """ Make a new temporary netCDF file for the scope of the test """ nc_file_path = os.path.join(gettempdir(), "example.nc") if os.path.exists(nc_file_path): raise IOError("File Exists: %s" % nc_file_path) nc = Dataset(nc_file_path, "w") self.addCleanup(os.remove, nc_file_path) self.addCleanup(nc.close) return nc def test_coord_data_vars(self): """Check that coordinate data variables are properly handled""" ds = MockTimeSeries() ds.createDimension("siglev", 20) temp = ds.createVariable( "temp", np.float64, dimensions=("time",), fill_value=np.float(99999999999999999999.0), ) temp.coordinates = "sigma noexist" ds.createVariable("sigma", np.float64, dimensions=("siglev",)) self.cf.setup(ds) # time is a NUG coordinate variable, sigma is not, but is referred to in # variables, so both should show up in cf_coord_data_vars. # noexist does not exist in the dataset's variables, so it is not # present in coord_data_vars self.assertEqual(self.cf.coord_data_vars, {"time", "sigma"}) def load_dataset(self, nc_dataset): """ Return a loaded NC Dataset for the given path """ if not isinstance(nc_dataset, str): raise ValueError("nc_dataset should be a string") nc_dataset = Dataset(nc_dataset, "r") self.addCleanup(nc_dataset.close) return nc_dataset # -------------------------------------------------------------------------------- # Compliance Tests # -------------------------------------------------------------------------------- def test_check_data_types(self): """ Invoke check_data_types() and loop through all variables to check data types. Pertains to 2.2. The netCDF data types char, byte, short, int, float or real, and double are all acceptable. NetCDF4 allows string as data type, which is also acceptable. 
""" # check default netCDF data types dataset = self.load_dataset(STATIC_FILES["rutgers"]) result = self.cf.check_data_types(dataset) assert result.value[0] == result.value[1] # check if variables of type `string` is properly processed dataset = self.load_dataset(STATIC_FILES["string"]) if dataset.file_format != "NETCDF4": raise RuntimeError( "netCDF file of wrong format (not netCDF4) was created for checking" ) result = self.cf.check_data_types(dataset) assert result.value[0] == result.value[1] # check bad data types dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) result = self.cf.check_data_types(dataset) # TODO # the acdd_reformat_rebase branch has a new .nc file # which constructs the temp variable with an int64 dtype -- # upon rebasing, this should work as expected # assert result.msgs[0] == u'The variable temp failed because the datatype is int64' # assert result.value == (6, 7) def test_check_child_attr_data_types(self): """ Tests check_child_attr_data_types() to ensure the attributes specified in Section 2.5.1 have a matching data type to their parent variables.""" # create dataset using MockDataset (default constructor gives it time dimension) ds = MockTimeSeries() ds.createVariable( "temp", np.float64, dimensions=("time") ) # add variable "temp" with dimension "time" # check where no special data attrs are present, should result good result = self.cf.check_child_attr_data_types( ds ) # checks all special attrs for all variables self.assert_result_is_good(result) # delete the dataset and start over to create the variable with _FillValue at time of creation del ds ds = MockTimeSeries() ds.createVariable( "temp", np.float64, dimensions=("time",), fill_value=np.float(99999999999999999999.0), ) # give temp _FillValue as a float, expect good result result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_good(result) # give temp valid_range as an array of floats, all should check out ds.variables["temp"].setncattr("valid_range", np.array([35.0, 38.0])) result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_good(result) # dimensions would probably not be time for platform, # but this makes for an easy sanity check against string-like # variables and attributes var = ds.createVariable("platform", "S1", dimensions=("time",), fill_value="") # this probably doesn't make much sense -- more for _FillValue, # but _FillVaue data type checks are done at variable creation time? 
# Can't set manually var.setncattr("valid_max", -999) result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_bad(result) # str or bytes should work var.setncattr("valid_max", "@") result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_good(result) var.setncattr("valid_max", b"@") result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_good(result) # now give invalid integer for valid_min; above two should still check out, this one should fail ds.variables["temp"].setncattr("valid_min", 45) result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_bad(result) # now give invalid string for valid_max ds.variables["temp"].setncattr("valid_max", "eighty") result = self.cf.check_child_attr_data_types(ds) self.assert_result_is_bad(result) # TODO for CF-1.7: actual_range, actual_min/max def test_appendix_a(self): dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) # Ordinarily, options would be specified in the checker constructor, but # we set them manually here so we don't have to monkey patch `setUp` self.cf.options = {"enable_appendix_a_checks"} new_check = copy.deepcopy(self.cf) self.cf.setup(dataset) aa_results = self.cf.check_appendix_a(dataset) flat_messages = {msg for res in aa_results for msg in res.msgs} self.assertIn( '[Appendix A] Attribute "compress" should not be present in non-coordinate data (D) variable "temp". This attribute may only appear in coordinate data (C).', flat_messages, ) self.assertIn("add_offset must be a numeric type", flat_messages) nc_obj = MockTimeSeries() nc_obj._FillValue = "-9999.00" new_check.setup(nc_obj) res2 = new_check.check_appendix_a(nc_obj) flat_messages = {msg for res in res2 for msg in res.msgs} self.assertIn( '[Appendix A] Attribute "_FillValue" should not be present in global (G) attributes. This attribute may only appear in coordinate data (C) and non-coordinate data (D).', flat_messages, ) def test_naming_conventions(self): """ Section 2.3 Naming Conventions Variable, dimension and attr names should begin with a letter and be composed of letters, digits, and underscores. """ # compliant dataset dataset = self.load_dataset(STATIC_FILES["rutgers"]) results = self.cf.check_naming_conventions(dataset) scored, out_of, messages = get_results(results) assert scored == out_of # non-compliant dataset dataset = self.load_dataset(STATIC_FILES["bad"]) results = self.cf.check_naming_conventions(dataset) scored, out_of, messages = get_results(results) assert len(results) == 3 assert scored < out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 2 assert all(r.name == u"§2.3 Naming Conventions" for r in results) # another non-compliant dataset dataset = self.load_dataset(STATIC_FILES["chap2"]) results = self.cf.check_naming_conventions(dataset) scored, out_of, messages = get_results(results) assert len(results) == 3 assert scored < out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 2 assert all(r.name == u"§2.3 Naming Conventions" for r in results) def test_check_names_unique(self): """ 2.3 names should not be distinguished purely by case, i.e., if case is disregarded, no two names should be the same. 
""" dataset = self.load_dataset(STATIC_FILES["rutgers"]) result = self.cf.check_names_unique(dataset) num_var = len(dataset.variables) expected = (num_var,) * 2 self.assertEqual(result.value, expected) dataset = self.load_dataset(STATIC_FILES["chap2"]) result = self.cf.check_names_unique(dataset) assert result.value == (6, 7) assert ( result.msgs[0] == u"Variables are not case sensitive. Duplicate variables named: not_unique" ) def test_check_dimension_names(self): """ 2.4 A variable may have any number of dimensions, including zero, and the dimensions must all have different names. """ dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) result = self.cf.check_dimension_names(dataset) assert result.value == (6, 7) dataset = self.load_dataset(STATIC_FILES["chap2"]) result = self.cf.check_dimension_names(dataset) assert result.msgs[0] == u"no_reason has two or more dimensions named time" def test_check_dimension_order(self): """ 2.4 If any or all of the dimensions of a variable have the interpretations of "date or time" (T), "height or depth" (Z), "latitude" (Y), or "longitude" (X) then we recommend, those dimensions to appear in the relative order T, then Z, then Y, then X in the CDL definition corresponding to the file. All other dimensions should, whenever possible, be placed to the left of the spatiotemporal dimensions. """ dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) result = self.cf.check_dimension_order(dataset) assert result.value == (5, 6) assert result.msgs[0] == ( u"really_bad's spatio-temporal dimensions are not in the " "recommended order T, Z, Y, X and/or further dimensions are not " "located left of T, Z, Y, X. The dimensions (and their guessed " "types) are latitude (Y), power (U) (with U: other/unknown; L: " "unlimited)." ) dataset = self.load_dataset(STATIC_FILES["dimension_order"]) result = self.cf.check_dimension_order(dataset) self.assertEqual((3, 3), result.value) self.assertEqual([], result.msgs) def test_check_fill_value_outside_valid_range(self): """ 2.5.1 The _FillValue should be outside the range specified by valid_range (if used) for a variable. """ dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) result = self.cf.check_fill_value_outside_valid_range(dataset) assert result.msgs[0] == ( u"salinity:_FillValue (1.0) should be outside the " "range specified by valid_min/valid_max (-10, 10)" ) dataset = self.load_dataset(STATIC_FILES["chap2"]) result = self.cf.check_fill_value_outside_valid_range(dataset) assert result.value == (1, 2) assert result.msgs[0] == ( u"wind_speed:_FillValue (12.0) should be outside the " "range specified by valid_min/valid_max (0.0, 20.0)" ) def test_check_conventions_are_cf_16(self): """ §2.6.1 the NUG defined global attribute Conventions to the string value "CF-1.6" """ # :Conventions = "CF-1.6" dataset = self.load_dataset(STATIC_FILES["rutgers"]) result = self.cf.check_conventions_version(dataset) self.assertTrue(result.value) # :Conventions = "CF-1.6 ,ACDD" ; dataset = self.load_dataset(STATIC_FILES["conv_multi"]) result = self.cf.check_conventions_version(dataset) self.assertTrue(result.value) # :Conventions = "NoConvention" dataset = self.load_dataset(STATIC_FILES["conv_bad"]) result = self.cf.check_conventions_version(dataset) self.assertFalse(result.value) assert result.msgs[0] == ( u"§2.6.1 Conventions global attribute does not contain " '"CF-1.6"' ) def test_check_convention_globals(self): """ Load up a dataset and ensure title and history global attrs are checked properly (§2.6.2). 
""" # check for pass dataset = self.load_dataset(STATIC_FILES["rutgers"]) result = self.cf.check_convention_globals(dataset) assert result.value[0] == result.value[1] # check if it doesn't exist that we pass dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) result = self.cf.check_convention_globals(dataset) assert result.value[0] != result.value[1] assert ( result.msgs[0] == u"§2.6.2 global attribute title should exist and be a non-empty string" ) def test_check_convention_possibly_var_attrs(self): """ §2.6.2 The units attribute is required for all variables that represent dimensional quantities (except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables defined in Section 7.4, "Climatological Statistics"). Units are not required for dimensionless quantities. A variable with no units attribute is assumed to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be included. - units required - type must be recognized by udunits - if std name specified, must be consistent with standard name table, must also be consistent with a specified cell_methods attribute if present """ dataset = self.load_dataset(STATIC_FILES["rutgers"]) result = self.cf.check_convention_possibly_var_attrs(dataset) # 10x comment attrs # 1x institution # 1x source # 1x EMPTY references assert result.value[0] != result.value[1] assert ( result.msgs[0] == u"§2.6.2 references global attribute should be a non-empty string" ) # load bad_data_type.nc dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) result = self.cf.check_convention_possibly_var_attrs(dataset) # no references # institution is a 10L # no source # comments don't matter unless they're empty assert result.value[0] != result.value[1] assert ( result.msgs[0] == u"§2.6.2 salinity:institution should be a non-empty string" ) def test_check_standard_name(self): """ 3.3 A standard name is associated with a variable via the attribute standard_name which takes a string value comprised of a standard name optionally followed by one or more blanks and a standard name modifier """ dataset = self.load_dataset(STATIC_FILES["2dim"]) results = self.cf.check_standard_name(dataset) for each in results: self.assertTrue(each.value) # load failing ds dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) results = self.cf.check_standard_name(dataset) score, out_of, messages = get_results(results) # 9 vars checked, 8 fail assert len(results) == 9 assert score < out_of assert all(r.name == u"§3.3 Standard Name" for r in results) # load different ds -- ll vars pass this check dataset = self.load_dataset(STATIC_FILES["reduced_horizontal_grid"]) results = self.cf.check_standard_name(dataset) score, out_of, messages = get_results(results) assert score == out_of def test_cell_bounds(self): dataset = self.load_dataset(STATIC_FILES["grid-boundaries"]) results = self.cf.check_cell_boundaries(dataset) score, out_of, messages = get_results(results) assert (score, out_of) == (2, 2) dataset = self.load_dataset(STATIC_FILES["cf_example_cell_measures"]) results = self.cf.check_cell_boundaries(dataset) dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) results = self.cf.check_cell_boundaries(dataset) dataset = self.load_dataset(STATIC_FILES["bounds_bad_order"]) results = self.cf.check_cell_boundaries(dataset) score, out_of, messages = get_results(results) # Make sure that the rgrid coordinate variable isn't checked for standard_name assert (score, out_of) == (0, 2) dataset = 
self.load_dataset(STATIC_FILES["bounds_bad_num_coords"]) results = self.cf.check_cell_boundaries(dataset) score, out_of, messages = get_results(results) assert (score, out_of) == (0, 2) dataset = self.load_dataset(STATIC_FILES["1d_bound_bad"]) results = self.cf.check_cell_boundaries(dataset) score, out_of, messages = get_results(results) assert (score, out_of) == (0, 2) def test_cell_measures(self): dataset = self.load_dataset(STATIC_FILES["cell_measure"]) results = self.cf.check_cell_measures(dataset) score, out_of, messages = get_results(results) assert score == out_of assert score > 0 dataset = self.load_dataset(STATIC_FILES["bad_cell_measure1"]) results = self.cf.check_cell_measures(dataset) score, out_of, messages = get_results(results) message = ( "The cell_measures attribute for variable PS is formatted incorrectly. " "It should take the form of either 'area: cell_var' or 'volume: cell_var' " "where cell_var is the variable describing the cell measures" ) assert message in messages dataset = self.load_dataset(STATIC_FILES["bad_cell_measure2"]) results = self.cf.check_cell_measures(dataset) score, out_of, messages = get_results(results) message = u"Cell measure variable box_area referred to by PS is not present in dataset variables" assert message in messages def test_climatology_cell_methods(self): """ Checks that climatology cell_methods strings are properly validated """ dataset = self.load_dataset(STATIC_FILES["climatology"]) results = self.cf.check_climatological_statistics(dataset) # cell methods in this file is # "time: mean within days time: mean over days" score, out_of, messages = get_results(results) self.assertEqual(score, out_of) temp_var = dataset.variables["temperature"] = MockVariable( dataset.variables["temperature"] ) temp_var.cell_methods = "INVALID" results = self.cf.check_climatological_statistics(dataset) score, out_of, messages = get_results(results) self.assertNotEqual(score, out_of) # incorrect time units temp_var.cell_methods = "time: mean within years time: mean over days" results = self.cf.check_climatological_statistics(dataset) score, out_of, messages = get_results(results) self.assertNotEqual(score, out_of) # can only have third method over years if first two are within and # over days, respectively temp_var.cell_methods = ( "time: mean within years time: mean over years time: sum over years" ) results = self.cf.check_climatological_statistics(dataset) score, out_of, messages = get_results(results) self.assertNotEqual(score, out_of) # this, on the other hand, should work. 
temp_var.cell_methods = ( "time: mean within days time: mean over days time: sum over years" ) results = self.cf.check_climatological_statistics(dataset) score, out_of, messages = get_results(results) self.assertEqual(score, out_of) # parenthesized comment to describe climatology temp_var.cell_methods = ( "time: sum within days time: maximum over days (ENSO years)" ) results = self.cf.check_climatological_statistics(dataset) score, out_of, messages = get_results(results) self.assertEqual(score, out_of) def test_check_ancillary_variables(self): """ Test to ensure that ancillary variables are properly checked """ dataset = self.load_dataset(STATIC_FILES["rutgers"]) results = self.cf.check_ancillary_variables(dataset) result_dict = {result.name: result for result in results} result = result_dict[u"§3.4 Ancillary Data"] assert result.value == (2, 2) dataset = self.load_dataset(STATIC_FILES["bad_reference"]) results = self.cf.check_ancillary_variables(dataset) result_dict = {result.name: result for result in results} result = result_dict[u"§3.4 Ancillary Data"] assert result.value == (1, 2) assert u"temp_qc is not a variable in this dataset" == result.msgs[0] def test_download_standard_name_table(self): """ Test that a user can download a specific standard name table """ version = "35" data_directory = create_cached_data_dir() location = os.path.join( data_directory, "cf-standard-name-table-test-{0}.xml".format(version) ) download_cf_standard_name_table(version, location) # Test that the file now exists in location and is the right version self.assertTrue(os.path.isfile(location)) std_names = StandardNameTable(location) self.assertEqual(std_names._version, version) self.addCleanup(os.remove, location) def test_bad_standard_name_table(self): """ Test that failure in case a bad standard name table is passed. """ # would this ever actually be reached by the code? with pytest.raises(IOError): StandardNameTable("dummy_non_existent_file.ext") nc_obj = MockTimeSeries() nc_obj.standard_name_table = "dummy_non_existent_file.ext" self.assertFalse(self.cf._find_cf_standard_name_table(nc_obj)) nc_obj.standard_name_table = np.array([], np.float64) self.assertFalse(self.cf._find_cf_standard_name_table(nc_obj)) nc_obj.standard_name_vocabulary = "CF Standard Name Table vNN???" with pytest.warns( UserWarning, match="Cannot extract CF standard name version " "number from standard_name_vocabulary string", ): self.assertFalse(self.cf._find_cf_standard_name_table(nc_obj)) def test_check_flags(self): """Test that the check for flags works as expected.""" dataset = self.load_dataset(STATIC_FILES["rutgers"]) results = self.cf.check_flags(dataset) scored, out_of, messages = get_results(results) # only 4 variables in this dataset do not have perfect scores imperfect = [r.value for r in results if r.value[0] < r.value[1]] assert len(imperfect) == 4 def test_check_flag_masks(self): dataset = self.load_dataset(STATIC_FILES["ghrsst"]) results = self.cf.check_flags(dataset) scored, out_of, messages = get_results(results) # This is an example of a perfect dataset for flags assert scored > 0 assert scored == out_of def test_check_bad_units(self): """Load a dataset with units that are expected to fail (bad_units.nc). 
There are 6 variables in this dataset, three of which should give an error: - time, with units "s" (should be <units> since <epoch>) - lat, with units "degrees_E" (should be degrees) - lev, with units "level" (deprecated)""" dataset = self.load_dataset(STATIC_FILES["2dim"]) results = self.cf.check_units(dataset) for result in results: self.assert_result_is_good(result) # Not sure why bad_data_type was being used, we have a dataset specifically for bad units # dataset = self.load_dataset(STATIC_FILES['bad_data_type']) dataset = self.load_dataset(STATIC_FILES["bad_units"]) all_results = self.cf.check_units(dataset) # use itertools.chain() to unpack the lists of messages results_list = list(chain(*(r.msgs for r in all_results if r.msgs))) # check the results only have '§3.1 Units' as the header assert all(r.name == u"§3.1 Units" for r in all_results) # check that all the expected variables have been hit assert all( any(s in msg for msg in results_list) for s in ["time", "lat", "lev"] ) def test_latitude(self): """ Section 4.1 Latitude Coordinate """ # Check compliance dataset = self.load_dataset(STATIC_FILES["example-grid"]) results = self.cf.check_latitude(dataset) score, out_of, messages = get_results(results) assert score == out_of # Verify non-compliance -- 9/12 pass dataset = self.load_dataset(STATIC_FILES["bad"]) results = self.cf.check_latitude(dataset) scored, out_of, messages = get_results(results) assert len(results) == 12 assert scored < out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 3 assert (r.name == u"§4.1 Latitude Coordinate" for r in results) # check with another ds -- all 6 vars checked pass dataset = self.load_dataset(STATIC_FILES["rotated_pole_grid"]) results = self.cf.check_latitude(dataset) scored, out_of, messages = get_results(results) assert len(results) == 6 assert scored == out_of assert (r.name == u"§4.1 Latitude Coordinate" for r in results) # hack to avoid writing to read-only file dataset.variables["rlat"] = MockVariable(dataset.variables["rlat"]) rlat = dataset.variables["rlat"] rlat.name = "rlat" # test with a bad value rlat.units = "degrees_north" results = self.cf.check_latitude(dataset) scored, out_of, messages = get_results(results) wrong_format = u"Grid latitude variable '{}' should use degree equivalent units without east or north components. 
Current units are {}" self.assertTrue(wrong_format.format(rlat.name, rlat.units) in messages) rlat.units = "radians" results = self.cf.check_latitude(dataset) scored, out_of, messages = get_results(results) self.assertTrue(wrong_format.format(rlat.name, rlat.units) in messages) def test_longitude(self): """ Section 4.2 Longitude Coordinate """ # Check compliance dataset = self.load_dataset(STATIC_FILES["example-grid"]) results = self.cf.check_longitude(dataset) score, out_of, messages = get_results(results) assert score == out_of # Verify non-compliance -- 12 checked, 3 fail dataset = self.load_dataset(STATIC_FILES["bad"]) results = self.cf.check_longitude(dataset) scored, out_of, messages = get_results(results) assert len(results) == 12 assert scored < out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 3 assert all(r.name == u"§4.2 Longitude Coordinate" for r in results) # check different dataset # TODO can be improved for check_latitude too dataset = self.load_dataset(STATIC_FILES["rotated_pole_grid"]) results = self.cf.check_latitude(dataset) scored, out_of, messages = get_results(results) assert (scored, out_of) == (6, 6) # hack to avoid writing to read-only file dataset.variables["rlon"] = MockVariable(dataset.variables["rlon"]) rlon = dataset.variables["rlon"] rlon.name = "rlon" # test with a bad value rlon.units = "degrees_east" results = self.cf.check_longitude(dataset) scored, out_of, messages = get_results(results) wrong_format = u"Grid longitude variable '{}' should use degree equivalent units without east or north components. Current units are {}" self.assertTrue(wrong_format.format(rlon.name, rlon.units) in messages) rlon.units = "radians" results = self.cf.check_longitude(dataset) scored, out_of, messages = get_results(results) self.assertTrue(wrong_format.format(rlon.name, rlon.units) in messages) def test_is_vertical_coordinate(self): """ Section 4.3 Qualifiers for Vertical Coordinate NOTE: The standard doesn't explicitly say that vertical coordinates must be a coordinate type. """ # Make something that I can attach attrs to mock_variable = MockVariable # Proper name/standard_name known_name = mock_variable() known_name.standard_name = "depth" self.assertTrue(is_vertical_coordinate("not_known", known_name)) # Proper Axis axis_set = mock_variable() axis_set.axis = "Z" self.assertTrue(is_vertical_coordinate("not_known", axis_set)) # Proper units units_set = mock_variable() units_set.units = "dbar" self.assertTrue(is_vertical_coordinate("not_known", units_set)) # Proper units/positive positive = mock_variable() positive.units = "m" positive.positive = "up" self.assertTrue(is_vertical_coordinate("not_known", positive)) def test_vertical_dimension(self): """ Section 4.3.1 Dimensional Vertical Coordinate """ # Check for compliance dataset = self.load_dataset(STATIC_FILES["example-grid"]) results = self.cf.check_dimensional_vertical_coordinate(dataset) assert len(results) == 1 assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) # non-compliance -- one check fails dataset = self.load_dataset(STATIC_FILES["illegal-vertical"]) results = self.cf.check_dimensional_vertical_coordinate(dataset) scored, out_of, messages = get_results(results) assert len(results) == 1 assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) assert scored < out_of def test_appendix_d(self): """ CF 1.6 Appendix D The definitions given here allow an application to compute dimensional coordinate values from the dimensionless ones and associated variables. 
The formulas are expressed for a gridpoint (n,k,j,i) where i and j are the horizontal indices, k is the vertical index and n is the time index. A coordinate variable is associated with its definition by the value of the standard_name attribute. The terms in the definition are associated with file variables by the formula_terms attribute. The formula_terms attribute takes a string value, the string being comprised of blank-separated elements of the form "term: variable", where term is a keyword that represents one of the terms in the definition, and variable is the name of the variable in a netCDF file that contains the values for that term. The order of elements is not significant. """ # For each of the listed dimensionless vertical coordinates, # verify that the formula_terms match the provided set of terms self.assertTrue( no_missing_terms( "atmosphere_ln_pressure_coordinate", {"p0", "lev"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "atmosphere_sigma_coordinate", {"sigma", "ps", "ptop"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "atmosphere_hybrid_sigma_pressure_coordinate", {"a", "b", "ps"}, dimless_vertical_coordinates_1_6, ) ) # test alternative terms for # 'atmosphere_hybrid_sigma_pressure_coordinate' self.assertTrue( no_missing_terms( "atmosphere_hybrid_sigma_pressure_coordinate", {"ap", "b", "ps"}, dimless_vertical_coordinates_1_6, ) ) # check that an invalid set of terms fails self.assertFalse( no_missing_terms( "atmosphere_hybrid_sigma_pressure_coordinate", {"a", "b", "p"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "atmosphere_hybrid_height_coordinate", {"a", "b", "orog"}, dimless_vertical_coordinates_1_6, ) ) # missing terms should cause failure self.assertFalse( no_missing_terms( "atmosphere_hybrid_height_coordinate", {"a", "b"}, dimless_vertical_coordinates_1_6, ) ) # excess terms should cause failure self.assertFalse( no_missing_terms( "atmosphere_hybrid_height_coordinate", {"a", "b", "c", "orog"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "atmosphere_sleve_coordinate", {"a", "b1", "b2", "ztop", "zsurf1", "zsurf2"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "ocean_sigma_coordinate", {"sigma", "eta", "depth"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "ocean_s_coordinate", {"s", "eta", "depth", "a", "b", "depth_c"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "ocean_sigma_z_coordinate", {"sigma", "eta", "depth", "depth_c", "nsigma", "zlev"}, dimless_vertical_coordinates_1_6, ) ) self.assertTrue( no_missing_terms( "ocean_double_sigma_coordinate", {"sigma", "depth", "z1", "z2", "a", "href", "k_c"}, dimless_vertical_coordinates_1_6, ) ) def test_dimensionless_vertical(self): """ Section 4.3.2 """ # Check affirmative compliance dataset = self.load_dataset(STATIC_FILES["dimensionless"]) results = self.cf.check_dimensionless_vertical_coordinates(dataset) scored, out_of, messages = get_results(results) # all variables checked (2) pass assert len(results) == 2 assert scored == out_of assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) # Check negative compliance -- 3 out of 4 pass dataset = self.load_dataset(STATIC_FILES["bad"]) results = self.cf.check_dimensionless_vertical_coordinates(dataset) scored, out_of, messages = get_results(results) assert len(results) == 4 assert scored <= out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 2 
assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) # test with an invalid formula_terms dataset.variables["lev2"] = MockVariable(dataset.variables["lev2"]) lev2 = dataset.variables["lev2"] lev2.formula_terms = "a: var1 b:var2 orog:" # create a malformed formula_terms attribute and check that it fails # 2/4 still pass results = self.cf.check_dimensionless_vertical_coordinates(dataset) scored, out_of, messages = get_results(results) assert len(results) == 4 assert scored <= out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 2 assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) def test_is_time_variable(self): var1 = MockVariable() var1.standard_name = "time" self.assertTrue(is_time_variable("not_time", var1)) var2 = MockVariable() self.assertTrue(is_time_variable("time", var2)) self.assertFalse(is_time_variable("not_time", var2)) var3 = MockVariable() var3.axis = "T" self.assertTrue(is_time_variable("maybe_time", var3)) var4 = MockVariable() var4.units = "seconds since 1900-01-01" self.assertTrue(is_time_variable("maybe_time", var4)) def test_dimensionless_standard_names(self): """Check that dimensionless standard names are properly detected""" std_names_xml_root = self.cf._std_names._root # canonical_units are K, should be False self.assertFalse( cfutil.is_dimensionless_standard_name( std_names_xml_root, "sea_water_temperature" ) ) # canonical_units are 1, should be True self.assertTrue( cfutil.is_dimensionless_standard_name( std_names_xml_root, "sea_water_practical_salinity" ) ) # canonical_units are 1e-3, should be True self.assertTrue( cfutil.is_dimensionless_standard_name( std_names_xml_root, "sea_water_salinity" ) ) def test_check_time_coordinate(self): dataset = self.load_dataset(STATIC_FILES["example-grid"]) results = self.cf.check_time_coordinate(dataset) for r in results: self.assertTrue(r.value) dataset = self.load_dataset(STATIC_FILES["bad"]) results = self.cf.check_time_coordinate(dataset) scored, out_of, messages = get_results(results) assert u"time does not have correct time units" in messages assert (scored, out_of) == (1, 2) def test_check_calendar(self): """Load a dataset with an invalid calendar attribute (non-comp/bad.nc). 
This dataset has a variable, "time" with calendar attribute "nope".""" dataset = self.load_dataset(STATIC_FILES["example-grid"]) results = self.cf.check_calendar(dataset) for r in results: self.assertTrue(r.value) dataset = self.load_dataset(STATIC_FILES["bad"]) results = self.cf.check_calendar(dataset) scored, out_of, messages = get_results(results) assert ( u"§4.4.1 Variable time should have a valid calendar: 'nope' is not a valid calendar" in messages ) def test_check_aux_coordinates(self): dataset = self.load_dataset(STATIC_FILES["illegal-aux-coords"]) results = self.cf.check_aux_coordinates(dataset) result_dict = {result.name: result for result in results} result = result_dict[u"§5 Coordinate Systems"] assert result.msgs == [] # shouldn't have any messages assert result.value == (4, 4) def test_check_grid_coordinates(self): dataset = self.load_dataset(STATIC_FILES["2dim"]) results = self.cf.check_grid_coordinates(dataset) scored, out_of, messages = get_results(results) result_dict = {result.name: result for result in results} result = result_dict[ u"§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections" ] assert result.value == (2, 2) assert (scored, out_of) == (2, 2) def test_check_two_dimensional(self): dataset = self.load_dataset(STATIC_FILES["2dim"]) results = self.cf.check_grid_coordinates(dataset) for r in results: self.assertTrue(r.value) # Need the bad testing dataset = self.load_dataset(STATIC_FILES["bad2dim"]) results = self.cf.check_grid_coordinates(dataset) scored, out_of, messages = get_results(results) # all variables checked fail (2) assert len(results) == 2 assert scored < out_of assert all( r.name == u"§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections" for r in results ) def test_check_reduced_horizontal_grid(self): dataset = self.load_dataset(STATIC_FILES["rhgrid"]) results = self.cf.check_reduced_horizontal_grid(dataset) scored, out_of, messages = get_results(results) assert scored == out_of assert len(results) == 1 assert all(r.name == u"§5.3 Reduced Horizontal Grid" for r in results) # load failing ds -- one variable has failing check dataset = self.load_dataset(STATIC_FILES["bad-rhgrid"]) results = self.cf.check_reduced_horizontal_grid(dataset) scored, out_of, messages = get_results(results) assert scored != out_of assert len(results) == 2 assert len([r for r in results if r.value[0] < r.value[1]]) == 1 assert all(r.name == u"§5.3 Reduced Horizontal Grid" for r in results) def test_check_grid_mapping(self): dataset = self.load_dataset(STATIC_FILES["mapping"]) results = self.cf.check_grid_mapping(dataset) assert len(results) == 6 assert len([r.value for r in results.values() if r.value[0] < r.value[1]]) == 0 expected_name = ( "§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections" ) assert all(r.name == expected_name for r in results.values()) def test_is_geophysical(self): # check whether string type variable, which are not `cf_role`, are # properly processed dataset = self.load_dataset(STATIC_FILES["string"]) if dataset.file_format != "NETCDF4": raise RuntimeError( "netCDF file of wrong format (not netCDF4) was created for checking" ) try: result = cfutil.is_geophysical(dataset, "j") except AttributeError: pytest.fail( "Test probably fails because var.dtype.kind or var.dtype.char " "was tested on string-type variable. 
Consider checking for " "`var.dtype is str`" ) assert not result # assert False # TODO: overhaul to use netCDF global attributes or mocks and variable # attributes def test_check_attr_type(self): """ Check that the check_attr_type method checks grid_mapping attribute types correctly. """ # test good att_name = "test_att" att = np.int64(45) att_type = "N" # numeric res = self.cf._check_attr_type(att_name, att_type, att) self.assertTrue(res[0]) self.assertEqual(res[1], None) # create a temporary variable and test this only nc_obj = MockTimeSeries() nc_obj.createVariable("temperature", "d", ("time",)) nc_obj.variables["temperature"].setncattr("test_att", np.float64(45)) att_name = "test_att" _var = nc_obj.variables["temperature"] att = np.float64(45) att_type = "D" # numeric, types should match res = self.cf._check_attr_type(att_name, att_type, att, _var) self.assertTrue(res[0]) self.assertEqual(res[1], None) att_name = "test_att" att = "yo" att_type = "S" # string res = self.cf._check_attr_type(att_name, att_type, att) self.assertTrue(res[0]) self.assertEqual(res[1], None) # test bad att_name = "test_att" att = np.int64(45) att_type = "S" # string, but att type is numeric res = self.cf._check_attr_type(att_name, att_type, att) self.assertFalse(res[0]) self.assertEqual(res[1], "test_att must be a string") # test bad att_name = "test_att" att = "bad" att_type = "N" # numeric, but att type is string res = self.cf._check_attr_type(att_name, att_type, att) self.assertFalse(res[0]) self.assertEqual(res[1], "test_att must be a numeric type") # create a temporary variable and test this only nc_obj = MockTimeSeries() nc_obj.createVariable("temperature", "d", ("time",)) nc_obj.variables["temperature"].setncattr("test_att", np.int32(45)) _var = nc_obj.variables["temperature"] att_name = "test_att" att = np.int32(2) att_type = "D" # should be same datatypes res = self.cf._check_attr_type(att_name, att_type, att, _var) self.assertFalse(res[0]) self.assertEqual( res[1], "test_att must be numeric and must be equivalent to float64 dtype" ) def test_check_grid_mapping_attr_condition(self): """ Ensure the check_grid_mapping_attr_condition() method works as expected. 
""" # test passes attr_name = "latitude_of_projection_origin" val = 0 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) attr_name = "longitude_of_projection_origin" val = 0 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) attr_name = "longitude_of_prime_meridian" val = 0 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) attr_name = "scale_factor_at_central_meridian" val = 1 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) attr_name = "scale_factor_at_projection_origin" val = 1 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) attr_name = "standard_parallel" val = 0 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) attr_name = "straight_vertical_longitude_from_pole" val = 0 res = self.cf._check_grid_mapping_attr_condition(val, attr_name) self.assertTrue(res[0]) def test_check_geographic_region(self): dataset = self.load_dataset(STATIC_FILES["bad_region"]) results = self.cf.check_geographic_region(dataset) scored, out_of, messages = get_results(results) # only one variable failed this check in this ds out of 2 assert len(results) == 2 assert scored < out_of assert ( u"6.1.1 'Neverland' specified by 'neverland' is not a valid region" in messages ) def test_check_packed_data(self): dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) results = self.cf.check_packed_data(dataset) score, out_of, messages = get_results(results) msgs = [ u"Type of tempvalid_min attribute (int32) does not match variable type (int64)", u"Type of temp:valid_max attribute (int32) does not match variable type (int64)", u"Type of salinityvalid_min attribute (int32) does not match variable type (float64)", u"Type of salinity:valid_max attribute (int32) does not match variable type (float64)", ] self.assertEqual(len(results), 4) self.assertTrue(score < out_of) self.assertTrue(all(m in messages for m in msgs)) def test_compress_packed(self): """Tests compressed indexed coordinates""" dataset = self.load_dataset(STATIC_FILES["reduced_horizontal_grid"]) results = self.cf.check_compression_gathering(dataset) self.assertTrue(results[0].value) dataset = self.load_dataset(STATIC_FILES["bad_data_type"]) results = self.cf.check_compression_gathering(dataset) self.assertFalse(results[0].value) self.assertFalse(results[1].value) #def test_check_all_features_are_same_type(self): # dataset = self.load_dataset(STATIC_FILES["rutgers"]) # result = self.cf.check_all_features_are_same_type(dataset) # assert result # dataset = self.load_dataset(STATIC_FILES["featureType"]) # result = self.cf.check_all_features_are_same_type(dataset) # assert result def test_featureType_is_case_insensitive(self): """ Tests that the featureType attribute is case insensitive """ nc = self.new_nc_file() nc.featureType = "timeseriesprofile" result = self.cf.check_feature_type(nc) self.assertTrue(result.value == (1, 1)) nc.featureType = "timeSeriesProfile" result = self.cf.check_feature_type(nc) self.assertTrue(result.value == (1, 1)) nc.featureType = "traJectorYpRofile" result = self.cf.check_feature_type(nc) self.assertTrue(result.value == (1, 1)) # This one should fail nc.featureType = "timeseriesprofilebad" result = self.cf.check_feature_type(nc) self.assertTrue(result.value == (0, 1)) def test_check_units(self): """ Ensure that container variables are not checked for units but geophysical variables are """ dataset = 
self.load_dataset(STATIC_FILES["units_check"]) results = self.cf.check_units(dataset) # We don't keep track of the variables names for checks that passed, so # we can make a strict assertion about how many checks were performed # and if there were errors, which there shouldn't be. # FIXME (badams): find a better way of grouping together results by # variable checked instead of checking the number of # points scored, which should be deprecated, and # furthermore is fragile and breaks tests when check # definitions change scored, out_of, messages = get_results(results) assert scored == 24 assert out_of == 24 assert messages == [] def test_check_duplicates(self): """ Test to verify that the check identifies duplicate axes. Load the duplicate_axis.nc dataset and verify the duplicate axes are accounted for. """ dataset = self.load_dataset(STATIC_FILES["duplicate_axis"]) results = self.cf.check_duplicate_axis(dataset) scored, out_of, messages = get_results(results) # only one check run here, so we can directly compare all the values assert scored != out_of assert messages[0] == u"'temp' has duplicate axis X defined by [lon_rho, lon_u]" def test_check_multi_dimensional_coords(self): """ Test to verify that multi dimensional coordinates are checked for sharing names with dimensions """ dataset = self.load_dataset(STATIC_FILES["multi-dim-coordinates"]) results = self.cf.check_multi_dimensional_coords(dataset) scored, out_of, messages = get_results(results) # 4 variables were checked in this ds, 2 of which passed assert len(results) == 4 assert len([r for r in results if r.value[0] < r.value[1]]) == 2 assert all(r.name == u"§5 Coordinate Systems" for r in results) def test_64bit(self): dataset = self.load_dataset(STATIC_FILES["ints64"]) suite = CheckSuite() suite.checkers = {"cf": CF1_6Check} suite.run(dataset, "cf") def test_variable_feature_check(self): # non-compliant dataset -- 1/1 fail dataset = self.load_dataset(STATIC_FILES["bad-trajectory"]) results = self.cf.check_variable_features(dataset) scored, out_of, messages = get_results(results) assert len(results) == 2 assert scored < out_of assert len([r for r in results if r.value[0] < r.value[1]]) == 1 assert all(r.name == u"§9.1 Features and feature types" for r in results) # compliant dataset dataset = self.load_dataset(STATIC_FILES["trajectory-complete"]) results = self.cf.check_variable_features(dataset) scored, out_of, messages = get_results(results) assert scored == out_of # compliant(?) dataset dataset = self.load_dataset(STATIC_FILES["trajectory-implied"]) results = self.cf.check_variable_features(dataset) scored, out_of, messages = get_results(results) assert scored == out_of def test_check_cell_methods(self): """Load a dataset (climatology.nc) and check the cell methods. 
This dataset has variable "temperature" which has valid cell_methods format, cell_methods attribute, and valid names within the cell_methods attribute.""" dataset = self.load_dataset(STATIC_FILES["climatology"]) results = self.cf.check_cell_methods(dataset) scored, out_of, messages = get_results(results) # use itertools.chain() to unpack the lists of messages results_list = list(chain(*(r.msgs for r in results if r.msgs))) # check the results only have expected headers assert set([r.name for r in results]).issubset( set([u"§7.1 Cell Boundaries", u"§7.3 Cell Methods"]) ) # check that all the expected variables have been hit assert all("temperature" in msg for msg in results_list) # check that all the results have come back passing assert all(r.value[0] == r.value[1] for r in results) # create a temporary variable and test this only nc_obj = MockTimeSeries() nc_obj.createVariable("temperature", "d", ("time",)) temp = nc_obj.variables["temperature"] temp.cell_methods = "lat: lon: mean depth: mean (interval: 20 meters)" results = self.cf.check_cell_methods(nc_obj) # invalid components lat, lon, and depth -- expect score == (6, 9) scored, out_of, messages = get_results(results) assert scored != out_of temp.cell_methods = "lat: lon: mean depth: mean (interval: x whizbangs)" results = self.cf.check_cell_methods(nc_obj) scored, out_of, messages = get_results(results) # check non-standard comments are gauged correctly temp.cell_methods = ( "lat: lon: mean depth: mean (comment: should not go here interval: 2.5 m)" ) results = self.cf.check_cell_methods(nc_obj) scored, out_of, messages = get_results(results) self.assertTrue( u'§7.3.3 The non-standard "comment:" element must come after any standard elements in cell_methods for variable temperature' in messages ) # standalone comments require no keyword temp.cell_methods = "lon: mean (This is a standalone comment)" results = self.cf.check_cell_methods(nc_obj) scored, out_of, messages = get_results(results) assert "standalone" not in messages # check that invalid keywords dealt with temp.cell_methods = ( "lat: lon: mean depth: mean (invalid_keyword: this is invalid)" ) results = self.cf.check_cell_methods(nc_obj) scored, out_of, messages = get_results(results) self.assertTrue( u'§7.3.3 Invalid cell_methods keyword "invalid_keyword:" for variable temperature. Must be one of [interval, comment]' in messages ) # check that "parenthetical elements" are well-formed (they should not be) temp.cell_methods = ( "lat: lon: mean depth: mean (interval 0.2 m interval: 0.01 degrees)" ) results = self.cf.check_cell_methods(nc_obj) scored, out_of, messages = get_results(results) assert ( u"§7.3.3 Parenthetical content inside temperature:cell_methods is not well formed: interval 0.2 m interval: 0.01 degrees" in messages ) # -------------------------------------------------------------------------------- # Utility Method Tests # -------------------------------------------------------------------------------- def test_temporal_unit_conversion(self): self.assertTrue(units_convertible("hours", "seconds")) self.assertFalse(units_convertible("hours", "hours since 2000-01-01")) def test_units_temporal(self): self.assertTrue(units_temporal("hours since 2000-01-01")) self.assertFalse(units_temporal("hours")) self.assertFalse(units_temporal("days since the big bang")) class TestCF1_7(BaseTestCase): """Extends the CF 1.6 tests. 
Most of the tests remain the same.""" def setUp(self): """Initialize a CF1_7Check object.""" self.cf = CF1_7Check() def test_check_actual_range(self): """Test the check_actual_range method works as expected""" # using a with block closes the ds; for checks operating on the data, we need # to initialize and then manually close dataset = MockTimeSeries() dataset.createVariable("a", "d", ("time",)) # dtype=double, dims=time # test that if the variable doesn't have an actual_range attr, no score result = self.cf.check_actual_range(dataset) assert result == [] dataset.close() # NOTE this is a data check # if variable values are equal, actual_range should not exist dataset = MockTimeSeries() dataset.createVariable("a", "d", ("time",)) # dtype=double, dims=time dataset.variables["a"][0:500] = 0 # set all 500 vals to 0 dataset.variables["a"].setncattr("actual_range", [1]) result = self.cf.check_actual_range(dataset) score, out_of, messages = get_results(result) assert score < out_of assert len(messages) == 1 assert messages[0] == u"actual_range of 'a' must be 2 elements" dataset.close() dataset = MockTimeSeries() dataset.createVariable("a", "d", ("time",)) # dtype=double, dims=time dataset.variables["a"][0] = 0 # set some arbitrary val so not all equal dataset.variables["a"].setncattr("actual_range", [1]) result = self.cf.check_actual_range(dataset) score, out_of, messages = get_results(result) assert score < out_of assert len(messages) == 1 assert messages[0] == "actual_range of 'a' must be 2 elements" dataset.close() # NOTE this is a data check # check equality to min and max values dataset = MockTimeSeries() dataset.createVariable("a", "d", ("time",)) dataset.variables["a"][0] = -299 # set some arbitrary minimum dataset.variables["a"][1] = 10e36 # set some arbitrary max > _FillValue default dataset.variables["a"].setncattr("actual_range", [0, 0]) # should fail result = self.cf.check_actual_range(dataset) score, out_of, messages = get_results(result) assert score < out_of assert len(messages) == 1 assert ( messages[0] == "actual_range elements of 'a' inconsistent with its min/max values" ) dataset.close() # check equality to valid_range attr dataset = MockTimeSeries() dataset.createVariable("a", "d", ("time",)) dataset.variables["a"][0] = -299 # set some arbitrary val to not all equal dataset.variables["a"][1] = 10e36 # set some arbitrary max > _FillValue default dataset.variables["a"].setncattr("valid_range", [1, 3]) # should conflict dataset.variables["a"].setncattr("actual_range", [-299, 10e36]) result = self.cf.check_actual_range(dataset) score, out_of, messages = get_results(result) assert score < out_of assert len(messages) == 1 assert messages[0] == '"a"\'s actual_range must be within valid_range' dataset.close() # check equality to valid_min and valid_max values dataset = MockTimeSeries() dataset.createVariable("a", "d", ("time",)) dataset.variables["a"][0] = -299 # set some arbitrary minimum dataset.variables["a"][1] = 10e36 # set some arbitrary max > _FillValue default dataset.variables["a"].setncattr("valid_min", 42) # conflicting valid_min/max dataset.variables["a"].setncattr("valid_max", 45) dataset.variables["a"].setncattr("actual_range", [-299, 10e36]) result = self.cf.check_actual_range(dataset) score, out_of, messages = get_results(result) assert score < out_of assert len(messages) == 2 assert ( messages[0] == '"a"\'s actual_range first element must be >= valid_min (42)' ) assert ( messages[1] == '"a"\'s actual_range second element must be <= valid_max (45)' ) dataset.close() 
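    # A minimal sketch added for illustration (not part of the original suite): the
    # fully consistent case implied by the failures above, where actual_range has
    # two elements, matches the data extrema, and falls within valid_range. The
    # leading underscore keeps test runners from collecting it, since the exact
    # scoring is an assumption here rather than a documented guarantee.
    def _example_consistent_actual_range(self):
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))
        dataset.variables["a"][0] = -1.0  # data minimum
        dataset.variables["a"][1] = 1.0  # data maximum
        dataset.variables["a"].setncattr("actual_range", [-1.0, 1.0])  # matches extrema
        dataset.variables["a"].setncattr("valid_range", [-2.0, 2.0])  # contains actual_range
        result = self.cf.check_actual_range(dataset)
        score, out_of, messages = get_results(result)
        assert score == out_of  # every sub-check exercised above should be satisfied
        dataset.close()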
def test_check_cell_boundaries(self):
        """Check our over-ridden check_cell_boundaries method behaves as expected"""
        dataset = self.load_dataset(STATIC_FILES["grid-boundaries"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (2, 2)

        dataset = self.load_dataset(STATIC_FILES["cf_example_cell_measures"])
        results = self.cf.check_cell_boundaries(dataset)

        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        results = self.cf.check_cell_boundaries(dataset)

        dataset = self.load_dataset(STATIC_FILES["bounds_bad_order"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        # Make sure that the rgrid coordinate variable isn't checked for standard_name
        assert (score, out_of) == (0, 2)

        dataset = self.load_dataset(STATIC_FILES["bounds_bad_num_coords"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)

        dataset = self.load_dataset(STATIC_FILES["1d_bound_bad"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)

        # if the variable has formula_terms, the bounds var must have it as well
        with MockTimeSeries() as dataset:
            dataset.createVariable("a", "d", ("time",))
            dataset.createVariable("b", "d", ("time",))
            dataset.variables["a"].setncattr("bounds", "b")  # set bounds variable
            dataset.variables["a"].setncattr("formula_terms", "test")
            results = self.cf.check_cell_boundaries(dataset)
            score, out_of, messages = get_results(results)
            assert score < out_of
            assert (
                "'a' has 'formula_terms' attr, bounds variable 'b' must also have 'formula_terms'"
                in messages
            )

    def test_cell_measures(self):
        """Over-ride the test_cell_measures from CF1_6"""
        # create a temporary variable and test this only
        with MockTimeSeries() as dataset:
            dataset.createVariable("PS", "d", ("time",))  # dtype=double, dims=time
            dataset.variables["PS"].setncattr("cell_measures", "area: cell_area")
            # ensure the cell_measures var is in the dataset
            dataset.createVariable("cell_area", "d", ("time",))
            dataset.variables["cell_area"].setncattr("units", "m2")

            # run the check
            results = self.cf.check_cell_measures(dataset)
            score, out_of, messages = get_results(results)
            assert (score == out_of) and (score > 0)

        # same thing, but test that the cell_area variable is in
        # the global attr "external_variables"
        with MockTimeSeries() as dataset:
            dataset.createVariable("PS", "d", ("time",))  # dtype=double, dims=time
            dataset.variables["PS"].setncattr("cell_measures", "area: cell_area")
            dataset.setncattr("external_variables", ["cell_area"])

            # run the check
            results = self.cf.check_cell_measures(dataset)
            score, out_of, messages = get_results(results)
            assert score > 0
            assert score == out_of

        # now test a dataset with a poorly formatted cell_measure attr
        dataset = self.load_dataset(STATIC_FILES["bad_cell_measure1"])
        results = self.cf.check_cell_measures(dataset)
        score, out_of, messages = get_results(results)
        message = (
            "The cell_measures attribute for variable PS is formatted incorrectly. 
" "It should take the form of either 'area: cell_var' or 'volume: cell_var' " "where cell_var is the variable describing the cell measures" ) assert message in messages # test a dataset where the cell_measure attr is not in the dataset or external_variables # check for the variable should fail dataset = self.load_dataset(STATIC_FILES["bad_cell_measure2"]) results = self.cf.check_cell_measures(dataset) score, out_of, messages = get_results(results) message = u"Cell measure variable box_area referred to by PS is not present in dataset variables" assert message in messages def test_process_vdatum(self): # first, we set up a mock SQLite database conn_str = ":memory:" conn = sqlite3.connect(conn_str) cur = conn.cursor() # create alias and vertical datum tables without # triggers cur.execute( """ CREATE TABLE alias_name( table_name TEXT NOT NULL CHECK (table_name IN ( 'unit_of_measure', 'celestial_body', 'ellipsoid', 'area', 'prime_meridian', 'geodetic_datum', 'vertical_datum', 'geodetic_crs', 'projected_crs', 'vertical_crs', 'compound_crs', 'conversion', 'grid_transformation', 'helmert_transformation', 'other_transformation', 'concatenated_operation')), auth_name TEXT NOT NULL CHECK (length(auth_name) >= 1), code TEXT NOT NULL CHECK (length(code) >= 1), alt_name TEXT NOT NULL CHECK (length(alt_name) >= 2), source TEXT ); """ ) cur.execute( """ CREATE TABLE vertical_datum ( auth_name TEXT NOT NULL CHECK (length(auth_name) >= 1), code TEXT NOT NULL CHECK (length(code) >= 1), name TEXT NOT NULL CHECK (length(name) >= 2), description TEXT, scope TEXT, area_of_use_auth_name TEXT NOT NULL, area_of_use_code TEXT NOT NULL, deprecated BOOLEAN NOT NULL CHECK (deprecated IN (0, 1)), CONSTRAINT pk_vertical_datum PRIMARY KEY (auth_name, code) ); """ ) cur.execute( """INSERT INTO alias_name VALUES ('vertical_datum', 'EPSG', '5103', 'NAVD88', 'EPSG'); """ ) cur.execute( """INSERT INTO vertical_datum VALUES ('EPSG', '5101', 'Ordnance Datum Newlyn', NULL, NULL, 'EPSG', '2792', '0')""" ) cur.close() self.assertTrue(self.cf._process_v_datum_str("NAVD88", conn)) self.assertTrue(self.cf._process_v_datum_str("Ordnance Datum Newlyn", conn)) # NAD83 isn't a vertical datum to begin with, expect failure self.assertFalse(self.cf._process_v_datum_str("NAD83", conn)) def test_check_grid_mapping_crs_wkt(self): dataset = self.load_dataset(STATIC_FILES["mapping"]) valid_crs_check = copy.deepcopy(self.cf) dataset.variables["wgs84"] = MockVariable(dataset.variables["wgs84"]) dataset.variables["wgs84"].crs_wkt = 1 results = self.cf.check_grid_mapping(dataset) score, out_of, messages = get_results(results) self.assertIn("crs_wkt attribute must be a string", messages) # test with an invalid OGC CRS WKT string dataset.variables["wgs84"].crs_wkt = "EPSG:3785" results = self.cf.check_grid_mapping(dataset) # reuses and appends to old messages, but this is OK since we only need # to check that the invalid CRS string message was added score, out_of, messages = get_results(results) begin_crs_err_msg = "Cannot parse crs_wkt attribute to CRS using Proj4" invalid_crs_str = any(s.startswith(begin_crs_err_msg) for s in messages) self.assertTrue(invalid_crs_str) self.assertIn("crs_wkt attribute must be a string", messages) score, out_of, messages = get_results(results) valid_crs_wkt = """PROJCS ["OSGB 1936 / British National Grid", GEOGCS ["OSGB 1936", DATUM ["OSGB 1936", SPHEROID ["Airy 1830", 6377563.396, 299.3249646]], PRIMEM ["Greenwich", 0], UNIT ["degree", 0.0174532925199433]], PROJECTION ["Transverse Mercator"], PARAMETER ["False 
easting", 400000], PARAMETER ["False northing", -100000], PARAMETER ["Longitude of natural origin", -2.0], PARAMETER ["Latitude of natural origin", 49.0], PARAMETER ["Scale factor at natural origin", 0.9996012717], UNIT ["metre", 1.0]]""" dataset.variables["wgs84"].crs_wkt = valid_crs_wkt results = valid_crs_check.check_grid_mapping(dataset) score, out_of, messages = get_results(results) # without false_easting warning in current file msg_len = len( [ m for m in messages if m != "false_easting is a required attribute for grid mapping stereographic" ] ) self.assertEqual(msg_len, 0) def test_check_grid_mapping_coordinates(self): """ Checks that coordinates variables referred to by a grid mapping are well-formed and exist. """ dataset = self.load_dataset(STATIC_FILES["grid_mapping_coordinates"]) valid_grid_mapping = copy.deepcopy(self.cf) valid_grid_mapping_2 = copy.deepcopy(self.cf) dataset.variables["temp"] = MockVariable(dataset.variables["temp"]) results = self.cf.check_grid_mapping(dataset) self.assertEqual(results["temp"].value[0], results["temp"].value[1]) malformed_sep = "crsOSGB: x y : lat lon" dataset.variables["temp"].grid_mapping = malformed_sep results = valid_grid_mapping.check_grid_mapping(dataset) self.assertIn( "Could not consume entire grid_mapping expression, please check for well-formedness", results["temp"].msgs, ) self.assertLess(*results["temp"].value) malformed_var = "crsOSGB: x y_null z_null" dataset.variables["temp"].grid_mapping = malformed_var results = valid_grid_mapping_2.check_grid_mapping(dataset) self.assertEqual( [ "Coordinate-related variable y_null referenced by grid_mapping variable crsOSGB must exist in this dataset", "Coordinate-related variable z_null referenced by grid_mapping variable crsOSGB must exist in this dataset", ], results["temp"].msgs, ) self.assertLess(*results["temp"].value) def test_check_grid_mapping_vert_datum_geoid_name(self): """Checks that geoid_name works proerly""" dataset = self.load_dataset(STATIC_FILES["mapping"]) dataset.variables["wgs84"] = MockVariable(dataset.variables["wgs84"]) dataset.variables["wgs84"].geoid_name = "NAVD88" dataset.variables["wgs84"].geopotential_datum_name = "WGS84" geoid_name_good = copy.deepcopy(self.cf) geopotential_datum_name_bad = copy.deepcopy(self.cf) results = self.cf.check_grid_mapping(dataset) score, out_of, messages = get_results(results) self.assertIn( "Cannot have both 'geoid_name' and 'geopotential_datum_name' attributes in grid mapping variable 'wgs84'", messages, ) del dataset.variables["wgs84"].geopotential_datum_name results = geoid_name_good.check_grid_mapping(dataset) self.assertEqual(*results["wgs84"].value) # WGS84 isn't a valid vertical datum name, of course dataset.variables["wgs84"].geopotential_datum_name = "WGS84" del dataset.variables["wgs84"].geoid_name results = geopotential_datum_name_bad.check_grid_mapping(dataset) self.assertLess(*results["wgs84"].value) self.assertIn( "Vertical datum value 'WGS84' for attribute 'geopotential_datum_name' in grid mapping variable 'wgs84' is not valid", results["wgs84"].msgs, ) def test_check_conventions_are_cf_1_7(self): """Ensure the check_conventions_are_cf_1_7() check works as expected""" # create a temporary variable and test this only with MockTimeSeries() as dataset: # no Conventions attribute result = self.cf.check_conventions_version(dataset) self.assertFalse(result.value) with MockTimeSeries() as dataset: # incorrect Conventions attribute dataset.setncattr("Conventions", "CF-1.9999") result = 
self.cf.check_conventions_version(dataset) self.assertFalse(result.value) with MockTimeSeries() as dataset: # correct Conventions attribute dataset.setncattr("Conventions", "CF-1.7, ACDD-1.3") result = self.cf.check_conventions_version(dataset) self.assertTrue(result.value) def test_appendix_d(self): """ CF 1.7 Appendix D As the CF-1.7 dimensionless vertical coordinates dict extends the 1.6 version, this test only examines the extensions made there. """ # For each of the listed dimensionless vertical coordinates, # verify that the formula_terms match the provided set of terms self.assertTrue( no_missing_terms( "ocean_s_coordinate_g1", {"s", "C", "eta", "depth", "depth_c"}, dimless_vertical_coordinates_1_7, ) ) self.assertTrue( no_missing_terms( "ocean_s_coordinate_g2", {"s", "C", "eta", "depth", "depth_c"}, dimless_vertical_coordinates_1_7, ) ) def test_check_dimensionless_vertical_coordinate_1_7(self): """ Unit test for _check_dimensionless_vertical_coordinate_1_7 method. """ deprecated_units = ["level", "layer", "sigma_level"] ret_val = [] # create mock dataset for test; create three variables, one as dimensionless with MockTimeSeries() as dataset: dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr( "standard_name", "atmosphere_sigma_coordinate" ) dataset.variables["lev"].setncattr( "formula_terms", "sigma: lev ps: PS ptop: PTOP" ) dataset.createVariable("PS", "d", ("time",)) # dtype=double, dims=time dataset.createVariable("PTOP", "d", ("time",)) # dtype=double, dims=time # run the check self.cf._check_dimensionless_vertical_coordinate_1_7( dataset, "lev", deprecated_units, ret_val, dimless_vertical_coordinates_1_7, ) # one should have failed, as no computed_standard_name is assigned score, out_of, messages = get_results(ret_val) assert score == 0 assert out_of == 1 # this time, assign compufted_standard_name ret_val = [] dataset.variables["lev"].setncattr("computed_standard_name", "air_pressure") # run the check self.cf._check_dimensionless_vertical_coordinate_1_7( dataset, "lev", deprecated_units, ret_val, dimless_vertical_coordinates_1_7, ) # computed_standard_name is assigned, should pass score, out_of, messages = get_results(ret_val) assert score == out_of def test_dimensionless_vertical(self): """ Section 4.3.2 check, but for CF-1.7 implementation. With the refactor in place, these are more of integration tests, but kept here for simplicity. """ # Check affirmative compliance dataset = self.load_dataset(STATIC_FILES["dimensionless"]) dataset.variables["lev"] = MockVariable(dataset.variables["lev"]) dataset.variables["lev"].computed_standard_name = "air_pressure" results = self.cf.check_dimensionless_vertical_coordinates(dataset) scored, out_of, messages = get_results(results) # all variables checked (2) pass assert len(results) == 3 assert scored == out_of assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) # make one variable's computed_standard_name incorrect, one should fail dataset.variables["lev"].computed_standard_name = "definitely_not_right" results = self.cf.check_dimensionless_vertical_coordinates(dataset) scored, out_of, messages = get_results(results) assert len(results) == 3 assert scored < out_of assert all(r.name == u"§4.3 Vertical Coordinate" for r in results) def test_check_attr_type(self): """ Ensure the _check_attr_type method works as expected. 
""" # create a temporary variable and test this only nc_obj = MockTimeSeries() nc_obj.createVariable("temperature", "d", ("time",)) nc_obj.variables["temperature"].setncattr("test_att", np.float64(45)) att_name = "test_att" _var = nc_obj.variables["temperature"] # first, test all valid checks show that it's valid attr = "my_attr_value" # string attr_type = "S" result = self.cf._check_attr_type(att_name, attr_type, attr) self.assertTrue(result[0]) attr = np.int64(1) attr_type = "N" self.assertTrue(self.cf._check_attr_type(att_name, attr_type, attr)[0]) attr = np.float64(45) attr_type = "D" self.assertTrue(self.cf._check_attr_type(att_name, attr_type, attr, _var)[0]) # check failures attr = "my_attr_value" attr_type = "N" # should be numeric self.assertFalse(self.cf._check_attr_type(att_name, attr_type, attr)[0]) attr = np.int(64) attr_type = "S" # should be string self.assertFalse(self.cf._check_attr_type(att_name, attr_type, attr)[0]) nc_obj = MockTimeSeries() nc_obj.createVariable("temperature", "d", ("time",)) nc_obj.variables["temperature"].setncattr("test_att", np.int32(45)) _var = nc_obj.variables["temperature"] attr = np.int32(45) attr_type = "D" # should match self.assertFalse(self.cf._check_attr_type(att_name, attr_type, attr, _var)[0]) def test_check_grid_mapping_attr_condition(self): """ Ensure the CF-1.7 implementation of _check_grid_mapping_attr_condition() works as expected. """ # test good att_name = "horizontal_datum_name" att = "Monte Mario (Rome)" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "prime_meridian_name" att = "Athens" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "reference_ellipsoid_name" att = "Airy 1830" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "towgs84" att = np.array([0, 0, 0], dtype=np.float64) # len 3 res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "towgs84" att = np.array([0, 0, 0, 0, 0, 0], dtype=np.float64) # len 6 res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "towgs84" att = np.array([0, 0, 0, 0, 0, 0, 0], dtype=np.float64) # len 7 res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "geographic_crs_name" att = "NAD83(CSRS98)" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "geoid_name" att = "Mayotte 1950" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "geopotential_datum_name" att = "NAVD88" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) att_name = "projected_crs_name" att = "Anguilla 1957 / British West Indies Grid" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertTrue(res[0]) # test bad att_name = "horizontal_datum_name" att = "bad" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "prime_meridian_name" att = "bad" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "reference_ellipsoid_name" att = "goofy goober" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "towgs84" att = np.array([0, 0, 0], dtype=np.int64) # len 3, wrong dtype res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) 
att_name = "towgs84" att = np.array([0, 0, 0, 0], dtype=np.int64) # len 4 res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "towgs84" att = np.float64(0) # single value, right dtype res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "geographic_crs_name" att = "badbadbadbadbadnotinhere" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "geoid_name" att = "yooooooo" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "geopotential_datum_name" att = "NAVBAD BAD" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) att_name = "projected_crs_name" att = "Teddy Bruschi" res = self.cf._check_grid_mapping_attr_condition(att, att_name) self.assertFalse(res[0]) def test_check_gmattr_existence_condition_geoid_name_geoptl_datum_name(self): # create mock dataset for test; create three variables, one as dimensionless # test good (either-or) dataset = MockTimeSeries() dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr("geoid_name", "blah") res = self.cf._check_gmattr_existence_condition_geoid_name_geoptl_datum_name( dataset.variables["lev"] ) self.assertTrue(res[0]) dataset.close() dataset = MockTimeSeries() dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr("geopotential_datum_name", "blah") res = self.cf._check_gmattr_existence_condition_geoid_name_geoptl_datum_name( dataset.variables["lev"] ) self.assertTrue(res[0]) dataset.close() # bad dataset = MockTimeSeries() dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr("geopotential_datum_name", "blah") dataset.variables["lev"].setncattr("geoid_name", "blah") res = self.cf._check_gmattr_existence_condition_geoid_name_geoptl_datum_name( dataset.variables["lev"] ) self.assertFalse(res[0]) dataset.close() def test_check_gmattr_existence_condition_ell_pmerid_hdatum(self): # test good (all) dataset = MockTimeSeries() dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr("reference_ellipsoid_name", "blah") dataset.variables["lev"].setncattr("prime_meridian_name", "blah") dataset.variables["lev"].setncattr("horizontal_datum_name", "blah") res = self.cf._check_gmattr_existence_condition_ell_pmerid_hdatum( dataset.variables["lev"] ) self.assertTrue(res[0]) dataset.close() # test bad (not all) dataset = MockTimeSeries() dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr("reference_ellipsoid_name", "blah") res = self.cf._check_gmattr_existence_condition_ell_pmerid_hdatum( dataset.variables["lev"] ) self.assertFalse(res[0]) dataset.close() # test bad (not all) dataset = MockTimeSeries() dataset.createVariable("lev", "d") # dtype=double, dims=1 dataset.variables["lev"].setncattr("reference_ellipsoid_name", "blah") dataset.variables["lev"].setncattr("prime_meridian_name", "blah") res = self.cf._check_gmattr_existence_condition_ell_pmerid_hdatum( dataset.variables["lev"] ) self.assertFalse(res[0]) dataset.close() def test_check_add_offset_scale_factor_type(self): dataset = MockTimeSeries() # time lat lon depth temp = dataset.createVariable("temp", "d", dimensions=("time",)) # set att bad (str) temp.setncattr("add_offset", "foo") r = self.cf._check_add_offset_scale_factor_type(temp, "add_offset") self.assertFalse(r.value) 
temp.setncattr("scale_factor", "foo") r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor") self.assertFalse(r.value) # set bad np val temp.setncattr("scale_factor", np.float32(5)) r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor") self.assertFalse(r.value) temp.setncattr("scale_factor", np.uint(5)) r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor") self.assertFalse(r.value) # set good temp.setncattr("scale_factor", np.float(5)) r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor") self.assertTrue(r.value) temp.setncattr("scale_factor", np.double(5)) r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor") self.assertTrue(r.value) # set same dtype dataset = MockTimeSeries() # time lat lon depth temp = dataset.createVariable("temp", np.int, dimensions=("time",)) temp.setncattr("scale_factor", np.int(5)) r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor") self.assertTrue(r.value) class TestCFUtil(BaseTestCase): """ Class to test the cfutil module. """ def test_is_variable_valid_ragged_array_repr_featureType(self): nc = MockRaggedArrayRepr( "timeseries", "indexed" ) # add a variable that isn't recognized as geophysical v = nc.createVariable( "data1", "d", ("SAMPLE_DIMENSION",), fill_value=None ) v.setncattr("cf_role", "blah") self.assertFalse(cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data1")) # add geophysical variable with correct dimension nc = MockRaggedArrayRepr( "timeseries", "indexed" ) v = nc.createVariable( "data1", "d", ("SAMPLE_DIMENSION",), fill_value=None ) v.setncattr("standard_name", "sea_water_pressure") # test the variable self.assertTrue(cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data1")) # add good variable and another variable, this time with the improper dimension nc = MockRaggedArrayRepr( "timeseries", "indexed" ) v = nc.createVariable( "data1", "d", ("SAMPLE_DIMENSION",), fill_value=None ) v.setncattr("standard_name", "sea_water_pressure") v2 = nc.createVariable( "data2", "d", ("INSTANCE_DIMENSION",), fill_value=None ) v2.setncattr("standard_name", "sea_water_salinity") # good variable should pass, second should fail self.assertTrue(cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data1")) self.assertFalse(cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data2")) def test_is_dataset_valid_ragged_array_repr_featureType(self): # first test single featureType # ----- timeseries, indexed ----- # nc = MockRaggedArrayRepr( "timeseries", "indexed" ) self.assertTrue( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries") ) # we'll add another cf_role variable nc = MockRaggedArrayRepr( "timeseries", "indexed" ) v = nc.createVariable( "var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("cf_role", "yeetyeet_id") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries") ) # we'll add another index variable, also bad nc = MockRaggedArrayRepr( "timeseries", "indexed" ) v = nc.createVariable( "index_var2", "i", ("SAMPLE_DIMENSION",), fill_value=None) v.setncattr("instance_dimension", "INSTANCE_DIMENSION") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries") ) # ----- timeseries, contiguous ----- # nc = MockRaggedArrayRepr( "timeseries", "contiguous" ) self.assertTrue( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries") ) # add another cf_role var, bad nc = MockRaggedArrayRepr( "timeseries", "contiguous" ) v = 
nc.createVariable( "var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("cf_role", "yeetyeet_id") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries") ) # add another count variable, bad v = nc.createVariable( "count_var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("sample_dimension", "SAMPLE_DIMENSION") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries") ) # ----- profile, indexed ----- # nc = MockRaggedArrayRepr( "profile", "indexed" ) self.assertTrue( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "profile") ) # add another cf_role var nc = MockRaggedArrayRepr( "profile", "indexed" ) v = nc.createVariable( "var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("cf_role", "yeetyeet_id") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "profile") ) # we'll add another index variable, also bad nc = MockRaggedArrayRepr( "profile", "indexed" ) v = nc.createVariable( "index_var2", "i", ("SAMPLE_DIMENSION",), fill_value=None) v.setncattr("instance_dimension", "INSTANCE_DIMENSION") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "profile") ) # ----- profile, contiguous ----- # nc = MockRaggedArrayRepr( "profile", "contiguous" ) self.assertTrue( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "profile") ) # add another cf_role var nc = MockRaggedArrayRepr( "profile", "contiguous" ) v = nc.createVariable( "var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("cf_role", "yeetyeet_id") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "profile") ) # we'll add another count variable, also bad nc = MockRaggedArrayRepr( "profile", "contiguous" ) v = nc.createVariable( "index_var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("sample_dimension", "SAMPLE_DIMENSION") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "profile") ) # ----- trajectory, indexed ----- # nc = MockRaggedArrayRepr( "trajectory", "indexed" ) self.assertTrue( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory") ) # add another cf_role var nc = MockRaggedArrayRepr( "trajectory", "indexed" ) v = nc.createVariable( "var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("cf_role", "yeetyeet_id") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory") ) # we'll add another index variable, also bad nc = MockRaggedArrayRepr( "trajectory", "indexed" ) v = nc.createVariable( "index_var2", "i", ("SAMPLE_DIMENSION",), fill_value=None) v.setncattr("instance_dimension", "INSTANCE_DIMENSION") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory") ) # ----- trajectory, contiguous ----- # nc = MockRaggedArrayRepr( "trajectory", "contiguous" ) self.assertTrue( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory") ) # add another cf_role var nc = MockRaggedArrayRepr( "trajectory", "contiguous" ) v = nc.createVariable( "var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("cf_role", "yeetyeet_id") self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory") ) # we'll add another count variable, also bad nc = MockRaggedArrayRepr( "trajectory", "contiguous" ) v = nc.createVariable( "index_var2", "i", ("INSTANCE_DIMENSION",), fill_value=None) v.setncattr("sample_dimension", "SAMPLE_DIMENSION") self.assertFalse( 
cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
        )

        # ----- now test compound featureType ----- #

        # ----- timeSeriesProfile ----- #
        nc = MockRaggedArrayRepr("timeSeriesProfile")
        # NOTE: has no geophysical vars, so it will fail
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "timeseriesprofile"
            )
        )

        # add a geophysical variable and test again
        nc = MockRaggedArrayRepr("timeSeriesProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        v1.setncattr("standard_name", "pressure")
        self.assertTrue(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "timeseriesprofile"
            )
        )

        nc = MockRaggedArrayRepr("timeSeriesProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        # add a third cf_role variable - this should fail
        v = nc.createVariable(
            "cf_role_var3", "i", ("INSTANCE_DIMENSION",), fill_value=None
        )
        v.setncattr("cf_role", "yeetyeet_id")
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "timeseriesprofile"
            )
        )

        # set the index variable to have an incorrect attr
        nc = MockRaggedArrayRepr("timeSeriesProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        nc.variables["station_index_variable"].instance_dimension = "SIKE!"
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "timeseriesprofile"
            )
        )

        # change the sample_dimension attr on the count variable, bad
        nc = MockRaggedArrayRepr("timeSeriesProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        nc.variables["counter_var"].sample_dimension = "SIKE!"
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "timeseriesprofile"
            )
        )

        # give another geophysical data variable a different dimension
        nc = MockRaggedArrayRepr("timeSeriesProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        v1 = nc.createVariable(
            "data2", "i", ("STATION_DIMENSION",), fill_value=None  # bad!
        )
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "timeseriesprofile"
            )
        )

        # ----- trajectoryProfile ----- #
        nc = MockRaggedArrayRepr("trajectoryProfile")
        # NOTE: has no geophysical vars, so it will fail
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "trajectoryprofile"
            )
        )

        # add a geophysical variable and test again
        nc = MockRaggedArrayRepr("trajectoryProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        v1.setncattr("standard_name", "pressure")
        self.assertTrue(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "trajectoryprofile"
            )
        )

        nc = MockRaggedArrayRepr("trajectoryProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        # add a third cf_role variable - this should fail
        v = nc.createVariable(
            "cf_role_var3", "i", ("INSTANCE_DIMENSION",), fill_value=None
        )
        v.setncattr("cf_role", "yeetyeet_id")
        self.assertFalse(
            cfutil.is_dataset_valid_ragged_array_repr_featureType(
                nc, "trajectoryprofile"
            )
        )

        # set the index variable to have an incorrect attr
        nc = MockRaggedArrayRepr("trajectoryProfile")
        v1 = nc.createVariable("data1", "i", ("SAMPLE_DIMENSION",), fill_value=None)
        nc.variables["station_index_variable"].instance_dimension = "SIKE!"
self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile") ) # change the sample_dimension attr on the count variable, bad nc = MockRaggedArrayRepr( "trajectoryProfile" ) v1 = nc.createVariable( "data1", "i", ("SAMPLE_DIMENSION",), fill_value=None ) nc.variables["counter_var"].sample_dimension = "SIKE!" self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile") ) # give another geophysical data variable a different dimension nc = MockRaggedArrayRepr( "trajectoryProfile" ) v1 = nc.createVariable( "data1", "i", ("SAMPLE_DIMENSION",), fill_value=None ) v1 = nc.createVariable( "data2", "i", ("STATION_DIMENSION",), # bad! fill_value=None ) self.assertFalse( cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile") )
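# --- Illustrative sketch (added by the editor; not part of the original test file) ---
# A minimal example of the file layout these tests exercise: a CF "contiguous
# ragged array" timeseries written with the netCDF4 package. The rules the
# validator checks for are visible here: exactly one cf_role variable on the
# instance dimension and exactly one count variable carrying a
# sample_dimension attribute. All names below are illustrative assumptions,
# not taken from MockRaggedArrayRepr.
from netCDF4 import Dataset

ds = Dataset("ragged_example.nc", "w")
ds.createDimension("INSTANCE_DIMENSION", 2)  # one entry per station
ds.createDimension("SAMPLE_DIMENSION", 5)    # all observations, packed end to end

station = ds.createVariable("station", "i", ("INSTANCE_DIMENSION",))
station.setncattr("cf_role", "timeseries_id")  # the single cf_role variable

row_size = ds.createVariable("row_size", "i", ("INSTANCE_DIMENSION",))
row_size.setncattr("sample_dimension", "SAMPLE_DIMENSION")  # marks the count variable
row_size[:] = [2, 3]  # station 0 owns 2 samples, station 1 owns 3

temp = ds.createVariable("temperature", "f8", ("SAMPLE_DIMENSION",))
temp.setncattr("standard_name", "air_temperature")  # a geophysical variable
ds.close()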
apache-2.0
zjuchenyuan/BioWeb
Lib/requests/packages/__init__.py
61
1494
'''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.

In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::

    requests.packages.urllib3 is not urllib3

With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''

from __future__ import absolute_import
import sys

try:
    from . import urllib3
except ImportError:
    import urllib3
    sys.modules['%s.urllib3' % __name__] = urllib3

try:
    from . import chardet
except ImportError:
    import chardet
    sys.modules['%s.chardet' % __name__] = chardet

try:
    from . import idna
except ImportError:
    import idna
    sys.modules['%s.idna' % __name__] = idna
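# --- Illustrative sketch (added example; not part of the original file) ---
# A minimal demonstration of the aliasing trick above: registering an
# already-imported module under a second dotted name in sys.modules makes
# both names resolve to the *same* object, which is exactly what the
# symlink-based approach could not guarantee. 'mypkg.vendored.json' is a
# hypothetical name used only for this demo.
import sys
import json

sys.modules['mypkg.vendored.json'] = json
assert sys.modules['mypkg.vendored.json'] is json  # identity, not a copy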
mit
pschmitt/home-assistant
tests/components/local_file/test_camera.py
21
5287
"""The tests for local file camera component.""" from unittest import mock from homeassistant.components.local_file.const import DOMAIN, SERVICE_UPDATE_FILE_PATH from homeassistant.setup import async_setup_component from tests.common import mock_registry async def test_loading_file(hass, hass_client): """Test that it loads image from disk.""" mock_registry(hass) with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch( "os.access", mock.Mock(return_value=True) ): await async_setup_component( hass, "camera", { "camera": { "name": "config_test", "platform": "local_file", "file_path": "mock.file", } }, ) await hass.async_block_till_done() client = await hass_client() m_open = mock.mock_open(read_data=b"hello") with mock.patch( "homeassistant.components.local_file.camera.open", m_open, create=True ): resp = await client.get("/api/camera_proxy/camera.config_test") assert resp.status == 200 body = await resp.text() assert body == "hello" async def test_file_not_readable(hass, caplog): """Test a warning is shown setup when file is not readable.""" mock_registry(hass) with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch( "os.access", mock.Mock(return_value=False) ): await async_setup_component( hass, "camera", { "camera": { "name": "config_test", "platform": "local_file", "file_path": "mock.file", } }, ) await hass.async_block_till_done() assert "Could not read" in caplog.text assert "config_test" in caplog.text assert "mock.file" in caplog.text async def test_camera_content_type(hass, hass_client): """Test local_file camera content_type.""" cam_config_jpg = { "name": "test_jpg", "platform": "local_file", "file_path": "/path/to/image.jpg", } cam_config_png = { "name": "test_png", "platform": "local_file", "file_path": "/path/to/image.png", } cam_config_svg = { "name": "test_svg", "platform": "local_file", "file_path": "/path/to/image.svg", } cam_config_noext = { "name": "test_no_ext", "platform": "local_file", "file_path": "/path/to/image", } await async_setup_component( hass, "camera", {"camera": [cam_config_jpg, cam_config_png, cam_config_svg, cam_config_noext]}, ) await hass.async_block_till_done() client = await hass_client() image = "hello" m_open = mock.mock_open(read_data=image.encode()) with mock.patch( "homeassistant.components.local_file.camera.open", m_open, create=True ): resp_1 = await client.get("/api/camera_proxy/camera.test_jpg") resp_2 = await client.get("/api/camera_proxy/camera.test_png") resp_3 = await client.get("/api/camera_proxy/camera.test_svg") resp_4 = await client.get("/api/camera_proxy/camera.test_no_ext") assert resp_1.status == 200 assert resp_1.content_type == "image/jpeg" body = await resp_1.text() assert body == image assert resp_2.status == 200 assert resp_2.content_type == "image/png" body = await resp_2.text() assert body == image assert resp_3.status == 200 assert resp_3.content_type == "image/svg+xml" body = await resp_3.text() assert body == image # default mime type assert resp_4.status == 200 assert resp_4.content_type == "image/jpeg" body = await resp_4.text() assert body == image async def test_update_file_path(hass): """Test update_file_path service.""" # Setup platform mock_registry(hass) with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch( "os.access", mock.Mock(return_value=True) ): camera_1 = {"platform": "local_file", "file_path": "mock/path.jpg"} camera_2 = { "platform": "local_file", "name": "local_file_camera_2", "file_path": "mock/path_2.jpg", } await async_setup_component(hass, 
"camera", {"camera": [camera_1, camera_2]}) await hass.async_block_till_done() # Fetch state and check motion detection attribute state = hass.states.get("camera.local_file") assert state.attributes.get("friendly_name") == "Local File" assert state.attributes.get("file_path") == "mock/path.jpg" service_data = {"entity_id": "camera.local_file", "file_path": "new/path.jpg"} await hass.services.async_call(DOMAIN, SERVICE_UPDATE_FILE_PATH, service_data) await hass.async_block_till_done() state = hass.states.get("camera.local_file") assert state.attributes.get("file_path") == "new/path.jpg" # Check that local_file_camera_2 file_path is still as configured state = hass.states.get("camera.local_file_camera_2") assert state.attributes.get("file_path") == "mock/path_2.jpg"
apache-2.0
emrah-b/oclapi
django-nonrel/ocl/integration_tests/tests/bulk_import_validation.py
4
13005
from django.contrib.auth.models import User

from concepts.importer import ConceptsImporter, ValidationLogger
from concepts.validation_messages import OPENMRS_NAMES_EXCEPT_SHORT_MUST_BE_UNIQUE, OPENMRS_MUST_HAVE_EXACTLY_ONE_PREFERRED_NAME, \
    OPENMRS_SHORT_NAME_CANNOT_BE_PREFERRED, OPENMRS_PREFERRED_NAME_UNIQUE_PER_SOURCE_LOCALE, \
    OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME, OPENMRS_FULLY_SPECIFIED_NAME_UNIQUE_PER_SOURCE_LOCALE
from concepts.models import Concept, ConceptVersion
from concepts.tests import ConceptBaseTest
from integration_tests.models import TestStream
from mappings.importer import MappingsImporter
from mappings.models import Mapping
from mappings.models import MappingVersion
from mappings.tests import MappingBaseTest
from sources.models import SourceVersion
from oclapi.models import CUSTOM_VALIDATION_SCHEMA_OPENMRS, LOOKUP_CONCEPT_CLASSES
from test_helper.base import create_source, create_user, create_concept


class BulkConceptImporterTest(ConceptBaseTest):
    def setUp(self):
        super(BulkConceptImporterTest, self).setUp()
        User.objects.create(
            username='superuser',
            password='superuser',
            email='superuser@test.com',
            last_name='Super',
            first_name='User',
            is_superuser=True
        )

    def test_import_single_concept_without_fully_specified_name(self):
        self.testfile = open('./integration_tests/fixtures/concept_without_fully_specified_name.json', 'rb')
        stderr_stub = TestStream()
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=1)
        self.assertTrue(OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in stderr_stub.getvalue())

    def test_import_concepts_with_invalid_records(self):
        self.testfile = open('./integration_tests/fixtures/valid_invalid_concepts.json', 'rb')
        stderr_stub = TestStream()
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=7)
        self.assertTrue(OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_FULLY_SPECIFIED_NAME_UNIQUE_PER_SOURCE_LOCALE in stderr_stub.getvalue())
        self.assertEquals(5, Concept.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
        self.assertEquals(5, ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())

    def test_update_concept_with_invalid_record(self):
        (concept, _) = create_concept(mnemonic='1', user=self.user1, source=self.source1, names=[self.name])
        self.testfile = open('./integration_tests/fixtures/concept_without_fully_specified_name.json', 'rb')
        stderr_stub = TestStream()
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=1)
        self.assertTrue(OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in stderr_stub.getvalue())
        self.assertEquals(1, Concept.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
        self.assertEquals(1, ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())

    def test_import_concepts_into_openmrs_validated_source_with_valid_records(self):
        test_file = open('./integration_tests/fixtures/concepts_for_openmrs_validation.json', 'rb')
        stderr_stub = TestStream()
        user = create_user()
        source = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, test_file, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=5)
        self.assertTrue(OPENMRS_MUST_HAVE_EXACTLY_ONE_PREFERRED_NAME in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_SHORT_NAME_CANNOT_BE_PREFERRED in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_SHORT_NAME_CANNOT_BE_PREFERRED in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_NAMES_EXCEPT_SHORT_MUST_BE_UNIQUE in stderr_stub.getvalue())
        self.assertEquals(2, Concept.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
        self.assertEquals(2, ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())

    def test_validation_error_file_output(self):
        self.testfile = open('./integration_tests/fixtures/valid_invalid_concepts.json', 'rb')
        stderr_stub = TestStream()
        logger = ValidationLogger(output=TestStream())
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, validation_logger=logger)
        importer.import_concepts(total=7)
        self.assertTrue('MNEMONIC;ERROR;JSON' in logger.output.getvalue())
        self.assertTrue('4;%s' % OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in logger.output.getvalue())
        self.assertTrue('7;%s' % OPENMRS_FULLY_SPECIFIED_NAME_UNIQUE_PER_SOURCE_LOCALE in logger.output.getvalue())

    def test_validation_error_file_exists(self):
        self.testfile = open('./integration_tests/fixtures/valid_invalid_concepts.json', 'rb')
        stderr_stub = TestStream()
        output_file_name = 'test_file.csv'
        logger = ValidationLogger(output_file_name=output_file_name)
        importer = ConceptsImporter(create_source(user=self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS),
                                    self.testfile, 'test', TestStream(), stderr_stub, validation_logger=logger)
        importer.import_concepts(total=7)
        from os import path, remove
        self.assertTrue(path.exists(output_file_name))
        remove(output_file_name)


class ConceptImporterTest(ConceptBaseTest):
    def setUp(self):
        super(ConceptImporterTest, self).setUp()
        User.objects.create(
            username='superuser',
            password='superuser',
            email='superuser@test.com',
            last_name='Super',
            first_name='User',
            is_superuser=True
        )
        self.testfile = open('./integration_tests/fixtures/one_concept.json', 'rb')

    def test_import_job_for_one_record(self):
        stdout_stub = TestStream()
        importer = ConceptsImporter(self.source1, self.testfile, 'test', stdout_stub, TestStream(), save_validation_errors=False)
        importer.import_concepts(total=1)
        self.assertTrue('Created new concept: 1 = Diagnosis' in stdout_stub.getvalue())
        self.assertTrue('Finished importing concepts!' in stdout_stub.getvalue())
        inserted_concept = Concept.objects.get(mnemonic='1')
        self.assertEquals(inserted_concept.parent, self.source1)
        inserted_concept_version = ConceptVersion.objects.get(versioned_object_id=inserted_concept.id)
        source_version_latest = SourceVersion.get_latest_version_of(self.source1)
        self.assertEquals(source_version_latest.concepts, [inserted_concept_version.id])

    def test_import_job_for_change_in_data(self):
        stdout_stub = TestStream()
        create_concept(mnemonic='1', user=self.user1, source=self.source1)
        importer = ConceptsImporter(self.source1, self.testfile, 'test', stdout_stub, TestStream(), save_validation_errors=False)
        importer.import_concepts(total=1)
        all_concept_versions = ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES)
        self.assertEquals(len(all_concept_versions), 2)
        latest_concept_version = [version for version in all_concept_versions if version.previous_version][0]
        self.assertEquals(len(latest_concept_version.names), 4)
        self.assertTrue(('Updated concept, replacing version ID ' + latest_concept_version.previous_version.id) in stdout_stub.getvalue())
        self.assertTrue('**** Processed 1 out of 1 concepts - 1 updated, ****' in stdout_stub.getvalue())


class MappingImporterTest(MappingBaseTest):
    def setUp(self):
        super(MappingImporterTest, self).setUp()
        User.objects.create(
            username='superuser',
            password='superuser',
            email='superuser@test.com',
            last_name='Super',
            first_name='User',
            is_superuser=True
        )
        self.testfile = open('./integration_tests/fixtures/one_mapping.json', 'rb')

    def test_import_job_for_one_record(self):
        stdout_stub = TestStream()
        stderr_stub = TestStream()
        importer = MappingsImporter(self.source1, self.testfile, stdout_stub, stderr_stub, 'test')
        importer.import_mappings(total=1)
        self.assertTrue('Created new mapping:' in stdout_stub.getvalue())
        self.assertTrue('/users/user1/sources/source1/:413532003' in stdout_stub.getvalue())
        inserted_mapping = Mapping.objects.get(to_concept_code='413532003')
        self.assertEquals(inserted_mapping.to_source, self.source1)
        self.assertEquals(inserted_mapping.from_source, self.source2)
        mapping_ids = SourceVersion.get_latest_version_of(self.source1).mappings
        mapping_version = MappingVersion.objects.get(versioned_object_id=inserted_mapping.id, is_latest_version=True)
        self.assertEquals(mapping_ids[0], mapping_version.id)

    def test_import_job_for_one_invalid_record(self):
        stdout_stub = TestStream()
        stderr_stub = TestStream()
        invalid_json_file = open('./integration_tests/fixtures/one_invalid_mapping.json', 'rb')
        importer = MappingsImporter(self.source1, invalid_json_file, stdout_stub, stderr_stub, 'test')
        importer.import_mappings(total=1)
        self.assertTrue('Cannot map concept to itself.' in stderr_stub.getvalue())

    def test_import_job_for_change_in_data(self):
        stdout_stub = TestStream()
        stderr_stub = TestStream()
        mapping = Mapping(
            parent=self.source1,
            map_type='SAME-AS',
            from_concept=self.concept3,
            to_source=self.source1,
            to_concept_code='413532003',
            external_id='junk'
        )
        kwargs = {
            'parent_resource': self.source1,
        }
        Mapping.persist_new(mapping, self.user1, **kwargs)
        source_version = SourceVersion.get_latest_version_of(self.source1)
        source_version.mappings = [mapping.id]
        source_version.save()
        importer = MappingsImporter(self.source1, self.testfile, stdout_stub, stderr_stub, 'test')
        importer.import_mappings(total=1)
        self.assertTrue('**** Processed 1 out of 1 mappings - 1 updated, ****' in stdout_stub.getvalue())
        self.assertTrue(('Updated mapping with ID ' + mapping.id) in stdout_stub.getvalue())
        updated_mapping = Mapping.objects.get(to_concept_code='413532003')
        self.assertTrue(updated_mapping.retired)
        self.assertEquals(updated_mapping.external_id, '70279ABBBBBBBBBBBBBBBBBBBBBBBBBBBBBB')

    def test_update_mapping_with_invalid_record(self):
        mapping = Mapping(
            parent=self.source1,
            map_type='SAME-AS',
            from_concept=self.concept3,
            to_concept=self.concept1
        )
        kwargs = {
            'parent_resource': self.source1,
        }
        Mapping.persist_new(mapping, self.user1, **kwargs)
        source_version = SourceVersion.get_latest_version_of(self.source1)
        source_version.mappings = [mapping.id]
        source_version.save()
        stderr_stub = TestStream()
        invalid_json_file = open('./integration_tests/fixtures/one_internal_invalid_mapping.json', 'rb')
        importer = MappingsImporter(self.source1, invalid_json_file, TestStream(), stderr_stub, 'test')
        importer.import_mappings(total=1)
        self.assertTrue(
            "Must specify either 'to_concept' or 'to_source' & 'to_concept_code'. Cannot specify both."
            in stderr_stub.getvalue())

    def test_import_valid_invalid_mappings(self):
        stdout_stub = TestStream()
        stderr_stub = TestStream()
        invalid_json_file = open('./integration_tests/fixtures/valid_invalid_mapping.json', 'rb')
        importer = MappingsImporter(self.source1, invalid_json_file, stdout_stub, stderr_stub, 'test')
        importer.import_mappings(total=5)
        self.assertTrue('Cannot map concept to itself.' in stderr_stub.getvalue())
        self.assertTrue("Must specify either 'to_concept' or 'to_source' & " in stderr_stub.getvalue())
        self.assertEquals(3, Mapping.objects.count())
        self.assertEquals(3, MappingVersion.objects.count())
mpl-2.0
IllusionRom-deprecated/android_platform_external_chromium_org_tools_grit
grit/node/misc_unittest.py
7
15597
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Unit tests for misc.GritNode'''

import os
import sys
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

import unittest
import StringIO

from grit import grd_reader
import grit.exception
from grit import util
from grit.format import rc
from grit.node import misc


class GritNodeUnittest(unittest.TestCase):
  def testUniqueNameAttribute(self):
    try:
      restree = grd_reader.Parse(
          util.PathFromRoot('grit/testdata/duplicate-name-input.xml'))
      self.fail('Expected parsing exception because of duplicate names.')
    except grit.exception.Parsing:
      pass  # Expected case

  def testReadFirstIdsFromFile(self):
    test_resource_ids = os.path.join(os.path.dirname(__file__), '..',
                                     'testdata', 'resource_ids')
    base_dir = os.path.dirname(test_resource_ids)
    src_dir, id_dict = misc._ReadFirstIdsFromFile(
        test_resource_ids,
        {
          'FOO': os.path.join(base_dir, 'bar'),
          'SHARED_INTERMEDIATE_DIR': os.path.join(base_dir,
                                                  'out/Release/obj/gen'),
        })
    self.assertEqual({}, id_dict.get('bar/file.grd', None))
    self.assertEqual(
        {}, id_dict.get('out/Release/obj/gen/devtools/devtools.grd', None))


class IfNodeUnittest(unittest.TestCase):
  def testIffyness(self):
    grd = grd_reader.Parse(StringIO.StringIO('''
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <messages>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">
                Bingo!
              </message>
            </if>
            <if expr="'hello' in defs">
              <message name="IDS_HELLO">
                Hello!
              </message>
            </if>
            <if expr="lang == 'fr' or 'FORCE_FRENCH' in defs">
              <message name="IDS_HELLO" internal_comment="French version">
                Good morning
              </message>
            </if>
            <if expr="is_win">
              <message name="IDS_ISWIN">is_win</message>
            </if>
          </messages>
        </release>
      </grit>'''), dir='.')

    messages_node = grd.children[0].children[0]
    bingo_message = messages_node.children[0].children[0]
    hello_message = messages_node.children[1].children[0]
    french_message = messages_node.children[2].children[0]
    is_win_message = messages_node.children[3].children[0]

    self.assertTrue(bingo_message.name == 'message')
    self.assertTrue(hello_message.name == 'message')
    self.assertTrue(french_message.name == 'message')

    grd.SetOutputLanguage('fr')
    grd.SetDefines({'hello': '1'})
    active = set(grd.ActiveDescendants())
    self.failUnless(bingo_message not in active)
    self.failUnless(hello_message in active)
    self.failUnless(french_message in active)

    grd.SetOutputLanguage('en')
    grd.SetDefines({'bingo': 1})
    active = set(grd.ActiveDescendants())
    self.failUnless(bingo_message in active)
    self.failUnless(hello_message not in active)
    self.failUnless(french_message not in active)

    grd.SetOutputLanguage('en')
    grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'})
    active = set(grd.ActiveDescendants())
    self.failUnless(bingo_message in active)
    self.failUnless(hello_message not in active)
    self.failUnless(french_message in active)

    grd.SetOutputLanguage('en')
    grd.SetDefines({})
    self.failUnless(grd.target_platform == sys.platform)
    grd.SetTargetPlatform('darwin')
    active = set(grd.ActiveDescendants())
    self.failUnless(is_win_message not in active)
    grd.SetTargetPlatform('win32')
    active = set(grd.ActiveDescendants())
    self.failUnless(is_win_message in active)

  def testElsiness(self):
    grd = util.ParseGrdForUnittest('''
        <messages>
          <if expr="True">
            <then> <message name="IDS_YES1"></message> </then>
            <else> <message name="IDS_NO1"></message> </else>
          </if>
          <if expr="True">
            <then> <message name="IDS_YES2"></message> </then>
            <else> </else>
          </if>
          <if expr="True">
            <then> </then>
            <else> <message name="IDS_NO2"></message> </else>
          </if>
          <if expr="True">
            <then> </then>
            <else> </else>
          </if>
          <if expr="False">
            <then> <message name="IDS_NO3"></message> </then>
            <else> <message name="IDS_YES3"></message> </else>
          </if>
          <if expr="False">
            <then> <message name="IDS_NO4"></message> </then>
            <else> </else>
          </if>
          <if expr="False">
            <then> </then>
            <else> <message name="IDS_YES4"></message> </else>
          </if>
          <if expr="False">
            <then> </then>
            <else> </else>
          </if>
        </messages>''')
    included = [msg.attrs['name'] for msg in grd.ActiveDescendants()
                if msg.name == 'message']
    self.assertEqual(['IDS_YES1', 'IDS_YES2', 'IDS_YES3', 'IDS_YES4'],
                     included)

  def testIffynessWithOutputNodes(self):
    grd = grd_reader.Parse(StringIO.StringIO('''
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <outputs>
          <output filename="uncond1.rc" type="rc_data" />
          <if expr="lang == 'fr' or 'hello' in defs">
            <output filename="only_fr.adm" type="adm" />
            <output filename="only_fr.plist" type="plist" />
          </if>
          <if expr="lang == 'ru'">
            <output filename="doc.html" type="document" />
          </if>
          <output filename="uncond2.adm" type="adm" />
          <output filename="iftest.h" type="rc_header">
            <emit emit_type='prepend'></emit>
          </output>
        </outputs>
      </grit>'''), dir='.')

    outputs_node = grd.children[0]
    uncond1_output = outputs_node.children[0]
    only_fr_adm_output = outputs_node.children[1].children[0]
    only_fr_plist_output = outputs_node.children[1].children[1]
    doc_output = outputs_node.children[2].children[0]
    uncond2_output = outputs_node.children[0]
    self.assertTrue(uncond1_output.name == 'output')
    self.assertTrue(only_fr_adm_output.name == 'output')
    self.assertTrue(only_fr_plist_output.name == 'output')
    self.assertTrue(doc_output.name == 'output')
    self.assertTrue(uncond2_output.name == 'output')

    grd.SetOutputLanguage('ru')
    grd.SetDefines({'hello': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(
        outputs,
        ['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html',
         'uncond2.adm', 'iftest.h'])

    grd.SetOutputLanguage('ru')
    grd.SetDefines({'bingo': '2'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(
        outputs, ['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h'])

    grd.SetOutputLanguage('fr')
    grd.SetDefines({'hello': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(
        outputs,
        ['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm',
         'iftest.h'])

    grd.SetOutputLanguage('en')
    grd.SetDefines({'bingo': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])

    grd.SetOutputLanguage('fr')
    grd.SetDefines({'bingo': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])

  def testChildrenAccepted(self):
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <includes>
            <if expr="'bingo' in defs">
              <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
              </if>
            </if>
          </includes>
          <structures>
            <if expr="'bingo' in defs">
              <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </if>
          </structures>
          <messages>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
            </if>
          </messages>
        </release>
        <translations>
          <if expr="'bingo' in defs">
            <file lang="nl" path="nl_translations.xtb" />
          </if>
          <if expr="'bingo' in defs">
            <if expr="'hello' in defs">
              <file lang="nl" path="nl_translations.xtb" />
            </if>
          </if>
        </translations>
      </grit>'''), dir='.')

  def testIfBadChildrenNesting(self):
    # includes
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <includes>
            <if expr="'bingo' in defs">
              <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
            </if>
          </includes>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # messages
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <messages>
            <if expr="'bingo' in defs">
              <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
            </if>
          </messages>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # structures
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <structures>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
          </structures>
        </release>
      </grit>''')
    # translations
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <if expr="'bingo' in defs">
            <message name="IDS_BINGO">Bingo!</message>
          </if>
        </translations>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # same with nesting
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <includes>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </if>
          </includes>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <messages>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </if>
          </messages>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <structures>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
            </if>
          </structures>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <if expr="'bingo' in defs">
            <if expr="'hello' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
          </if>
        </translations>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)


class ReleaseNodeUnittest(unittest.TestCase):
  def testPseudoControl(self):
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="1" source_lang_id="en-US" current_release="2" base_dir=".">
        <release seq="1" allow_pseudo="false">
          <messages>
            <message name="IDS_HELLO">
              Hello
            </message>
          </messages>
          <structures>
            <structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="klonk.rc" />
          </structures>
        </release>
        <release seq="2">
          <messages>
            <message name="IDS_BINGO">
              Bingo
            </message>
          </messages>
          <structures>
            <structure type="menu" name="IDC_KLONKMENU" encoding="utf-16" file="klonk.rc" />
          </structures>
        </release>
      </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()

    hello = grd.GetNodeById('IDS_HELLO')
    aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
    bingo = grd.GetNodeById('IDS_BINGO')
    menu = grd.GetNodeById('IDC_KLONKMENU')

    for node in [hello, aboutbox]:
      self.failUnless(not node.PseudoIsAllowed())

    for node in [bingo, menu]:
      self.failUnless(node.PseudoIsAllowed())

    # TODO(benrg): There was a test here that formatting hello and aboutbox
    # with a pseudo language should fail, but they do not fail and the test
    # was broken and failed to catch it. Fix this.

    # Should not raise an exception since pseudo is allowed
    rc.FormatMessage(bingo, 'xyz-pseudo')
    rc.FormatStructure(menu, 'xyz-pseudo', '.')


if __name__ == '__main__':
  unittest.main()
bsd-2-clause
illicitonion/givabit
lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/tests/regressiontests/m2m_regress/tests.py
39
3145
from django.core.exceptions import FieldError
from django.test import TestCase

from models import (SelfRefer, Tag, TagCollection, Entry, SelfReferChild,
    SelfReferChildSibling, Worksheet)


class M2MRegressionTests(TestCase):
    def assertRaisesErrorWithMessage(self, error, message, callable, *args, **kwargs):
        self.assertRaises(error, callable, *args, **kwargs)
        try:
            callable(*args, **kwargs)
        except error, e:
            self.assertEqual(message, str(e))

    def test_multiple_m2m(self):
        # Multiple m2m references to model must be distinguished when
        # accessing the relations through an instance attribute.

        s1 = SelfRefer.objects.create(name='s1')
        s2 = SelfRefer.objects.create(name='s2')
        s3 = SelfRefer.objects.create(name='s3')

        s1.references.add(s2)
        s1.related.add(s3)

        e1 = Entry.objects.create(name='e1')
        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')

        e1.topics.add(t1)
        e1.related.add(t2)

        self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"])
        self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"])

        self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"])
        self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"])

    def test_internal_related_name_not_in_error_msg(self):
        # The secret internal related names for self-referential many-to-many
        # fields shouldn't appear in the list when an error is made.
        self.assertRaisesErrorWithMessage(FieldError,
            "Cannot resolve keyword 'porcupine' into field. Choices are: id, name, references, related, selfreferchild, selfreferchildsibling",
            lambda: SelfRefer.objects.filter(porcupine='fred')
        )

    def test_m2m_inheritance_symmetry(self):
        # Test to ensure that the relationship between two inherited models
        # with a self-referential m2m field maintains symmetry

        sr_child = SelfReferChild(name="Hanna")
        sr_child.save()

        sr_sibling = SelfReferChildSibling(name="Beth")
        sr_sibling.save()
        sr_child.related.add(sr_sibling)

        self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"])
        self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"])

    def test_m2m_pk_field_type(self):
        # Regression for #11311 - The primary key for models in a m2m relation
        # doesn't have to be an AutoField

        w = Worksheet(id='abc')
        w.save()
        w.delete()

    def test_add_m2m_with_base_class(self):
        # Regression for #11956 -- You can add an object to a m2m with the
        # base class without causing integrity errors

        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')

        c1 = TagCollection.objects.create(name='c1')
        c1.tags = [t1, t2]

        c1 = TagCollection.objects.get(name='c1')

        self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"])
        self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"])
apache-2.0
Stanford-Online/edx-platform
lms/djangoapps/courseware/tests/test_middleware.py
19
1491
""" Tests for courseware middleware """ from django.http import Http404 from django.test.client import RequestFactory from nose.plugins.attrib import attr from lms.djangoapps.courseware.exceptions import Redirect from lms.djangoapps.courseware.middleware import RedirectMiddleware from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory @attr(shard=1) class CoursewareMiddlewareTestCase(SharedModuleStoreTestCase): """Tests that courseware middleware is correctly redirected""" @classmethod def setUpClass(cls): super(CoursewareMiddlewareTestCase, cls).setUpClass() cls.course = CourseFactory.create() def test_process_404(self): """A 404 should not trigger anything""" request = RequestFactory().get("dummy_url") response = RedirectMiddleware().process_exception( request, Http404() ) self.assertIsNone(response) def test_redirect_exceptions(self): """ Unit tests for handling of Redirect exceptions. """ request = RequestFactory().get("dummy_url") test_url = '/test_url' exception = Redirect(test_url) response = RedirectMiddleware().process_exception( request, exception ) self.assertEqual(response.status_code, 302) target_url = response._headers['location'][1] self.assertTrue(target_url.endswith(test_url))
agpl-3.0
ecino/compassion-modules
partner_communication/models/email.py
4
2485
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <ecino@compassion.ch>
#
#    The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models, fields


class Email(models.Model):
    """ Add relation to communication configuration to track generated
    e-mails. """
    _inherit = 'mail.mail'

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    communication_config_id = fields.Many2one('partner.communication.config')

    @api.multi
    def send(self, auto_commit=False, raise_exception=False):
        """ Create communication for partner, if not already existing. """
        comm_obj = self.env['partner.communication.job'].with_context(
            {}).with_context(no_print=True)
        config = self.env.ref(
            'partner_communication.default_communication')
        for email in self.exists().filtered(
                lambda e: e.mail_message_id.model !=
                'partner.communication.job'):
            communication = comm_obj.search([('email_id', '=', email.id)])
            if not communication:
                for partner in email.recipient_ids.filtered(
                        lambda p: not p.user_ids or reduce(
                            lambda u1, u2: u1 and u2,
                            p.user_ids.mapped('share'))):
                    comm_obj.create({
                        'config_id': config.id,
                        'partner_id': partner.id,
                        'user_id': email.author_id.user_ids.id,
                        'object_ids': email.recipient_ids.ids,
                        'state': 'done',
                        'auto_send': False,
                        'email_id': email.id,
                        'sent_date': fields.Datetime.now(),
                        'body_html': email.body_html,
                        'subject': email.subject,
                        'ir_attachment_ids': [(6, 0,
                                               email.attachment_ids.ids)]
                    })
        return super(Email, self).send(auto_commit, raise_exception)
agpl-3.0
lscheinkman/nupic
src/nupic/data/dict_utils.py
49
5295
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import copy


# TODO: Note the functions 'rUpdate' are duplicated in
# the swarming.hypersearch.utils.py module


class DictObj(dict):
  """Dictionary that allows attribute-like access to its elements.
  Attributes are read-only."""

  def __getattr__(self, name):
    if name == '__deepcopy__':
      return super(DictObj, self).__getattribute__("__deepcopy__")
    return self[name]

  def __setstate__(self, state):
    for k, v in state.items():
      self[k] = v


def rUpdate(original, updates):
  """Recursively updates the values in original with the values from
  updates."""
  # Keep a list of the sub-dictionaries that need to be updated to avoid having
  # to use recursion (which could fail for dictionaries with a lot of nesting).
  dictPairs = [(original, updates)]
  while len(dictPairs) > 0:
    original, updates = dictPairs.pop()
    for k, v in updates.iteritems():
      if k in original and isinstance(original[k], dict) and isinstance(v, dict):
        dictPairs.append((original[k], v))
      else:
        original[k] = v


def rApply(d, f):
  """Recursively applies f to the values in dict d.

  Args:
    d: The dict to recurse over.
    f: A function to apply to values in d that takes the value and a list of
        keys from the root of the dict to the value.
  """
  remainingDicts = [(d, ())]
  while len(remainingDicts) > 0:
    current, prevKeys = remainingDicts.pop()
    for k, v in current.iteritems():
      keys = prevKeys + (k,)
      if isinstance(v, dict):
        remainingDicts.insert(0, (v, keys))
      else:
        f(v, keys)


def find(d, target):
  remainingDicts = [d]
  while len(remainingDicts) > 0:
    current = remainingDicts.pop()
    for k, v in current.iteritems():
      if k == target:
        return v
      if isinstance(v, dict):
        remainingDicts.insert(0, v)
  return None


def get(d, keys):
  for key in keys:
    d = d[key]
  return d


def set(d, keys, value):
  for key in keys[:-1]:
    d = d[key]
  d[keys[-1]] = value


def dictDiffAndReport(da, db):
  """ Compares two python dictionaries at the top level and report differences,
  if any, to stdout

  da:             first dictionary
  db:             second dictionary

  Returns:        The same value as returned by dictDiff() for the given args
  """
  differences = dictDiff(da, db)

  if not differences:
    return differences

  if differences['inAButNotInB']:
    print ">>> inAButNotInB: %s" % differences['inAButNotInB']

  if differences['inBButNotInA']:
    print ">>> inBButNotInA: %s" % differences['inBButNotInA']

  for key in differences['differentValues']:
    print ">>> da[%s] != db[%s]" % (key, key)
    print "da[%s] = %r" % (key, da[key])
    print "db[%s] = %r" % (key, db[key])

  return differences


def dictDiff(da, db):
  """ Compares two python dictionaries at the top level and return differences

  da:             first dictionary
  db:             second dictionary

  Returns:        None if dictionaries test equal; otherwise returns a
                  dictionary as follows:
                  {
                    'inAButNotInB':
                        <sequence of keys that are in da but not in db>
                    'inBButNotInA':
                        <sequence of keys that are in db but not in da>
                    'differentValues':
                        <sequence of keys whose corresponding values differ
                         between da and db>
                  }
  """
  different = False

  resultDict = dict()

  resultDict['inAButNotInB'] = set(da) - set(db)
  if resultDict['inAButNotInB']:
    different = True

  resultDict['inBButNotInA'] = set(db) - set(da)
  if resultDict['inBButNotInA']:
    different = True

  resultDict['differentValues'] = []
  for key in (set(da) - resultDict['inAButNotInB']):
    comparisonResult = da[key] == db[key]
    if isinstance(comparisonResult, bool):
      isEqual = comparisonResult
    else:
      # This handles numpy arrays (but only at the top level)
      isEqual = comparisonResult.all()
    if not isEqual:
      resultDict['differentValues'].append(key)
      different = True

  assert (((resultDict['inAButNotInB'] or resultDict['inBButNotInA'] or
            resultDict['differentValues']) and different) or not different)

  return resultDict if different else None
agpl-3.0
GIC-de/ncclient
test/unit/devices/test_junos.py
2
2800
import unittest
from ncclient.devices.junos import *
import ncclient.transport
from mock import patch
import paramiko
import sys


xml = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:output method="xml" indent="no"/>
    <xsl:template match="/|comment()|processing-instruction()">
        <xsl:copy>
            <xsl:apply-templates/>
        </xsl:copy>
    </xsl:template>
    <xsl:template match="*">
        <xsl:element name="{local-name()}">
            <xsl:apply-templates select="@*|node()"/>
        </xsl:element>
    </xsl:template>
    <xsl:template match="@*">
        <xsl:attribute name="{local-name()}">
            <xsl:value-of select="."/>
        </xsl:attribute>
    </xsl:template>
</xsl:stylesheet>
'''

xml2 = """<rpc-reply xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos">
<routing-engine>
<name>reX</name>
<commit-success/>
<ok/>
</rpc-reply>"""

xml3 = """<rpc-reply xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos">
<routing-engine>
<name>reX</name>
<commit-success/>
<routing-engine/>
<ok/>
</rpc-reply>"""


class TestJunosDevice(unittest.TestCase):

    def setUp(self):
        self.obj = JunosDeviceHandler({'name': 'junos'})

    @patch('paramiko.Channel.exec_command')
    @patch('paramiko.Transport.__init__')
    @patch('paramiko.Transport.open_channel')
    def test_handle_connection_exceptions(
            self, mock_open, mock_init, mock_channel):
        session = ncclient.transport.SSHSession(self.obj)
        session._channel_id = 100
        mock_init.return_value = None
        session._transport = paramiko.Transport()
        channel = paramiko.Channel(100)
        mock_open.return_value = channel
        self.obj.handle_connection_exceptions(session)
        self.assertEqual(channel._name, "netconf-command-100")
        self.assertEqual(
            mock_channel.call_args_list[0][0][0],
            "xml-mode netconf need-trailer")

    def test_additional_operations(self):
        dict = {}
        dict["rpc"] = ExecuteRpc
        dict["get_configuration"] = GetConfiguration
        dict["load_configuration"] = LoadConfiguration
        dict["compare_configuration"] = CompareConfiguration
        dict["command"] = Command
        dict["reboot"] = Reboot
        dict["halt"] = Halt
        dict["commit"] = Commit
        self.assertEqual(dict, self.obj.add_additional_operations())

    def test_transform_reply(self):
        if sys.version >= '3':
            reply = xml.encode('utf-8')
        else:
            reply = xml
        self.assertEqual(self.obj.transform_reply(), reply)

    def test_perform_quality_check(self):
        self.assertFalse(self.obj.perform_qualify_check())
apache-2.0
mfherbst/spack
var/spack/repos/builtin/packages/sw4lite/package.py
2
3776
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob


class Sw4lite(MakefilePackage):
    """Sw4lite is a bare bone version of SW4 intended for testing
    performance optimizations in a few important numerical kernels of SW4."""

    tags = ['proxy-app', 'ecp-proxy-app']

    homepage = "https://geodynamics.org/cig/software/sw4"
    url = "https://github.com/geodynamics/sw4lite/archive/v1.0.zip"
    git = "https://github.com/geodynamics/sw4lite.git"

    version('develop', branch='master')
    version('1.0', '3d911165f4f2ff6d5f9c1bd56ab6723f')

    variant('openmp', default=True, description='Build with OpenMP support')
    variant('precision', default='double', values=('float', 'double'),
            multi=False, description='Floating point precision')
    variant('ckernel', default=False, description='C or Fortran kernel')

    depends_on('blas')
    depends_on('lapack')
    depends_on('mpi')

    parallel = False

    @property
    def build_targets(self):
        targets = []
        spec = self.spec

        if spec.variants['precision'].value == 'double':
            cxxflags = ['-I../src', '-I../src/double']
        else:
            cxxflags = ['-I../src', '-I../src/float']
        cflags = []
        fflags = []

        if '+openmp' in self.spec:
            cflags.append('-DSW4_OPENMP')
            cflags.append(self.compiler.openmp_flag)
            cxxflags.append('-DSW4_OPENMP')
            cxxflags.append(self.compiler.openmp_flag)
            fflags.append(self.compiler.openmp_flag)

        if spec.variants['ckernel'].value is True:
            cxxflags.append('-DSW4_CROUTINES')
            targets.append('ckernel=yes')

        targets.append('FC=' + spec['mpi'].mpifc)
        targets.append('CXX=' + spec['mpi'].mpicxx)

        targets.append('CFLAGS={0}'.format(' '.join(cflags)))
        targets.append('CXXFLAGS={0}'.format(' '.join(cxxflags)))
        targets.append('FFLAGS={0}'.format(' '.join(fflags)))

        targets.append('EXTRA_CXX_FLAGS=')
        targets.append('EXTRA_FORT_FLAGS=')
        lapack_blas = spec['lapack'].libs + spec['blas'].libs
        if spec.satisfies('%gcc'):
            targets.append('EXTRA_LINK_FLAGS={0} -lgfortran'
                           .format(lapack_blas.ld_flags))
        else:
            targets.append('EXTRA_LINK_FLAGS={0}'.format(lapack_blas.ld_flags))

        return targets

    def install(self, spec, prefix):
        mkdir(prefix.bin)
        exe_name = glob.glob('*/sw4lite')[0]
        install(exe_name, prefix.bin)
        install_tree('tests', prefix.tests)
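# --- Illustrative note (added by the editor; not part of the original package) ---
# A rough sketch of what the build_targets property above hands to make for a
# hypothetical spec '+openmp precision=double ckernel=True' built with GCC and
# an MPI whose wrappers are mpif90/mpicxx; the exact flags depend on the
# compiler and the lapack/blas providers Spack resolves:
#
#   make ckernel=yes FC=mpif90 CXX=mpicxx \
#        CFLAGS='-DSW4_OPENMP -fopenmp' \
#        CXXFLAGS='-I../src -I../src/double -DSW4_OPENMP -fopenmp -DSW4_CROUTINES' \
#        FFLAGS='-fopenmp' EXTRA_CXX_FLAGS= EXTRA_FORT_FLAGS= \
#        EXTRA_LINK_FLAGS='<lapack/blas link flags> -lgfortran'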
lgpl-2.1
servo-automation/highfive
tests/api_provider_tests.py
2
7671
from highfive.runner import Configuration, Response
from highfive.api_provider.interface import APIProvider, CONTRIBUTORS_STORE_KEY, DEFAULTS
from handler_tests import TestStore

from datetime import datetime
from dateutil.parser import parse as datetime_parse
from unittest import TestCase


def create_config():
    config = Configuration()
    config.name = 'test_app'
    config.imgur_client_id = None
    return config


class APIProviderTests(TestCase):
    def test_api_init(self):
        '''The default interface will only initialize the app name and payload.'''
        config = Configuration()
        config.name = 'test_app'
        api = APIProvider(config=config, payload={})
        self.assertEqual(api.name, 'test_app')
        self.assertEqual(api.payload, {})
        self.assertEqual(api.config, config)
        for attr in DEFAULTS:
            self.assertTrue(getattr(api, attr) is None)

    def test_api_issue_payload(self):
        '''
        If the payload is related to an issue (or an issue comment in an
        issue/PR), then this should've initialized the commonly used
        issue-related stuff.
        '''
        payload = {
            'issue': {
                'user': {
                    'login': 'Foobar'
                },
                'state': 'open',
                'labels': [
                    {
                        'name': 'Foo'
                    },
                    {
                        'name': 'Bar'
                    }
                ],
                'number': 200,
                'updated_at': '1970-01-01T00:00:00Z'
            },
        }

        api = APIProvider(config=create_config(), payload=payload)
        self.assertEqual(api.payload, payload)
        self.assertFalse(api.is_pull)
        self.assertTrue(api.is_open)
        self.assertEqual(api.creator, 'foobar')
        self.assertEqual(api.last_updated, payload['issue']['updated_at'])
        self.assertEqual(api.number, '200')
        self.assertTrue(api.pull_url is None)
        self.assertEqual(api.labels, ['foo', 'bar'])

    def test_api_pr_payload(self):
        '''
        If the payload is related to a PR, then the commonly used PR attributes
        should've been initialized.
        '''
        payload = {
            'pull_request': {
                'user': {
                    'login': 'Foobar'
                },
                'assignee': {
                    'login': 'Baz'
                },
                'state': 'open',
                'number': 50,
                'url': 'some url',
                'updated_at': '1970-01-01T00:00:00Z'
            }
        }

        api = APIProvider(config=create_config(), payload=payload)
        self.assertEqual(api.payload, payload)
        self.assertTrue(api.is_open)
        self.assertTrue(api.is_pull)
        self.assertEqual(api.creator, 'foobar')
        self.assertEqual(api.assignee, 'baz')
        self.assertEqual(api.last_updated, payload['pull_request']['updated_at'])
        self.assertEqual(api.number, '50')
        self.assertEqual(api.pull_url, 'some url')

    def test_api_other_events(self):
        '''Test for payload belonging to other events such as comment, label, etc.'''
        payload = {
            # This is a hypothetical payload just for tests
            'sender': {
                'login': 'Someone'
            },
            'label': {
                'name': 'Label'
            },
            'repository': {
                'owner': {
                    'login': 'foo'
                },
                'name': 'bar'
            },
            'comment': {
                'body': 'Hello, world!',
            },
            'issue': {
                'pull_request': {},
                'labels': [],
                'user': {
                    'login': 'Foobar'
                },
                'state': 'open',
                'number': 200,
            }
        }

        api = APIProvider(config=create_config(), payload=payload)
        self.assertTrue(api.is_pull)
        self.assertEqual(api.sender, 'someone')
        self.assertEqual(api.comment, 'Hello, world!')
        self.assertEqual(api.current_label, 'label')
        self.assertEqual(api.owner, 'foo')
        self.assertEqual(api.repo, 'bar')

    def test_api_imgur_upload(self):
        '''Test Imgur API upload'''
        config = create_config()
        api = APIProvider(config=config, payload={})
        resp = api.post_image_to_imgur('some data')
        self.assertTrue(resp is None)    # No client ID - returns None

        config.imgur_client_id = 'foobar'

        def test_valid_request(method, url, data, headers):
            self.assertEqual(headers['Authorization'], 'Client-ID foobar')
            self.assertEqual(method, 'POST')
            self.assertEqual(url, 'https://api.imgur.com/3/image')
            self.assertEqual(data, {'image': 'some data'})
            return Response(data={'data': {'link': 'hello'}})

        tests = [
            (test_valid_request, 'hello'),
            (lambda method, url, data, headers: Response(data='', code=400), None),
            (lambda method, url, data, headers: Response(data=''), None)
        ]

        for func, expected in tests:
            resp = api.post_image_to_imgur('some data', json_request=func)
            self.assertEqual(resp, expected)

    def test_contributors_update(self):
        '''
        Contributors list (cache) lives only for an hour (by default). Once
        it's outdated, the next call to `get_contributors` calls
        `fetch_contributors`, writes it to the store and returns the list.
        Any calls within the next hour will return the existing contributors
        without calling the API.
        '''
        class TestAPI(APIProvider):
            fetched = False

            def fetch_contributors(self):
                self.fetched = True
                return []

        config = create_config()
        api = TestAPI(config=config, payload={}, store=None)
        self.assertFalse(api.fetched)
        api.get_contributors()    # No store. This will always call the API.
        self.assertTrue(api.fetched)

        store = TestStore()
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        now = datetime.now()
        api.get_contributors()
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        # Store doesn't have contributors. It's been updated for the first time.
        self.assertTrue(updated_time >= now)
        self.assertTrue(api.fetched)

        store = TestStore()
        store.write_object(CONTRIBUTORS_STORE_KEY, {
            'last_update_time': str(now),
            'list': ['booya']
        })
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        api.get_contributors()
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        # Called within a cycle - no fetch occurs.
        self.assertEqual(updated_time, now)
        self.assertFalse(api.fetched)

        store = TestStore()
        store.write_object(CONTRIBUTORS_STORE_KEY, {
            'last_update_time': str(now),
            'list': ['booya']
        })
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        api.get_contributors(fetch=True)
        # When `fetch` is enabled, API is called regardless.
        self.assertTrue(api.fetched)
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        self.assertTrue(updated_time > now)
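# --- Illustrative sketch (added example; not part of the original tests) ---
# The hour-long cache cycle exercised above, reduced to its core: re-fetch
# only when the stored timestamp is older than the cycle. The names here
# (get_cached, CYCLE, the plain-dict store) are illustrative assumptions,
# not highfive API.
from datetime import timedelta

CYCLE = timedelta(hours=1)


def get_cached(store, fetch):
    entry = store.get('contributors')
    now = datetime.now()
    if entry is None or datetime_parse(entry['last_update_time']) < now - CYCLE:
        store['contributors'] = {'last_update_time': str(now), 'list': fetch()}
    return store['contributors']['list']


# Usage: a second call within the hour returns the cached list without fetching.
_store = {}
assert get_cached(_store, lambda: ['alice']) == ['alice']
assert get_cached(_store, lambda: ['bob']) == ['alice']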
mpl-2.0
djeraseit/PredictionIO
examples/experimental/scala-local-friend-recommendation/file_random.py
48
4883
import sys
import random

read_file = open("data/user_profile.txt", 'r')
write_file = open("data/mini_user_profile.txt", 'w')
number_of_lines = int(sys.argv[1])
number_of_items = int(sys.argv[2])

# record number of lines
count = 0
random_num_list = []

# loop through the file to get number of lines in the file
for line in read_file:
    count += 1

print "generating random numbers"
# generating a list of random line indices to read from
# (randint is inclusive on both ends, so the upper bound is count - 1,
# the index of the last line)
for i in range(0, number_of_lines):
    random_num_list.append(random.randint(0, count - 1))

# get rid of any duplicates
no_duplicate_list = list(set(random_num_list))

# sort the list
no_duplicate_list.sort()
#print no_duplicate_list

# go to file beginning
read_file.seek(0)
count = 0
index = 0
user_id_list = []
print "getting lines from user_profile"
for line in read_file:
    if count == no_duplicate_list[index]:
        write_file.write(line)
        index += 1
        user_id_list.append(int(line.split()[0]))
        if index == len(no_duplicate_list):
            break
    count += 1

# user_id_list is sorted
user_id_list = map(str, user_id_list)
user_id_list.sort()
#print user_id_list
print "user_id finished"

print "getting lines from item"
read_file = open("data/item.txt", 'r')
write_file = open("data/mini_item.txt", 'w')
count = 0
random_num_list = []
for line in read_file:
    count += 1

for i in range(0, number_of_items):
    random_num_list.append(random.randint(0, count - 1))

# no duplicate
random_num_list = list(set(random_num_list))
random_num_list.sort()

read_file.seek(0)
count = 0
index = 0
item_id_list = []
for line in read_file:
    if count == random_num_list[index]:
        write_file.write(line)
        index += 1
        item_id_list.append(int(line.split()[0]))
        if index == len(random_num_list):
            break
    count += 1
# keep item ids as strings so the membership test against the split() tokens
# of rec_log_train below can actually match
item_id_list = map(str, item_id_list)
print "item finished"

print "getting mini user_key_word"
read_file = open("data/user_key_word.txt", 'r')
write_file = open("data/mini_user_key_word.txt", 'w')
# record number of lines
count = 0
index = 0
# loop through the file to get number of lines in the file
for line in read_file:
    if line.split()[0] == user_id_list[index]:
        write_file.write(line)
        index += 1
    if index == len(user_id_list):
        #print "break"
        break
print "user keyword finished"

# getting the user_sns_small
print "getting user sns"
#print user_id_list
read_file = open("data/user_sns.txt", 'r')
#write_file = open("data/mini_user_sns_small.txt", 'w')
user_sns_list = []
index = 0
met = False
count = 0
for line in read_file:
    count += 1
    #print count
    # Same user multiple following
    if met:
        if line.split()[0] != user_id_list[index]:
            index += 1
            met = False
            if index == len(user_id_list):
                break
    if line.split()[0] == user_id_list[index]:
        #print "here"
        user_sns_list.append(line)
        met = True
    # if the current line's user is greater than the current entry of the user
    # list, that user has no rows here, so we move on to the next user
    if line.split()[0] > user_id_list[index]:
        index += 1
        if index == len(user_id_list):
            break

#print user_sns_list
write_file = open("data/mini_user_sns.txt", 'w')
for line in user_sns_list:
    for user_id in user_id_list:
        if line.split()[1] == user_id:
            write_file.write(line)
            break
print "sns got"

print "getting user action"
#for line in write_file:
read_file = open("data/user_action.txt", 'r')
user_action_list = []
index = 0
met = False
count = 0
for line in read_file:
    count += 1
    #print count
    if met:
        if line.split()[0] != user_id_list[index]:
            index += 1
            met = False
            if index == len(user_id_list):
                break
    if line.split()[0] == user_id_list[index]:
        #print "here"
        user_action_list.append(line)
        met = True
    if line.split()[0] > user_id_list[index]:
        index += 1
        if index == len(user_id_list):
            break

#print user_action_list
write_file = open("data/mini_user_action.txt", 'w')
for line in user_action_list:
    for user_id in user_id_list:
        if line.split()[1] == user_id:
            write_file.write(line)
            break
print "user action got"

print "getting rec_log_train"
user_set = set(user_id_list)
item_set = set(item_id_list)
read_file = open("data/rec_log_train.txt", 'r')
write_file = open("data/mini_rec_log_train.txt", 'w')
count = 0
#for item in item_set:
#    print type(item)
#for user in user_set:
#    print type(user)
for line in read_file:
    words = line.split()
    # if words[0] in user_set and (words[1] in user_set or words[1] in item_set):
    if words[0] in user_set and words[1] in item_set:
        write_file.write(line)
        print count
        count += 1
print "Done"
apache-2.0
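The script above draws random line numbers and then deduplicates them, so it can return fewer than the requested number of lines. A minimal sketch of an alternative that yields exactly k lines in one pass, reservoir sampling; the function name and usage path are illustrative, not part of the original script:

import random

def sample_lines(path, k):
    # keep a "reservoir" of k lines; line i replaces a random slot
    # with probability k / (i + 1), which leaves every line equally
    # likely to be in the final sample
    reservoir = []
    with open(path) as f:
        for i, line in enumerate(f):
            if i < k:
                reservoir.append(line)
            else:
                j = random.randint(0, i)
                if j < k:
                    reservoir[j] = line
    return reservoir

# e.g. sample_lines("data/user_profile.txt", 1000)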
Softmotions/edx-platform
common/test/acceptance/pages/lms/discussion.py
36
25473
from contextlib import contextmanager from bok_choy.javascript import wait_for_js from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise, Promise from .course_page import CoursePage class DiscussionPageMixin(object): def is_ajax_finished(self): return self.browser.execute_script("return jQuery.active") == 0 class DiscussionThreadPage(PageObject, DiscussionPageMixin): url = None def __init__(self, browser, thread_selector): super(DiscussionThreadPage, self).__init__(browser) self.thread_selector = thread_selector def _find_within(self, selector): """ Returns a query corresponding to the given CSS selector within the scope of this thread page """ return self.q(css=self.thread_selector + " " + selector) def is_browser_on_page(self): return self.q(css=self.thread_selector).present def _get_element_text(self, selector): """ Returns the text of the first element matching the given selector, or None if no such element exists """ text_list = self._find_within(selector).text return text_list[0] if text_list else None def _is_element_visible(self, selector): query = self._find_within(selector) return query.present and query.visible @contextmanager def _secondary_action_menu_open(self, ancestor_selector): """ Given the selector for an ancestor of a secondary menu, return a context manager that will open and close the menu """ self._find_within(ancestor_selector + " .action-more").click() EmptyPromise( lambda: self._is_element_visible(ancestor_selector + " .actions-dropdown"), "Secondary action menu opened" ).fulfill() yield if self._is_element_visible(ancestor_selector + " .actions-dropdown"): self._find_within(ancestor_selector + " .action-more").click() EmptyPromise( lambda: not self._is_element_visible(ancestor_selector + " .actions-dropdown"), "Secondary action menu closed" ).fulfill() def get_group_visibility_label(self): """ Returns the group visibility label shown for the thread. 
""" return self._get_element_text(".group-visibility-label") def get_response_total_text(self): """Returns the response count text, or None if not present""" return self._get_element_text(".response-count") def get_num_displayed_responses(self): """Returns the number of responses actually rendered""" return len(self._find_within(".discussion-response")) def get_shown_responses_text(self): """Returns the shown response count text, or None if not present""" return self._get_element_text(".response-display-count") def get_load_responses_button_text(self): """Returns the load more responses button text, or None if not present""" return self._get_element_text(".load-response-button") def load_more_responses(self): """Clicks the load more responses button and waits for responses to load""" self._find_within(".load-response-button").click() EmptyPromise( self.is_ajax_finished, "Loading more Responses" ).fulfill() def has_add_response_button(self): """Returns true if the add response button is visible, false otherwise""" return self._is_element_visible(".add-response-btn") def click_add_response_button(self): """ Clicks the add response button and ensures that the response text field receives focus """ self._find_within(".add-response-btn").first.click() EmptyPromise( lambda: self._find_within(".discussion-reply-new textarea:focus").present, "Response field received focus" ).fulfill() @wait_for_js def is_response_editor_visible(self, response_id): """Returns true if the response editor is present, false otherwise""" return self._is_element_visible(".response_{} .edit-post-body".format(response_id)) @wait_for_js def is_discussion_body_visible(self): return self._is_element_visible(".post-body") def is_mathjax_preview_available(self): return self.q(css=".MathJax_Preview").text[0] == "" def is_mathjax_rendered(self): return self._is_element_visible(".MathJax") def is_response_visible(self, comment_id): """Returns true if the response is viewable onscreen""" return self._is_element_visible(".response_{} .response-body".format(comment_id)) def is_response_editable(self, response_id): """Returns true if the edit response button is present, false otherwise""" with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)): return self._is_element_visible(".response_{} .discussion-response .action-edit".format(response_id)) def get_response_body(self, response_id): return self._get_element_text(".response_{} .response-body".format(response_id)) def start_response_edit(self, response_id): """Click the edit button for the response, loading the editing view""" with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)): self._find_within(".response_{} .discussion-response .action-edit".format(response_id)).first.click() EmptyPromise( lambda: self.is_response_editor_visible(response_id), "Response edit started" ).fulfill() def get_link_href(self): """Extracts href attribute of the referenced link""" link_href = self._find_within(".post-body p a").attrs('href') return link_href[0] if link_href else None def get_response_vote_count(self, response_id): return self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id)) def vote_response(self, response_id): current_count = self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id)) self._find_within(".response_{} .discussion-response .action-vote".format(response_id)).first.click() self.wait_for_ajax() 
EmptyPromise( lambda: current_count != self.get_response_vote_count(response_id), "Response is voted" ).fulfill() def is_response_reported(self, response_id): return self._is_element_visible(".response_{} .discussion-response .post-label-reported".format(response_id)) def report_response(self, response_id): with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)): self._find_within(".response_{} .discussion-response .action-report".format(response_id)).first.click() self.wait_for_ajax() EmptyPromise( lambda: self.is_response_reported(response_id), "Response is reported" ).fulfill() def is_response_endorsed(self, response_id): return "endorsed" in self._get_element_text(".response_{} .discussion-response .posted-details".format(response_id)) def endorse_response(self, response_id): self._find_within(".response_{} .discussion-response .action-endorse".format(response_id)).first.click() self.wait_for_ajax() EmptyPromise( lambda: self.is_response_endorsed(response_id), "Response edit started" ).fulfill() def set_response_editor_value(self, response_id, new_body): """Replace the contents of the response editor""" self._find_within(".response_{} .discussion-response .wmd-input".format(response_id)).fill(new_body) def submit_response_edit(self, response_id, new_response_body): """Click the submit button on the response editor""" self._find_within(".response_{} .discussion-response .post-update".format(response_id)).first.click() EmptyPromise( lambda: ( not self.is_response_editor_visible(response_id) and self.is_response_visible(response_id) and self.get_response_body(response_id) == new_response_body ), "Comment edit succeeded" ).fulfill() def is_show_comments_visible(self, response_id): """Returns true if the "show comments" link is visible for a response""" return self._is_element_visible(".response_{} .action-show-comments".format(response_id)) def show_comments(self, response_id): """Click the "show comments" link for a response""" self._find_within(".response_{} .action-show-comments".format(response_id)).first.click() EmptyPromise( lambda: self._is_element_visible(".response_{} .comments".format(response_id)), "Comments shown" ).fulfill() def is_add_comment_visible(self, response_id): """Returns true if the "add comment" form is visible for a response""" return self._is_element_visible("#wmd-input-comment-body-{}".format(response_id)) def is_comment_visible(self, comment_id): """Returns true if the comment is viewable onscreen""" return self._is_element_visible("#comment_{} .response-body".format(comment_id)) def get_comment_body(self, comment_id): return self._get_element_text("#comment_{} .response-body".format(comment_id)) def is_comment_deletable(self, comment_id): """Returns true if the delete comment button is present, false otherwise""" with self._secondary_action_menu_open("#comment_{}".format(comment_id)): return self._is_element_visible("#comment_{} .action-delete".format(comment_id)) def delete_comment(self, comment_id): with self.handle_alert(): with self._secondary_action_menu_open("#comment_{}".format(comment_id)): self._find_within("#comment_{} .action-delete".format(comment_id)).first.click() EmptyPromise( lambda: not self.is_comment_visible(comment_id), "Deleted comment was removed" ).fulfill() def is_comment_editable(self, comment_id): """Returns true if the edit comment button is present, false otherwise""" with self._secondary_action_menu_open("#comment_{}".format(comment_id)): return self._is_element_visible("#comment_{} 
.action-edit".format(comment_id)) def is_comment_editor_visible(self, comment_id): """Returns true if the comment editor is present, false otherwise""" return self._is_element_visible(".edit-comment-body[data-id='{}']".format(comment_id)) def _get_comment_editor_value(self, comment_id): return self._find_within("#wmd-input-edit-comment-body-{}".format(comment_id)).text[0] def start_comment_edit(self, comment_id): """Click the edit button for the comment, loading the editing view""" old_body = self.get_comment_body(comment_id) with self._secondary_action_menu_open("#comment_{}".format(comment_id)): self._find_within("#comment_{} .action-edit".format(comment_id)).first.click() EmptyPromise( lambda: ( self.is_comment_editor_visible(comment_id) and not self.is_comment_visible(comment_id) and self._get_comment_editor_value(comment_id) == old_body ), "Comment edit started" ).fulfill() def set_comment_editor_value(self, comment_id, new_body): """Replace the contents of the comment editor""" self._find_within("#comment_{} .wmd-input".format(comment_id)).fill(new_body) def submit_comment_edit(self, comment_id, new_comment_body): """Click the submit button on the comment editor""" self._find_within("#comment_{} .post-update".format(comment_id)).first.click() EmptyPromise( lambda: ( not self.is_comment_editor_visible(comment_id) and self.is_comment_visible(comment_id) and self.get_comment_body(comment_id) == new_comment_body ), "Comment edit succeeded" ).fulfill() def cancel_comment_edit(self, comment_id, original_body): """Click the cancel button on the comment editor""" self._find_within("#comment_{} .post-cancel".format(comment_id)).first.click() EmptyPromise( lambda: ( not self.is_comment_editor_visible(comment_id) and self.is_comment_visible(comment_id) and self.get_comment_body(comment_id) == original_body ), "Comment edit was canceled" ).fulfill() class DiscussionSortPreferencePage(CoursePage): """ Page that contain the discussion board with sorting options """ def __init__(self, browser, course_id): super(DiscussionSortPreferencePage, self).__init__(browser, course_id) self.url_path = "discussion/forum" def is_browser_on_page(self): """ Return true if the browser is on the right page else false. """ return self.q(css="body.discussion .forum-nav-sort-control").present def get_selected_sort_preference(self): """ Return the text of option that is selected for sorting. """ options = self.q(css="body.discussion .forum-nav-sort-control option") return options.filter(lambda el: el.is_selected())[0].get_attribute("value") def change_sort_preference(self, sort_by): """ Change the option of sorting by clicking on new option. """ self.q(css="body.discussion .forum-nav-sort-control option[value='{0}']".format(sort_by)).click() def refresh_page(self): """ Reload the page. 
""" self.browser.refresh() class DiscussionTabSingleThreadPage(CoursePage): def __init__(self, browser, course_id, discussion_id, thread_id): super(DiscussionTabSingleThreadPage, self).__init__(browser, course_id) self.thread_page = DiscussionThreadPage( browser, "body.discussion .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id) ) self.url_path = "discussion/forum/{discussion_id}/threads/{thread_id}".format( discussion_id=discussion_id, thread_id=thread_id ) def is_browser_on_page(self): return self.thread_page.is_browser_on_page() def __getattr__(self, name): return getattr(self.thread_page, name) def close_open_thread(self): with self.thread_page._secondary_action_menu_open(".forum-thread-main-wrapper"): self._find_within(".forum-thread-main-wrapper .action-close").first.click() @wait_for_js def is_window_on_top(self): """ Check if window's scroll is at top """ return self.browser.execute_script("return $('html, body').offset().top") == 0 def _thread_is_rendered_successfully(self, thread_id): return self.q(css=".discussion-article[data-id='{}']".format(thread_id)).visible def click_and_open_thread(self, thread_id): """ Click specific thread on the list. """ thread_selector = "li[data-id='{}']".format(thread_id) self.q(css=thread_selector).first.click() EmptyPromise( lambda: self._thread_is_rendered_successfully(thread_id), "Thread has been rendered" ).fulfill() def check_threads_rendered_successfully(self, thread_count): """ Count the number of threads available on page. """ return len(self.q(css=".forum-nav-thread").results) == thread_count def check_window_is_on_top(self): """ Check window is on top of the page """ EmptyPromise( self.is_window_on_top, "Window is on top" ).fulfill() class InlineDiscussionPage(PageObject): url = None def __init__(self, browser, discussion_id): super(InlineDiscussionPage, self).__init__(browser) self._discussion_selector = ( ".discussion-module[data-discussion-id='{discussion_id}'] ".format( discussion_id=discussion_id ) ) def _find_within(self, selector): """ Returns a query corresponding to the given CSS selector within the scope of this discussion page """ return self.q(css=self._discussion_selector + " " + selector) def is_browser_on_page(self): self.wait_for_ajax() return self.q(css=self._discussion_selector).present def is_discussion_expanded(self): return self._find_within(".discussion").present def expand_discussion(self): """Click the link to expand the discussion""" self._find_within(".discussion-show").first.click() EmptyPromise( self.is_discussion_expanded, "Discussion expanded" ).fulfill() def get_num_displayed_threads(self): return len(self._find_within(".discussion-thread")) def has_thread(self, thread_id): """Returns true if this page is showing the thread with the specified id.""" return self._find_within('.discussion-thread#thread_{}'.format(thread_id)).present def element_exists(self, selector): return self.q(css=self._discussion_selector + " " + selector).present def is_new_post_opened(self): return self._find_within(".new-post-article").visible def click_element(self, selector): self.wait_for_element_presence( "{discussion} {selector}".format(discussion=self._discussion_selector, selector=selector), "{selector} is visible".format(selector=selector) ) self._find_within(selector).click() def click_cancel_new_post(self): self.click_element(".cancel") EmptyPromise( lambda: not self.is_new_post_opened(), "New post closed" ).fulfill() def click_new_post_button(self): self.click_element(".new-post-btn") EmptyPromise( 
self.is_new_post_opened, "New post opened" ).fulfill() @wait_for_js def _is_element_visible(self, selector): query = self._find_within(selector) return query.present and query.visible class InlineDiscussionThreadPage(DiscussionThreadPage): def __init__(self, browser, thread_id): super(InlineDiscussionThreadPage, self).__init__( browser, "body.courseware .discussion-module #thread_{thread_id}".format(thread_id=thread_id) ) def expand(self): """Clicks the link to expand the thread""" self._find_within(".forum-thread-expand").first.click() EmptyPromise( lambda: bool(self.get_response_total_text()), "Thread expanded" ).fulfill() def is_thread_anonymous(self): return not self.q(css=".posted-details > .username").present @wait_for_js def check_if_selector_is_focused(self, selector): """ Check if selector is focused """ return self.browser.execute_script("return $('{}').is(':focus')".format(selector)) class DiscussionUserProfilePage(CoursePage): TEXT_NEXT = u'Next >' TEXT_PREV = u'< Previous' PAGING_SELECTOR = "a.discussion-pagination[data-page-number]" def __init__(self, browser, course_id, user_id, username, page=1): super(DiscussionUserProfilePage, self).__init__(browser, course_id) self.url_path = "discussion/forum/dummy/users/{}?page={}".format(user_id, page) self.username = username def is_browser_on_page(self): return ( self.q(css='section.discussion-user-threads[data-course-id="{}"]'.format(self.course_id)).present and self.q(css='section.user-profile a.learner-profile-link').present and self.q(css='section.user-profile a.learner-profile-link').text[0] == self.username ) @wait_for_js def is_window_on_top(self): return self.browser.execute_script("return $('html, body').offset().top") == 0 def get_shown_thread_ids(self): elems = self.q(css="article.discussion-thread") return [elem.get_attribute("id")[7:] for elem in elems] def get_current_page(self): def check_func(): try: current_page = int(self.q(css="nav.discussion-paginator li.current-page").text[0]) except: return False, None return True, current_page return Promise( check_func, 'discussion-paginator current page has text', timeout=5, ).fulfill() def _check_pager(self, text, page_number=None): """ returns True if 'text' matches the text in any of the pagination elements. If page_number is provided, only return True if the element points to that result page. """ elems = self.q(css=self.PAGING_SELECTOR).filter(lambda elem: elem.text == text) if page_number: elems = elems.filter(lambda elem: int(elem.get_attribute('data-page-number')) == page_number) return elems.present def get_clickable_pages(self): return sorted([ int(elem.get_attribute('data-page-number')) for elem in self.q(css=self.PAGING_SELECTOR) if str(elem.text).isdigit() ]) def is_prev_button_shown(self, page_number=None): return self._check_pager(self.TEXT_PREV, page_number) def is_next_button_shown(self, page_number=None): return self._check_pager(self.TEXT_NEXT, page_number) def _click_pager_with_text(self, text, page_number): """ click the first pagination element with whose text is `text` and ensure the resulting page number matches `page_number`. 
""" targets = [elem for elem in self.q(css=self.PAGING_SELECTOR) if elem.text == text] targets[0].click() EmptyPromise( lambda: self.get_current_page() == page_number, "navigated to desired page" ).fulfill() def click_prev_page(self): self._click_pager_with_text(self.TEXT_PREV, self.get_current_page() - 1) EmptyPromise( self.is_window_on_top, "Window is on top" ).fulfill() def click_next_page(self): self._click_pager_with_text(self.TEXT_NEXT, self.get_current_page() + 1) EmptyPromise( self.is_window_on_top, "Window is on top" ).fulfill() def click_on_page(self, page_number): self._click_pager_with_text(unicode(page_number), page_number) EmptyPromise( self.is_window_on_top, "Window is on top" ).fulfill() def click_on_sidebar_username(self): self.wait_for_page() self.q(css='.learner-profile-link').first.click() class DiscussionTabHomePage(CoursePage, DiscussionPageMixin): ALERT_SELECTOR = ".discussion-body .forum-nav .search-alert" def __init__(self, browser, course_id): super(DiscussionTabHomePage, self).__init__(browser, course_id) self.url_path = "discussion/forum/" def is_browser_on_page(self): return self.q(css=".discussion-body section.home-header").present def perform_search(self, text="dummy"): self.q(css=".forum-nav-search-input").fill(text + chr(10)) EmptyPromise( self.is_ajax_finished, "waiting for server to return result" ).fulfill() def get_search_alert_messages(self): return self.q(css=self.ALERT_SELECTOR + " .message").text def get_search_alert_links(self): return self.q(css=self.ALERT_SELECTOR + " .link-jump") def dismiss_alert_message(self, text): """ dismiss any search alert message containing the specified text. """ def _match_messages(text): return self.q(css=".search-alert").filter(lambda elem: text in elem.text) for alert_id in _match_messages(text).attrs("id"): self.q(css="{}#{} a.dismiss".format(self.ALERT_SELECTOR, alert_id)).click() EmptyPromise( lambda: _match_messages(text).results == [], "waiting for dismissed alerts to disappear" ).fulfill() def click_new_post_button(self): """ Clicks the 'New Post' button. """ self.new_post_button.click() EmptyPromise( lambda: ( self.new_post_form ), "New post action succeeded" ).fulfill() @property def new_post_button(self): """ Returns the new post button. """ elements = self.q(css="ol.course-tabs .new-post-btn") return elements.first if elements.visible and len(elements) == 1 else None @property def new_post_form(self): """ Returns the new post form. """ elements = self.q(css=".forum-new-post-form") return elements[0] if elements.visible and len(elements) == 1 else None
agpl-3.0
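The page objects above all follow one pattern: scope CSS queries to a root selector, then block on an EmptyPromise until the UI reaches the expected state. A minimal sketch of that pattern using only the bok_choy primitives the file itself imports; the page class and selectors here are hypothetical:

from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise

class ExamplePanelPage(PageObject):
    url = None  # reached by in-page navigation, not by direct URL

    def is_browser_on_page(self):
        return self.q(css=".example-panel").present

    def toggle(self):
        # click, then wait until the visible state has actually changed
        # instead of assuming the click took effect immediately
        self.q(css=".example-panel .toggle-btn").first.click()
        EmptyPromise(
            lambda: self.q(css=".example-panel .body").visible,
            "Panel body shown"
        ).fulfill()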
Balannen/LSMASOMM
atom3/Kernel/ColoredText/configHandler.py
1
27398
"""Provides access to stored IDLE configuration information. Refer to the comments at the beginning of config-main.def for a description of the available configuration files and the design implemented to update user configuration information. In particular, user configuration choices which duplicate the defaults will be removed from the user's configuration files, and if a file becomes empty, it will be deleted. The contents of the user files may be altered using the Options/Configure IDLE menu to access the configuration GUI (configDialog.py), or manually. Throughout this module there is an emphasis on returning useable defaults when a problem occurs in returning a requested configuration value back to idle. This is to allow IDLE to continue to function in spite of errors in the retrieval of config information. When a default is returned instead of a requested config value, a message is printed to stderr to aid in configuration problem notification and resolution. """ import os import sys import string from ConfigParser import ConfigParser, NoOptionError, NoSectionError class InvalidConfigType(Exception): pass class InvalidConfigSet(Exception): pass class InvalidFgBg(Exception): pass class InvalidTheme(Exception): pass class IdleConfParser(ConfigParser): """ A ConfigParser specialised for idle configuration file handling """ def __init__(self, cfgFile, cfgDefaults=None): """ cfgFile - string, fully specified configuration file name """ self.file=cfgFile ConfigParser.__init__(self,defaults=cfgDefaults) def Get(self, section, option, type=None, default=None): """ Get an option value for given section/option or return default. If type is specified, return as type. """ if type=='bool': getVal=self.getboolean elif type=='int': getVal=self.getint else: getVal=self.get if self.has_option(section,option): #return getVal(section, option, raw, vars, default) return getVal(section, option) else: return default def GetOptionList(self,section): """ Get an option list for given section """ if self.has_section(section): return self.options(section) else: #return a default value return [] def Load(self): """ Load the configuration file from disk """ self.read(self.file) class IdleUserConfParser(IdleConfParser): """ IdleConfigParser specialised for user configuration handling. """ def AddSection(self,section): """ if section doesn't exist, add it """ if not self.has_section(section): self.add_section(section) def RemoveEmptySections(self): """ remove any sections that have no options """ for section in self.sections(): if not self.GetOptionList(section): self.remove_section(section) def IsEmpty(self): """ Remove empty sections and then return 1 if parser has no sections left, else return 0. """ self.RemoveEmptySections() if self.sections(): return 0 else: return 1 def RemoveOption(self,section,option): """ If section/option exists, remove it. Returns 1 if option was removed, 0 otherwise. """ if self.has_section(section): return self.remove_option(section,option) def SetOption(self,section,option,value): """ Sets option to value, adding section if required. Returns 1 if option was added or changed, otherwise 0. """ if self.has_option(section,option): if self.get(section,option)==value: return 0 else: self.set(section,option,value) return 1 else: if not self.has_section(section): self.add_section(section) self.set(section,option,value) return 1 def RemoveFile(self): """ Removes the user config file from disk if it exists. 
""" if os.path.exists(self.file): os.remove(self.file) def Save(self): """Update user configuration file. Remove empty sections. If resulting config isn't empty, write the file to disk. If config is empty, remove the file from disk if it exists. """ if not self.IsEmpty(): cfgFile=open(self.file,'w') self.write(cfgFile) else: self.RemoveFile() class IdleConf: """ holds config parsers for all idle config files: default config files (idle install dir)/config-main.def (idle install dir)/config-extensions.def (idle install dir)/config-highlight.def (idle install dir)/config-keys.def user config files (user home dir)/.idlerc/config-main.cfg (user home dir)/.idlerc/config-extensions.cfg (user home dir)/.idlerc/config-highlight.cfg (user home dir)/.idlerc/config-keys.cfg """ def __init__(self): self.defaultCfg={} self.userCfg={} self.cfg={} self.CreateConfigHandlers() self.LoadCfgFiles() #self.LoadCfg() def CreateConfigHandlers(self): """ set up a dictionary of config parsers for default and user configurations respectively """ #build idle install path if __name__ != '__main__': # we were imported idleDir=os.path.dirname(__file__) else: # we were exec'ed (for testing only) idleDir=os.path.abspath(sys.path[0]) userDir=self.GetUserCfgDir() configTypes=('main','extensions','highlight','keys') defCfgFiles={} usrCfgFiles={} for cfgType in configTypes: #build config file names defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def') usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg') for cfgType in configTypes: #create config parsers self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType]) self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType]) def GetUserCfgDir(self): """ Creates (if required) and returns a filesystem directory for storing user config files. """ cfgDir='.idlerc' userDir=os.path.expanduser('~') if userDir != '~': #'HOME' exists as a key in os.environ if not os.path.exists(userDir): warn=('\n Warning: HOME environment variable points to\n '+ userDir+'\n but the path does not exist.\n') sys.stderr.write(warn) userDir='~' if userDir=='~': #we still don't have a home directory #traditionally idle has defaulted to os.getcwd(), is this adeqate? userDir = os.getcwd() #hack for no real homedir userDir=os.path.join(userDir,cfgDir) if not os.path.exists(userDir): try: #make the config dir if it doesn't exist yet os.mkdir(userDir) except IOError: warn=('\n Warning: unable to create user config directory\n '+ userDir+'\n') sys.stderr.write(warn) return userDir def GetOption(self, configType, section, option, default=None, type=None): """ Get an option value for given config type and given general configuration section/option or return a default. If type is specified, return as type. Firstly the user configuration is checked, with a fallback to the default configuration, and a final 'catch all' fallback to a useable passed-in default if the option isn't present in either the user or the default configuration. configType must be one of ('main','extensions','highlight','keys') If a default is returned a warning is printed to stderr. 
""" if self.userCfg[configType].has_option(section,option): return self.userCfg[configType].Get(section, option, type=type) elif self.defaultCfg[configType].has_option(section,option): return self.defaultCfg[configType].Get(section, option, type=type) else: #returning default, print warning warning=('\n Warning: configHandler.py - IdleConf.GetOption -\n'+ ' problem retrieving configration option '+`option`+'\n'+ ' from section '+`section`+'.\n'+ ' returning default value: '+`default`+'\n') sys.stderr.write(warning) return default def GetSectionList(self, configSet, configType): """ Get a list of sections from either the user or default config for the given config type. configSet must be either 'user' or 'default' configType must be one of ('main','extensions','highlight','keys') """ if not (configType in ('main','extensions','highlight','keys')): raise InvalidConfigType, 'Invalid configType specified' if configSet == 'user': cfgParser=self.userCfg[configType] elif configSet == 'default': cfgParser=self.defaultCfg[configType] else: raise InvalidConfigSet, 'Invalid configSet specified' return cfgParser.sections() def GetHighlight(self, theme, element, fgBg=None): """ return individual highlighting theme elements. fgBg - string ('fg'or'bg') or None, if None return a dictionary containing fg and bg colours (appropriate for passing to Tkinter in, e.g., a tag_config call), otherwise fg or bg colour only as specified. """ if self.defaultCfg['highlight'].has_section(theme): themeDict=self.GetThemeDict('default',theme) else: themeDict=self.GetThemeDict('user',theme) fore=themeDict[element+'-foreground'] if element=='cursor': #there is no config value for cursor bg back=themeDict['normal-background'] else: back=themeDict[element+'-background'] highlight={"foreground": fore,"background": back} if not fgBg: #return dict of both colours return highlight else: #return specified colour only if fgBg == 'fg': return highlight["foreground"] if fgBg == 'bg': return highlight["background"] else: raise InvalidFgBg, 'Invalid fgBg specified' def GetThemeDict(self,type,themeName): """ type - string, 'default' or 'user' theme type themeName - string, theme name Returns a dictionary which holds {option:value} for each element in the specified theme. Values are loaded over a set of ultimate last fallback defaults to guarantee that all theme elements are present in a newly created theme. """ if type == 'user': cfgParser=self.userCfg['highlight'] elif type == 'default': cfgParser=self.defaultCfg['highlight'] else: raise InvalidTheme, 'Invalid theme type specified' #foreground and background values are provded for each theme element #(apart from cursor) even though all these values are not yet used #by idle, to allow for their use in the future. Default values are #generally black and white. 
theme={ 'normal-foreground':'#000000', 'normal-background':'#ffffff', 'keyword-foreground':'#000000', 'keyword-background':'#ffffff', 'comment-foreground':'#000000', 'comment-background':'#ffffff', 'string-foreground':'#000000', 'string-background':'#ffffff', 'definition-foreground':'#000000', 'definition-background':'#ffffff', 'hilite-foreground':'#000000', 'hilite-background':'gray', 'break-foreground':'#ffffff', 'break-background':'#000000', 'hit-foreground':'#ffffff', 'hit-background':'#000000', 'error-foreground':'#ffffff', 'error-background':'#000000', #cursor (only foreground can be set) 'cursor-foreground':'#000000', #shell window 'stdout-foreground':'#000000', 'stdout-background':'#ffffff', 'stderr-foreground':'#000000', 'stderr-background':'#ffffff', 'console-foreground':'#000000', 'console-background':'#ffffff' } for element in theme.keys(): if not cfgParser.has_option(themeName,element): #we are going to return a default, print warning warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'+ ' -\n problem retrieving theme element '+`element`+ '\n from theme '+`themeName`+'.\n'+ ' returning default value: '+`theme[element]`+'\n') sys.stderr.write(warning) colour=cfgParser.Get(themeName,element,default=theme[element]) theme[element]=colour return theme def CurrentTheme(self): """ Returns the name of the currently active theme """ return self.GetOption('main','Theme','name',default='') def CurrentKeys(self): """ Returns the name of the currently active key set """ return self.GetOption('main','Keys','name',default='') def GetExtensions(self, activeOnly=1): """ Gets a list of all idle extensions declared in the config files. activeOnly - boolean, if true only return active (enabled) extensions """ extns=self.RemoveKeyBindNames( self.GetSectionList('default','extensions')) userExtns=self.RemoveKeyBindNames( self.GetSectionList('user','extensions')) for extn in userExtns: if extn not in extns: #user has added own extension extns.append(extn) if activeOnly: activeExtns=[] for extn in extns: if self.GetOption('extensions',extn,'enable',default=1, type='bool'): #the extension is enabled activeExtns.append(extn) return activeExtns else: return extns def RemoveKeyBindNames(self,extnNameList): #get rid of keybinding section names names=extnNameList kbNameIndicies=[] for name in names: if name.endswith('_bindings') or name.endswith('_cfgBindings'): kbNameIndicies.append(names.index(name)) kbNameIndicies.sort() kbNameIndicies.reverse() for index in kbNameIndicies: #delete each keybinding section name del(names[index]) return names def GetExtnNameForEvent(self,virtualEvent): """ Returns the name of the extension that virtualEvent is bound in, or None if not bound in any extension. virtualEvent - string, name of the virtual event to test for, without the enclosing '<< >>' """ extName=None vEvent='<<'+virtualEvent+'>>' for extn in self.GetExtensions(activeOnly=0): for event in self.GetExtensionKeys(extn).keys(): if event == vEvent: extName=extn return extName def GetExtensionKeys(self,extensionName): """ returns a dictionary of the configurable keybindings for a particular extension,as they exist in the dictionary returned by GetCurrentKeySet; that is, where previously used bindings are disabled. 
""" keysName=extensionName+'_cfgBindings' activeKeys=self.GetCurrentKeySet() extKeys={} if self.defaultCfg['extensions'].has_section(keysName): eventNames=self.defaultCfg['extensions'].GetOptionList(keysName) for eventName in eventNames: event='<<'+eventName+'>>' binding=activeKeys[event] extKeys[event]=binding return extKeys def __GetRawExtensionKeys(self,extensionName): """ returns a dictionary of the configurable keybindings for a particular extension, as defined in the configuration files, or an empty dictionary if no bindings are found """ keysName=extensionName+'_cfgBindings' extKeys={} if self.defaultCfg['extensions'].has_section(keysName): eventNames=self.defaultCfg['extensions'].GetOptionList(keysName) for eventName in eventNames: binding=self.GetOption('extensions',keysName, eventName,default='').split() event='<<'+eventName+'>>' extKeys[event]=binding return extKeys def GetExtensionBindings(self,extensionName): """ Returns a dictionary of all the event bindings for a particular extension. The configurable keybindings are returned as they exist in the dictionary returned by GetCurrentKeySet; that is, where re-used keybindings are disabled. """ bindsName=extensionName+'_bindings' extBinds=self.GetExtensionKeys(extensionName) #add the non-configurable bindings if self.defaultCfg['extensions'].has_section(bindsName): eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName) for eventName in eventNames: binding=self.GetOption('extensions',bindsName, eventName,default='').split() event='<<'+eventName+'>>' extBinds[event]=binding return extBinds def GetKeyBinding(self, keySetName, eventStr): """ returns the keybinding for a specific event. keySetName - string, name of key binding set eventStr - string, the virtual event we want the binding for, represented as a string, eg. '<<event>>' """ eventName=eventStr[2:-2] #trim off the angle brackets binding=self.GetOption('keys',keySetName,eventName,default='').split() return binding def GetCurrentKeySet(self): return self.GetKeySet(self.CurrentKeys()) def GetKeySet(self,keySetName): """ Returns a dictionary of: all requested core keybindings, plus the keybindings for all currently active extensions. If a binding defined in an extension is already in use, that binding is disabled. """ keySet=self.GetCoreKeys(keySetName) activeExtns=self.GetExtensions(activeOnly=1) for extn in activeExtns: extKeys=self.__GetRawExtensionKeys(extn) if extKeys: #the extension defines keybindings for event in extKeys.keys(): if extKeys[event] in keySet.values(): #the binding is already in use extKeys[event]='' #disable this binding keySet[event]=extKeys[event] #add binding return keySet def IsCoreBinding(self,virtualEvent): """ returns true if the virtual event is bound in the core idle keybindings. virtualEvent - string, name of the virtual event to test for, without the enclosing '<< >>' """ return ('<<'+virtualEvent+'>>') in self.GetCoreKeys().keys() def GetCoreKeys(self, keySetName=None): """ returns the requested set of core keybindings, with fallbacks if required. Keybindings loaded from the config file(s) are loaded _over_ these defaults, so if there is a problem getting any core binding there will be an 'ultimate last resort fallback' to the CUA-ish bindings defined here. 
""" keyBindings={ '<<copy>>': ['<Control-c>', '<Control-C>'], '<<cut>>': ['<Control-x>', '<Control-X>'], '<<paste>>': ['<Control-v>', '<Control-V>'], '<<beginning-of-line>>': ['<Control-a>', '<Home>'], '<<center-insert>>': ['<Control-l>'], '<<close-all-windows>>': ['<Control-q>'], '<<close-window>>': ['<Alt-F4>'], '<<do-nothing>>': ['<Control-x>'], '<<end-of-file>>': ['<Control-d>'], '<<python-docs>>': ['<F1>'], '<<python-context-help>>': ['<Shift-F1>'], '<<history-next>>': ['<Alt-n>'], '<<history-previous>>': ['<Alt-p>'], '<<interrupt-execution>>': ['<Control-c>'], '<<view-restart>>': ['<F6>'], '<<restart-shell>>': ['<Control-F6>'], '<<open-class-browser>>': ['<Alt-c>'], '<<open-module>>': ['<Alt-m>'], '<<open-new-window>>': ['<Control-n>'], '<<open-window-from-file>>': ['<Control-o>'], '<<plain-newline-and-indent>>': ['<Control-j>'], '<<print-window>>': ['<Control-p>'], '<<redo>>': ['<Control-y>'], '<<remove-selection>>': ['<Escape>'], '<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'], '<<save-window-as-file>>': ['<Alt-s>'], '<<save-window>>': ['<Control-s>'], '<<select-all>>': ['<Alt-a>'], '<<toggle-auto-coloring>>': ['<Control-slash>'], '<<undo>>': ['<Control-z>'], '<<find-again>>': ['<Control-g>', '<F3>'], '<<find-in-files>>': ['<Alt-F3>'], '<<find-selection>>': ['<Control-F3>'], '<<find>>': ['<Control-f>'], '<<replace>>': ['<Control-h>'], '<<goto-line>>': ['<Alt-g>'], '<<smart-backspace>>': ['<Key-BackSpace>'], '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'], '<<smart-indent>>': ['<Key-Tab>'], '<<indent-region>>': ['<Control-Key-bracketright>'], '<<dedent-region>>': ['<Control-Key-bracketleft>'], '<<comment-region>>': ['<Alt-Key-3>'], '<<uncomment-region>>': ['<Alt-Key-4>'], '<<tabify-region>>': ['<Alt-Key-5>'], '<<untabify-region>>': ['<Alt-Key-6>'], '<<toggle-tabs>>': ['<Alt-Key-t>'], '<<change-indentwidth>>': ['<Alt-Key-u>'] } if keySetName: for event in keyBindings.keys(): binding=self.GetKeyBinding(keySetName,event) if binding: keyBindings[event]=binding else: #we are going to return a default, print warning warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'+ ' -\n problem retrieving key binding for event '+ `event`+'\n from key set '+`keySetName`+'.\n'+ ' returning default value: '+`keyBindings[event]`+'\n') sys.stderr.write(warning) return keyBindings def GetExtraHelpSourceList(self,configSet): """Fetch list of extra help sources from a given configSet. Valid configSets are 'user' or 'default'. Return a list of tuples of the form (menu_item , path_to_help_file , option), or return the empty list. 'option' is the sequence number of the help resource. 'option' values determine the position of the menu items on the Help menu, therefore the returned list must be sorted by 'option'. 
""" helpSources=[] if configSet=='user': cfgParser=self.userCfg['main'] elif configSet=='default': cfgParser=self.defaultCfg['main'] else: raise InvalidConfigSet, 'Invalid configSet specified' options=cfgParser.GetOptionList('HelpFiles') for option in options: value=cfgParser.Get('HelpFiles',option,default=';') if value.find(';')==-1: #malformed config entry with no ';' menuItem='' #make these empty helpPath='' #so value won't be added to list else: #config entry contains ';' as expected value=string.split(value,';') menuItem=value[0].strip() helpPath=value[1].strip() if menuItem and helpPath: #neither are empty strings helpSources.append( (menuItem,helpPath,option) ) helpSources.sort(self.__helpsort) return helpSources def __helpsort(self, h1, h2): if int(h1[2]) < int(h2[2]): return -1 elif int(h1[2]) > int(h2[2]): return 1 else: return 0 def GetAllExtraHelpSourcesList(self): """ Returns a list of tuples containing the details of all additional help sources configured, or an empty list if there are none. Tuples are of the format returned by GetExtraHelpSourceList. """ allHelpSources=( self.GetExtraHelpSourceList('default')+ self.GetExtraHelpSourceList('user') ) return allHelpSources def LoadCfgFiles(self): """ load all configuration files. """ for key in self.defaultCfg.keys(): self.defaultCfg[key].Load() self.userCfg[key].Load() #same keys def SaveUserCfgFiles(self): """ write all loaded user configuration files back to disk """ for key in self.userCfg.keys(): self.userCfg[key].Save() idleConf=IdleConf() ### module test if __name__ == '__main__': def dumpCfg(cfg): print '\n',cfg,'\n' for key in cfg.keys(): sections=cfg[key].sections() print key print sections for section in sections: options=cfg[key].options(section) print section print options for option in options: print option, '=', cfg[key].Get(section,option) dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) print idleConf.userCfg['main'].Get('Theme','name') #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
gpl-3.0
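The core idea in GetOption above is a two-level lookup: the user's config file wins, then the shipped default file, then a hard-coded fallback. A standalone sketch of that chain with plain ConfigParser objects, assuming Python 2 as in the module itself; the file names are illustrative:

from ConfigParser import ConfigParser

userCfg = ConfigParser()
userCfg.read('config-main.cfg')        # user overrides, may not exist
defaultCfg = ConfigParser()
defaultCfg.read('config-main.def')     # shipped defaults

def get_option(section, option, default=None):
    # user value wins; then the default file; then the passed-in default.
    # has_section is checked first because has_option raises
    # NoSectionError on a missing section.
    for cfg in (userCfg, defaultCfg):
        if cfg.has_section(section) and cfg.has_option(section, option):
            return cfg.get(section, option)
    return default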
0sc0d3r/enigma2
lib/python/Plugins/Extensions/DVDBurn/TitleCutter.py
52
3783
from Plugins.Extensions.CutListEditor.plugin import CutListEditor
from Components.ServiceEventTracker import ServiceEventTracker
from enigma import iPlayableService, iServiceInformation
from Tools.Directories import fileExists

class TitleCutter(CutListEditor):
    def __init__(self, session, t):
        CutListEditor.__init__(self, session, t.source)
        self.skin = CutListEditor.skin
        self.session = session
        self.t = t
        self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
            {
                iPlayableService.evUpdatedInfo: self.getPMTInfo,
                iPlayableService.evCuesheetChanged: self.refillList
            })
        self.onExecBegin.remove(self.showTutorial)

    def getPMTInfo(self):
        service = self.session.nav.getCurrentService()
        audio = service and service.audioTracks()
        n = audio and audio.getNumberOfTracks() or 0
        if n > 0:
            from Title import ConfigFixedText
            from Project import iso639language
            from Components.config import config, ConfigSubsection, ConfigSubList, ConfigSelection, ConfigYesNo
            self.t.properties.audiotracks = ConfigSubList()
            for x in range(n):
                i = audio.getTrackInfo(x)
                DVB_lang = i.getLanguage()
                description = i.getDescription()
                pid = str(i.getPID())
                if description == "MPEG":
                    description = "MP2"
                print "[audiotrack] pid:", pid, "description:", description, "language:", DVB_lang, "count:", x, "active:", (x < 8)
                self.t.properties.audiotracks.append(ConfigSubsection())
                self.t.properties.audiotracks[-1].active = ConfigYesNo(default = (x < 8))
                self.t.properties.audiotracks[-1].format = ConfigFixedText(description)
                choicelist = iso639language.getChoices()
                determined_language = iso639language.determineLanguage(DVB_lang)
                self.t.properties.audiotracks[-1].language = ConfigSelection(choices = choicelist, default=determined_language)
                self.t.properties.audiotracks[-1].pid = ConfigFixedText(pid)
                self.t.properties.audiotracks[-1].DVB_lang = ConfigFixedText(DVB_lang)
        sAspect = service.info().getInfo(iServiceInformation.sAspect)
        if sAspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ):
            aspect = "4:3"
        else:
            aspect = "16:9"
        self.t.properties.aspect.setValue(aspect)
        self.t.VideoType = service.info().getInfo(iServiceInformation.sVideoType)
        self.t.VideoPID = service.info().getInfo(iServiceInformation.sVideoPID)
        xres = service.info().getInfo(iServiceInformation.sVideoWidth)
        yres = service.info().getInfo(iServiceInformation.sVideoHeight)
        self.t.resolution = (xres, yres)
        self.t.framerate = service.info().getInfo(iServiceInformation.sFrameRate)
        self.t.progressive = service.info().getInfo(iServiceInformation.sProgressive)

    def checkAndGrabThumb(self):
        if not fileExists(self.t.inputfile.rsplit('.',1)[0] + ".png"):
            CutListEditor.grabFrame(self)

    def exit(self):
        if self.t.VideoType == -1:
            self.getPMTInfo()
        self.checkAndGrabThumb()
        self.session.nav.stopService()
        self.close(self.cut_list[:])

class CutlistReader(TitleCutter):
    skin = """
    <screen position="0,0" size="720,576">
        <eLabel position="0,0" size="720,576" zPosition="1" backgroundColor="#000000" />
        <widget name="Video" position="0,0" size="100,75" />
        <widget name="SeekState" position="0,0" />
        <widget source="cutlist" position="0,0" render="Listbox" >
            <convert type="TemplatedMultiContent">
                {"template": [
                    MultiContentEntryText(text = 1),
                    MultiContentEntryText(text = 2)
                ],
                "fonts": [gFont("Regular", 18)],
                "itemHeight": 20
                }
            </convert>
        </widget>
        <widget name="Timeline" position="0,0" />
    </screen>"""

    def __init__(self, session, t):
        TitleCutter.__init__(self, session, t)
        self.skin = CutlistReader.skin

    def getPMTInfo(self):
        TitleCutter.getPMTInfo(self)
        TitleCutter.checkAndGrabThumb(self)
        self.close(self.cut_list[:])
gpl-2.0
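getPMTInfo above reduces the DVB aspect code to a plain 4:3 / 16:9 decision. The same test pulled out as a standalone helper; the set of 4:3 codes is taken directly from the file, while the helper name is ours:

# DVB aspect codes the plugin treats as 4:3; everything else is 16:9
FOUR_BY_THREE_CODES = frozenset((1, 2, 5, 6, 9, 0xA, 0xD, 0xE))

def dvd_aspect(sAspect):
    return "4:3" if sAspect in FOUR_BY_THREE_CODES else "16:9"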
caspartse/QQ-Groups-Spider
vendor/idna/uts46data.py
143
184931
# This file is automatically generated by tools/build-uts46data.py # vim: set fileencoding=utf-8 : """IDNA Mapping Table from UTS46.""" def _seg_0(): return [ (0x0, '3'), (0x1, '3'), (0x2, '3'), (0x3, '3'), (0x4, '3'), (0x5, '3'), (0x6, '3'), (0x7, '3'), (0x8, '3'), (0x9, '3'), (0xA, '3'), (0xB, '3'), (0xC, '3'), (0xD, '3'), (0xE, '3'), (0xF, '3'), (0x10, '3'), (0x11, '3'), (0x12, '3'), (0x13, '3'), (0x14, '3'), (0x15, '3'), (0x16, '3'), (0x17, '3'), (0x18, '3'), (0x19, '3'), (0x1A, '3'), (0x1B, '3'), (0x1C, '3'), (0x1D, '3'), (0x1E, '3'), (0x1F, '3'), (0x20, '3'), (0x21, '3'), (0x22, '3'), (0x23, '3'), (0x24, '3'), (0x25, '3'), (0x26, '3'), (0x27, '3'), (0x28, '3'), (0x29, '3'), (0x2A, '3'), (0x2B, '3'), (0x2C, '3'), (0x2D, 'V'), (0x2E, 'V'), (0x2F, '3'), (0x30, 'V'), (0x31, 'V'), (0x32, 'V'), (0x33, 'V'), (0x34, 'V'), (0x35, 'V'), (0x36, 'V'), (0x37, 'V'), (0x38, 'V'), (0x39, 'V'), (0x3A, '3'), (0x3B, '3'), (0x3C, '3'), (0x3D, '3'), (0x3E, '3'), (0x3F, '3'), (0x40, '3'), (0x41, 'M', u'a'), (0x42, 'M', u'b'), (0x43, 'M', u'c'), (0x44, 'M', u'd'), (0x45, 'M', u'e'), (0x46, 'M', u'f'), (0x47, 'M', u'g'), (0x48, 'M', u'h'), (0x49, 'M', u'i'), (0x4A, 'M', u'j'), (0x4B, 'M', u'k'), (0x4C, 'M', u'l'), (0x4D, 'M', u'm'), (0x4E, 'M', u'n'), (0x4F, 'M', u'o'), (0x50, 'M', u'p'), (0x51, 'M', u'q'), (0x52, 'M', u'r'), (0x53, 'M', u's'), (0x54, 'M', u't'), (0x55, 'M', u'u'), (0x56, 'M', u'v'), (0x57, 'M', u'w'), (0x58, 'M', u'x'), (0x59, 'M', u'y'), (0x5A, 'M', u'z'), (0x5B, '3'), (0x5C, '3'), (0x5D, '3'), (0x5E, '3'), (0x5F, '3'), (0x60, '3'), (0x61, 'V'), (0x62, 'V'), (0x63, 'V'), ] def _seg_1(): return [ (0x64, 'V'), (0x65, 'V'), (0x66, 'V'), (0x67, 'V'), (0x68, 'V'), (0x69, 'V'), (0x6A, 'V'), (0x6B, 'V'), (0x6C, 'V'), (0x6D, 'V'), (0x6E, 'V'), (0x6F, 'V'), (0x70, 'V'), (0x71, 'V'), (0x72, 'V'), (0x73, 'V'), (0x74, 'V'), (0x75, 'V'), (0x76, 'V'), (0x77, 'V'), (0x78, 'V'), (0x79, 'V'), (0x7A, 'V'), (0x7B, '3'), (0x7C, '3'), (0x7D, '3'), (0x7E, '3'), (0x7F, '3'), (0x80, 'X'), (0x81, 'X'), (0x82, 'X'), (0x83, 'X'), (0x84, 'X'), (0x85, 'X'), (0x86, 'X'), (0x87, 'X'), (0x88, 'X'), (0x89, 'X'), (0x8A, 'X'), (0x8B, 'X'), (0x8C, 'X'), (0x8D, 'X'), (0x8E, 'X'), (0x8F, 'X'), (0x90, 'X'), (0x91, 'X'), (0x92, 'X'), (0x93, 'X'), (0x94, 'X'), (0x95, 'X'), (0x96, 'X'), (0x97, 'X'), (0x98, 'X'), (0x99, 'X'), (0x9A, 'X'), (0x9B, 'X'), (0x9C, 'X'), (0x9D, 'X'), (0x9E, 'X'), (0x9F, 'X'), (0xA0, '3', u' '), (0xA1, 'V'), (0xA2, 'V'), (0xA3, 'V'), (0xA4, 'V'), (0xA5, 'V'), (0xA6, 'V'), (0xA7, 'V'), (0xA8, '3', u' ̈'), (0xA9, 'V'), (0xAA, 'M', u'a'), (0xAB, 'V'), (0xAC, 'V'), (0xAD, 'I'), (0xAE, 'V'), (0xAF, '3', u' ̄'), (0xB0, 'V'), (0xB1, 'V'), (0xB2, 'M', u'2'), (0xB3, 'M', u'3'), (0xB4, '3', u' ́'), (0xB5, 'M', u'μ'), (0xB6, 'V'), (0xB7, 'V'), (0xB8, '3', u' ̧'), (0xB9, 'M', u'1'), (0xBA, 'M', u'o'), (0xBB, 'V'), (0xBC, 'M', u'1⁄4'), (0xBD, 'M', u'1⁄2'), (0xBE, 'M', u'3⁄4'), (0xBF, 'V'), (0xC0, 'M', u'à'), (0xC1, 'M', u'á'), (0xC2, 'M', u'â'), (0xC3, 'M', u'ã'), (0xC4, 'M', u'ä'), (0xC5, 'M', u'å'), (0xC6, 'M', u'æ'), (0xC7, 'M', u'ç'), ] def _seg_2(): return [ (0xC8, 'M', u'è'), (0xC9, 'M', u'é'), (0xCA, 'M', u'ê'), (0xCB, 'M', u'ë'), (0xCC, 'M', u'ì'), (0xCD, 'M', u'í'), (0xCE, 'M', u'î'), (0xCF, 'M', u'ï'), (0xD0, 'M', u'ð'), (0xD1, 'M', u'ñ'), (0xD2, 'M', u'ò'), (0xD3, 'M', u'ó'), (0xD4, 'M', u'ô'), (0xD5, 'M', u'õ'), (0xD6, 'M', u'ö'), (0xD7, 'V'), (0xD8, 'M', u'ø'), (0xD9, 'M', u'ù'), (0xDA, 'M', u'ú'), (0xDB, 'M', u'û'), (0xDC, 'M', u'ü'), (0xDD, 'M', u'ý'), (0xDE, 'M', u'þ'), (0xDF, 'D', u'ss'), (0xE0, 'V'), 
(0xE1, 'V'), (0xE2, 'V'), (0xE3, 'V'), (0xE4, 'V'), (0xE5, 'V'), (0xE6, 'V'), (0xE7, 'V'), (0xE8, 'V'), (0xE9, 'V'), (0xEA, 'V'), (0xEB, 'V'), (0xEC, 'V'), (0xED, 'V'), (0xEE, 'V'), (0xEF, 'V'), (0xF0, 'V'), (0xF1, 'V'), (0xF2, 'V'), (0xF3, 'V'), (0xF4, 'V'), (0xF5, 'V'), (0xF6, 'V'), (0xF7, 'V'), (0xF8, 'V'), (0xF9, 'V'), (0xFA, 'V'), (0xFB, 'V'), (0xFC, 'V'), (0xFD, 'V'), (0xFE, 'V'), (0xFF, 'V'), (0x100, 'M', u'ā'), (0x101, 'V'), (0x102, 'M', u'ă'), (0x103, 'V'), (0x104, 'M', u'ą'), (0x105, 'V'), (0x106, 'M', u'ć'), (0x107, 'V'), (0x108, 'M', u'ĉ'), (0x109, 'V'), (0x10A, 'M', u'ċ'), (0x10B, 'V'), (0x10C, 'M', u'č'), (0x10D, 'V'), (0x10E, 'M', u'ď'), (0x10F, 'V'), (0x110, 'M', u'đ'), (0x111, 'V'), (0x112, 'M', u'ē'), (0x113, 'V'), (0x114, 'M', u'ĕ'), (0x115, 'V'), (0x116, 'M', u'ė'), (0x117, 'V'), (0x118, 'M', u'ę'), (0x119, 'V'), (0x11A, 'M', u'ě'), (0x11B, 'V'), (0x11C, 'M', u'ĝ'), (0x11D, 'V'), (0x11E, 'M', u'ğ'), (0x11F, 'V'), (0x120, 'M', u'ġ'), (0x121, 'V'), (0x122, 'M', u'ģ'), (0x123, 'V'), (0x124, 'M', u'ĥ'), (0x125, 'V'), (0x126, 'M', u'ħ'), (0x127, 'V'), (0x128, 'M', u'ĩ'), (0x129, 'V'), (0x12A, 'M', u'ī'), (0x12B, 'V'), ] def _seg_3(): return [ (0x12C, 'M', u'ĭ'), (0x12D, 'V'), (0x12E, 'M', u'į'), (0x12F, 'V'), (0x130, 'M', u'i̇'), (0x131, 'V'), (0x132, 'M', u'ij'), (0x134, 'M', u'ĵ'), (0x135, 'V'), (0x136, 'M', u'ķ'), (0x137, 'V'), (0x139, 'M', u'ĺ'), (0x13A, 'V'), (0x13B, 'M', u'ļ'), (0x13C, 'V'), (0x13D, 'M', u'ľ'), (0x13E, 'V'), (0x13F, 'M', u'l·'), (0x141, 'M', u'ł'), (0x142, 'V'), (0x143, 'M', u'ń'), (0x144, 'V'), (0x145, 'M', u'ņ'), (0x146, 'V'), (0x147, 'M', u'ň'), (0x148, 'V'), (0x149, 'M', u'ʼn'), (0x14A, 'M', u'ŋ'), (0x14B, 'V'), (0x14C, 'M', u'ō'), (0x14D, 'V'), (0x14E, 'M', u'ŏ'), (0x14F, 'V'), (0x150, 'M', u'ő'), (0x151, 'V'), (0x152, 'M', u'œ'), (0x153, 'V'), (0x154, 'M', u'ŕ'), (0x155, 'V'), (0x156, 'M', u'ŗ'), (0x157, 'V'), (0x158, 'M', u'ř'), (0x159, 'V'), (0x15A, 'M', u'ś'), (0x15B, 'V'), (0x15C, 'M', u'ŝ'), (0x15D, 'V'), (0x15E, 'M', u'ş'), (0x15F, 'V'), (0x160, 'M', u'š'), (0x161, 'V'), (0x162, 'M', u'ţ'), (0x163, 'V'), (0x164, 'M', u'ť'), (0x165, 'V'), (0x166, 'M', u'ŧ'), (0x167, 'V'), (0x168, 'M', u'ũ'), (0x169, 'V'), (0x16A, 'M', u'ū'), (0x16B, 'V'), (0x16C, 'M', u'ŭ'), (0x16D, 'V'), (0x16E, 'M', u'ů'), (0x16F, 'V'), (0x170, 'M', u'ű'), (0x171, 'V'), (0x172, 'M', u'ų'), (0x173, 'V'), (0x174, 'M', u'ŵ'), (0x175, 'V'), (0x176, 'M', u'ŷ'), (0x177, 'V'), (0x178, 'M', u'ÿ'), (0x179, 'M', u'ź'), (0x17A, 'V'), (0x17B, 'M', u'ż'), (0x17C, 'V'), (0x17D, 'M', u'ž'), (0x17E, 'V'), (0x17F, 'M', u's'), (0x180, 'V'), (0x181, 'M', u'ɓ'), (0x182, 'M', u'ƃ'), (0x183, 'V'), (0x184, 'M', u'ƅ'), (0x185, 'V'), (0x186, 'M', u'ɔ'), (0x187, 'M', u'ƈ'), (0x188, 'V'), (0x189, 'M', u'ɖ'), (0x18A, 'M', u'ɗ'), (0x18B, 'M', u'ƌ'), (0x18C, 'V'), (0x18E, 'M', u'ǝ'), (0x18F, 'M', u'ə'), (0x190, 'M', u'ɛ'), (0x191, 'M', u'ƒ'), (0x192, 'V'), (0x193, 'M', u'ɠ'), ] def _seg_4(): return [ (0x194, 'M', u'ɣ'), (0x195, 'V'), (0x196, 'M', u'ɩ'), (0x197, 'M', u'ɨ'), (0x198, 'M', u'ƙ'), (0x199, 'V'), (0x19C, 'M', u'ɯ'), (0x19D, 'M', u'ɲ'), (0x19E, 'V'), (0x19F, 'M', u'ɵ'), (0x1A0, 'M', u'ơ'), (0x1A1, 'V'), (0x1A2, 'M', u'ƣ'), (0x1A3, 'V'), (0x1A4, 'M', u'ƥ'), (0x1A5, 'V'), (0x1A6, 'M', u'ʀ'), (0x1A7, 'M', u'ƨ'), (0x1A8, 'V'), (0x1A9, 'M', u'ʃ'), (0x1AA, 'V'), (0x1AC, 'M', u'ƭ'), (0x1AD, 'V'), (0x1AE, 'M', u'ʈ'), (0x1AF, 'M', u'ư'), (0x1B0, 'V'), (0x1B1, 'M', u'ʊ'), (0x1B2, 'M', u'ʋ'), (0x1B3, 'M', u'ƴ'), (0x1B4, 'V'), (0x1B5, 'M', u'ƶ'), (0x1B6, 'V'), (0x1B7, 'M', u'ʒ'), (0x1B8, 'M', u'ƹ'), 
(0x1B9, 'V'), (0x1BC, 'M', u'ƽ'), (0x1BD, 'V'), (0x1C4, 'M', u'dž'), (0x1C7, 'M', u'lj'), (0x1CA, 'M', u'nj'), (0x1CD, 'M', u'ǎ'), (0x1CE, 'V'), (0x1CF, 'M', u'ǐ'), (0x1D0, 'V'), (0x1D1, 'M', u'ǒ'), (0x1D2, 'V'), (0x1D3, 'M', u'ǔ'), (0x1D4, 'V'), (0x1D5, 'M', u'ǖ'), (0x1D6, 'V'), (0x1D7, 'M', u'ǘ'), (0x1D8, 'V'), (0x1D9, 'M', u'ǚ'), (0x1DA, 'V'), (0x1DB, 'M', u'ǜ'), (0x1DC, 'V'), (0x1DE, 'M', u'ǟ'), (0x1DF, 'V'), (0x1E0, 'M', u'ǡ'), (0x1E1, 'V'), (0x1E2, 'M', u'ǣ'), (0x1E3, 'V'), (0x1E4, 'M', u'ǥ'), (0x1E5, 'V'), (0x1E6, 'M', u'ǧ'), (0x1E7, 'V'), (0x1E8, 'M', u'ǩ'), (0x1E9, 'V'), (0x1EA, 'M', u'ǫ'), (0x1EB, 'V'), (0x1EC, 'M', u'ǭ'), (0x1ED, 'V'), (0x1EE, 'M', u'ǯ'), (0x1EF, 'V'), (0x1F1, 'M', u'dz'), (0x1F4, 'M', u'ǵ'), (0x1F5, 'V'), (0x1F6, 'M', u'ƕ'), (0x1F7, 'M', u'ƿ'), (0x1F8, 'M', u'ǹ'), (0x1F9, 'V'), (0x1FA, 'M', u'ǻ'), (0x1FB, 'V'), (0x1FC, 'M', u'ǽ'), (0x1FD, 'V'), (0x1FE, 'M', u'ǿ'), (0x1FF, 'V'), (0x200, 'M', u'ȁ'), (0x201, 'V'), (0x202, 'M', u'ȃ'), (0x203, 'V'), (0x204, 'M', u'ȅ'), (0x205, 'V'), (0x206, 'M', u'ȇ'), (0x207, 'V'), (0x208, 'M', u'ȉ'), (0x209, 'V'), (0x20A, 'M', u'ȋ'), (0x20B, 'V'), (0x20C, 'M', u'ȍ'), ] def _seg_5(): return [ (0x20D, 'V'), (0x20E, 'M', u'ȏ'), (0x20F, 'V'), (0x210, 'M', u'ȑ'), (0x211, 'V'), (0x212, 'M', u'ȓ'), (0x213, 'V'), (0x214, 'M', u'ȕ'), (0x215, 'V'), (0x216, 'M', u'ȗ'), (0x217, 'V'), (0x218, 'M', u'ș'), (0x219, 'V'), (0x21A, 'M', u'ț'), (0x21B, 'V'), (0x21C, 'M', u'ȝ'), (0x21D, 'V'), (0x21E, 'M', u'ȟ'), (0x21F, 'V'), (0x220, 'M', u'ƞ'), (0x221, 'V'), (0x222, 'M', u'ȣ'), (0x223, 'V'), (0x224, 'M', u'ȥ'), (0x225, 'V'), (0x226, 'M', u'ȧ'), (0x227, 'V'), (0x228, 'M', u'ȩ'), (0x229, 'V'), (0x22A, 'M', u'ȫ'), (0x22B, 'V'), (0x22C, 'M', u'ȭ'), (0x22D, 'V'), (0x22E, 'M', u'ȯ'), (0x22F, 'V'), (0x230, 'M', u'ȱ'), (0x231, 'V'), (0x232, 'M', u'ȳ'), (0x233, 'V'), (0x23A, 'M', u'ⱥ'), (0x23B, 'M', u'ȼ'), (0x23C, 'V'), (0x23D, 'M', u'ƚ'), (0x23E, 'M', u'ⱦ'), (0x23F, 'V'), (0x241, 'M', u'ɂ'), (0x242, 'V'), (0x243, 'M', u'ƀ'), (0x244, 'M', u'ʉ'), (0x245, 'M', u'ʌ'), (0x246, 'M', u'ɇ'), (0x247, 'V'), (0x248, 'M', u'ɉ'), (0x249, 'V'), (0x24A, 'M', u'ɋ'), (0x24B, 'V'), (0x24C, 'M', u'ɍ'), (0x24D, 'V'), (0x24E, 'M', u'ɏ'), (0x24F, 'V'), (0x2B0, 'M', u'h'), (0x2B1, 'M', u'ɦ'), (0x2B2, 'M', u'j'), (0x2B3, 'M', u'r'), (0x2B4, 'M', u'ɹ'), (0x2B5, 'M', u'ɻ'), (0x2B6, 'M', u'ʁ'), (0x2B7, 'M', u'w'), (0x2B8, 'M', u'y'), (0x2B9, 'V'), (0x2D8, '3', u' ̆'), (0x2D9, '3', u' ̇'), (0x2DA, '3', u' ̊'), (0x2DB, '3', u' ̨'), (0x2DC, '3', u' ̃'), (0x2DD, '3', u' ̋'), (0x2DE, 'V'), (0x2E0, 'M', u'ɣ'), (0x2E1, 'M', u'l'), (0x2E2, 'M', u's'), (0x2E3, 'M', u'x'), (0x2E4, 'M', u'ʕ'), (0x2E5, 'V'), (0x340, 'M', u'̀'), (0x341, 'M', u'́'), (0x342, 'V'), (0x343, 'M', u'̓'), (0x344, 'M', u'̈́'), (0x345, 'M', u'ι'), (0x346, 'V'), (0x34F, 'I'), (0x350, 'V'), (0x370, 'M', u'ͱ'), (0x371, 'V'), (0x372, 'M', u'ͳ'), (0x373, 'V'), (0x374, 'M', u'ʹ'), (0x375, 'V'), (0x376, 'M', u'ͷ'), (0x377, 'V'), ] def _seg_6(): return [ (0x378, 'X'), (0x37A, '3', u' ι'), (0x37B, 'V'), (0x37E, '3', u';'), (0x37F, 'X'), (0x384, '3', u' ́'), (0x385, '3', u' ̈́'), (0x386, 'M', u'ά'), (0x387, 'M', u'·'), (0x388, 'M', u'έ'), (0x389, 'M', u'ή'), (0x38A, 'M', u'ί'), (0x38B, 'X'), (0x38C, 'M', u'ό'), (0x38D, 'X'), (0x38E, 'M', u'ύ'), (0x38F, 'M', u'ώ'), (0x390, 'V'), (0x391, 'M', u'α'), (0x392, 'M', u'β'), (0x393, 'M', u'γ'), (0x394, 'M', u'δ'), (0x395, 'M', u'ε'), (0x396, 'M', u'ζ'), (0x397, 'M', u'η'), (0x398, 'M', u'θ'), (0x399, 'M', u'ι'), (0x39A, 'M', u'κ'), (0x39B, 'M', u'λ'), (0x39C, 'M', u'μ'), (0x39D, 'M', u'ν'), 
(0x39E, 'M', u'ξ'), (0x39F, 'M', u'ο'), (0x3A0, 'M', u'π'), (0x3A1, 'M', u'ρ'), (0x3A2, 'X'), (0x3A3, 'M', u'σ'), (0x3A4, 'M', u'τ'), (0x3A5, 'M', u'υ'), (0x3A6, 'M', u'φ'), (0x3A7, 'M', u'χ'), (0x3A8, 'M', u'ψ'), (0x3A9, 'M', u'ω'), (0x3AA, 'M', u'ϊ'), (0x3AB, 'M', u'ϋ'), (0x3AC, 'V'), (0x3C2, 'D', u'σ'), (0x3C3, 'V'), (0x3CF, 'M', u'ϗ'), (0x3D0, 'M', u'β'), (0x3D1, 'M', u'θ'), (0x3D2, 'M', u'υ'), (0x3D3, 'M', u'ύ'), (0x3D4, 'M', u'ϋ'), (0x3D5, 'M', u'φ'), (0x3D6, 'M', u'π'), (0x3D7, 'V'), (0x3D8, 'M', u'ϙ'), (0x3D9, 'V'), (0x3DA, 'M', u'ϛ'), (0x3DB, 'V'), (0x3DC, 'M', u'ϝ'), (0x3DD, 'V'), (0x3DE, 'M', u'ϟ'), (0x3DF, 'V'), (0x3E0, 'M', u'ϡ'), (0x3E1, 'V'), (0x3E2, 'M', u'ϣ'), (0x3E3, 'V'), (0x3E4, 'M', u'ϥ'), (0x3E5, 'V'), (0x3E6, 'M', u'ϧ'), (0x3E7, 'V'), (0x3E8, 'M', u'ϩ'), (0x3E9, 'V'), (0x3EA, 'M', u'ϫ'), (0x3EB, 'V'), (0x3EC, 'M', u'ϭ'), (0x3ED, 'V'), (0x3EE, 'M', u'ϯ'), (0x3EF, 'V'), (0x3F0, 'M', u'κ'), (0x3F1, 'M', u'ρ'), (0x3F2, 'M', u'σ'), (0x3F3, 'V'), (0x3F4, 'M', u'θ'), (0x3F5, 'M', u'ε'), (0x3F6, 'V'), (0x3F7, 'M', u'ϸ'), (0x3F8, 'V'), (0x3F9, 'M', u'σ'), (0x3FA, 'M', u'ϻ'), (0x3FB, 'V'), (0x3FD, 'M', u'ͻ'), (0x3FE, 'M', u'ͼ'), (0x3FF, 'M', u'ͽ'), (0x400, 'M', u'ѐ'), (0x401, 'M', u'ё'), (0x402, 'M', u'ђ'), (0x403, 'M', u'ѓ'), ] def _seg_7(): return [ (0x404, 'M', u'є'), (0x405, 'M', u'ѕ'), (0x406, 'M', u'і'), (0x407, 'M', u'ї'), (0x408, 'M', u'ј'), (0x409, 'M', u'љ'), (0x40A, 'M', u'њ'), (0x40B, 'M', u'ћ'), (0x40C, 'M', u'ќ'), (0x40D, 'M', u'ѝ'), (0x40E, 'M', u'ў'), (0x40F, 'M', u'џ'), (0x410, 'M', u'а'), (0x411, 'M', u'б'), (0x412, 'M', u'в'), (0x413, 'M', u'г'), (0x414, 'M', u'д'), (0x415, 'M', u'е'), (0x416, 'M', u'ж'), (0x417, 'M', u'з'), (0x418, 'M', u'и'), (0x419, 'M', u'й'), (0x41A, 'M', u'к'), (0x41B, 'M', u'л'), (0x41C, 'M', u'м'), (0x41D, 'M', u'н'), (0x41E, 'M', u'о'), (0x41F, 'M', u'п'), (0x420, 'M', u'р'), (0x421, 'M', u'с'), (0x422, 'M', u'т'), (0x423, 'M', u'у'), (0x424, 'M', u'ф'), (0x425, 'M', u'х'), (0x426, 'M', u'ц'), (0x427, 'M', u'ч'), (0x428, 'M', u'ш'), (0x429, 'M', u'щ'), (0x42A, 'M', u'ъ'), (0x42B, 'M', u'ы'), (0x42C, 'M', u'ь'), (0x42D, 'M', u'э'), (0x42E, 'M', u'ю'), (0x42F, 'M', u'я'), (0x430, 'V'), (0x460, 'M', u'ѡ'), (0x461, 'V'), (0x462, 'M', u'ѣ'), (0x463, 'V'), (0x464, 'M', u'ѥ'), (0x465, 'V'), (0x466, 'M', u'ѧ'), (0x467, 'V'), (0x468, 'M', u'ѩ'), (0x469, 'V'), (0x46A, 'M', u'ѫ'), (0x46B, 'V'), (0x46C, 'M', u'ѭ'), (0x46D, 'V'), (0x46E, 'M', u'ѯ'), (0x46F, 'V'), (0x470, 'M', u'ѱ'), (0x471, 'V'), (0x472, 'M', u'ѳ'), (0x473, 'V'), (0x474, 'M', u'ѵ'), (0x475, 'V'), (0x476, 'M', u'ѷ'), (0x477, 'V'), (0x478, 'M', u'ѹ'), (0x479, 'V'), (0x47A, 'M', u'ѻ'), (0x47B, 'V'), (0x47C, 'M', u'ѽ'), (0x47D, 'V'), (0x47E, 'M', u'ѿ'), (0x47F, 'V'), (0x480, 'M', u'ҁ'), (0x481, 'V'), (0x48A, 'M', u'ҋ'), (0x48B, 'V'), (0x48C, 'M', u'ҍ'), (0x48D, 'V'), (0x48E, 'M', u'ҏ'), (0x48F, 'V'), (0x490, 'M', u'ґ'), (0x491, 'V'), (0x492, 'M', u'ғ'), (0x493, 'V'), (0x494, 'M', u'ҕ'), (0x495, 'V'), (0x496, 'M', u'җ'), (0x497, 'V'), (0x498, 'M', u'ҙ'), (0x499, 'V'), (0x49A, 'M', u'қ'), (0x49B, 'V'), (0x49C, 'M', u'ҝ'), (0x49D, 'V'), (0x49E, 'M', u'ҟ'), ] def _seg_8(): return [ (0x49F, 'V'), (0x4A0, 'M', u'ҡ'), (0x4A1, 'V'), (0x4A2, 'M', u'ң'), (0x4A3, 'V'), (0x4A4, 'M', u'ҥ'), (0x4A5, 'V'), (0x4A6, 'M', u'ҧ'), (0x4A7, 'V'), (0x4A8, 'M', u'ҩ'), (0x4A9, 'V'), (0x4AA, 'M', u'ҫ'), (0x4AB, 'V'), (0x4AC, 'M', u'ҭ'), (0x4AD, 'V'), (0x4AE, 'M', u'ү'), (0x4AF, 'V'), (0x4B0, 'M', u'ұ'), (0x4B1, 'V'), (0x4B2, 'M', u'ҳ'), (0x4B3, 'V'), (0x4B4, 'M', u'ҵ'), (0x4B5, 'V'), (0x4B6, 'M', u'ҷ'), 
(0x4B7, 'V'), (0x4B8, 'M', u'ҹ'), (0x4B9, 'V'), (0x4BA, 'M', u'һ'), (0x4BB, 'V'), (0x4BC, 'M', u'ҽ'), (0x4BD, 'V'), (0x4BE, 'M', u'ҿ'), (0x4BF, 'V'), (0x4C0, 'X'), (0x4C1, 'M', u'ӂ'), (0x4C2, 'V'), (0x4C3, 'M', u'ӄ'), (0x4C4, 'V'), (0x4C5, 'M', u'ӆ'), (0x4C6, 'V'), (0x4C7, 'M', u'ӈ'), (0x4C8, 'V'), (0x4C9, 'M', u'ӊ'), (0x4CA, 'V'), (0x4CB, 'M', u'ӌ'), (0x4CC, 'V'), (0x4CD, 'M', u'ӎ'), (0x4CE, 'V'), (0x4D0, 'M', u'ӑ'), (0x4D1, 'V'), (0x4D2, 'M', u'ӓ'), (0x4D3, 'V'), (0x4D4, 'M', u'ӕ'), (0x4D5, 'V'), (0x4D6, 'M', u'ӗ'), (0x4D7, 'V'), (0x4D8, 'M', u'ә'), (0x4D9, 'V'), (0x4DA, 'M', u'ӛ'), (0x4DB, 'V'), (0x4DC, 'M', u'ӝ'), (0x4DD, 'V'), (0x4DE, 'M', u'ӟ'), (0x4DF, 'V'), (0x4E0, 'M', u'ӡ'), (0x4E1, 'V'), (0x4E2, 'M', u'ӣ'), (0x4E3, 'V'), (0x4E4, 'M', u'ӥ'), (0x4E5, 'V'), (0x4E6, 'M', u'ӧ'), (0x4E7, 'V'), (0x4E8, 'M', u'ө'), (0x4E9, 'V'), (0x4EA, 'M', u'ӫ'), (0x4EB, 'V'), (0x4EC, 'M', u'ӭ'), (0x4ED, 'V'), (0x4EE, 'M', u'ӯ'), (0x4EF, 'V'), (0x4F0, 'M', u'ӱ'), (0x4F1, 'V'), (0x4F2, 'M', u'ӳ'), (0x4F3, 'V'), (0x4F4, 'M', u'ӵ'), (0x4F5, 'V'), (0x4F6, 'M', u'ӷ'), (0x4F7, 'V'), (0x4F8, 'M', u'ӹ'), (0x4F9, 'V'), (0x4FA, 'M', u'ӻ'), (0x4FB, 'V'), (0x4FC, 'M', u'ӽ'), (0x4FD, 'V'), (0x4FE, 'M', u'ӿ'), (0x4FF, 'V'), (0x500, 'M', u'ԁ'), (0x501, 'V'), (0x502, 'M', u'ԃ'), (0x503, 'V'), ] def _seg_9(): return [ (0x504, 'M', u'ԅ'), (0x505, 'V'), (0x506, 'M', u'ԇ'), (0x507, 'V'), (0x508, 'M', u'ԉ'), (0x509, 'V'), (0x50A, 'M', u'ԋ'), (0x50B, 'V'), (0x50C, 'M', u'ԍ'), (0x50D, 'V'), (0x50E, 'M', u'ԏ'), (0x50F, 'V'), (0x510, 'M', u'ԑ'), (0x511, 'V'), (0x512, 'M', u'ԓ'), (0x513, 'V'), (0x514, 'M', u'ԕ'), (0x515, 'V'), (0x516, 'M', u'ԗ'), (0x517, 'V'), (0x518, 'M', u'ԙ'), (0x519, 'V'), (0x51A, 'M', u'ԛ'), (0x51B, 'V'), (0x51C, 'M', u'ԝ'), (0x51D, 'V'), (0x51E, 'M', u'ԟ'), (0x51F, 'V'), (0x520, 'M', u'ԡ'), (0x521, 'V'), (0x522, 'M', u'ԣ'), (0x523, 'V'), (0x524, 'M', u'ԥ'), (0x525, 'V'), (0x526, 'M', u'ԧ'), (0x527, 'V'), (0x528, 'X'), (0x531, 'M', u'ա'), (0x532, 'M', u'բ'), (0x533, 'M', u'գ'), (0x534, 'M', u'դ'), (0x535, 'M', u'ե'), (0x536, 'M', u'զ'), (0x537, 'M', u'է'), (0x538, 'M', u'ը'), (0x539, 'M', u'թ'), (0x53A, 'M', u'ժ'), (0x53B, 'M', u'ի'), (0x53C, 'M', u'լ'), (0x53D, 'M', u'խ'), (0x53E, 'M', u'ծ'), (0x53F, 'M', u'կ'), (0x540, 'M', u'հ'), (0x541, 'M', u'ձ'), (0x542, 'M', u'ղ'), (0x543, 'M', u'ճ'), (0x544, 'M', u'մ'), (0x545, 'M', u'յ'), (0x546, 'M', u'ն'), (0x547, 'M', u'շ'), (0x548, 'M', u'ո'), (0x549, 'M', u'չ'), (0x54A, 'M', u'պ'), (0x54B, 'M', u'ջ'), (0x54C, 'M', u'ռ'), (0x54D, 'M', u'ս'), (0x54E, 'M', u'վ'), (0x54F, 'M', u'տ'), (0x550, 'M', u'ր'), (0x551, 'M', u'ց'), (0x552, 'M', u'ւ'), (0x553, 'M', u'փ'), (0x554, 'M', u'ք'), (0x555, 'M', u'օ'), (0x556, 'M', u'ֆ'), (0x557, 'X'), (0x559, 'V'), (0x560, 'X'), (0x561, 'V'), (0x587, 'M', u'եւ'), (0x588, 'X'), (0x589, 'V'), (0x58B, 'X'), (0x58F, 'V'), (0x590, 'X'), (0x591, 'V'), (0x5C8, 'X'), (0x5D0, 'V'), (0x5EB, 'X'), (0x5F0, 'V'), (0x5F5, 'X'), (0x606, 'V'), (0x61C, 'X'), (0x61E, 'V'), (0x675, 'M', u'اٴ'), (0x676, 'M', u'وٴ'), (0x677, 'M', u'ۇٴ'), (0x678, 'M', u'يٴ'), (0x679, 'V'), (0x6DD, 'X'), ] def _seg_10(): return [ (0x6DE, 'V'), (0x70E, 'X'), (0x710, 'V'), (0x74B, 'X'), (0x74D, 'V'), (0x7B2, 'X'), (0x7C0, 'V'), (0x7FB, 'X'), (0x800, 'V'), (0x82E, 'X'), (0x830, 'V'), (0x83F, 'X'), (0x840, 'V'), (0x85C, 'X'), (0x85E, 'V'), (0x85F, 'X'), (0x8A0, 'V'), (0x8A1, 'X'), (0x8A2, 'V'), (0x8AD, 'X'), (0x8E4, 'V'), (0x8FF, 'X'), (0x900, 'V'), (0x958, 'M', u'क़'), (0x959, 'M', u'ख़'), (0x95A, 'M', u'ग़'), (0x95B, 'M', u'ज़'), (0x95C, 'M', u'ड़'), (0x95D, 'M', 
u'ढ़'), (0x95E, 'M', u'फ़'), (0x95F, 'M', u'य़'), (0x960, 'V'), (0x978, 'X'), (0x979, 'V'), (0x980, 'X'), (0x981, 'V'), (0x984, 'X'), (0x985, 'V'), (0x98D, 'X'), (0x98F, 'V'), (0x991, 'X'), (0x993, 'V'), (0x9A9, 'X'), (0x9AA, 'V'), (0x9B1, 'X'), (0x9B2, 'V'), (0x9B3, 'X'), (0x9B6, 'V'), (0x9BA, 'X'), (0x9BC, 'V'), (0x9C5, 'X'), (0x9C7, 'V'), (0x9C9, 'X'), (0x9CB, 'V'), (0x9CF, 'X'), (0x9D7, 'V'), (0x9D8, 'X'), (0x9DC, 'M', u'ড়'), (0x9DD, 'M', u'ঢ়'), (0x9DE, 'X'), (0x9DF, 'M', u'য়'), (0x9E0, 'V'), (0x9E4, 'X'), (0x9E6, 'V'), (0x9FC, 'X'), (0xA01, 'V'), (0xA04, 'X'), (0xA05, 'V'), (0xA0B, 'X'), (0xA0F, 'V'), (0xA11, 'X'), (0xA13, 'V'), (0xA29, 'X'), (0xA2A, 'V'), (0xA31, 'X'), (0xA32, 'V'), (0xA33, 'M', u'ਲ਼'), (0xA34, 'X'), (0xA35, 'V'), (0xA36, 'M', u'ਸ਼'), (0xA37, 'X'), (0xA38, 'V'), (0xA3A, 'X'), (0xA3C, 'V'), (0xA3D, 'X'), (0xA3E, 'V'), (0xA43, 'X'), (0xA47, 'V'), (0xA49, 'X'), (0xA4B, 'V'), (0xA4E, 'X'), (0xA51, 'V'), (0xA52, 'X'), (0xA59, 'M', u'ਖ਼'), (0xA5A, 'M', u'ਗ਼'), (0xA5B, 'M', u'ਜ਼'), (0xA5C, 'V'), (0xA5D, 'X'), (0xA5E, 'M', u'ਫ਼'), (0xA5F, 'X'), ] def _seg_11(): return [ (0xA66, 'V'), (0xA76, 'X'), (0xA81, 'V'), (0xA84, 'X'), (0xA85, 'V'), (0xA8E, 'X'), (0xA8F, 'V'), (0xA92, 'X'), (0xA93, 'V'), (0xAA9, 'X'), (0xAAA, 'V'), (0xAB1, 'X'), (0xAB2, 'V'), (0xAB4, 'X'), (0xAB5, 'V'), (0xABA, 'X'), (0xABC, 'V'), (0xAC6, 'X'), (0xAC7, 'V'), (0xACA, 'X'), (0xACB, 'V'), (0xACE, 'X'), (0xAD0, 'V'), (0xAD1, 'X'), (0xAE0, 'V'), (0xAE4, 'X'), (0xAE6, 'V'), (0xAF2, 'X'), (0xB01, 'V'), (0xB04, 'X'), (0xB05, 'V'), (0xB0D, 'X'), (0xB0F, 'V'), (0xB11, 'X'), (0xB13, 'V'), (0xB29, 'X'), (0xB2A, 'V'), (0xB31, 'X'), (0xB32, 'V'), (0xB34, 'X'), (0xB35, 'V'), (0xB3A, 'X'), (0xB3C, 'V'), (0xB45, 'X'), (0xB47, 'V'), (0xB49, 'X'), (0xB4B, 'V'), (0xB4E, 'X'), (0xB56, 'V'), (0xB58, 'X'), (0xB5C, 'M', u'ଡ଼'), (0xB5D, 'M', u'ଢ଼'), (0xB5E, 'X'), (0xB5F, 'V'), (0xB64, 'X'), (0xB66, 'V'), (0xB78, 'X'), (0xB82, 'V'), (0xB84, 'X'), (0xB85, 'V'), (0xB8B, 'X'), (0xB8E, 'V'), (0xB91, 'X'), (0xB92, 'V'), (0xB96, 'X'), (0xB99, 'V'), (0xB9B, 'X'), (0xB9C, 'V'), (0xB9D, 'X'), (0xB9E, 'V'), (0xBA0, 'X'), (0xBA3, 'V'), (0xBA5, 'X'), (0xBA8, 'V'), (0xBAB, 'X'), (0xBAE, 'V'), (0xBBA, 'X'), (0xBBE, 'V'), (0xBC3, 'X'), (0xBC6, 'V'), (0xBC9, 'X'), (0xBCA, 'V'), (0xBCE, 'X'), (0xBD0, 'V'), (0xBD1, 'X'), (0xBD7, 'V'), (0xBD8, 'X'), (0xBE6, 'V'), (0xBFB, 'X'), (0xC01, 'V'), (0xC04, 'X'), (0xC05, 'V'), (0xC0D, 'X'), (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), (0xC29, 'X'), (0xC2A, 'V'), (0xC34, 'X'), (0xC35, 'V'), ] def _seg_12(): return [ (0xC3A, 'X'), (0xC3D, 'V'), (0xC45, 'X'), (0xC46, 'V'), (0xC49, 'X'), (0xC4A, 'V'), (0xC4E, 'X'), (0xC55, 'V'), (0xC57, 'X'), (0xC58, 'V'), (0xC5A, 'X'), (0xC60, 'V'), (0xC64, 'X'), (0xC66, 'V'), (0xC70, 'X'), (0xC78, 'V'), (0xC80, 'X'), (0xC82, 'V'), (0xC84, 'X'), (0xC85, 'V'), (0xC8D, 'X'), (0xC8E, 'V'), (0xC91, 'X'), (0xC92, 'V'), (0xCA9, 'X'), (0xCAA, 'V'), (0xCB4, 'X'), (0xCB5, 'V'), (0xCBA, 'X'), (0xCBC, 'V'), (0xCC5, 'X'), (0xCC6, 'V'), (0xCC9, 'X'), (0xCCA, 'V'), (0xCCE, 'X'), (0xCD5, 'V'), (0xCD7, 'X'), (0xCDE, 'V'), (0xCDF, 'X'), (0xCE0, 'V'), (0xCE4, 'X'), (0xCE6, 'V'), (0xCF0, 'X'), (0xCF1, 'V'), (0xCF3, 'X'), (0xD02, 'V'), (0xD04, 'X'), (0xD05, 'V'), (0xD0D, 'X'), (0xD0E, 'V'), (0xD11, 'X'), (0xD12, 'V'), (0xD3B, 'X'), (0xD3D, 'V'), (0xD45, 'X'), (0xD46, 'V'), (0xD49, 'X'), (0xD4A, 'V'), (0xD4F, 'X'), (0xD57, 'V'), (0xD58, 'X'), (0xD60, 'V'), (0xD64, 'X'), (0xD66, 'V'), (0xD76, 'X'), (0xD79, 'V'), (0xD80, 'X'), (0xD82, 'V'), (0xD84, 'X'), (0xD85, 'V'), (0xD97, 'X'), (0xD9A, 'V'), 
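    # The long 'V'/'X' alternation through the Indic blocks simply tracks
    # which code points are assigned: unallocated gaps are disallowed ('X').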
(0xDB2, 'X'), (0xDB3, 'V'), (0xDBC, 'X'), (0xDBD, 'V'), (0xDBE, 'X'), (0xDC0, 'V'), (0xDC7, 'X'), (0xDCA, 'V'), (0xDCB, 'X'), (0xDCF, 'V'), (0xDD5, 'X'), (0xDD6, 'V'), (0xDD7, 'X'), (0xDD8, 'V'), (0xDE0, 'X'), (0xDF2, 'V'), (0xDF5, 'X'), (0xE01, 'V'), (0xE33, 'M', u'ํา'), (0xE34, 'V'), (0xE3B, 'X'), (0xE3F, 'V'), (0xE5C, 'X'), (0xE81, 'V'), (0xE83, 'X'), (0xE84, 'V'), (0xE85, 'X'), (0xE87, 'V'), ] def _seg_13(): return [ (0xE89, 'X'), (0xE8A, 'V'), (0xE8B, 'X'), (0xE8D, 'V'), (0xE8E, 'X'), (0xE94, 'V'), (0xE98, 'X'), (0xE99, 'V'), (0xEA0, 'X'), (0xEA1, 'V'), (0xEA4, 'X'), (0xEA5, 'V'), (0xEA6, 'X'), (0xEA7, 'V'), (0xEA8, 'X'), (0xEAA, 'V'), (0xEAC, 'X'), (0xEAD, 'V'), (0xEB3, 'M', u'ໍາ'), (0xEB4, 'V'), (0xEBA, 'X'), (0xEBB, 'V'), (0xEBE, 'X'), (0xEC0, 'V'), (0xEC5, 'X'), (0xEC6, 'V'), (0xEC7, 'X'), (0xEC8, 'V'), (0xECE, 'X'), (0xED0, 'V'), (0xEDA, 'X'), (0xEDC, 'M', u'ຫນ'), (0xEDD, 'M', u'ຫມ'), (0xEDE, 'V'), (0xEE0, 'X'), (0xF00, 'V'), (0xF0C, 'M', u'་'), (0xF0D, 'V'), (0xF43, 'M', u'གྷ'), (0xF44, 'V'), (0xF48, 'X'), (0xF49, 'V'), (0xF4D, 'M', u'ཌྷ'), (0xF4E, 'V'), (0xF52, 'M', u'དྷ'), (0xF53, 'V'), (0xF57, 'M', u'བྷ'), (0xF58, 'V'), (0xF5C, 'M', u'ཛྷ'), (0xF5D, 'V'), (0xF69, 'M', u'ཀྵ'), (0xF6A, 'V'), (0xF6D, 'X'), (0xF71, 'V'), (0xF73, 'M', u'ཱི'), (0xF74, 'V'), (0xF75, 'M', u'ཱུ'), (0xF76, 'M', u'ྲྀ'), (0xF77, 'M', u'ྲཱྀ'), (0xF78, 'M', u'ླྀ'), (0xF79, 'M', u'ླཱྀ'), (0xF7A, 'V'), (0xF81, 'M', u'ཱྀ'), (0xF82, 'V'), (0xF93, 'M', u'ྒྷ'), (0xF94, 'V'), (0xF98, 'X'), (0xF99, 'V'), (0xF9D, 'M', u'ྜྷ'), (0xF9E, 'V'), (0xFA2, 'M', u'ྡྷ'), (0xFA3, 'V'), (0xFA7, 'M', u'ྦྷ'), (0xFA8, 'V'), (0xFAC, 'M', u'ྫྷ'), (0xFAD, 'V'), (0xFB9, 'M', u'ྐྵ'), (0xFBA, 'V'), (0xFBD, 'X'), (0xFBE, 'V'), (0xFCD, 'X'), (0xFCE, 'V'), (0xFDB, 'X'), (0x1000, 'V'), (0x10A0, 'X'), (0x10C7, 'M', u'ⴧ'), (0x10C8, 'X'), (0x10CD, 'M', u'ⴭ'), (0x10CE, 'X'), (0x10D0, 'V'), (0x10FC, 'M', u'ნ'), (0x10FD, 'V'), (0x115F, 'X'), (0x1161, 'V'), (0x1249, 'X'), (0x124A, 'V'), (0x124E, 'X'), (0x1250, 'V'), (0x1257, 'X'), (0x1258, 'V'), ] def _seg_14(): return [ (0x1259, 'X'), (0x125A, 'V'), (0x125E, 'X'), (0x1260, 'V'), (0x1289, 'X'), (0x128A, 'V'), (0x128E, 'X'), (0x1290, 'V'), (0x12B1, 'X'), (0x12B2, 'V'), (0x12B6, 'X'), (0x12B8, 'V'), (0x12BF, 'X'), (0x12C0, 'V'), (0x12C1, 'X'), (0x12C2, 'V'), (0x12C6, 'X'), (0x12C8, 'V'), (0x12D7, 'X'), (0x12D8, 'V'), (0x1311, 'X'), (0x1312, 'V'), (0x1316, 'X'), (0x1318, 'V'), (0x135B, 'X'), (0x135D, 'V'), (0x137D, 'X'), (0x1380, 'V'), (0x139A, 'X'), (0x13A0, 'V'), (0x13F5, 'X'), (0x1400, 'V'), (0x1680, 'X'), (0x1681, 'V'), (0x169D, 'X'), (0x16A0, 'V'), (0x16F1, 'X'), (0x1700, 'V'), (0x170D, 'X'), (0x170E, 'V'), (0x1715, 'X'), (0x1720, 'V'), (0x1737, 'X'), (0x1740, 'V'), (0x1754, 'X'), (0x1760, 'V'), (0x176D, 'X'), (0x176E, 'V'), (0x1771, 'X'), (0x1772, 'V'), (0x1774, 'X'), (0x1780, 'V'), (0x17B4, 'X'), (0x17B6, 'V'), (0x17DE, 'X'), (0x17E0, 'V'), (0x17EA, 'X'), (0x17F0, 'V'), (0x17FA, 'X'), (0x1800, 'V'), (0x1806, 'X'), (0x1807, 'V'), (0x180B, 'I'), (0x180E, 'X'), (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), (0x1878, 'X'), (0x1880, 'V'), (0x18AB, 'X'), (0x18B0, 'V'), (0x18F6, 'X'), (0x1900, 'V'), (0x191D, 'X'), (0x1920, 'V'), (0x192C, 'X'), (0x1930, 'V'), (0x193C, 'X'), (0x1940, 'V'), (0x1941, 'X'), (0x1944, 'V'), (0x196E, 'X'), (0x1970, 'V'), (0x1975, 'X'), (0x1980, 'V'), (0x19AC, 'X'), (0x19B0, 'V'), (0x19CA, 'X'), (0x19D0, 'V'), (0x19DB, 'X'), (0x19DE, 'V'), (0x1A1C, 'X'), (0x1A1E, 'V'), (0x1A5F, 'X'), (0x1A60, 'V'), (0x1A7D, 'X'), (0x1A7F, 'V'), (0x1A8A, 'X'), (0x1A90, 'V'), (0x1A9A, 'X'), ] def 
_seg_15(): return [ (0x1AA0, 'V'), (0x1AAE, 'X'), (0x1B00, 'V'), (0x1B4C, 'X'), (0x1B50, 'V'), (0x1B7D, 'X'), (0x1B80, 'V'), (0x1BF4, 'X'), (0x1BFC, 'V'), (0x1C38, 'X'), (0x1C3B, 'V'), (0x1C4A, 'X'), (0x1C4D, 'V'), (0x1C80, 'X'), (0x1CC0, 'V'), (0x1CC8, 'X'), (0x1CD0, 'V'), (0x1CF7, 'X'), (0x1D00, 'V'), (0x1D2C, 'M', u'a'), (0x1D2D, 'M', u'æ'), (0x1D2E, 'M', u'b'), (0x1D2F, 'V'), (0x1D30, 'M', u'd'), (0x1D31, 'M', u'e'), (0x1D32, 'M', u'ǝ'), (0x1D33, 'M', u'g'), (0x1D34, 'M', u'h'), (0x1D35, 'M', u'i'), (0x1D36, 'M', u'j'), (0x1D37, 'M', u'k'), (0x1D38, 'M', u'l'), (0x1D39, 'M', u'm'), (0x1D3A, 'M', u'n'), (0x1D3B, 'V'), (0x1D3C, 'M', u'o'), (0x1D3D, 'M', u'ȣ'), (0x1D3E, 'M', u'p'), (0x1D3F, 'M', u'r'), (0x1D40, 'M', u't'), (0x1D41, 'M', u'u'), (0x1D42, 'M', u'w'), (0x1D43, 'M', u'a'), (0x1D44, 'M', u'ɐ'), (0x1D45, 'M', u'ɑ'), (0x1D46, 'M', u'ᴂ'), (0x1D47, 'M', u'b'), (0x1D48, 'M', u'd'), (0x1D49, 'M', u'e'), (0x1D4A, 'M', u'ə'), (0x1D4B, 'M', u'ɛ'), (0x1D4C, 'M', u'ɜ'), (0x1D4D, 'M', u'g'), (0x1D4E, 'V'), (0x1D4F, 'M', u'k'), (0x1D50, 'M', u'm'), (0x1D51, 'M', u'ŋ'), (0x1D52, 'M', u'o'), (0x1D53, 'M', u'ɔ'), (0x1D54, 'M', u'ᴖ'), (0x1D55, 'M', u'ᴗ'), (0x1D56, 'M', u'p'), (0x1D57, 'M', u't'), (0x1D58, 'M', u'u'), (0x1D59, 'M', u'ᴝ'), (0x1D5A, 'M', u'ɯ'), (0x1D5B, 'M', u'v'), (0x1D5C, 'M', u'ᴥ'), (0x1D5D, 'M', u'β'), (0x1D5E, 'M', u'γ'), (0x1D5F, 'M', u'δ'), (0x1D60, 'M', u'φ'), (0x1D61, 'M', u'χ'), (0x1D62, 'M', u'i'), (0x1D63, 'M', u'r'), (0x1D64, 'M', u'u'), (0x1D65, 'M', u'v'), (0x1D66, 'M', u'β'), (0x1D67, 'M', u'γ'), (0x1D68, 'M', u'ρ'), (0x1D69, 'M', u'φ'), (0x1D6A, 'M', u'χ'), (0x1D6B, 'V'), (0x1D78, 'M', u'н'), (0x1D79, 'V'), (0x1D9B, 'M', u'ɒ'), (0x1D9C, 'M', u'c'), (0x1D9D, 'M', u'ɕ'), (0x1D9E, 'M', u'ð'), (0x1D9F, 'M', u'ɜ'), (0x1DA0, 'M', u'f'), (0x1DA1, 'M', u'ɟ'), (0x1DA2, 'M', u'ɡ'), (0x1DA3, 'M', u'ɥ'), (0x1DA4, 'M', u'ɨ'), (0x1DA5, 'M', u'ɩ'), (0x1DA6, 'M', u'ɪ'), (0x1DA7, 'M', u'ᵻ'), (0x1DA8, 'M', u'ʝ'), (0x1DA9, 'M', u'ɭ'), ] def _seg_16(): return [ (0x1DAA, 'M', u'ᶅ'), (0x1DAB, 'M', u'ʟ'), (0x1DAC, 'M', u'ɱ'), (0x1DAD, 'M', u'ɰ'), (0x1DAE, 'M', u'ɲ'), (0x1DAF, 'M', u'ɳ'), (0x1DB0, 'M', u'ɴ'), (0x1DB1, 'M', u'ɵ'), (0x1DB2, 'M', u'ɸ'), (0x1DB3, 'M', u'ʂ'), (0x1DB4, 'M', u'ʃ'), (0x1DB5, 'M', u'ƫ'), (0x1DB6, 'M', u'ʉ'), (0x1DB7, 'M', u'ʊ'), (0x1DB8, 'M', u'ᴜ'), (0x1DB9, 'M', u'ʋ'), (0x1DBA, 'M', u'ʌ'), (0x1DBB, 'M', u'z'), (0x1DBC, 'M', u'ʐ'), (0x1DBD, 'M', u'ʑ'), (0x1DBE, 'M', u'ʒ'), (0x1DBF, 'M', u'θ'), (0x1DC0, 'V'), (0x1DE7, 'X'), (0x1DFC, 'V'), (0x1E00, 'M', u'ḁ'), (0x1E01, 'V'), (0x1E02, 'M', u'ḃ'), (0x1E03, 'V'), (0x1E04, 'M', u'ḅ'), (0x1E05, 'V'), (0x1E06, 'M', u'ḇ'), (0x1E07, 'V'), (0x1E08, 'M', u'ḉ'), (0x1E09, 'V'), (0x1E0A, 'M', u'ḋ'), (0x1E0B, 'V'), (0x1E0C, 'M', u'ḍ'), (0x1E0D, 'V'), (0x1E0E, 'M', u'ḏ'), (0x1E0F, 'V'), (0x1E10, 'M', u'ḑ'), (0x1E11, 'V'), (0x1E12, 'M', u'ḓ'), (0x1E13, 'V'), (0x1E14, 'M', u'ḕ'), (0x1E15, 'V'), (0x1E16, 'M', u'ḗ'), (0x1E17, 'V'), (0x1E18, 'M', u'ḙ'), (0x1E19, 'V'), (0x1E1A, 'M', u'ḛ'), (0x1E1B, 'V'), (0x1E1C, 'M', u'ḝ'), (0x1E1D, 'V'), (0x1E1E, 'M', u'ḟ'), (0x1E1F, 'V'), (0x1E20, 'M', u'ḡ'), (0x1E21, 'V'), (0x1E22, 'M', u'ḣ'), (0x1E23, 'V'), (0x1E24, 'M', u'ḥ'), (0x1E25, 'V'), (0x1E26, 'M', u'ḧ'), (0x1E27, 'V'), (0x1E28, 'M', u'ḩ'), (0x1E29, 'V'), (0x1E2A, 'M', u'ḫ'), (0x1E2B, 'V'), (0x1E2C, 'M', u'ḭ'), (0x1E2D, 'V'), (0x1E2E, 'M', u'ḯ'), (0x1E2F, 'V'), (0x1E30, 'M', u'ḱ'), (0x1E31, 'V'), (0x1E32, 'M', u'ḳ'), (0x1E33, 'V'), (0x1E34, 'M', u'ḵ'), (0x1E35, 'V'), (0x1E36, 'M', u'ḷ'), (0x1E37, 'V'), (0x1E38, 'M', u'ḹ'), (0x1E39, 'V'), 
(0x1E3A, 'M', u'ḻ'), (0x1E3B, 'V'), (0x1E3C, 'M', u'ḽ'), (0x1E3D, 'V'), (0x1E3E, 'M', u'ḿ'), (0x1E3F, 'V'), (0x1E40, 'M', u'ṁ'), (0x1E41, 'V'), (0x1E42, 'M', u'ṃ'), (0x1E43, 'V'), (0x1E44, 'M', u'ṅ'), (0x1E45, 'V'), (0x1E46, 'M', u'ṇ'), (0x1E47, 'V'), (0x1E48, 'M', u'ṉ'), (0x1E49, 'V'), (0x1E4A, 'M', u'ṋ'), ] def _seg_17(): return [ (0x1E4B, 'V'), (0x1E4C, 'M', u'ṍ'), (0x1E4D, 'V'), (0x1E4E, 'M', u'ṏ'), (0x1E4F, 'V'), (0x1E50, 'M', u'ṑ'), (0x1E51, 'V'), (0x1E52, 'M', u'ṓ'), (0x1E53, 'V'), (0x1E54, 'M', u'ṕ'), (0x1E55, 'V'), (0x1E56, 'M', u'ṗ'), (0x1E57, 'V'), (0x1E58, 'M', u'ṙ'), (0x1E59, 'V'), (0x1E5A, 'M', u'ṛ'), (0x1E5B, 'V'), (0x1E5C, 'M', u'ṝ'), (0x1E5D, 'V'), (0x1E5E, 'M', u'ṟ'), (0x1E5F, 'V'), (0x1E60, 'M', u'ṡ'), (0x1E61, 'V'), (0x1E62, 'M', u'ṣ'), (0x1E63, 'V'), (0x1E64, 'M', u'ṥ'), (0x1E65, 'V'), (0x1E66, 'M', u'ṧ'), (0x1E67, 'V'), (0x1E68, 'M', u'ṩ'), (0x1E69, 'V'), (0x1E6A, 'M', u'ṫ'), (0x1E6B, 'V'), (0x1E6C, 'M', u'ṭ'), (0x1E6D, 'V'), (0x1E6E, 'M', u'ṯ'), (0x1E6F, 'V'), (0x1E70, 'M', u'ṱ'), (0x1E71, 'V'), (0x1E72, 'M', u'ṳ'), (0x1E73, 'V'), (0x1E74, 'M', u'ṵ'), (0x1E75, 'V'), (0x1E76, 'M', u'ṷ'), (0x1E77, 'V'), (0x1E78, 'M', u'ṹ'), (0x1E79, 'V'), (0x1E7A, 'M', u'ṻ'), (0x1E7B, 'V'), (0x1E7C, 'M', u'ṽ'), (0x1E7D, 'V'), (0x1E7E, 'M', u'ṿ'), (0x1E7F, 'V'), (0x1E80, 'M', u'ẁ'), (0x1E81, 'V'), (0x1E82, 'M', u'ẃ'), (0x1E83, 'V'), (0x1E84, 'M', u'ẅ'), (0x1E85, 'V'), (0x1E86, 'M', u'ẇ'), (0x1E87, 'V'), (0x1E88, 'M', u'ẉ'), (0x1E89, 'V'), (0x1E8A, 'M', u'ẋ'), (0x1E8B, 'V'), (0x1E8C, 'M', u'ẍ'), (0x1E8D, 'V'), (0x1E8E, 'M', u'ẏ'), (0x1E8F, 'V'), (0x1E90, 'M', u'ẑ'), (0x1E91, 'V'), (0x1E92, 'M', u'ẓ'), (0x1E93, 'V'), (0x1E94, 'M', u'ẕ'), (0x1E95, 'V'), (0x1E9A, 'M', u'aʾ'), (0x1E9B, 'M', u'ṡ'), (0x1E9C, 'V'), (0x1E9E, 'M', u'ss'), (0x1E9F, 'V'), (0x1EA0, 'M', u'ạ'), (0x1EA1, 'V'), (0x1EA2, 'M', u'ả'), (0x1EA3, 'V'), (0x1EA4, 'M', u'ấ'), (0x1EA5, 'V'), (0x1EA6, 'M', u'ầ'), (0x1EA7, 'V'), (0x1EA8, 'M', u'ẩ'), (0x1EA9, 'V'), (0x1EAA, 'M', u'ẫ'), (0x1EAB, 'V'), (0x1EAC, 'M', u'ậ'), (0x1EAD, 'V'), (0x1EAE, 'M', u'ắ'), (0x1EAF, 'V'), (0x1EB0, 'M', u'ằ'), (0x1EB1, 'V'), (0x1EB2, 'M', u'ẳ'), (0x1EB3, 'V'), ] def _seg_18(): return [ (0x1EB4, 'M', u'ẵ'), (0x1EB5, 'V'), (0x1EB6, 'M', u'ặ'), (0x1EB7, 'V'), (0x1EB8, 'M', u'ẹ'), (0x1EB9, 'V'), (0x1EBA, 'M', u'ẻ'), (0x1EBB, 'V'), (0x1EBC, 'M', u'ẽ'), (0x1EBD, 'V'), (0x1EBE, 'M', u'ế'), (0x1EBF, 'V'), (0x1EC0, 'M', u'ề'), (0x1EC1, 'V'), (0x1EC2, 'M', u'ể'), (0x1EC3, 'V'), (0x1EC4, 'M', u'ễ'), (0x1EC5, 'V'), (0x1EC6, 'M', u'ệ'), (0x1EC7, 'V'), (0x1EC8, 'M', u'ỉ'), (0x1EC9, 'V'), (0x1ECA, 'M', u'ị'), (0x1ECB, 'V'), (0x1ECC, 'M', u'ọ'), (0x1ECD, 'V'), (0x1ECE, 'M', u'ỏ'), (0x1ECF, 'V'), (0x1ED0, 'M', u'ố'), (0x1ED1, 'V'), (0x1ED2, 'M', u'ồ'), (0x1ED3, 'V'), (0x1ED4, 'M', u'ổ'), (0x1ED5, 'V'), (0x1ED6, 'M', u'ỗ'), (0x1ED7, 'V'), (0x1ED8, 'M', u'ộ'), (0x1ED9, 'V'), (0x1EDA, 'M', u'ớ'), (0x1EDB, 'V'), (0x1EDC, 'M', u'ờ'), (0x1EDD, 'V'), (0x1EDE, 'M', u'ở'), (0x1EDF, 'V'), (0x1EE0, 'M', u'ỡ'), (0x1EE1, 'V'), (0x1EE2, 'M', u'ợ'), (0x1EE3, 'V'), (0x1EE4, 'M', u'ụ'), (0x1EE5, 'V'), (0x1EE6, 'M', u'ủ'), (0x1EE7, 'V'), (0x1EE8, 'M', u'ứ'), (0x1EE9, 'V'), (0x1EEA, 'M', u'ừ'), (0x1EEB, 'V'), (0x1EEC, 'M', u'ử'), (0x1EED, 'V'), (0x1EEE, 'M', u'ữ'), (0x1EEF, 'V'), (0x1EF0, 'M', u'ự'), (0x1EF1, 'V'), (0x1EF2, 'M', u'ỳ'), (0x1EF3, 'V'), (0x1EF4, 'M', u'ỵ'), (0x1EF5, 'V'), (0x1EF6, 'M', u'ỷ'), (0x1EF7, 'V'), (0x1EF8, 'M', u'ỹ'), (0x1EF9, 'V'), (0x1EFA, 'M', u'ỻ'), (0x1EFB, 'V'), (0x1EFC, 'M', u'ỽ'), (0x1EFD, 'V'), (0x1EFE, 'M', u'ỿ'), (0x1EFF, 'V'), (0x1F08, 'M', u'ἀ'), 
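    # Most 'M' rows in the Latin, Greek and Cyrillic blocks fold an uppercase
    # letter to its lowercase counterpart: labels are lowercased by this
    # mapping step rather than compared case-insensitively later.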
(0x1F09, 'M', u'ἁ'), (0x1F0A, 'M', u'ἂ'), (0x1F0B, 'M', u'ἃ'), (0x1F0C, 'M', u'ἄ'), (0x1F0D, 'M', u'ἅ'), (0x1F0E, 'M', u'ἆ'), (0x1F0F, 'M', u'ἇ'), (0x1F10, 'V'), (0x1F16, 'X'), (0x1F18, 'M', u'ἐ'), (0x1F19, 'M', u'ἑ'), (0x1F1A, 'M', u'ἒ'), (0x1F1B, 'M', u'ἓ'), (0x1F1C, 'M', u'ἔ'), (0x1F1D, 'M', u'ἕ'), (0x1F1E, 'X'), (0x1F20, 'V'), (0x1F28, 'M', u'ἠ'), (0x1F29, 'M', u'ἡ'), (0x1F2A, 'M', u'ἢ'), (0x1F2B, 'M', u'ἣ'), (0x1F2C, 'M', u'ἤ'), (0x1F2D, 'M', u'ἥ'), ] def _seg_19(): return [ (0x1F2E, 'M', u'ἦ'), (0x1F2F, 'M', u'ἧ'), (0x1F30, 'V'), (0x1F38, 'M', u'ἰ'), (0x1F39, 'M', u'ἱ'), (0x1F3A, 'M', u'ἲ'), (0x1F3B, 'M', u'ἳ'), (0x1F3C, 'M', u'ἴ'), (0x1F3D, 'M', u'ἵ'), (0x1F3E, 'M', u'ἶ'), (0x1F3F, 'M', u'ἷ'), (0x1F40, 'V'), (0x1F46, 'X'), (0x1F48, 'M', u'ὀ'), (0x1F49, 'M', u'ὁ'), (0x1F4A, 'M', u'ὂ'), (0x1F4B, 'M', u'ὃ'), (0x1F4C, 'M', u'ὄ'), (0x1F4D, 'M', u'ὅ'), (0x1F4E, 'X'), (0x1F50, 'V'), (0x1F58, 'X'), (0x1F59, 'M', u'ὑ'), (0x1F5A, 'X'), (0x1F5B, 'M', u'ὓ'), (0x1F5C, 'X'), (0x1F5D, 'M', u'ὕ'), (0x1F5E, 'X'), (0x1F5F, 'M', u'ὗ'), (0x1F60, 'V'), (0x1F68, 'M', u'ὠ'), (0x1F69, 'M', u'ὡ'), (0x1F6A, 'M', u'ὢ'), (0x1F6B, 'M', u'ὣ'), (0x1F6C, 'M', u'ὤ'), (0x1F6D, 'M', u'ὥ'), (0x1F6E, 'M', u'ὦ'), (0x1F6F, 'M', u'ὧ'), (0x1F70, 'V'), (0x1F71, 'M', u'ά'), (0x1F72, 'V'), (0x1F73, 'M', u'έ'), (0x1F74, 'V'), (0x1F75, 'M', u'ή'), (0x1F76, 'V'), (0x1F77, 'M', u'ί'), (0x1F78, 'V'), (0x1F79, 'M', u'ό'), (0x1F7A, 'V'), (0x1F7B, 'M', u'ύ'), (0x1F7C, 'V'), (0x1F7D, 'M', u'ώ'), (0x1F7E, 'X'), (0x1F80, 'M', u'ἀι'), (0x1F81, 'M', u'ἁι'), (0x1F82, 'M', u'ἂι'), (0x1F83, 'M', u'ἃι'), (0x1F84, 'M', u'ἄι'), (0x1F85, 'M', u'ἅι'), (0x1F86, 'M', u'ἆι'), (0x1F87, 'M', u'ἇι'), (0x1F88, 'M', u'ἀι'), (0x1F89, 'M', u'ἁι'), (0x1F8A, 'M', u'ἂι'), (0x1F8B, 'M', u'ἃι'), (0x1F8C, 'M', u'ἄι'), (0x1F8D, 'M', u'ἅι'), (0x1F8E, 'M', u'ἆι'), (0x1F8F, 'M', u'ἇι'), (0x1F90, 'M', u'ἠι'), (0x1F91, 'M', u'ἡι'), (0x1F92, 'M', u'ἢι'), (0x1F93, 'M', u'ἣι'), (0x1F94, 'M', u'ἤι'), (0x1F95, 'M', u'ἥι'), (0x1F96, 'M', u'ἦι'), (0x1F97, 'M', u'ἧι'), (0x1F98, 'M', u'ἠι'), (0x1F99, 'M', u'ἡι'), (0x1F9A, 'M', u'ἢι'), (0x1F9B, 'M', u'ἣι'), (0x1F9C, 'M', u'ἤι'), (0x1F9D, 'M', u'ἥι'), (0x1F9E, 'M', u'ἦι'), (0x1F9F, 'M', u'ἧι'), (0x1FA0, 'M', u'ὠι'), (0x1FA1, 'M', u'ὡι'), (0x1FA2, 'M', u'ὢι'), (0x1FA3, 'M', u'ὣι'), (0x1FA4, 'M', u'ὤι'), (0x1FA5, 'M', u'ὥι'), (0x1FA6, 'M', u'ὦι'), (0x1FA7, 'M', u'ὧι'), (0x1FA8, 'M', u'ὠι'), (0x1FA9, 'M', u'ὡι'), (0x1FAA, 'M', u'ὢι'), (0x1FAB, 'M', u'ὣι'), (0x1FAC, 'M', u'ὤι'), (0x1FAD, 'M', u'ὥι'), (0x1FAE, 'M', u'ὦι'), ] def _seg_20(): return [ (0x1FAF, 'M', u'ὧι'), (0x1FB0, 'V'), (0x1FB2, 'M', u'ὰι'), (0x1FB3, 'M', u'αι'), (0x1FB4, 'M', u'άι'), (0x1FB5, 'X'), (0x1FB6, 'V'), (0x1FB7, 'M', u'ᾶι'), (0x1FB8, 'M', u'ᾰ'), (0x1FB9, 'M', u'ᾱ'), (0x1FBA, 'M', u'ὰ'), (0x1FBB, 'M', u'ά'), (0x1FBC, 'M', u'αι'), (0x1FBD, '3', u' ̓'), (0x1FBE, 'M', u'ι'), (0x1FBF, '3', u' ̓'), (0x1FC0, '3', u' ͂'), (0x1FC1, '3', u' ̈͂'), (0x1FC2, 'M', u'ὴι'), (0x1FC3, 'M', u'ηι'), (0x1FC4, 'M', u'ήι'), (0x1FC5, 'X'), (0x1FC6, 'V'), (0x1FC7, 'M', u'ῆι'), (0x1FC8, 'M', u'ὲ'), (0x1FC9, 'M', u'έ'), (0x1FCA, 'M', u'ὴ'), (0x1FCB, 'M', u'ή'), (0x1FCC, 'M', u'ηι'), (0x1FCD, '3', u' ̓̀'), (0x1FCE, '3', u' ̓́'), (0x1FCF, '3', u' ̓͂'), (0x1FD0, 'V'), (0x1FD3, 'M', u'ΐ'), (0x1FD4, 'X'), (0x1FD6, 'V'), (0x1FD8, 'M', u'ῐ'), (0x1FD9, 'M', u'ῑ'), (0x1FDA, 'M', u'ὶ'), (0x1FDB, 'M', u'ί'), (0x1FDC, 'X'), (0x1FDD, '3', u' ̔̀'), (0x1FDE, '3', u' ̔́'), (0x1FDF, '3', u' ̔͂'), (0x1FE0, 'V'), (0x1FE3, 'M', u'ΰ'), (0x1FE4, 'V'), (0x1FE8, 'M', u'ῠ'), (0x1FE9, 'M', u'ῡ'), (0x1FEA, 'M', 
u'ὺ'), (0x1FEB, 'M', u'ύ'), (0x1FEC, 'M', u'ῥ'), (0x1FED, '3', u' ̈̀'), (0x1FEE, '3', u' ̈́'), (0x1FEF, '3', u'`'), (0x1FF0, 'X'), (0x1FF2, 'M', u'ὼι'), (0x1FF3, 'M', u'ωι'), (0x1FF4, 'M', u'ώι'), (0x1FF5, 'X'), (0x1FF6, 'V'), (0x1FF7, 'M', u'ῶι'), (0x1FF8, 'M', u'ὸ'), (0x1FF9, 'M', u'ό'), (0x1FFA, 'M', u'ὼ'), (0x1FFB, 'M', u'ώ'), (0x1FFC, 'M', u'ωι'), (0x1FFD, '3', u' ́'), (0x1FFE, '3', u' ̔'), (0x1FFF, 'X'), (0x2000, '3', u' '), (0x200B, 'I'), (0x200C, 'D', u''), (0x200E, 'X'), (0x2010, 'V'), (0x2011, 'M', u'‐'), (0x2012, 'V'), (0x2017, '3', u' ̳'), (0x2018, 'V'), (0x2024, 'X'), (0x2027, 'V'), (0x2028, 'X'), (0x202F, '3', u' '), (0x2030, 'V'), (0x2033, 'M', u'′′'), (0x2034, 'M', u'′′′'), (0x2035, 'V'), (0x2036, 'M', u'‵‵'), (0x2037, 'M', u'‵‵‵'), (0x2038, 'V'), (0x203C, '3', u'!!'), (0x203D, 'V'), (0x203E, '3', u' ̅'), (0x203F, 'V'), (0x2047, '3', u'??'), (0x2048, '3', u'?!'), (0x2049, '3', u'!?'), (0x204A, 'V'), (0x2057, 'M', u'′′′′'), (0x2058, 'V'), ] def _seg_21(): return [ (0x205F, '3', u' '), (0x2060, 'I'), (0x2061, 'X'), (0x2064, 'I'), (0x2065, 'X'), (0x2070, 'M', u'0'), (0x2071, 'M', u'i'), (0x2072, 'X'), (0x2074, 'M', u'4'), (0x2075, 'M', u'5'), (0x2076, 'M', u'6'), (0x2077, 'M', u'7'), (0x2078, 'M', u'8'), (0x2079, 'M', u'9'), (0x207A, '3', u'+'), (0x207B, 'M', u'−'), (0x207C, '3', u'='), (0x207D, '3', u'('), (0x207E, '3', u')'), (0x207F, 'M', u'n'), (0x2080, 'M', u'0'), (0x2081, 'M', u'1'), (0x2082, 'M', u'2'), (0x2083, 'M', u'3'), (0x2084, 'M', u'4'), (0x2085, 'M', u'5'), (0x2086, 'M', u'6'), (0x2087, 'M', u'7'), (0x2088, 'M', u'8'), (0x2089, 'M', u'9'), (0x208A, '3', u'+'), (0x208B, 'M', u'−'), (0x208C, '3', u'='), (0x208D, '3', u'('), (0x208E, '3', u')'), (0x208F, 'X'), (0x2090, 'M', u'a'), (0x2091, 'M', u'e'), (0x2092, 'M', u'o'), (0x2093, 'M', u'x'), (0x2094, 'M', u'ə'), (0x2095, 'M', u'h'), (0x2096, 'M', u'k'), (0x2097, 'M', u'l'), (0x2098, 'M', u'm'), (0x2099, 'M', u'n'), (0x209A, 'M', u'p'), (0x209B, 'M', u's'), (0x209C, 'M', u't'), (0x209D, 'X'), (0x20A0, 'V'), (0x20A8, 'M', u'rs'), (0x20A9, 'V'), (0x20BB, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), (0x2100, '3', u'a/c'), (0x2101, '3', u'a/s'), (0x2102, 'M', u'c'), (0x2103, 'M', u'°c'), (0x2104, 'V'), (0x2105, '3', u'c/o'), (0x2106, '3', u'c/u'), (0x2107, 'M', u'ɛ'), (0x2108, 'V'), (0x2109, 'M', u'°f'), (0x210A, 'M', u'g'), (0x210B, 'M', u'h'), (0x210F, 'M', u'ħ'), (0x2110, 'M', u'i'), (0x2112, 'M', u'l'), (0x2114, 'V'), (0x2115, 'M', u'n'), (0x2116, 'M', u'no'), (0x2117, 'V'), (0x2119, 'M', u'p'), (0x211A, 'M', u'q'), (0x211B, 'M', u'r'), (0x211E, 'V'), (0x2120, 'M', u'sm'), (0x2121, 'M', u'tel'), (0x2122, 'M', u'tm'), (0x2123, 'V'), (0x2124, 'M', u'z'), (0x2125, 'V'), (0x2126, 'M', u'ω'), (0x2127, 'V'), (0x2128, 'M', u'z'), (0x2129, 'V'), (0x212A, 'M', u'k'), (0x212B, 'M', u'å'), (0x212C, 'M', u'b'), (0x212D, 'M', u'c'), (0x212E, 'V'), (0x212F, 'M', u'e'), (0x2131, 'M', u'f'), (0x2132, 'X'), (0x2133, 'M', u'm'), (0x2134, 'M', u'o'), (0x2135, 'M', u'א'), ] def _seg_22(): return [ (0x2136, 'M', u'ב'), (0x2137, 'M', u'ג'), (0x2138, 'M', u'ד'), (0x2139, 'M', u'i'), (0x213A, 'V'), (0x213B, 'M', u'fax'), (0x213C, 'M', u'π'), (0x213D, 'M', u'γ'), (0x213F, 'M', u'π'), (0x2140, 'M', u'∑'), (0x2141, 'V'), (0x2145, 'M', u'd'), (0x2147, 'M', u'e'), (0x2148, 'M', u'i'), (0x2149, 'M', u'j'), (0x214A, 'V'), (0x2150, 'M', u'1⁄7'), (0x2151, 'M', u'1⁄9'), (0x2152, 'M', u'1⁄10'), (0x2153, 'M', u'1⁄3'), (0x2154, 'M', u'2⁄3'), (0x2155, 'M', u'1⁄5'), (0x2156, 'M', u'2⁄5'), (0x2157, 'M', u'3⁄5'), (0x2158, 'M', u'4⁄5'), (0x2159, 'M', u'1⁄6'), 
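    # Compatibility characters decompose here: superscript and subscript
    # digits map to plain digits, Roman numerals to letter sequences
    # (U+2177 -> 'viii'), and vulgar fractions to digit sequences joined by
    # U+2044 FRACTION SLASH ('1⁄6').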
(0x215A, 'M', u'5⁄6'), (0x215B, 'M', u'1⁄8'), (0x215C, 'M', u'3⁄8'), (0x215D, 'M', u'5⁄8'), (0x215E, 'M', u'7⁄8'), (0x215F, 'M', u'1⁄'), (0x2160, 'M', u'i'), (0x2161, 'M', u'ii'), (0x2162, 'M', u'iii'), (0x2163, 'M', u'iv'), (0x2164, 'M', u'v'), (0x2165, 'M', u'vi'), (0x2166, 'M', u'vii'), (0x2167, 'M', u'viii'), (0x2168, 'M', u'ix'), (0x2169, 'M', u'x'), (0x216A, 'M', u'xi'), (0x216B, 'M', u'xii'), (0x216C, 'M', u'l'), (0x216D, 'M', u'c'), (0x216E, 'M', u'd'), (0x216F, 'M', u'm'), (0x2170, 'M', u'i'), (0x2171, 'M', u'ii'), (0x2172, 'M', u'iii'), (0x2173, 'M', u'iv'), (0x2174, 'M', u'v'), (0x2175, 'M', u'vi'), (0x2176, 'M', u'vii'), (0x2177, 'M', u'viii'), (0x2178, 'M', u'ix'), (0x2179, 'M', u'x'), (0x217A, 'M', u'xi'), (0x217B, 'M', u'xii'), (0x217C, 'M', u'l'), (0x217D, 'M', u'c'), (0x217E, 'M', u'd'), (0x217F, 'M', u'm'), (0x2180, 'V'), (0x2183, 'X'), (0x2184, 'V'), (0x2189, 'M', u'0⁄3'), (0x218A, 'X'), (0x2190, 'V'), (0x222C, 'M', u'∫∫'), (0x222D, 'M', u'∫∫∫'), (0x222E, 'V'), (0x222F, 'M', u'∮∮'), (0x2230, 'M', u'∮∮∮'), (0x2231, 'V'), (0x2260, '3'), (0x2261, 'V'), (0x226E, '3'), (0x2270, 'V'), (0x2329, 'M', u'〈'), (0x232A, 'M', u'〉'), (0x232B, 'V'), (0x23F4, 'X'), (0x2400, 'V'), (0x2427, 'X'), (0x2440, 'V'), (0x244B, 'X'), (0x2460, 'M', u'1'), (0x2461, 'M', u'2'), (0x2462, 'M', u'3'), (0x2463, 'M', u'4'), (0x2464, 'M', u'5'), (0x2465, 'M', u'6'), (0x2466, 'M', u'7'), (0x2467, 'M', u'8'), (0x2468, 'M', u'9'), (0x2469, 'M', u'10'), (0x246A, 'M', u'11'), (0x246B, 'M', u'12'), ] def _seg_23(): return [ (0x246C, 'M', u'13'), (0x246D, 'M', u'14'), (0x246E, 'M', u'15'), (0x246F, 'M', u'16'), (0x2470, 'M', u'17'), (0x2471, 'M', u'18'), (0x2472, 'M', u'19'), (0x2473, 'M', u'20'), (0x2474, '3', u'(1)'), (0x2475, '3', u'(2)'), (0x2476, '3', u'(3)'), (0x2477, '3', u'(4)'), (0x2478, '3', u'(5)'), (0x2479, '3', u'(6)'), (0x247A, '3', u'(7)'), (0x247B, '3', u'(8)'), (0x247C, '3', u'(9)'), (0x247D, '3', u'(10)'), (0x247E, '3', u'(11)'), (0x247F, '3', u'(12)'), (0x2480, '3', u'(13)'), (0x2481, '3', u'(14)'), (0x2482, '3', u'(15)'), (0x2483, '3', u'(16)'), (0x2484, '3', u'(17)'), (0x2485, '3', u'(18)'), (0x2486, '3', u'(19)'), (0x2487, '3', u'(20)'), (0x2488, 'X'), (0x249C, '3', u'(a)'), (0x249D, '3', u'(b)'), (0x249E, '3', u'(c)'), (0x249F, '3', u'(d)'), (0x24A0, '3', u'(e)'), (0x24A1, '3', u'(f)'), (0x24A2, '3', u'(g)'), (0x24A3, '3', u'(h)'), (0x24A4, '3', u'(i)'), (0x24A5, '3', u'(j)'), (0x24A6, '3', u'(k)'), (0x24A7, '3', u'(l)'), (0x24A8, '3', u'(m)'), (0x24A9, '3', u'(n)'), (0x24AA, '3', u'(o)'), (0x24AB, '3', u'(p)'), (0x24AC, '3', u'(q)'), (0x24AD, '3', u'(r)'), (0x24AE, '3', u'(s)'), (0x24AF, '3', u'(t)'), (0x24B0, '3', u'(u)'), (0x24B1, '3', u'(v)'), (0x24B2, '3', u'(w)'), (0x24B3, '3', u'(x)'), (0x24B4, '3', u'(y)'), (0x24B5, '3', u'(z)'), (0x24B6, 'M', u'a'), (0x24B7, 'M', u'b'), (0x24B8, 'M', u'c'), (0x24B9, 'M', u'd'), (0x24BA, 'M', u'e'), (0x24BB, 'M', u'f'), (0x24BC, 'M', u'g'), (0x24BD, 'M', u'h'), (0x24BE, 'M', u'i'), (0x24BF, 'M', u'j'), (0x24C0, 'M', u'k'), (0x24C1, 'M', u'l'), (0x24C2, 'M', u'm'), (0x24C3, 'M', u'n'), (0x24C4, 'M', u'o'), (0x24C5, 'M', u'p'), (0x24C6, 'M', u'q'), (0x24C7, 'M', u'r'), (0x24C8, 'M', u's'), (0x24C9, 'M', u't'), (0x24CA, 'M', u'u'), (0x24CB, 'M', u'v'), (0x24CC, 'M', u'w'), (0x24CD, 'M', u'x'), (0x24CE, 'M', u'y'), (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), (0x24D5, 'M', u'f'), (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), (0x24D9, 'M', u'j'), (0x24DA, 
'M', u'k'), (0x24DB, 'M', u'l'), (0x24DC, 'M', u'm'), (0x24DD, 'M', u'n'), (0x24DE, 'M', u'o'), (0x24DF, 'M', u'p'), (0x24E0, 'M', u'q'), (0x24E1, 'M', u'r'), (0x24E2, 'M', u's'), ] def _seg_24(): return [ (0x24E3, 'M', u't'), (0x24E4, 'M', u'u'), (0x24E5, 'M', u'v'), (0x24E6, 'M', u'w'), (0x24E7, 'M', u'x'), (0x24E8, 'M', u'y'), (0x24E9, 'M', u'z'), (0x24EA, 'M', u'0'), (0x24EB, 'V'), (0x2700, 'X'), (0x2701, 'V'), (0x2A0C, 'M', u'∫∫∫∫'), (0x2A0D, 'V'), (0x2A74, '3', u'::='), (0x2A75, '3', u'=='), (0x2A76, '3', u'==='), (0x2A77, 'V'), (0x2ADC, 'M', u'⫝̸'), (0x2ADD, 'V'), (0x2B4D, 'X'), (0x2B50, 'V'), (0x2B5A, 'X'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), (0x2C03, 'M', u'ⰳ'), (0x2C04, 'M', u'ⰴ'), (0x2C05, 'M', u'ⰵ'), (0x2C06, 'M', u'ⰶ'), (0x2C07, 'M', u'ⰷ'), (0x2C08, 'M', u'ⰸ'), (0x2C09, 'M', u'ⰹ'), (0x2C0A, 'M', u'ⰺ'), (0x2C0B, 'M', u'ⰻ'), (0x2C0C, 'M', u'ⰼ'), (0x2C0D, 'M', u'ⰽ'), (0x2C0E, 'M', u'ⰾ'), (0x2C0F, 'M', u'ⰿ'), (0x2C10, 'M', u'ⱀ'), (0x2C11, 'M', u'ⱁ'), (0x2C12, 'M', u'ⱂ'), (0x2C13, 'M', u'ⱃ'), (0x2C14, 'M', u'ⱄ'), (0x2C15, 'M', u'ⱅ'), (0x2C16, 'M', u'ⱆ'), (0x2C17, 'M', u'ⱇ'), (0x2C18, 'M', u'ⱈ'), (0x2C19, 'M', u'ⱉ'), (0x2C1A, 'M', u'ⱊ'), (0x2C1B, 'M', u'ⱋ'), (0x2C1C, 'M', u'ⱌ'), (0x2C1D, 'M', u'ⱍ'), (0x2C1E, 'M', u'ⱎ'), (0x2C1F, 'M', u'ⱏ'), (0x2C20, 'M', u'ⱐ'), (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), (0x2C26, 'M', u'ⱖ'), (0x2C27, 'M', u'ⱗ'), (0x2C28, 'M', u'ⱘ'), (0x2C29, 'M', u'ⱙ'), (0x2C2A, 'M', u'ⱚ'), (0x2C2B, 'M', u'ⱛ'), (0x2C2C, 'M', u'ⱜ'), (0x2C2D, 'M', u'ⱝ'), (0x2C2E, 'M', u'ⱞ'), (0x2C2F, 'X'), (0x2C30, 'V'), (0x2C5F, 'X'), (0x2C60, 'M', u'ⱡ'), (0x2C61, 'V'), (0x2C62, 'M', u'ɫ'), (0x2C63, 'M', u'ᵽ'), (0x2C64, 'M', u'ɽ'), (0x2C65, 'V'), (0x2C67, 'M', u'ⱨ'), (0x2C68, 'V'), (0x2C69, 'M', u'ⱪ'), (0x2C6A, 'V'), (0x2C6B, 'M', u'ⱬ'), (0x2C6C, 'V'), (0x2C6D, 'M', u'ɑ'), (0x2C6E, 'M', u'ɱ'), (0x2C6F, 'M', u'ɐ'), (0x2C70, 'M', u'ɒ'), (0x2C71, 'V'), (0x2C72, 'M', u'ⱳ'), (0x2C73, 'V'), (0x2C75, 'M', u'ⱶ'), (0x2C76, 'V'), (0x2C7C, 'M', u'j'), (0x2C7D, 'M', u'v'), (0x2C7E, 'M', u'ȿ'), (0x2C7F, 'M', u'ɀ'), (0x2C80, 'M', u'ⲁ'), (0x2C81, 'V'), (0x2C82, 'M', u'ⲃ'), ] def _seg_25(): return [ (0x2C83, 'V'), (0x2C84, 'M', u'ⲅ'), (0x2C85, 'V'), (0x2C86, 'M', u'ⲇ'), (0x2C87, 'V'), (0x2C88, 'M', u'ⲉ'), (0x2C89, 'V'), (0x2C8A, 'M', u'ⲋ'), (0x2C8B, 'V'), (0x2C8C, 'M', u'ⲍ'), (0x2C8D, 'V'), (0x2C8E, 'M', u'ⲏ'), (0x2C8F, 'V'), (0x2C90, 'M', u'ⲑ'), (0x2C91, 'V'), (0x2C92, 'M', u'ⲓ'), (0x2C93, 'V'), (0x2C94, 'M', u'ⲕ'), (0x2C95, 'V'), (0x2C96, 'M', u'ⲗ'), (0x2C97, 'V'), (0x2C98, 'M', u'ⲙ'), (0x2C99, 'V'), (0x2C9A, 'M', u'ⲛ'), (0x2C9B, 'V'), (0x2C9C, 'M', u'ⲝ'), (0x2C9D, 'V'), (0x2C9E, 'M', u'ⲟ'), (0x2C9F, 'V'), (0x2CA0, 'M', u'ⲡ'), (0x2CA1, 'V'), (0x2CA2, 'M', u'ⲣ'), (0x2CA3, 'V'), (0x2CA4, 'M', u'ⲥ'), (0x2CA5, 'V'), (0x2CA6, 'M', u'ⲧ'), (0x2CA7, 'V'), (0x2CA8, 'M', u'ⲩ'), (0x2CA9, 'V'), (0x2CAA, 'M', u'ⲫ'), (0x2CAB, 'V'), (0x2CAC, 'M', u'ⲭ'), (0x2CAD, 'V'), (0x2CAE, 'M', u'ⲯ'), (0x2CAF, 'V'), (0x2CB0, 'M', u'ⲱ'), (0x2CB1, 'V'), (0x2CB2, 'M', u'ⲳ'), (0x2CB3, 'V'), (0x2CB4, 'M', u'ⲵ'), (0x2CB5, 'V'), (0x2CB6, 'M', u'ⲷ'), (0x2CB7, 'V'), (0x2CB8, 'M', u'ⲹ'), (0x2CB9, 'V'), (0x2CBA, 'M', u'ⲻ'), (0x2CBB, 'V'), (0x2CBC, 'M', u'ⲽ'), (0x2CBD, 'V'), (0x2CBE, 'M', u'ⲿ'), (0x2CBF, 'V'), (0x2CC0, 'M', u'ⳁ'), (0x2CC1, 'V'), (0x2CC2, 'M', u'ⳃ'), (0x2CC3, 'V'), (0x2CC4, 'M', u'ⳅ'), (0x2CC5, 'V'), (0x2CC6, 'M', u'ⳇ'), (0x2CC7, 'V'), (0x2CC8, 'M', u'ⳉ'), (0x2CC9, 'V'), (0x2CCA, 'M', u'ⳋ'), (0x2CCB, 'V'), (0x2CCC, 'M', u'ⳍ'), 
(0x2CCD, 'V'), (0x2CCE, 'M', u'ⳏ'), (0x2CCF, 'V'), (0x2CD0, 'M', u'ⳑ'), (0x2CD1, 'V'), (0x2CD2, 'M', u'ⳓ'), (0x2CD3, 'V'), (0x2CD4, 'M', u'ⳕ'), (0x2CD5, 'V'), (0x2CD6, 'M', u'ⳗ'), (0x2CD7, 'V'), (0x2CD8, 'M', u'ⳙ'), (0x2CD9, 'V'), (0x2CDA, 'M', u'ⳛ'), (0x2CDB, 'V'), (0x2CDC, 'M', u'ⳝ'), (0x2CDD, 'V'), (0x2CDE, 'M', u'ⳟ'), (0x2CDF, 'V'), (0x2CE0, 'M', u'ⳡ'), (0x2CE1, 'V'), (0x2CE2, 'M', u'ⳣ'), (0x2CE3, 'V'), (0x2CEB, 'M', u'ⳬ'), (0x2CEC, 'V'), (0x2CED, 'M', u'ⳮ'), ] def _seg_26(): return [ (0x2CEE, 'V'), (0x2CF2, 'M', u'ⳳ'), (0x2CF3, 'V'), (0x2CF4, 'X'), (0x2CF9, 'V'), (0x2D26, 'X'), (0x2D27, 'V'), (0x2D28, 'X'), (0x2D2D, 'V'), (0x2D2E, 'X'), (0x2D30, 'V'), (0x2D68, 'X'), (0x2D6F, 'M', u'ⵡ'), (0x2D70, 'V'), (0x2D71, 'X'), (0x2D7F, 'V'), (0x2D97, 'X'), (0x2DA0, 'V'), (0x2DA7, 'X'), (0x2DA8, 'V'), (0x2DAF, 'X'), (0x2DB0, 'V'), (0x2DB7, 'X'), (0x2DB8, 'V'), (0x2DBF, 'X'), (0x2DC0, 'V'), (0x2DC7, 'X'), (0x2DC8, 'V'), (0x2DCF, 'X'), (0x2DD0, 'V'), (0x2DD7, 'X'), (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), (0x2E3C, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), (0x2E9F, 'M', u'母'), (0x2EA0, 'V'), (0x2EF3, 'M', u'龟'), (0x2EF4, 'X'), (0x2F00, 'M', u'一'), (0x2F01, 'M', u'丨'), (0x2F02, 'M', u'丶'), (0x2F03, 'M', u'丿'), (0x2F04, 'M', u'乙'), (0x2F05, 'M', u'亅'), (0x2F06, 'M', u'二'), (0x2F07, 'M', u'亠'), (0x2F08, 'M', u'人'), (0x2F09, 'M', u'儿'), (0x2F0A, 'M', u'入'), (0x2F0B, 'M', u'八'), (0x2F0C, 'M', u'冂'), (0x2F0D, 'M', u'冖'), (0x2F0E, 'M', u'冫'), (0x2F0F, 'M', u'几'), (0x2F10, 'M', u'凵'), (0x2F11, 'M', u'刀'), (0x2F12, 'M', u'力'), (0x2F13, 'M', u'勹'), (0x2F14, 'M', u'匕'), (0x2F15, 'M', u'匚'), (0x2F16, 'M', u'匸'), (0x2F17, 'M', u'十'), (0x2F18, 'M', u'卜'), (0x2F19, 'M', u'卩'), (0x2F1A, 'M', u'厂'), (0x2F1B, 'M', u'厶'), (0x2F1C, 'M', u'又'), (0x2F1D, 'M', u'口'), (0x2F1E, 'M', u'囗'), (0x2F1F, 'M', u'土'), (0x2F20, 'M', u'士'), (0x2F21, 'M', u'夂'), (0x2F22, 'M', u'夊'), (0x2F23, 'M', u'夕'), (0x2F24, 'M', u'大'), (0x2F25, 'M', u'女'), (0x2F26, 'M', u'子'), (0x2F27, 'M', u'宀'), (0x2F28, 'M', u'寸'), (0x2F29, 'M', u'小'), (0x2F2A, 'M', u'尢'), (0x2F2B, 'M', u'尸'), (0x2F2C, 'M', u'屮'), (0x2F2D, 'M', u'山'), (0x2F2E, 'M', u'巛'), (0x2F2F, 'M', u'工'), (0x2F30, 'M', u'己'), (0x2F31, 'M', u'巾'), (0x2F32, 'M', u'干'), (0x2F33, 'M', u'幺'), (0x2F34, 'M', u'广'), (0x2F35, 'M', u'廴'), (0x2F36, 'M', u'廾'), (0x2F37, 'M', u'弋'), (0x2F38, 'M', u'弓'), (0x2F39, 'M', u'彐'), ] def _seg_27(): return [ (0x2F3A, 'M', u'彡'), (0x2F3B, 'M', u'彳'), (0x2F3C, 'M', u'心'), (0x2F3D, 'M', u'戈'), (0x2F3E, 'M', u'戶'), (0x2F3F, 'M', u'手'), (0x2F40, 'M', u'支'), (0x2F41, 'M', u'攴'), (0x2F42, 'M', u'文'), (0x2F43, 'M', u'斗'), (0x2F44, 'M', u'斤'), (0x2F45, 'M', u'方'), (0x2F46, 'M', u'无'), (0x2F47, 'M', u'日'), (0x2F48, 'M', u'曰'), (0x2F49, 'M', u'月'), (0x2F4A, 'M', u'木'), (0x2F4B, 'M', u'欠'), (0x2F4C, 'M', u'止'), (0x2F4D, 'M', u'歹'), (0x2F4E, 'M', u'殳'), (0x2F4F, 'M', u'毋'), (0x2F50, 'M', u'比'), (0x2F51, 'M', u'毛'), (0x2F52, 'M', u'氏'), (0x2F53, 'M', u'气'), (0x2F54, 'M', u'水'), (0x2F55, 'M', u'火'), (0x2F56, 'M', u'爪'), (0x2F57, 'M', u'父'), (0x2F58, 'M', u'爻'), (0x2F59, 'M', u'爿'), (0x2F5A, 'M', u'片'), (0x2F5B, 'M', u'牙'), (0x2F5C, 'M', u'牛'), (0x2F5D, 'M', u'犬'), (0x2F5E, 'M', u'玄'), (0x2F5F, 'M', u'玉'), (0x2F60, 'M', u'瓜'), (0x2F61, 'M', u'瓦'), (0x2F62, 'M', u'甘'), (0x2F63, 'M', u'生'), (0x2F64, 'M', u'用'), (0x2F65, 'M', u'田'), (0x2F66, 'M', u'疋'), (0x2F67, 'M', u'疒'), (0x2F68, 'M', u'癶'), (0x2F69, 'M', u'白'), (0x2F6A, 'M', u'皮'), (0x2F6B, 'M', u'皿'), (0x2F6C, 'M', u'目'), (0x2F6D, 'M', u'矛'), (0x2F6E, 'M', u'矢'), (0x2F6F, 'M', u'石'), (0x2F70, 'M', u'示'), 
(0x2F71, 'M', u'禸'), (0x2F72, 'M', u'禾'), (0x2F73, 'M', u'穴'), (0x2F74, 'M', u'立'), (0x2F75, 'M', u'竹'), (0x2F76, 'M', u'米'), (0x2F77, 'M', u'糸'), (0x2F78, 'M', u'缶'), (0x2F79, 'M', u'网'), (0x2F7A, 'M', u'羊'), (0x2F7B, 'M', u'羽'), (0x2F7C, 'M', u'老'), (0x2F7D, 'M', u'而'), (0x2F7E, 'M', u'耒'), (0x2F7F, 'M', u'耳'), (0x2F80, 'M', u'聿'), (0x2F81, 'M', u'肉'), (0x2F82, 'M', u'臣'), (0x2F83, 'M', u'自'), (0x2F84, 'M', u'至'), (0x2F85, 'M', u'臼'), (0x2F86, 'M', u'舌'), (0x2F87, 'M', u'舛'), (0x2F88, 'M', u'舟'), (0x2F89, 'M', u'艮'), (0x2F8A, 'M', u'色'), (0x2F8B, 'M', u'艸'), (0x2F8C, 'M', u'虍'), (0x2F8D, 'M', u'虫'), (0x2F8E, 'M', u'血'), (0x2F8F, 'M', u'行'), (0x2F90, 'M', u'衣'), (0x2F91, 'M', u'襾'), (0x2F92, 'M', u'見'), (0x2F93, 'M', u'角'), (0x2F94, 'M', u'言'), (0x2F95, 'M', u'谷'), (0x2F96, 'M', u'豆'), (0x2F97, 'M', u'豕'), (0x2F98, 'M', u'豸'), (0x2F99, 'M', u'貝'), (0x2F9A, 'M', u'赤'), (0x2F9B, 'M', u'走'), (0x2F9C, 'M', u'足'), (0x2F9D, 'M', u'身'), ] def _seg_28(): return [ (0x2F9E, 'M', u'車'), (0x2F9F, 'M', u'辛'), (0x2FA0, 'M', u'辰'), (0x2FA1, 'M', u'辵'), (0x2FA2, 'M', u'邑'), (0x2FA3, 'M', u'酉'), (0x2FA4, 'M', u'釆'), (0x2FA5, 'M', u'里'), (0x2FA6, 'M', u'金'), (0x2FA7, 'M', u'長'), (0x2FA8, 'M', u'門'), (0x2FA9, 'M', u'阜'), (0x2FAA, 'M', u'隶'), (0x2FAB, 'M', u'隹'), (0x2FAC, 'M', u'雨'), (0x2FAD, 'M', u'靑'), (0x2FAE, 'M', u'非'), (0x2FAF, 'M', u'面'), (0x2FB0, 'M', u'革'), (0x2FB1, 'M', u'韋'), (0x2FB2, 'M', u'韭'), (0x2FB3, 'M', u'音'), (0x2FB4, 'M', u'頁'), (0x2FB5, 'M', u'風'), (0x2FB6, 'M', u'飛'), (0x2FB7, 'M', u'食'), (0x2FB8, 'M', u'首'), (0x2FB9, 'M', u'香'), (0x2FBA, 'M', u'馬'), (0x2FBB, 'M', u'骨'), (0x2FBC, 'M', u'高'), (0x2FBD, 'M', u'髟'), (0x2FBE, 'M', u'鬥'), (0x2FBF, 'M', u'鬯'), (0x2FC0, 'M', u'鬲'), (0x2FC1, 'M', u'鬼'), (0x2FC2, 'M', u'魚'), (0x2FC3, 'M', u'鳥'), (0x2FC4, 'M', u'鹵'), (0x2FC5, 'M', u'鹿'), (0x2FC6, 'M', u'麥'), (0x2FC7, 'M', u'麻'), (0x2FC8, 'M', u'黃'), (0x2FC9, 'M', u'黍'), (0x2FCA, 'M', u'黑'), (0x2FCB, 'M', u'黹'), (0x2FCC, 'M', u'黽'), (0x2FCD, 'M', u'鼎'), (0x2FCE, 'M', u'鼓'), (0x2FCF, 'M', u'鼠'), (0x2FD0, 'M', u'鼻'), (0x2FD1, 'M', u'齊'), (0x2FD2, 'M', u'齒'), (0x2FD3, 'M', u'龍'), (0x2FD4, 'M', u'龜'), (0x2FD5, 'M', u'龠'), (0x2FD6, 'X'), (0x3000, '3', u' '), (0x3001, 'V'), (0x3002, 'M', u'.'), (0x3003, 'V'), (0x3036, 'M', u'〒'), (0x3037, 'V'), (0x3038, 'M', u'十'), (0x3039, 'M', u'卄'), (0x303A, 'M', u'卅'), (0x303B, 'V'), (0x3040, 'X'), (0x3041, 'V'), (0x3097, 'X'), (0x3099, 'V'), (0x309B, '3', u' ゙'), (0x309C, '3', u' ゚'), (0x309D, 'V'), (0x309F, 'M', u'より'), (0x30A0, 'V'), (0x30FF, 'M', u'コト'), (0x3100, 'X'), (0x3105, 'V'), (0x312E, 'X'), (0x3131, 'M', u'ᄀ'), (0x3132, 'M', u'ᄁ'), (0x3133, 'M', u'ᆪ'), (0x3134, 'M', u'ᄂ'), (0x3135, 'M', u'ᆬ'), (0x3136, 'M', u'ᆭ'), (0x3137, 'M', u'ᄃ'), (0x3138, 'M', u'ᄄ'), (0x3139, 'M', u'ᄅ'), (0x313A, 'M', u'ᆰ'), (0x313B, 'M', u'ᆱ'), (0x313C, 'M', u'ᆲ'), (0x313D, 'M', u'ᆳ'), (0x313E, 'M', u'ᆴ'), (0x313F, 'M', u'ᆵ'), (0x3140, 'M', u'ᄚ'), (0x3141, 'M', u'ᄆ'), (0x3142, 'M', u'ᄇ'), (0x3143, 'M', u'ᄈ'), (0x3144, 'M', u'ᄡ'), ] def _seg_29(): return [ (0x3145, 'M', u'ᄉ'), (0x3146, 'M', u'ᄊ'), (0x3147, 'M', u'ᄋ'), (0x3148, 'M', u'ᄌ'), (0x3149, 'M', u'ᄍ'), (0x314A, 'M', u'ᄎ'), (0x314B, 'M', u'ᄏ'), (0x314C, 'M', u'ᄐ'), (0x314D, 'M', u'ᄑ'), (0x314E, 'M', u'ᄒ'), (0x314F, 'M', u'ᅡ'), (0x3150, 'M', u'ᅢ'), (0x3151, 'M', u'ᅣ'), (0x3152, 'M', u'ᅤ'), (0x3153, 'M', u'ᅥ'), (0x3154, 'M', u'ᅦ'), (0x3155, 'M', u'ᅧ'), (0x3156, 'M', u'ᅨ'), (0x3157, 'M', u'ᅩ'), (0x3158, 'M', u'ᅪ'), (0x3159, 'M', u'ᅫ'), (0x315A, 'M', u'ᅬ'), (0x315B, 'M', u'ᅭ'), (0x315C, 'M', u'ᅮ'), (0x315D, 'M', u'ᅯ'), (0x315E, 
'M', u'ᅰ'), (0x315F, 'M', u'ᅱ'), (0x3160, 'M', u'ᅲ'), (0x3161, 'M', u'ᅳ'), (0x3162, 'M', u'ᅴ'), (0x3163, 'M', u'ᅵ'), (0x3164, 'X'), (0x3165, 'M', u'ᄔ'), (0x3166, 'M', u'ᄕ'), (0x3167, 'M', u'ᇇ'), (0x3168, 'M', u'ᇈ'), (0x3169, 'M', u'ᇌ'), (0x316A, 'M', u'ᇎ'), (0x316B, 'M', u'ᇓ'), (0x316C, 'M', u'ᇗ'), (0x316D, 'M', u'ᇙ'), (0x316E, 'M', u'ᄜ'), (0x316F, 'M', u'ᇝ'), (0x3170, 'M', u'ᇟ'), (0x3171, 'M', u'ᄝ'), (0x3172, 'M', u'ᄞ'), (0x3173, 'M', u'ᄠ'), (0x3174, 'M', u'ᄢ'), (0x3175, 'M', u'ᄣ'), (0x3176, 'M', u'ᄧ'), (0x3177, 'M', u'ᄩ'), (0x3178, 'M', u'ᄫ'), (0x3179, 'M', u'ᄬ'), (0x317A, 'M', u'ᄭ'), (0x317B, 'M', u'ᄮ'), (0x317C, 'M', u'ᄯ'), (0x317D, 'M', u'ᄲ'), (0x317E, 'M', u'ᄶ'), (0x317F, 'M', u'ᅀ'), (0x3180, 'M', u'ᅇ'), (0x3181, 'M', u'ᅌ'), (0x3182, 'M', u'ᇱ'), (0x3183, 'M', u'ᇲ'), (0x3184, 'M', u'ᅗ'), (0x3185, 'M', u'ᅘ'), (0x3186, 'M', u'ᅙ'), (0x3187, 'M', u'ᆄ'), (0x3188, 'M', u'ᆅ'), (0x3189, 'M', u'ᆈ'), (0x318A, 'M', u'ᆑ'), (0x318B, 'M', u'ᆒ'), (0x318C, 'M', u'ᆔ'), (0x318D, 'M', u'ᆞ'), (0x318E, 'M', u'ᆡ'), (0x318F, 'X'), (0x3190, 'V'), (0x3192, 'M', u'一'), (0x3193, 'M', u'二'), (0x3194, 'M', u'三'), (0x3195, 'M', u'四'), (0x3196, 'M', u'上'), (0x3197, 'M', u'中'), (0x3198, 'M', u'下'), (0x3199, 'M', u'甲'), (0x319A, 'M', u'乙'), (0x319B, 'M', u'丙'), (0x319C, 'M', u'丁'), (0x319D, 'M', u'天'), (0x319E, 'M', u'地'), (0x319F, 'M', u'人'), (0x31A0, 'V'), (0x31BB, 'X'), (0x31C0, 'V'), (0x31E4, 'X'), (0x31F0, 'V'), (0x3200, '3', u'(ᄀ)'), (0x3201, '3', u'(ᄂ)'), (0x3202, '3', u'(ᄃ)'), (0x3203, '3', u'(ᄅ)'), (0x3204, '3', u'(ᄆ)'), ] def _seg_30(): return [ (0x3205, '3', u'(ᄇ)'), (0x3206, '3', u'(ᄉ)'), (0x3207, '3', u'(ᄋ)'), (0x3208, '3', u'(ᄌ)'), (0x3209, '3', u'(ᄎ)'), (0x320A, '3', u'(ᄏ)'), (0x320B, '3', u'(ᄐ)'), (0x320C, '3', u'(ᄑ)'), (0x320D, '3', u'(ᄒ)'), (0x320E, '3', u'(가)'), (0x320F, '3', u'(나)'), (0x3210, '3', u'(다)'), (0x3211, '3', u'(라)'), (0x3212, '3', u'(마)'), (0x3213, '3', u'(바)'), (0x3214, '3', u'(사)'), (0x3215, '3', u'(아)'), (0x3216, '3', u'(자)'), (0x3217, '3', u'(차)'), (0x3218, '3', u'(카)'), (0x3219, '3', u'(타)'), (0x321A, '3', u'(파)'), (0x321B, '3', u'(하)'), (0x321C, '3', u'(주)'), (0x321D, '3', u'(오전)'), (0x321E, '3', u'(오후)'), (0x321F, 'X'), (0x3220, '3', u'(一)'), (0x3221, '3', u'(二)'), (0x3222, '3', u'(三)'), (0x3223, '3', u'(四)'), (0x3224, '3', u'(五)'), (0x3225, '3', u'(六)'), (0x3226, '3', u'(七)'), (0x3227, '3', u'(八)'), (0x3228, '3', u'(九)'), (0x3229, '3', u'(十)'), (0x322A, '3', u'(月)'), (0x322B, '3', u'(火)'), (0x322C, '3', u'(水)'), (0x322D, '3', u'(木)'), (0x322E, '3', u'(金)'), (0x322F, '3', u'(土)'), (0x3230, '3', u'(日)'), (0x3231, '3', u'(株)'), (0x3232, '3', u'(有)'), (0x3233, '3', u'(社)'), (0x3234, '3', u'(名)'), (0x3235, '3', u'(特)'), (0x3236, '3', u'(財)'), (0x3237, '3', u'(祝)'), (0x3238, '3', u'(労)'), (0x3239, '3', u'(代)'), (0x323A, '3', u'(呼)'), (0x323B, '3', u'(学)'), (0x323C, '3', u'(監)'), (0x323D, '3', u'(企)'), (0x323E, '3', u'(資)'), (0x323F, '3', u'(協)'), (0x3240, '3', u'(祭)'), (0x3241, '3', u'(休)'), (0x3242, '3', u'(自)'), (0x3243, '3', u'(至)'), (0x3244, 'M', u'問'), (0x3245, 'M', u'幼'), (0x3246, 'M', u'文'), (0x3247, 'M', u'箏'), (0x3248, 'V'), (0x3250, 'M', u'pte'), (0x3251, 'M', u'21'), (0x3252, 'M', u'22'), (0x3253, 'M', u'23'), (0x3254, 'M', u'24'), (0x3255, 'M', u'25'), (0x3256, 'M', u'26'), (0x3257, 'M', u'27'), (0x3258, 'M', u'28'), (0x3259, 'M', u'29'), (0x325A, 'M', u'30'), (0x325B, 'M', u'31'), (0x325C, 'M', u'32'), (0x325D, 'M', u'33'), (0x325E, 'M', u'34'), (0x325F, 'M', u'35'), (0x3260, 'M', u'ᄀ'), (0x3261, 'M', u'ᄂ'), (0x3262, 'M', u'ᄃ'), (0x3263, 'M', u'ᄅ'), (0x3264, 'M', u'ᄆ'), 
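    # Hangul compatibility jamo (U+3130 block) map to the conjoining jamo
    # they display as, while parenthesized forms such as '(가)' carry status
    # '3' because their expansions contain '(' and ')', which STD3 disallows.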
(0x3265, 'M', u'ᄇ'), (0x3266, 'M', u'ᄉ'), (0x3267, 'M', u'ᄋ'), (0x3268, 'M', u'ᄌ'), (0x3269, 'M', u'ᄎ'), (0x326A, 'M', u'ᄏ'), (0x326B, 'M', u'ᄐ'), (0x326C, 'M', u'ᄑ'), (0x326D, 'M', u'ᄒ'), (0x326E, 'M', u'가'), (0x326F, 'M', u'나'), ] def _seg_31(): return [ (0x3270, 'M', u'다'), (0x3271, 'M', u'라'), (0x3272, 'M', u'마'), (0x3273, 'M', u'바'), (0x3274, 'M', u'사'), (0x3275, 'M', u'아'), (0x3276, 'M', u'자'), (0x3277, 'M', u'차'), (0x3278, 'M', u'카'), (0x3279, 'M', u'타'), (0x327A, 'M', u'파'), (0x327B, 'M', u'하'), (0x327C, 'M', u'참고'), (0x327D, 'M', u'주의'), (0x327E, 'M', u'우'), (0x327F, 'V'), (0x3280, 'M', u'一'), (0x3281, 'M', u'二'), (0x3282, 'M', u'三'), (0x3283, 'M', u'四'), (0x3284, 'M', u'五'), (0x3285, 'M', u'六'), (0x3286, 'M', u'七'), (0x3287, 'M', u'八'), (0x3288, 'M', u'九'), (0x3289, 'M', u'十'), (0x328A, 'M', u'月'), (0x328B, 'M', u'火'), (0x328C, 'M', u'水'), (0x328D, 'M', u'木'), (0x328E, 'M', u'金'), (0x328F, 'M', u'土'), (0x3290, 'M', u'日'), (0x3291, 'M', u'株'), (0x3292, 'M', u'有'), (0x3293, 'M', u'社'), (0x3294, 'M', u'名'), (0x3295, 'M', u'特'), (0x3296, 'M', u'財'), (0x3297, 'M', u'祝'), (0x3298, 'M', u'労'), (0x3299, 'M', u'秘'), (0x329A, 'M', u'男'), (0x329B, 'M', u'女'), (0x329C, 'M', u'適'), (0x329D, 'M', u'優'), (0x329E, 'M', u'印'), (0x329F, 'M', u'注'), (0x32A0, 'M', u'項'), (0x32A1, 'M', u'休'), (0x32A2, 'M', u'写'), (0x32A3, 'M', u'正'), (0x32A4, 'M', u'上'), (0x32A5, 'M', u'中'), (0x32A6, 'M', u'下'), (0x32A7, 'M', u'左'), (0x32A8, 'M', u'右'), (0x32A9, 'M', u'医'), (0x32AA, 'M', u'宗'), (0x32AB, 'M', u'学'), (0x32AC, 'M', u'監'), (0x32AD, 'M', u'企'), (0x32AE, 'M', u'資'), (0x32AF, 'M', u'協'), (0x32B0, 'M', u'夜'), (0x32B1, 'M', u'36'), (0x32B2, 'M', u'37'), (0x32B3, 'M', u'38'), (0x32B4, 'M', u'39'), (0x32B5, 'M', u'40'), (0x32B6, 'M', u'41'), (0x32B7, 'M', u'42'), (0x32B8, 'M', u'43'), (0x32B9, 'M', u'44'), (0x32BA, 'M', u'45'), (0x32BB, 'M', u'46'), (0x32BC, 'M', u'47'), (0x32BD, 'M', u'48'), (0x32BE, 'M', u'49'), (0x32BF, 'M', u'50'), (0x32C0, 'M', u'1月'), (0x32C1, 'M', u'2月'), (0x32C2, 'M', u'3月'), (0x32C3, 'M', u'4月'), (0x32C4, 'M', u'5月'), (0x32C5, 'M', u'6月'), (0x32C6, 'M', u'7月'), (0x32C7, 'M', u'8月'), (0x32C8, 'M', u'9月'), (0x32C9, 'M', u'10月'), (0x32CA, 'M', u'11月'), (0x32CB, 'M', u'12月'), (0x32CC, 'M', u'hg'), (0x32CD, 'M', u'erg'), (0x32CE, 'M', u'ev'), (0x32CF, 'M', u'ltd'), (0x32D0, 'M', u'ア'), (0x32D1, 'M', u'イ'), (0x32D2, 'M', u'ウ'), (0x32D3, 'M', u'エ'), ] def _seg_32(): return [ (0x32D4, 'M', u'オ'), (0x32D5, 'M', u'カ'), (0x32D6, 'M', u'キ'), (0x32D7, 'M', u'ク'), (0x32D8, 'M', u'ケ'), (0x32D9, 'M', u'コ'), (0x32DA, 'M', u'サ'), (0x32DB, 'M', u'シ'), (0x32DC, 'M', u'ス'), (0x32DD, 'M', u'セ'), (0x32DE, 'M', u'ソ'), (0x32DF, 'M', u'タ'), (0x32E0, 'M', u'チ'), (0x32E1, 'M', u'ツ'), (0x32E2, 'M', u'テ'), (0x32E3, 'M', u'ト'), (0x32E4, 'M', u'ナ'), (0x32E5, 'M', u'ニ'), (0x32E6, 'M', u'ヌ'), (0x32E7, 'M', u'ネ'), (0x32E8, 'M', u'ノ'), (0x32E9, 'M', u'ハ'), (0x32EA, 'M', u'ヒ'), (0x32EB, 'M', u'フ'), (0x32EC, 'M', u'ヘ'), (0x32ED, 'M', u'ホ'), (0x32EE, 'M', u'マ'), (0x32EF, 'M', u'ミ'), (0x32F0, 'M', u'ム'), (0x32F1, 'M', u'メ'), (0x32F2, 'M', u'モ'), (0x32F3, 'M', u'ヤ'), (0x32F4, 'M', u'ユ'), (0x32F5, 'M', u'ヨ'), (0x32F6, 'M', u'ラ'), (0x32F7, 'M', u'リ'), (0x32F8, 'M', u'ル'), (0x32F9, 'M', u'レ'), (0x32FA, 'M', u'ロ'), (0x32FB, 'M', u'ワ'), (0x32FC, 'M', u'ヰ'), (0x32FD, 'M', u'ヱ'), (0x32FE, 'M', u'ヲ'), (0x32FF, 'X'), (0x3300, 'M', u'アパート'), (0x3301, 'M', u'アルファ'), (0x3302, 'M', u'アンペア'), (0x3303, 'M', u'アール'), (0x3304, 'M', u'イニング'), (0x3305, 'M', u'インチ'), (0x3306, 'M', u'ウォン'), (0x3307, 'M', u'エスクード'), (0x3308, 'M', u'エーカー'), 
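    # Squared Katakana abbreviations (U+3300 block) expand to the full word
    # they depict, e.g. U+3300 -> 'アパート' ("apartment").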
(0x3309, 'M', u'オンス'), (0x330A, 'M', u'オーム'), (0x330B, 'M', u'カイリ'), (0x330C, 'M', u'カラット'), (0x330D, 'M', u'カロリー'), (0x330E, 'M', u'ガロン'), (0x330F, 'M', u'ガンマ'), (0x3310, 'M', u'ギガ'), (0x3311, 'M', u'ギニー'), (0x3312, 'M', u'キュリー'), (0x3313, 'M', u'ギルダー'), (0x3314, 'M', u'キロ'), (0x3315, 'M', u'キログラム'), (0x3316, 'M', u'キロメートル'), (0x3317, 'M', u'キロワット'), (0x3318, 'M', u'グラム'), (0x3319, 'M', u'グラムトン'), (0x331A, 'M', u'クルゼイロ'), (0x331B, 'M', u'クローネ'), (0x331C, 'M', u'ケース'), (0x331D, 'M', u'コルナ'), (0x331E, 'M', u'コーポ'), (0x331F, 'M', u'サイクル'), (0x3320, 'M', u'サンチーム'), (0x3321, 'M', u'シリング'), (0x3322, 'M', u'センチ'), (0x3323, 'M', u'セント'), (0x3324, 'M', u'ダース'), (0x3325, 'M', u'デシ'), (0x3326, 'M', u'ドル'), (0x3327, 'M', u'トン'), (0x3328, 'M', u'ナノ'), (0x3329, 'M', u'ノット'), (0x332A, 'M', u'ハイツ'), (0x332B, 'M', u'パーセント'), (0x332C, 'M', u'パーツ'), (0x332D, 'M', u'バーレル'), (0x332E, 'M', u'ピアストル'), (0x332F, 'M', u'ピクル'), (0x3330, 'M', u'ピコ'), (0x3331, 'M', u'ビル'), (0x3332, 'M', u'ファラッド'), (0x3333, 'M', u'フィート'), (0x3334, 'M', u'ブッシェル'), (0x3335, 'M', u'フラン'), (0x3336, 'M', u'ヘクタール'), (0x3337, 'M', u'ペソ'), ] def _seg_33(): return [ (0x3338, 'M', u'ペニヒ'), (0x3339, 'M', u'ヘルツ'), (0x333A, 'M', u'ペンス'), (0x333B, 'M', u'ページ'), (0x333C, 'M', u'ベータ'), (0x333D, 'M', u'ポイント'), (0x333E, 'M', u'ボルト'), (0x333F, 'M', u'ホン'), (0x3340, 'M', u'ポンド'), (0x3341, 'M', u'ホール'), (0x3342, 'M', u'ホーン'), (0x3343, 'M', u'マイクロ'), (0x3344, 'M', u'マイル'), (0x3345, 'M', u'マッハ'), (0x3346, 'M', u'マルク'), (0x3347, 'M', u'マンション'), (0x3348, 'M', u'ミクロン'), (0x3349, 'M', u'ミリ'), (0x334A, 'M', u'ミリバール'), (0x334B, 'M', u'メガ'), (0x334C, 'M', u'メガトン'), (0x334D, 'M', u'メートル'), (0x334E, 'M', u'ヤード'), (0x334F, 'M', u'ヤール'), (0x3350, 'M', u'ユアン'), (0x3351, 'M', u'リットル'), (0x3352, 'M', u'リラ'), (0x3353, 'M', u'ルピー'), (0x3354, 'M', u'ルーブル'), (0x3355, 'M', u'レム'), (0x3356, 'M', u'レントゲン'), (0x3357, 'M', u'ワット'), (0x3358, 'M', u'0点'), (0x3359, 'M', u'1点'), (0x335A, 'M', u'2点'), (0x335B, 'M', u'3点'), (0x335C, 'M', u'4点'), (0x335D, 'M', u'5点'), (0x335E, 'M', u'6点'), (0x335F, 'M', u'7点'), (0x3360, 'M', u'8点'), (0x3361, 'M', u'9点'), (0x3362, 'M', u'10点'), (0x3363, 'M', u'11点'), (0x3364, 'M', u'12点'), (0x3365, 'M', u'13点'), (0x3366, 'M', u'14点'), (0x3367, 'M', u'15点'), (0x3368, 'M', u'16点'), (0x3369, 'M', u'17点'), (0x336A, 'M', u'18点'), (0x336B, 'M', u'19点'), (0x336C, 'M', u'20点'), (0x336D, 'M', u'21点'), (0x336E, 'M', u'22点'), (0x336F, 'M', u'23点'), (0x3370, 'M', u'24点'), (0x3371, 'M', u'hpa'), (0x3372, 'M', u'da'), (0x3373, 'M', u'au'), (0x3374, 'M', u'bar'), (0x3375, 'M', u'ov'), (0x3376, 'M', u'pc'), (0x3377, 'M', u'dm'), (0x3378, 'M', u'dm2'), (0x3379, 'M', u'dm3'), (0x337A, 'M', u'iu'), (0x337B, 'M', u'平成'), (0x337C, 'M', u'昭和'), (0x337D, 'M', u'大正'), (0x337E, 'M', u'明治'), (0x337F, 'M', u'株式会社'), (0x3380, 'M', u'pa'), (0x3381, 'M', u'na'), (0x3382, 'M', u'μa'), (0x3383, 'M', u'ma'), (0x3384, 'M', u'ka'), (0x3385, 'M', u'kb'), (0x3386, 'M', u'mb'), (0x3387, 'M', u'gb'), (0x3388, 'M', u'cal'), (0x3389, 'M', u'kcal'), (0x338A, 'M', u'pf'), (0x338B, 'M', u'nf'), (0x338C, 'M', u'μf'), (0x338D, 'M', u'μg'), (0x338E, 'M', u'mg'), (0x338F, 'M', u'kg'), (0x3390, 'M', u'hz'), (0x3391, 'M', u'khz'), (0x3392, 'M', u'mhz'), (0x3393, 'M', u'ghz'), (0x3394, 'M', u'thz'), (0x3395, 'M', u'μl'), (0x3396, 'M', u'ml'), (0x3397, 'M', u'dl'), (0x3398, 'M', u'kl'), (0x3399, 'M', u'fm'), (0x339A, 'M', u'nm'), (0x339B, 'M', u'μm'), ] def _seg_34(): return [ (0x339C, 'M', u'mm'), (0x339D, 'M', u'cm'), (0x339E, 'M', u'km'), (0x339F, 'M', u'mm2'), (0x33A0, 'M', u'cm2'), (0x33A1, 'M', 
u'm2'), (0x33A2, 'M', u'km2'), (0x33A3, 'M', u'mm3'), (0x33A4, 'M', u'cm3'), (0x33A5, 'M', u'm3'), (0x33A6, 'M', u'km3'), (0x33A7, 'M', u'm∕s'), (0x33A8, 'M', u'm∕s2'), (0x33A9, 'M', u'pa'), (0x33AA, 'M', u'kpa'), (0x33AB, 'M', u'mpa'), (0x33AC, 'M', u'gpa'), (0x33AD, 'M', u'rad'), (0x33AE, 'M', u'rad∕s'), (0x33AF, 'M', u'rad∕s2'), (0x33B0, 'M', u'ps'), (0x33B1, 'M', u'ns'), (0x33B2, 'M', u'μs'), (0x33B3, 'M', u'ms'), (0x33B4, 'M', u'pv'), (0x33B5, 'M', u'nv'), (0x33B6, 'M', u'μv'), (0x33B7, 'M', u'mv'), (0x33B8, 'M', u'kv'), (0x33B9, 'M', u'mv'), (0x33BA, 'M', u'pw'), (0x33BB, 'M', u'nw'), (0x33BC, 'M', u'μw'), (0x33BD, 'M', u'mw'), (0x33BE, 'M', u'kw'), (0x33BF, 'M', u'mw'), (0x33C0, 'M', u'kω'), (0x33C1, 'M', u'mω'), (0x33C2, 'X'), (0x33C3, 'M', u'bq'), (0x33C4, 'M', u'cc'), (0x33C5, 'M', u'cd'), (0x33C6, 'M', u'c∕kg'), (0x33C7, 'X'), (0x33C8, 'M', u'db'), (0x33C9, 'M', u'gy'), (0x33CA, 'M', u'ha'), (0x33CB, 'M', u'hp'), (0x33CC, 'M', u'in'), (0x33CD, 'M', u'kk'), (0x33CE, 'M', u'km'), (0x33CF, 'M', u'kt'), (0x33D0, 'M', u'lm'), (0x33D1, 'M', u'ln'), (0x33D2, 'M', u'log'), (0x33D3, 'M', u'lx'), (0x33D4, 'M', u'mb'), (0x33D5, 'M', u'mil'), (0x33D6, 'M', u'mol'), (0x33D7, 'M', u'ph'), (0x33D8, 'X'), (0x33D9, 'M', u'ppm'), (0x33DA, 'M', u'pr'), (0x33DB, 'M', u'sr'), (0x33DC, 'M', u'sv'), (0x33DD, 'M', u'wb'), (0x33DE, 'M', u'v∕m'), (0x33DF, 'M', u'a∕m'), (0x33E0, 'M', u'1日'), (0x33E1, 'M', u'2日'), (0x33E2, 'M', u'3日'), (0x33E3, 'M', u'4日'), (0x33E4, 'M', u'5日'), (0x33E5, 'M', u'6日'), (0x33E6, 'M', u'7日'), (0x33E7, 'M', u'8日'), (0x33E8, 'M', u'9日'), (0x33E9, 'M', u'10日'), (0x33EA, 'M', u'11日'), (0x33EB, 'M', u'12日'), (0x33EC, 'M', u'13日'), (0x33ED, 'M', u'14日'), (0x33EE, 'M', u'15日'), (0x33EF, 'M', u'16日'), (0x33F0, 'M', u'17日'), (0x33F1, 'M', u'18日'), (0x33F2, 'M', u'19日'), (0x33F3, 'M', u'20日'), (0x33F4, 'M', u'21日'), (0x33F5, 'M', u'22日'), (0x33F6, 'M', u'23日'), (0x33F7, 'M', u'24日'), (0x33F8, 'M', u'25日'), (0x33F9, 'M', u'26日'), (0x33FA, 'M', u'27日'), (0x33FB, 'M', u'28日'), (0x33FC, 'M', u'29日'), (0x33FD, 'M', u'30日'), (0x33FE, 'M', u'31日'), (0x33FF, 'M', u'gal'), ] def _seg_35(): return [ (0x3400, 'V'), (0x4DB6, 'X'), (0x4DC0, 'V'), (0x9FCD, 'X'), (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), (0xA4C7, 'X'), (0xA4D0, 'V'), (0xA62C, 'X'), (0xA640, 'M', u'ꙁ'), (0xA641, 'V'), (0xA642, 'M', u'ꙃ'), (0xA643, 'V'), (0xA644, 'M', u'ꙅ'), (0xA645, 'V'), (0xA646, 'M', u'ꙇ'), (0xA647, 'V'), (0xA648, 'M', u'ꙉ'), (0xA649, 'V'), (0xA64A, 'M', u'ꙋ'), (0xA64B, 'V'), (0xA64C, 'M', u'ꙍ'), (0xA64D, 'V'), (0xA64E, 'M', u'ꙏ'), (0xA64F, 'V'), (0xA650, 'M', u'ꙑ'), (0xA651, 'V'), (0xA652, 'M', u'ꙓ'), (0xA653, 'V'), (0xA654, 'M', u'ꙕ'), (0xA655, 'V'), (0xA656, 'M', u'ꙗ'), (0xA657, 'V'), (0xA658, 'M', u'ꙙ'), (0xA659, 'V'), (0xA65A, 'M', u'ꙛ'), (0xA65B, 'V'), (0xA65C, 'M', u'ꙝ'), (0xA65D, 'V'), (0xA65E, 'M', u'ꙟ'), (0xA65F, 'V'), (0xA660, 'M', u'ꙡ'), (0xA661, 'V'), (0xA662, 'M', u'ꙣ'), (0xA663, 'V'), (0xA664, 'M', u'ꙥ'), (0xA665, 'V'), (0xA666, 'M', u'ꙧ'), (0xA667, 'V'), (0xA668, 'M', u'ꙩ'), (0xA669, 'V'), (0xA66A, 'M', u'ꙫ'), (0xA66B, 'V'), (0xA66C, 'M', u'ꙭ'), (0xA66D, 'V'), (0xA680, 'M', u'ꚁ'), (0xA681, 'V'), (0xA682, 'M', u'ꚃ'), (0xA683, 'V'), (0xA684, 'M', u'ꚅ'), (0xA685, 'V'), (0xA686, 'M', u'ꚇ'), (0xA687, 'V'), (0xA688, 'M', u'ꚉ'), (0xA689, 'V'), (0xA68A, 'M', u'ꚋ'), (0xA68B, 'V'), (0xA68C, 'M', u'ꚍ'), (0xA68D, 'V'), (0xA68E, 'M', u'ꚏ'), (0xA68F, 'V'), (0xA690, 'M', u'ꚑ'), (0xA691, 'V'), (0xA692, 'M', u'ꚓ'), (0xA693, 'V'), (0xA694, 'M', u'ꚕ'), (0xA695, 'V'), (0xA696, 'M', u'ꚗ'), (0xA697, 'V'), (0xA698, 'X'), 
(0xA69F, 'V'), (0xA6F8, 'X'), (0xA700, 'V'), (0xA722, 'M', u'ꜣ'), (0xA723, 'V'), (0xA724, 'M', u'ꜥ'), (0xA725, 'V'), (0xA726, 'M', u'ꜧ'), (0xA727, 'V'), (0xA728, 'M', u'ꜩ'), (0xA729, 'V'), (0xA72A, 'M', u'ꜫ'), (0xA72B, 'V'), (0xA72C, 'M', u'ꜭ'), (0xA72D, 'V'), (0xA72E, 'M', u'ꜯ'), (0xA72F, 'V'), (0xA732, 'M', u'ꜳ'), (0xA733, 'V'), ] def _seg_36(): return [ (0xA734, 'M', u'ꜵ'), (0xA735, 'V'), (0xA736, 'M', u'ꜷ'), (0xA737, 'V'), (0xA738, 'M', u'ꜹ'), (0xA739, 'V'), (0xA73A, 'M', u'ꜻ'), (0xA73B, 'V'), (0xA73C, 'M', u'ꜽ'), (0xA73D, 'V'), (0xA73E, 'M', u'ꜿ'), (0xA73F, 'V'), (0xA740, 'M', u'ꝁ'), (0xA741, 'V'), (0xA742, 'M', u'ꝃ'), (0xA743, 'V'), (0xA744, 'M', u'ꝅ'), (0xA745, 'V'), (0xA746, 'M', u'ꝇ'), (0xA747, 'V'), (0xA748, 'M', u'ꝉ'), (0xA749, 'V'), (0xA74A, 'M', u'ꝋ'), (0xA74B, 'V'), (0xA74C, 'M', u'ꝍ'), (0xA74D, 'V'), (0xA74E, 'M', u'ꝏ'), (0xA74F, 'V'), (0xA750, 'M', u'ꝑ'), (0xA751, 'V'), (0xA752, 'M', u'ꝓ'), (0xA753, 'V'), (0xA754, 'M', u'ꝕ'), (0xA755, 'V'), (0xA756, 'M', u'ꝗ'), (0xA757, 'V'), (0xA758, 'M', u'ꝙ'), (0xA759, 'V'), (0xA75A, 'M', u'ꝛ'), (0xA75B, 'V'), (0xA75C, 'M', u'ꝝ'), (0xA75D, 'V'), (0xA75E, 'M', u'ꝟ'), (0xA75F, 'V'), (0xA760, 'M', u'ꝡ'), (0xA761, 'V'), (0xA762, 'M', u'ꝣ'), (0xA763, 'V'), (0xA764, 'M', u'ꝥ'), (0xA765, 'V'), (0xA766, 'M', u'ꝧ'), (0xA767, 'V'), (0xA768, 'M', u'ꝩ'), (0xA769, 'V'), (0xA76A, 'M', u'ꝫ'), (0xA76B, 'V'), (0xA76C, 'M', u'ꝭ'), (0xA76D, 'V'), (0xA76E, 'M', u'ꝯ'), (0xA76F, 'V'), (0xA770, 'M', u'ꝯ'), (0xA771, 'V'), (0xA779, 'M', u'ꝺ'), (0xA77A, 'V'), (0xA77B, 'M', u'ꝼ'), (0xA77C, 'V'), (0xA77D, 'M', u'ᵹ'), (0xA77E, 'M', u'ꝿ'), (0xA77F, 'V'), (0xA780, 'M', u'ꞁ'), (0xA781, 'V'), (0xA782, 'M', u'ꞃ'), (0xA783, 'V'), (0xA784, 'M', u'ꞅ'), (0xA785, 'V'), (0xA786, 'M', u'ꞇ'), (0xA787, 'V'), (0xA78B, 'M', u'ꞌ'), (0xA78C, 'V'), (0xA78D, 'M', u'ɥ'), (0xA78E, 'V'), (0xA78F, 'X'), (0xA790, 'M', u'ꞑ'), (0xA791, 'V'), (0xA792, 'M', u'ꞓ'), (0xA793, 'V'), (0xA794, 'X'), (0xA7A0, 'M', u'ꞡ'), (0xA7A1, 'V'), (0xA7A2, 'M', u'ꞣ'), (0xA7A3, 'V'), (0xA7A4, 'M', u'ꞥ'), (0xA7A5, 'V'), (0xA7A6, 'M', u'ꞧ'), (0xA7A7, 'V'), (0xA7A8, 'M', u'ꞩ'), (0xA7A9, 'V'), (0xA7AA, 'M', u'ɦ'), (0xA7AB, 'X'), (0xA7F8, 'M', u'ħ'), ] def _seg_37(): return [ (0xA7F9, 'M', u'œ'), (0xA7FA, 'V'), (0xA82C, 'X'), (0xA830, 'V'), (0xA83A, 'X'), (0xA840, 'V'), (0xA878, 'X'), (0xA880, 'V'), (0xA8C5, 'X'), (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), (0xA8FC, 'X'), (0xA900, 'V'), (0xA954, 'X'), (0xA95F, 'V'), (0xA97D, 'X'), (0xA980, 'V'), (0xA9CE, 'X'), (0xA9CF, 'V'), (0xA9DA, 'X'), (0xA9DE, 'V'), (0xA9E0, 'X'), (0xAA00, 'V'), (0xAA37, 'X'), (0xAA40, 'V'), (0xAA4E, 'X'), (0xAA50, 'V'), (0xAA5A, 'X'), (0xAA5C, 'V'), (0xAA7C, 'X'), (0xAA80, 'V'), (0xAAC3, 'X'), (0xAADB, 'V'), (0xAAF7, 'X'), (0xAB01, 'V'), (0xAB07, 'X'), (0xAB09, 'V'), (0xAB0F, 'X'), (0xAB11, 'V'), (0xAB17, 'X'), (0xAB20, 'V'), (0xAB27, 'X'), (0xAB28, 'V'), (0xAB2F, 'X'), (0xABC0, 'V'), (0xABEE, 'X'), (0xABF0, 'V'), (0xABFA, 'X'), (0xAC00, 'V'), (0xD7A4, 'X'), (0xD7B0, 'V'), (0xD7C7, 'X'), (0xD7CB, 'V'), (0xD7FC, 'X'), (0xF900, 'M', u'豈'), (0xF901, 'M', u'更'), (0xF902, 'M', u'車'), (0xF903, 'M', u'賈'), (0xF904, 'M', u'滑'), (0xF905, 'M', u'串'), (0xF906, 'M', u'句'), (0xF907, 'M', u'龜'), (0xF909, 'M', u'契'), (0xF90A, 'M', u'金'), (0xF90B, 'M', u'喇'), (0xF90C, 'M', u'奈'), (0xF90D, 'M', u'懶'), (0xF90E, 'M', u'癩'), (0xF90F, 'M', u'羅'), (0xF910, 'M', u'蘿'), (0xF911, 'M', u'螺'), (0xF912, 'M', u'裸'), (0xF913, 'M', u'邏'), (0xF914, 'M', u'樂'), (0xF915, 'M', u'洛'), (0xF916, 'M', u'烙'), (0xF917, 'M', u'珞'), (0xF918, 'M', u'落'), (0xF919, 'M', u'酪'), (0xF91A, 'M', 
u'駱'), (0xF91B, 'M', u'亂'), (0xF91C, 'M', u'卵'), (0xF91D, 'M', u'欄'), (0xF91E, 'M', u'爛'), (0xF91F, 'M', u'蘭'), (0xF920, 'M', u'鸞'), (0xF921, 'M', u'嵐'), (0xF922, 'M', u'濫'), (0xF923, 'M', u'藍'), (0xF924, 'M', u'襤'), (0xF925, 'M', u'拉'), (0xF926, 'M', u'臘'), (0xF927, 'M', u'蠟'), (0xF928, 'M', u'廊'), (0xF929, 'M', u'朗'), (0xF92A, 'M', u'浪'), (0xF92B, 'M', u'狼'), (0xF92C, 'M', u'郎'), (0xF92D, 'M', u'來'), ] def _seg_38(): return [ (0xF92E, 'M', u'冷'), (0xF92F, 'M', u'勞'), (0xF930, 'M', u'擄'), (0xF931, 'M', u'櫓'), (0xF932, 'M', u'爐'), (0xF933, 'M', u'盧'), (0xF934, 'M', u'老'), (0xF935, 'M', u'蘆'), (0xF936, 'M', u'虜'), (0xF937, 'M', u'路'), (0xF938, 'M', u'露'), (0xF939, 'M', u'魯'), (0xF93A, 'M', u'鷺'), (0xF93B, 'M', u'碌'), (0xF93C, 'M', u'祿'), (0xF93D, 'M', u'綠'), (0xF93E, 'M', u'菉'), (0xF93F, 'M', u'錄'), (0xF940, 'M', u'鹿'), (0xF941, 'M', u'論'), (0xF942, 'M', u'壟'), (0xF943, 'M', u'弄'), (0xF944, 'M', u'籠'), (0xF945, 'M', u'聾'), (0xF946, 'M', u'牢'), (0xF947, 'M', u'磊'), (0xF948, 'M', u'賂'), (0xF949, 'M', u'雷'), (0xF94A, 'M', u'壘'), (0xF94B, 'M', u'屢'), (0xF94C, 'M', u'樓'), (0xF94D, 'M', u'淚'), (0xF94E, 'M', u'漏'), (0xF94F, 'M', u'累'), (0xF950, 'M', u'縷'), (0xF951, 'M', u'陋'), (0xF952, 'M', u'勒'), (0xF953, 'M', u'肋'), (0xF954, 'M', u'凜'), (0xF955, 'M', u'凌'), (0xF956, 'M', u'稜'), (0xF957, 'M', u'綾'), (0xF958, 'M', u'菱'), (0xF959, 'M', u'陵'), (0xF95A, 'M', u'讀'), (0xF95B, 'M', u'拏'), (0xF95C, 'M', u'樂'), (0xF95D, 'M', u'諾'), (0xF95E, 'M', u'丹'), (0xF95F, 'M', u'寧'), (0xF960, 'M', u'怒'), (0xF961, 'M', u'率'), (0xF962, 'M', u'異'), (0xF963, 'M', u'北'), (0xF964, 'M', u'磻'), (0xF965, 'M', u'便'), (0xF966, 'M', u'復'), (0xF967, 'M', u'不'), (0xF968, 'M', u'泌'), (0xF969, 'M', u'數'), (0xF96A, 'M', u'索'), (0xF96B, 'M', u'參'), (0xF96C, 'M', u'塞'), (0xF96D, 'M', u'省'), (0xF96E, 'M', u'葉'), (0xF96F, 'M', u'說'), (0xF970, 'M', u'殺'), (0xF971, 'M', u'辰'), (0xF972, 'M', u'沈'), (0xF973, 'M', u'拾'), (0xF974, 'M', u'若'), (0xF975, 'M', u'掠'), (0xF976, 'M', u'略'), (0xF977, 'M', u'亮'), (0xF978, 'M', u'兩'), (0xF979, 'M', u'凉'), (0xF97A, 'M', u'梁'), (0xF97B, 'M', u'糧'), (0xF97C, 'M', u'良'), (0xF97D, 'M', u'諒'), (0xF97E, 'M', u'量'), (0xF97F, 'M', u'勵'), (0xF980, 'M', u'呂'), (0xF981, 'M', u'女'), (0xF982, 'M', u'廬'), (0xF983, 'M', u'旅'), (0xF984, 'M', u'濾'), (0xF985, 'M', u'礪'), (0xF986, 'M', u'閭'), (0xF987, 'M', u'驪'), (0xF988, 'M', u'麗'), (0xF989, 'M', u'黎'), (0xF98A, 'M', u'力'), (0xF98B, 'M', u'曆'), (0xF98C, 'M', u'歷'), (0xF98D, 'M', u'轢'), (0xF98E, 'M', u'年'), (0xF98F, 'M', u'憐'), (0xF990, 'M', u'戀'), (0xF991, 'M', u'撚'), ] def _seg_39(): return [ (0xF992, 'M', u'漣'), (0xF993, 'M', u'煉'), (0xF994, 'M', u'璉'), (0xF995, 'M', u'秊'), (0xF996, 'M', u'練'), (0xF997, 'M', u'聯'), (0xF998, 'M', u'輦'), (0xF999, 'M', u'蓮'), (0xF99A, 'M', u'連'), (0xF99B, 'M', u'鍊'), (0xF99C, 'M', u'列'), (0xF99D, 'M', u'劣'), (0xF99E, 'M', u'咽'), (0xF99F, 'M', u'烈'), (0xF9A0, 'M', u'裂'), (0xF9A1, 'M', u'說'), (0xF9A2, 'M', u'廉'), (0xF9A3, 'M', u'念'), (0xF9A4, 'M', u'捻'), (0xF9A5, 'M', u'殮'), (0xF9A6, 'M', u'簾'), (0xF9A7, 'M', u'獵'), (0xF9A8, 'M', u'令'), (0xF9A9, 'M', u'囹'), (0xF9AA, 'M', u'寧'), (0xF9AB, 'M', u'嶺'), (0xF9AC, 'M', u'怜'), (0xF9AD, 'M', u'玲'), (0xF9AE, 'M', u'瑩'), (0xF9AF, 'M', u'羚'), (0xF9B0, 'M', u'聆'), (0xF9B1, 'M', u'鈴'), (0xF9B2, 'M', u'零'), (0xF9B3, 'M', u'靈'), (0xF9B4, 'M', u'領'), (0xF9B5, 'M', u'例'), (0xF9B6, 'M', u'禮'), (0xF9B7, 'M', u'醴'), (0xF9B8, 'M', u'隸'), (0xF9B9, 'M', u'惡'), (0xF9BA, 'M', u'了'), (0xF9BB, 'M', u'僚'), (0xF9BC, 'M', u'寮'), (0xF9BD, 'M', u'尿'), (0xF9BE, 'M', u'料'), (0xF9BF, 'M', u'樂'), (0xF9C0, 'M', u'燎'), (0xF9C1, 
'M', u'療'), (0xF9C2, 'M', u'蓼'), (0xF9C3, 'M', u'遼'), (0xF9C4, 'M', u'龍'), (0xF9C5, 'M', u'暈'), (0xF9C6, 'M', u'阮'), (0xF9C7, 'M', u'劉'), (0xF9C8, 'M', u'杻'), (0xF9C9, 'M', u'柳'), (0xF9CA, 'M', u'流'), (0xF9CB, 'M', u'溜'), (0xF9CC, 'M', u'琉'), (0xF9CD, 'M', u'留'), (0xF9CE, 'M', u'硫'), (0xF9CF, 'M', u'紐'), (0xF9D0, 'M', u'類'), (0xF9D1, 'M', u'六'), (0xF9D2, 'M', u'戮'), (0xF9D3, 'M', u'陸'), (0xF9D4, 'M', u'倫'), (0xF9D5, 'M', u'崙'), (0xF9D6, 'M', u'淪'), (0xF9D7, 'M', u'輪'), (0xF9D8, 'M', u'律'), (0xF9D9, 'M', u'慄'), (0xF9DA, 'M', u'栗'), (0xF9DB, 'M', u'率'), (0xF9DC, 'M', u'隆'), (0xF9DD, 'M', u'利'), (0xF9DE, 'M', u'吏'), (0xF9DF, 'M', u'履'), (0xF9E0, 'M', u'易'), (0xF9E1, 'M', u'李'), (0xF9E2, 'M', u'梨'), (0xF9E3, 'M', u'泥'), (0xF9E4, 'M', u'理'), (0xF9E5, 'M', u'痢'), (0xF9E6, 'M', u'罹'), (0xF9E7, 'M', u'裏'), (0xF9E8, 'M', u'裡'), (0xF9E9, 'M', u'里'), (0xF9EA, 'M', u'離'), (0xF9EB, 'M', u'匿'), (0xF9EC, 'M', u'溺'), (0xF9ED, 'M', u'吝'), (0xF9EE, 'M', u'燐'), (0xF9EF, 'M', u'璘'), (0xF9F0, 'M', u'藺'), (0xF9F1, 'M', u'隣'), (0xF9F2, 'M', u'鱗'), (0xF9F3, 'M', u'麟'), (0xF9F4, 'M', u'林'), (0xF9F5, 'M', u'淋'), ] def _seg_40(): return [ (0xF9F6, 'M', u'臨'), (0xF9F7, 'M', u'立'), (0xF9F8, 'M', u'笠'), (0xF9F9, 'M', u'粒'), (0xF9FA, 'M', u'狀'), (0xF9FB, 'M', u'炙'), (0xF9FC, 'M', u'識'), (0xF9FD, 'M', u'什'), (0xF9FE, 'M', u'茶'), (0xF9FF, 'M', u'刺'), (0xFA00, 'M', u'切'), (0xFA01, 'M', u'度'), (0xFA02, 'M', u'拓'), (0xFA03, 'M', u'糖'), (0xFA04, 'M', u'宅'), (0xFA05, 'M', u'洞'), (0xFA06, 'M', u'暴'), (0xFA07, 'M', u'輻'), (0xFA08, 'M', u'行'), (0xFA09, 'M', u'降'), (0xFA0A, 'M', u'見'), (0xFA0B, 'M', u'廓'), (0xFA0C, 'M', u'兀'), (0xFA0D, 'M', u'嗀'), (0xFA0E, 'V'), (0xFA10, 'M', u'塚'), (0xFA11, 'V'), (0xFA12, 'M', u'晴'), (0xFA13, 'V'), (0xFA15, 'M', u'凞'), (0xFA16, 'M', u'猪'), (0xFA17, 'M', u'益'), (0xFA18, 'M', u'礼'), (0xFA19, 'M', u'神'), (0xFA1A, 'M', u'祥'), (0xFA1B, 'M', u'福'), (0xFA1C, 'M', u'靖'), (0xFA1D, 'M', u'精'), (0xFA1E, 'M', u'羽'), (0xFA1F, 'V'), (0xFA20, 'M', u'蘒'), (0xFA21, 'V'), (0xFA22, 'M', u'諸'), (0xFA23, 'V'), (0xFA25, 'M', u'逸'), (0xFA26, 'M', u'都'), (0xFA27, 'V'), (0xFA2A, 'M', u'飯'), (0xFA2B, 'M', u'飼'), (0xFA2C, 'M', u'館'), (0xFA2D, 'M', u'鶴'), (0xFA2E, 'M', u'郞'), (0xFA2F, 'M', u'隷'), (0xFA30, 'M', u'侮'), (0xFA31, 'M', u'僧'), (0xFA32, 'M', u'免'), (0xFA33, 'M', u'勉'), (0xFA34, 'M', u'勤'), (0xFA35, 'M', u'卑'), (0xFA36, 'M', u'喝'), (0xFA37, 'M', u'嘆'), (0xFA38, 'M', u'器'), (0xFA39, 'M', u'塀'), (0xFA3A, 'M', u'墨'), (0xFA3B, 'M', u'層'), (0xFA3C, 'M', u'屮'), (0xFA3D, 'M', u'悔'), (0xFA3E, 'M', u'慨'), (0xFA3F, 'M', u'憎'), (0xFA40, 'M', u'懲'), (0xFA41, 'M', u'敏'), (0xFA42, 'M', u'既'), (0xFA43, 'M', u'暑'), (0xFA44, 'M', u'梅'), (0xFA45, 'M', u'海'), (0xFA46, 'M', u'渚'), (0xFA47, 'M', u'漢'), (0xFA48, 'M', u'煮'), (0xFA49, 'M', u'爫'), (0xFA4A, 'M', u'琢'), (0xFA4B, 'M', u'碑'), (0xFA4C, 'M', u'社'), (0xFA4D, 'M', u'祉'), (0xFA4E, 'M', u'祈'), (0xFA4F, 'M', u'祐'), (0xFA50, 'M', u'祖'), (0xFA51, 'M', u'祝'), (0xFA52, 'M', u'禍'), (0xFA53, 'M', u'禎'), (0xFA54, 'M', u'穀'), (0xFA55, 'M', u'突'), (0xFA56, 'M', u'節'), (0xFA57, 'M', u'練'), (0xFA58, 'M', u'縉'), (0xFA59, 'M', u'繁'), (0xFA5A, 'M', u'署'), (0xFA5B, 'M', u'者'), (0xFA5C, 'M', u'臭'), (0xFA5D, 'M', u'艹'), (0xFA5F, 'M', u'著'), ] def _seg_41(): return [ (0xFA60, 'M', u'褐'), (0xFA61, 'M', u'視'), (0xFA62, 'M', u'謁'), (0xFA63, 'M', u'謹'), (0xFA64, 'M', u'賓'), (0xFA65, 'M', u'贈'), (0xFA66, 'M', u'辶'), (0xFA67, 'M', u'逸'), (0xFA68, 'M', u'難'), (0xFA69, 'M', u'響'), (0xFA6A, 'M', u'頻'), (0xFA6B, 'M', u'恵'), (0xFA6C, 'M', u'𤋮'), (0xFA6D, 'M', u'舘'), (0xFA6E, 'X'), (0xFA70, 'M', u'並'), (0xFA71, 
'M', u'况'), (0xFA72, 'M', u'全'), (0xFA73, 'M', u'侀'), (0xFA74, 'M', u'充'), (0xFA75, 'M', u'冀'), (0xFA76, 'M', u'勇'), (0xFA77, 'M', u'勺'), (0xFA78, 'M', u'喝'), (0xFA79, 'M', u'啕'), (0xFA7A, 'M', u'喙'), (0xFA7B, 'M', u'嗢'), (0xFA7C, 'M', u'塚'), (0xFA7D, 'M', u'墳'), (0xFA7E, 'M', u'奄'), (0xFA7F, 'M', u'奔'), (0xFA80, 'M', u'婢'), (0xFA81, 'M', u'嬨'), (0xFA82, 'M', u'廒'), (0xFA83, 'M', u'廙'), (0xFA84, 'M', u'彩'), (0xFA85, 'M', u'徭'), (0xFA86, 'M', u'惘'), (0xFA87, 'M', u'慎'), (0xFA88, 'M', u'愈'), (0xFA89, 'M', u'憎'), (0xFA8A, 'M', u'慠'), (0xFA8B, 'M', u'懲'), (0xFA8C, 'M', u'戴'), (0xFA8D, 'M', u'揄'), (0xFA8E, 'M', u'搜'), (0xFA8F, 'M', u'摒'), (0xFA90, 'M', u'敖'), (0xFA91, 'M', u'晴'), (0xFA92, 'M', u'朗'), (0xFA93, 'M', u'望'), (0xFA94, 'M', u'杖'), (0xFA95, 'M', u'歹'), (0xFA96, 'M', u'殺'), (0xFA97, 'M', u'流'), (0xFA98, 'M', u'滛'), (0xFA99, 'M', u'滋'), (0xFA9A, 'M', u'漢'), (0xFA9B, 'M', u'瀞'), (0xFA9C, 'M', u'煮'), (0xFA9D, 'M', u'瞧'), (0xFA9E, 'M', u'爵'), (0xFA9F, 'M', u'犯'), (0xFAA0, 'M', u'猪'), (0xFAA1, 'M', u'瑱'), (0xFAA2, 'M', u'甆'), (0xFAA3, 'M', u'画'), (0xFAA4, 'M', u'瘝'), (0xFAA5, 'M', u'瘟'), (0xFAA6, 'M', u'益'), (0xFAA7, 'M', u'盛'), (0xFAA8, 'M', u'直'), (0xFAA9, 'M', u'睊'), (0xFAAA, 'M', u'着'), (0xFAAB, 'M', u'磌'), (0xFAAC, 'M', u'窱'), (0xFAAD, 'M', u'節'), (0xFAAE, 'M', u'类'), (0xFAAF, 'M', u'絛'), (0xFAB0, 'M', u'練'), (0xFAB1, 'M', u'缾'), (0xFAB2, 'M', u'者'), (0xFAB3, 'M', u'荒'), (0xFAB4, 'M', u'華'), (0xFAB5, 'M', u'蝹'), (0xFAB6, 'M', u'襁'), (0xFAB7, 'M', u'覆'), (0xFAB8, 'M', u'視'), (0xFAB9, 'M', u'調'), (0xFABA, 'M', u'諸'), (0xFABB, 'M', u'請'), (0xFABC, 'M', u'謁'), (0xFABD, 'M', u'諾'), (0xFABE, 'M', u'諭'), (0xFABF, 'M', u'謹'), (0xFAC0, 'M', u'變'), (0xFAC1, 'M', u'贈'), (0xFAC2, 'M', u'輸'), (0xFAC3, 'M', u'遲'), (0xFAC4, 'M', u'醙'), ] def _seg_42(): return [ (0xFAC5, 'M', u'鉶'), (0xFAC6, 'M', u'陼'), (0xFAC7, 'M', u'難'), (0xFAC8, 'M', u'靖'), (0xFAC9, 'M', u'韛'), (0xFACA, 'M', u'響'), (0xFACB, 'M', u'頋'), (0xFACC, 'M', u'頻'), (0xFACD, 'M', u'鬒'), (0xFACE, 'M', u'龜'), (0xFACF, 'M', u'𢡊'), (0xFAD0, 'M', u'𢡄'), (0xFAD1, 'M', u'𣏕'), (0xFAD2, 'M', u'㮝'), (0xFAD3, 'M', u'䀘'), (0xFAD4, 'M', u'䀹'), (0xFAD5, 'M', u'𥉉'), (0xFAD6, 'M', u'𥳐'), (0xFAD7, 'M', u'𧻓'), (0xFAD8, 'M', u'齃'), (0xFAD9, 'M', u'龎'), (0xFADA, 'X'), (0xFB00, 'M', u'ff'), (0xFB01, 'M', u'fi'), (0xFB02, 'M', u'fl'), (0xFB03, 'M', u'ffi'), (0xFB04, 'M', u'ffl'), (0xFB05, 'M', u'st'), (0xFB07, 'X'), (0xFB13, 'M', u'մն'), (0xFB14, 'M', u'մե'), (0xFB15, 'M', u'մի'), (0xFB16, 'M', u'վն'), (0xFB17, 'M', u'մխ'), (0xFB18, 'X'), (0xFB1D, 'M', u'יִ'), (0xFB1E, 'V'), (0xFB1F, 'M', u'ײַ'), (0xFB20, 'M', u'ע'), (0xFB21, 'M', u'א'), (0xFB22, 'M', u'ד'), (0xFB23, 'M', u'ה'), (0xFB24, 'M', u'כ'), (0xFB25, 'M', u'ל'), (0xFB26, 'M', u'ם'), (0xFB27, 'M', u'ר'), (0xFB28, 'M', u'ת'), (0xFB29, '3', u'+'), (0xFB2A, 'M', u'שׁ'), (0xFB2B, 'M', u'שׂ'), (0xFB2C, 'M', u'שּׁ'), (0xFB2D, 'M', u'שּׂ'), (0xFB2E, 'M', u'אַ'), (0xFB2F, 'M', u'אָ'), (0xFB30, 'M', u'אּ'), (0xFB31, 'M', u'בּ'), (0xFB32, 'M', u'גּ'), (0xFB33, 'M', u'דּ'), (0xFB34, 'M', u'הּ'), (0xFB35, 'M', u'וּ'), (0xFB36, 'M', u'זּ'), (0xFB37, 'X'), (0xFB38, 'M', u'טּ'), (0xFB39, 'M', u'יּ'), (0xFB3A, 'M', u'ךּ'), (0xFB3B, 'M', u'כּ'), (0xFB3C, 'M', u'לּ'), (0xFB3D, 'X'), (0xFB3E, 'M', u'מּ'), (0xFB3F, 'X'), (0xFB40, 'M', u'נּ'), (0xFB41, 'M', u'סּ'), (0xFB42, 'X'), (0xFB43, 'M', u'ףּ'), (0xFB44, 'M', u'פּ'), (0xFB45, 'X'), (0xFB46, 'M', u'צּ'), (0xFB47, 'M', u'קּ'), (0xFB48, 'M', u'רּ'), (0xFB49, 'M', u'שּ'), (0xFB4A, 'M', u'תּ'), (0xFB4B, 'M', u'וֹ'), (0xFB4C, 'M', u'בֿ'), (0xFB4D, 'M', u'כֿ'), (0xFB4E, 'M', 
u'פֿ'), (0xFB4F, 'M', u'אל'), (0xFB50, 'M', u'ٱ'), (0xFB52, 'M', u'ٻ'), (0xFB56, 'M', u'پ'), (0xFB5A, 'M', u'ڀ'), (0xFB5E, 'M', u'ٺ'), (0xFB62, 'M', u'ٿ'), (0xFB66, 'M', u'ٹ'), (0xFB6A, 'M', u'ڤ'), (0xFB6E, 'M', u'ڦ'), (0xFB72, 'M', u'ڄ'), (0xFB76, 'M', u'ڃ'), (0xFB7A, 'M', u'چ'), (0xFB7E, 'M', u'ڇ'), (0xFB82, 'M', u'ڍ'), ] def _seg_43(): return [ (0xFB84, 'M', u'ڌ'), (0xFB86, 'M', u'ڎ'), (0xFB88, 'M', u'ڈ'), (0xFB8A, 'M', u'ژ'), (0xFB8C, 'M', u'ڑ'), (0xFB8E, 'M', u'ک'), (0xFB92, 'M', u'گ'), (0xFB96, 'M', u'ڳ'), (0xFB9A, 'M', u'ڱ'), (0xFB9E, 'M', u'ں'), (0xFBA0, 'M', u'ڻ'), (0xFBA4, 'M', u'ۀ'), (0xFBA6, 'M', u'ہ'), (0xFBAA, 'M', u'ھ'), (0xFBAE, 'M', u'ے'), (0xFBB0, 'M', u'ۓ'), (0xFBB2, 'V'), (0xFBC2, 'X'), (0xFBD3, 'M', u'ڭ'), (0xFBD7, 'M', u'ۇ'), (0xFBD9, 'M', u'ۆ'), (0xFBDB, 'M', u'ۈ'), (0xFBDD, 'M', u'ۇٴ'), (0xFBDE, 'M', u'ۋ'), (0xFBE0, 'M', u'ۅ'), (0xFBE2, 'M', u'ۉ'), (0xFBE4, 'M', u'ې'), (0xFBE8, 'M', u'ى'), (0xFBEA, 'M', u'ئا'), (0xFBEC, 'M', u'ئە'), (0xFBEE, 'M', u'ئو'), (0xFBF0, 'M', u'ئۇ'), (0xFBF2, 'M', u'ئۆ'), (0xFBF4, 'M', u'ئۈ'), (0xFBF6, 'M', u'ئې'), (0xFBF9, 'M', u'ئى'), (0xFBFC, 'M', u'ی'), (0xFC00, 'M', u'ئج'), (0xFC01, 'M', u'ئح'), (0xFC02, 'M', u'ئم'), (0xFC03, 'M', u'ئى'), (0xFC04, 'M', u'ئي'), (0xFC05, 'M', u'بج'), (0xFC06, 'M', u'بح'), (0xFC07, 'M', u'بخ'), (0xFC08, 'M', u'بم'), (0xFC09, 'M', u'بى'), (0xFC0A, 'M', u'بي'), (0xFC0B, 'M', u'تج'), (0xFC0C, 'M', u'تح'), (0xFC0D, 'M', u'تخ'), (0xFC0E, 'M', u'تم'), (0xFC0F, 'M', u'تى'), (0xFC10, 'M', u'تي'), (0xFC11, 'M', u'ثج'), (0xFC12, 'M', u'ثم'), (0xFC13, 'M', u'ثى'), (0xFC14, 'M', u'ثي'), (0xFC15, 'M', u'جح'), (0xFC16, 'M', u'جم'), (0xFC17, 'M', u'حج'), (0xFC18, 'M', u'حم'), (0xFC19, 'M', u'خج'), (0xFC1A, 'M', u'خح'), (0xFC1B, 'M', u'خم'), (0xFC1C, 'M', u'سج'), (0xFC1D, 'M', u'سح'), (0xFC1E, 'M', u'سخ'), (0xFC1F, 'M', u'سم'), (0xFC20, 'M', u'صح'), (0xFC21, 'M', u'صم'), (0xFC22, 'M', u'ضج'), (0xFC23, 'M', u'ضح'), (0xFC24, 'M', u'ضخ'), (0xFC25, 'M', u'ضم'), (0xFC26, 'M', u'طح'), (0xFC27, 'M', u'طم'), (0xFC28, 'M', u'ظم'), (0xFC29, 'M', u'عج'), (0xFC2A, 'M', u'عم'), (0xFC2B, 'M', u'غج'), (0xFC2C, 'M', u'غم'), (0xFC2D, 'M', u'فج'), (0xFC2E, 'M', u'فح'), (0xFC2F, 'M', u'فخ'), (0xFC30, 'M', u'فم'), (0xFC31, 'M', u'فى'), (0xFC32, 'M', u'في'), (0xFC33, 'M', u'قح'), (0xFC34, 'M', u'قم'), (0xFC35, 'M', u'قى'), (0xFC36, 'M', u'قي'), (0xFC37, 'M', u'كا'), (0xFC38, 'M', u'كج'), (0xFC39, 'M', u'كح'), (0xFC3A, 'M', u'كخ'), (0xFC3B, 'M', u'كل'), (0xFC3C, 'M', u'كم'), (0xFC3D, 'M', u'كى'), (0xFC3E, 'M', u'كي'), ] def _seg_44(): return [ (0xFC3F, 'M', u'لج'), (0xFC40, 'M', u'لح'), (0xFC41, 'M', u'لخ'), (0xFC42, 'M', u'لم'), (0xFC43, 'M', u'لى'), (0xFC44, 'M', u'لي'), (0xFC45, 'M', u'مج'), (0xFC46, 'M', u'مح'), (0xFC47, 'M', u'مخ'), (0xFC48, 'M', u'مم'), (0xFC49, 'M', u'مى'), (0xFC4A, 'M', u'مي'), (0xFC4B, 'M', u'نج'), (0xFC4C, 'M', u'نح'), (0xFC4D, 'M', u'نخ'), (0xFC4E, 'M', u'نم'), (0xFC4F, 'M', u'نى'), (0xFC50, 'M', u'ني'), (0xFC51, 'M', u'هج'), (0xFC52, 'M', u'هم'), (0xFC53, 'M', u'هى'), (0xFC54, 'M', u'هي'), (0xFC55, 'M', u'يج'), (0xFC56, 'M', u'يح'), (0xFC57, 'M', u'يخ'), (0xFC58, 'M', u'يم'), (0xFC59, 'M', u'يى'), (0xFC5A, 'M', u'يي'), (0xFC5B, 'M', u'ذٰ'), (0xFC5C, 'M', u'رٰ'), (0xFC5D, 'M', u'ىٰ'), (0xFC5E, '3', u' ٌّ'), (0xFC5F, '3', u' ٍّ'), (0xFC60, '3', u' َّ'), (0xFC61, '3', u' ُّ'), (0xFC62, '3', u' ِّ'), (0xFC63, '3', u' ّٰ'), (0xFC64, 'M', u'ئر'), (0xFC65, 'M', u'ئز'), (0xFC66, 'M', u'ئم'), (0xFC67, 'M', u'ئن'), (0xFC68, 'M', u'ئى'), (0xFC69, 'M', u'ئي'), (0xFC6A, 'M', u'بر'), (0xFC6B, 'M', u'بز'), (0xFC6C, 'M', u'بم'), 
(0xFC6D, 'M', u'بن'), (0xFC6E, 'M', u'بى'), (0xFC6F, 'M', u'بي'), (0xFC70, 'M', u'تر'), (0xFC71, 'M', u'تز'), (0xFC72, 'M', u'تم'), (0xFC73, 'M', u'تن'), (0xFC74, 'M', u'تى'), (0xFC75, 'M', u'تي'), (0xFC76, 'M', u'ثر'), (0xFC77, 'M', u'ثز'), (0xFC78, 'M', u'ثم'), (0xFC79, 'M', u'ثن'), (0xFC7A, 'M', u'ثى'), (0xFC7B, 'M', u'ثي'), (0xFC7C, 'M', u'فى'), (0xFC7D, 'M', u'في'), (0xFC7E, 'M', u'قى'), (0xFC7F, 'M', u'قي'), (0xFC80, 'M', u'كا'), (0xFC81, 'M', u'كل'), (0xFC82, 'M', u'كم'), (0xFC83, 'M', u'كى'), (0xFC84, 'M', u'كي'), (0xFC85, 'M', u'لم'), (0xFC86, 'M', u'لى'), (0xFC87, 'M', u'لي'), (0xFC88, 'M', u'ما'), (0xFC89, 'M', u'مم'), (0xFC8A, 'M', u'نر'), (0xFC8B, 'M', u'نز'), (0xFC8C, 'M', u'نم'), (0xFC8D, 'M', u'نن'), (0xFC8E, 'M', u'نى'), (0xFC8F, 'M', u'ني'), (0xFC90, 'M', u'ىٰ'), (0xFC91, 'M', u'ير'), (0xFC92, 'M', u'يز'), (0xFC93, 'M', u'يم'), (0xFC94, 'M', u'ين'), (0xFC95, 'M', u'يى'), (0xFC96, 'M', u'يي'), (0xFC97, 'M', u'ئج'), (0xFC98, 'M', u'ئح'), (0xFC99, 'M', u'ئخ'), (0xFC9A, 'M', u'ئم'), (0xFC9B, 'M', u'ئه'), (0xFC9C, 'M', u'بج'), (0xFC9D, 'M', u'بح'), (0xFC9E, 'M', u'بخ'), (0xFC9F, 'M', u'بم'), (0xFCA0, 'M', u'به'), (0xFCA1, 'M', u'تج'), (0xFCA2, 'M', u'تح'), ] def _seg_45(): return [ (0xFCA3, 'M', u'تخ'), (0xFCA4, 'M', u'تم'), (0xFCA5, 'M', u'ته'), (0xFCA6, 'M', u'ثم'), (0xFCA7, 'M', u'جح'), (0xFCA8, 'M', u'جم'), (0xFCA9, 'M', u'حج'), (0xFCAA, 'M', u'حم'), (0xFCAB, 'M', u'خج'), (0xFCAC, 'M', u'خم'), (0xFCAD, 'M', u'سج'), (0xFCAE, 'M', u'سح'), (0xFCAF, 'M', u'سخ'), (0xFCB0, 'M', u'سم'), (0xFCB1, 'M', u'صح'), (0xFCB2, 'M', u'صخ'), (0xFCB3, 'M', u'صم'), (0xFCB4, 'M', u'ضج'), (0xFCB5, 'M', u'ضح'), (0xFCB6, 'M', u'ضخ'), (0xFCB7, 'M', u'ضم'), (0xFCB8, 'M', u'طح'), (0xFCB9, 'M', u'ظم'), (0xFCBA, 'M', u'عج'), (0xFCBB, 'M', u'عم'), (0xFCBC, 'M', u'غج'), (0xFCBD, 'M', u'غم'), (0xFCBE, 'M', u'فج'), (0xFCBF, 'M', u'فح'), (0xFCC0, 'M', u'فخ'), (0xFCC1, 'M', u'فم'), (0xFCC2, 'M', u'قح'), (0xFCC3, 'M', u'قم'), (0xFCC4, 'M', u'كج'), (0xFCC5, 'M', u'كح'), (0xFCC6, 'M', u'كخ'), (0xFCC7, 'M', u'كل'), (0xFCC8, 'M', u'كم'), (0xFCC9, 'M', u'لج'), (0xFCCA, 'M', u'لح'), (0xFCCB, 'M', u'لخ'), (0xFCCC, 'M', u'لم'), (0xFCCD, 'M', u'له'), (0xFCCE, 'M', u'مج'), (0xFCCF, 'M', u'مح'), (0xFCD0, 'M', u'مخ'), (0xFCD1, 'M', u'مم'), (0xFCD2, 'M', u'نج'), (0xFCD3, 'M', u'نح'), (0xFCD4, 'M', u'نخ'), (0xFCD5, 'M', u'نم'), (0xFCD6, 'M', u'نه'), (0xFCD7, 'M', u'هج'), (0xFCD8, 'M', u'هم'), (0xFCD9, 'M', u'هٰ'), (0xFCDA, 'M', u'يج'), (0xFCDB, 'M', u'يح'), (0xFCDC, 'M', u'يخ'), (0xFCDD, 'M', u'يم'), (0xFCDE, 'M', u'يه'), (0xFCDF, 'M', u'ئم'), (0xFCE0, 'M', u'ئه'), (0xFCE1, 'M', u'بم'), (0xFCE2, 'M', u'به'), (0xFCE3, 'M', u'تم'), (0xFCE4, 'M', u'ته'), (0xFCE5, 'M', u'ثم'), (0xFCE6, 'M', u'ثه'), (0xFCE7, 'M', u'سم'), (0xFCE8, 'M', u'سه'), (0xFCE9, 'M', u'شم'), (0xFCEA, 'M', u'شه'), (0xFCEB, 'M', u'كل'), (0xFCEC, 'M', u'كم'), (0xFCED, 'M', u'لم'), (0xFCEE, 'M', u'نم'), (0xFCEF, 'M', u'نه'), (0xFCF0, 'M', u'يم'), (0xFCF1, 'M', u'يه'), (0xFCF2, 'M', u'ـَّ'), (0xFCF3, 'M', u'ـُّ'), (0xFCF4, 'M', u'ـِّ'), (0xFCF5, 'M', u'طى'), (0xFCF6, 'M', u'طي'), (0xFCF7, 'M', u'عى'), (0xFCF8, 'M', u'عي'), (0xFCF9, 'M', u'غى'), (0xFCFA, 'M', u'غي'), (0xFCFB, 'M', u'سى'), (0xFCFC, 'M', u'سي'), (0xFCFD, 'M', u'شى'), (0xFCFE, 'M', u'شي'), (0xFCFF, 'M', u'حى'), (0xFD00, 'M', u'حي'), (0xFD01, 'M', u'جى'), (0xFD02, 'M', u'جي'), (0xFD03, 'M', u'خى'), (0xFD04, 'M', u'خي'), (0xFD05, 'M', u'صى'), (0xFD06, 'M', u'صي'), ] def _seg_46(): return [ (0xFD07, 'M', u'ضى'), (0xFD08, 'M', u'ضي'), (0xFD09, 'M', u'شج'), (0xFD0A, 'M', u'شح'), (0xFD0B, 'M', u'شخ'), 
(0xFD0C, 'M', u'شم'), (0xFD0D, 'M', u'شر'), (0xFD0E, 'M', u'سر'), (0xFD0F, 'M', u'صر'), (0xFD10, 'M', u'ضر'), (0xFD11, 'M', u'طى'), (0xFD12, 'M', u'طي'), (0xFD13, 'M', u'عى'), (0xFD14, 'M', u'عي'), (0xFD15, 'M', u'غى'), (0xFD16, 'M', u'غي'), (0xFD17, 'M', u'سى'), (0xFD18, 'M', u'سي'), (0xFD19, 'M', u'شى'), (0xFD1A, 'M', u'شي'), (0xFD1B, 'M', u'حى'), (0xFD1C, 'M', u'حي'), (0xFD1D, 'M', u'جى'), (0xFD1E, 'M', u'جي'), (0xFD1F, 'M', u'خى'), (0xFD20, 'M', u'خي'), (0xFD21, 'M', u'صى'), (0xFD22, 'M', u'صي'), (0xFD23, 'M', u'ضى'), (0xFD24, 'M', u'ضي'), (0xFD25, 'M', u'شج'), (0xFD26, 'M', u'شح'), (0xFD27, 'M', u'شخ'), (0xFD28, 'M', u'شم'), (0xFD29, 'M', u'شر'), (0xFD2A, 'M', u'سر'), (0xFD2B, 'M', u'صر'), (0xFD2C, 'M', u'ضر'), (0xFD2D, 'M', u'شج'), (0xFD2E, 'M', u'شح'), (0xFD2F, 'M', u'شخ'), (0xFD30, 'M', u'شم'), (0xFD31, 'M', u'سه'), (0xFD32, 'M', u'شه'), (0xFD33, 'M', u'طم'), (0xFD34, 'M', u'سج'), (0xFD35, 'M', u'سح'), (0xFD36, 'M', u'سخ'), (0xFD37, 'M', u'شج'), (0xFD38, 'M', u'شح'), (0xFD39, 'M', u'شخ'), (0xFD3A, 'M', u'طم'), (0xFD3B, 'M', u'ظم'), (0xFD3C, 'M', u'اً'), (0xFD3E, 'V'), (0xFD40, 'X'), (0xFD50, 'M', u'تجم'), (0xFD51, 'M', u'تحج'), (0xFD53, 'M', u'تحم'), (0xFD54, 'M', u'تخم'), (0xFD55, 'M', u'تمج'), (0xFD56, 'M', u'تمح'), (0xFD57, 'M', u'تمخ'), (0xFD58, 'M', u'جمح'), (0xFD5A, 'M', u'حمي'), (0xFD5B, 'M', u'حمى'), (0xFD5C, 'M', u'سحج'), (0xFD5D, 'M', u'سجح'), (0xFD5E, 'M', u'سجى'), (0xFD5F, 'M', u'سمح'), (0xFD61, 'M', u'سمج'), (0xFD62, 'M', u'سمم'), (0xFD64, 'M', u'صحح'), (0xFD66, 'M', u'صمم'), (0xFD67, 'M', u'شحم'), (0xFD69, 'M', u'شجي'), (0xFD6A, 'M', u'شمخ'), (0xFD6C, 'M', u'شمم'), (0xFD6E, 'M', u'ضحى'), (0xFD6F, 'M', u'ضخم'), (0xFD71, 'M', u'طمح'), (0xFD73, 'M', u'طمم'), (0xFD74, 'M', u'طمي'), (0xFD75, 'M', u'عجم'), (0xFD76, 'M', u'عمم'), (0xFD78, 'M', u'عمى'), (0xFD79, 'M', u'غمم'), (0xFD7A, 'M', u'غمي'), (0xFD7B, 'M', u'غمى'), (0xFD7C, 'M', u'فخم'), (0xFD7E, 'M', u'قمح'), (0xFD7F, 'M', u'قمم'), (0xFD80, 'M', u'لحم'), (0xFD81, 'M', u'لحي'), (0xFD82, 'M', u'لحى'), (0xFD83, 'M', u'لجج'), (0xFD85, 'M', u'لخم'), (0xFD87, 'M', u'لمح'), (0xFD89, 'M', u'محج'), (0xFD8A, 'M', u'محم'), ] def _seg_47(): return [ (0xFD8B, 'M', u'محي'), (0xFD8C, 'M', u'مجح'), (0xFD8D, 'M', u'مجم'), (0xFD8E, 'M', u'مخج'), (0xFD8F, 'M', u'مخم'), (0xFD90, 'X'), (0xFD92, 'M', u'مجخ'), (0xFD93, 'M', u'همج'), (0xFD94, 'M', u'همم'), (0xFD95, 'M', u'نحم'), (0xFD96, 'M', u'نحى'), (0xFD97, 'M', u'نجم'), (0xFD99, 'M', u'نجى'), (0xFD9A, 'M', u'نمي'), (0xFD9B, 'M', u'نمى'), (0xFD9C, 'M', u'يمم'), (0xFD9E, 'M', u'بخي'), (0xFD9F, 'M', u'تجي'), (0xFDA0, 'M', u'تجى'), (0xFDA1, 'M', u'تخي'), (0xFDA2, 'M', u'تخى'), (0xFDA3, 'M', u'تمي'), (0xFDA4, 'M', u'تمى'), (0xFDA5, 'M', u'جمي'), (0xFDA6, 'M', u'جحى'), (0xFDA7, 'M', u'جمى'), (0xFDA8, 'M', u'سخى'), (0xFDA9, 'M', u'صحي'), (0xFDAA, 'M', u'شحي'), (0xFDAB, 'M', u'ضحي'), (0xFDAC, 'M', u'لجي'), (0xFDAD, 'M', u'لمي'), (0xFDAE, 'M', u'يحي'), (0xFDAF, 'M', u'يجي'), (0xFDB0, 'M', u'يمي'), (0xFDB1, 'M', u'ممي'), (0xFDB2, 'M', u'قمي'), (0xFDB3, 'M', u'نحي'), (0xFDB4, 'M', u'قمح'), (0xFDB5, 'M', u'لحم'), (0xFDB6, 'M', u'عمي'), (0xFDB7, 'M', u'كمي'), (0xFDB8, 'M', u'نجح'), (0xFDB9, 'M', u'مخي'), (0xFDBA, 'M', u'لجم'), (0xFDBB, 'M', u'كمم'), (0xFDBC, 'M', u'لجم'), (0xFDBD, 'M', u'نجح'), (0xFDBE, 'M', u'جحي'), (0xFDBF, 'M', u'حجي'), (0xFDC0, 'M', u'مجي'), (0xFDC1, 'M', u'فمي'), (0xFDC2, 'M', u'بحي'), (0xFDC3, 'M', u'كمم'), (0xFDC4, 'M', u'عجم'), (0xFDC5, 'M', u'صمم'), (0xFDC6, 'M', u'سخي'), (0xFDC7, 'M', u'نجي'), (0xFDC8, 'X'), (0xFDF0, 'M', u'صلے'), (0xFDF1, 'M', u'قلے'), (0xFDF2, 'M', 
u'الله'), (0xFDF3, 'M', u'اكبر'), (0xFDF4, 'M', u'محمد'), (0xFDF5, 'M', u'صلعم'), (0xFDF6, 'M', u'رسول'), (0xFDF7, 'M', u'عليه'), (0xFDF8, 'M', u'وسلم'), (0xFDF9, 'M', u'صلى'), (0xFDFA, '3', u'صلى الله عليه وسلم'), (0xFDFB, '3', u'جل جلاله'), (0xFDFC, 'M', u'ریال'), (0xFDFD, 'V'), (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', u','), (0xFE11, 'M', u'、'), (0xFE12, 'X'), (0xFE13, '3', u':'), (0xFE14, '3', u';'), (0xFE15, '3', u'!'), (0xFE16, '3', u'?'), (0xFE17, 'M', u'〖'), (0xFE18, 'M', u'〗'), (0xFE19, 'X'), (0xFE20, 'V'), (0xFE27, 'X'), (0xFE31, 'M', u'—'), (0xFE32, 'M', u'–'), (0xFE33, '3', u'_'), (0xFE35, '3', u'('), (0xFE36, '3', u')'), (0xFE37, '3', u'{'), (0xFE38, '3', u'}'), (0xFE39, 'M', u'〔'), (0xFE3A, 'M', u'〕'), (0xFE3B, 'M', u'【'), (0xFE3C, 'M', u'】'), (0xFE3D, 'M', u'《'), (0xFE3E, 'M', u'》'), ] def _seg_48(): return [ (0xFE3F, 'M', u'〈'), (0xFE40, 'M', u'〉'), (0xFE41, 'M', u'「'), (0xFE42, 'M', u'」'), (0xFE43, 'M', u'『'), (0xFE44, 'M', u'』'), (0xFE45, 'V'), (0xFE47, '3', u'['), (0xFE48, '3', u']'), (0xFE49, '3', u' ̅'), (0xFE4D, '3', u'_'), (0xFE50, '3', u','), (0xFE51, 'M', u'、'), (0xFE52, 'X'), (0xFE54, '3', u';'), (0xFE55, '3', u':'), (0xFE56, '3', u'?'), (0xFE57, '3', u'!'), (0xFE58, 'M', u'—'), (0xFE59, '3', u'('), (0xFE5A, '3', u')'), (0xFE5B, '3', u'{'), (0xFE5C, '3', u'}'), (0xFE5D, 'M', u'〔'), (0xFE5E, 'M', u'〕'), (0xFE5F, '3', u'#'), (0xFE60, '3', u'&'), (0xFE61, '3', u'*'), (0xFE62, '3', u'+'), (0xFE63, 'M', u'-'), (0xFE64, '3', u'<'), (0xFE65, '3', u'>'), (0xFE66, '3', u'='), (0xFE67, 'X'), (0xFE68, '3', u'\\'), (0xFE69, '3', u'$'), (0xFE6A, '3', u'%'), (0xFE6B, '3', u'@'), (0xFE6C, 'X'), (0xFE70, '3', u' ً'), (0xFE71, 'M', u'ـً'), (0xFE72, '3', u' ٌ'), (0xFE73, 'V'), (0xFE74, '3', u' ٍ'), (0xFE75, 'X'), (0xFE76, '3', u' َ'), (0xFE77, 'M', u'ـَ'), (0xFE78, '3', u' ُ'), (0xFE79, 'M', u'ـُ'), (0xFE7A, '3', u' ِ'), (0xFE7B, 'M', u'ـِ'), (0xFE7C, '3', u' ّ'), (0xFE7D, 'M', u'ـّ'), (0xFE7E, '3', u' ْ'), (0xFE7F, 'M', u'ـْ'), (0xFE80, 'M', u'ء'), (0xFE81, 'M', u'آ'), (0xFE83, 'M', u'أ'), (0xFE85, 'M', u'ؤ'), (0xFE87, 'M', u'إ'), (0xFE89, 'M', u'ئ'), (0xFE8D, 'M', u'ا'), (0xFE8F, 'M', u'ب'), (0xFE93, 'M', u'ة'), (0xFE95, 'M', u'ت'), (0xFE99, 'M', u'ث'), (0xFE9D, 'M', u'ج'), (0xFEA1, 'M', u'ح'), (0xFEA5, 'M', u'خ'), (0xFEA9, 'M', u'د'), (0xFEAB, 'M', u'ذ'), (0xFEAD, 'M', u'ر'), (0xFEAF, 'M', u'ز'), (0xFEB1, 'M', u'س'), (0xFEB5, 'M', u'ش'), (0xFEB9, 'M', u'ص'), (0xFEBD, 'M', u'ض'), (0xFEC1, 'M', u'ط'), (0xFEC5, 'M', u'ظ'), (0xFEC9, 'M', u'ع'), (0xFECD, 'M', u'غ'), (0xFED1, 'M', u'ف'), (0xFED5, 'M', u'ق'), (0xFED9, 'M', u'ك'), (0xFEDD, 'M', u'ل'), (0xFEE1, 'M', u'م'), (0xFEE5, 'M', u'ن'), (0xFEE9, 'M', u'ه'), (0xFEED, 'M', u'و'), (0xFEEF, 'M', u'ى'), (0xFEF1, 'M', u'ي'), (0xFEF5, 'M', u'لآ'), (0xFEF7, 'M', u'لأ'), (0xFEF9, 'M', u'لإ'), (0xFEFB, 'M', u'لا'), (0xFEFD, 'X'), (0xFEFF, 'I'), (0xFF00, 'X'), (0xFF01, '3', u'!'), (0xFF02, '3', u'"'), ] def _seg_49(): return [ (0xFF03, '3', u'#'), (0xFF04, '3', u'$'), (0xFF05, '3', u'%'), (0xFF06, '3', u'&'), (0xFF07, '3', u'\''), (0xFF08, '3', u'('), (0xFF09, '3', u')'), (0xFF0A, '3', u'*'), (0xFF0B, '3', u'+'), (0xFF0C, '3', u','), (0xFF0D, 'M', u'-'), (0xFF0E, 'M', u'.'), (0xFF0F, '3', u'/'), (0xFF10, 'M', u'0'), (0xFF11, 'M', u'1'), (0xFF12, 'M', u'2'), (0xFF13, 'M', u'3'), (0xFF14, 'M', u'4'), (0xFF15, 'M', u'5'), (0xFF16, 'M', u'6'), (0xFF17, 'M', u'7'), (0xFF18, 'M', u'8'), (0xFF19, 'M', u'9'), (0xFF1A, '3', u':'), (0xFF1B, '3', u';'), (0xFF1C, '3', u'<'), (0xFF1D, '3', u'='), (0xFF1E, '3', u'>'), (0xFF1F, '3', u'?'), (0xFF20, '3', 
u'@'), (0xFF21, 'M', u'a'), (0xFF22, 'M', u'b'), (0xFF23, 'M', u'c'), (0xFF24, 'M', u'd'), (0xFF25, 'M', u'e'), (0xFF26, 'M', u'f'), (0xFF27, 'M', u'g'), (0xFF28, 'M', u'h'), (0xFF29, 'M', u'i'), (0xFF2A, 'M', u'j'), (0xFF2B, 'M', u'k'), (0xFF2C, 'M', u'l'), (0xFF2D, 'M', u'm'), (0xFF2E, 'M', u'n'), (0xFF2F, 'M', u'o'), (0xFF30, 'M', u'p'), (0xFF31, 'M', u'q'), (0xFF32, 'M', u'r'), (0xFF33, 'M', u's'), (0xFF34, 'M', u't'), (0xFF35, 'M', u'u'), (0xFF36, 'M', u'v'), (0xFF37, 'M', u'w'), (0xFF38, 'M', u'x'), (0xFF39, 'M', u'y'), (0xFF3A, 'M', u'z'), (0xFF3B, '3', u'['), (0xFF3C, '3', u'\\'), (0xFF3D, '3', u']'), (0xFF3E, '3', u'^'), (0xFF3F, '3', u'_'), (0xFF40, '3', u'`'), (0xFF41, 'M', u'a'), (0xFF42, 'M', u'b'), (0xFF43, 'M', u'c'), (0xFF44, 'M', u'd'), (0xFF45, 'M', u'e'), (0xFF46, 'M', u'f'), (0xFF47, 'M', u'g'), (0xFF48, 'M', u'h'), (0xFF49, 'M', u'i'), (0xFF4A, 'M', u'j'), (0xFF4B, 'M', u'k'), (0xFF4C, 'M', u'l'), (0xFF4D, 'M', u'm'), (0xFF4E, 'M', u'n'), (0xFF4F, 'M', u'o'), (0xFF50, 'M', u'p'), (0xFF51, 'M', u'q'), (0xFF52, 'M', u'r'), (0xFF53, 'M', u's'), (0xFF54, 'M', u't'), (0xFF55, 'M', u'u'), (0xFF56, 'M', u'v'), (0xFF57, 'M', u'w'), (0xFF58, 'M', u'x'), (0xFF59, 'M', u'y'), (0xFF5A, 'M', u'z'), (0xFF5B, '3', u'{'), (0xFF5C, '3', u'|'), (0xFF5D, '3', u'}'), (0xFF5E, '3', u'~'), (0xFF5F, 'M', u'⦅'), (0xFF60, 'M', u'⦆'), (0xFF61, 'M', u'.'), (0xFF62, 'M', u'「'), (0xFF63, 'M', u'」'), (0xFF64, 'M', u'、'), (0xFF65, 'M', u'・'), (0xFF66, 'M', u'ヲ'), ] def _seg_50(): return [ (0xFF67, 'M', u'ァ'), (0xFF68, 'M', u'ィ'), (0xFF69, 'M', u'ゥ'), (0xFF6A, 'M', u'ェ'), (0xFF6B, 'M', u'ォ'), (0xFF6C, 'M', u'ャ'), (0xFF6D, 'M', u'ュ'), (0xFF6E, 'M', u'ョ'), (0xFF6F, 'M', u'ッ'), (0xFF70, 'M', u'ー'), (0xFF71, 'M', u'ア'), (0xFF72, 'M', u'イ'), (0xFF73, 'M', u'ウ'), (0xFF74, 'M', u'エ'), (0xFF75, 'M', u'オ'), (0xFF76, 'M', u'カ'), (0xFF77, 'M', u'キ'), (0xFF78, 'M', u'ク'), (0xFF79, 'M', u'ケ'), (0xFF7A, 'M', u'コ'), (0xFF7B, 'M', u'サ'), (0xFF7C, 'M', u'シ'), (0xFF7D, 'M', u'ス'), (0xFF7E, 'M', u'セ'), (0xFF7F, 'M', u'ソ'), (0xFF80, 'M', u'タ'), (0xFF81, 'M', u'チ'), (0xFF82, 'M', u'ツ'), (0xFF83, 'M', u'テ'), (0xFF84, 'M', u'ト'), (0xFF85, 'M', u'ナ'), (0xFF86, 'M', u'ニ'), (0xFF87, 'M', u'ヌ'), (0xFF88, 'M', u'ネ'), (0xFF89, 'M', u'ノ'), (0xFF8A, 'M', u'ハ'), (0xFF8B, 'M', u'ヒ'), (0xFF8C, 'M', u'フ'), (0xFF8D, 'M', u'ヘ'), (0xFF8E, 'M', u'ホ'), (0xFF8F, 'M', u'マ'), (0xFF90, 'M', u'ミ'), (0xFF91, 'M', u'ム'), (0xFF92, 'M', u'メ'), (0xFF93, 'M', u'モ'), (0xFF94, 'M', u'ヤ'), (0xFF95, 'M', u'ユ'), (0xFF96, 'M', u'ヨ'), (0xFF97, 'M', u'ラ'), (0xFF98, 'M', u'リ'), (0xFF99, 'M', u'ル'), (0xFF9A, 'M', u'レ'), (0xFF9B, 'M', u'ロ'), (0xFF9C, 'M', u'ワ'), (0xFF9D, 'M', u'ン'), (0xFF9E, 'M', u'゙'), (0xFF9F, 'M', u'゚'), (0xFFA0, 'X'), (0xFFA1, 'M', u'ᄀ'), (0xFFA2, 'M', u'ᄁ'), (0xFFA3, 'M', u'ᆪ'), (0xFFA4, 'M', u'ᄂ'), (0xFFA5, 'M', u'ᆬ'), (0xFFA6, 'M', u'ᆭ'), (0xFFA7, 'M', u'ᄃ'), (0xFFA8, 'M', u'ᄄ'), (0xFFA9, 'M', u'ᄅ'), (0xFFAA, 'M', u'ᆰ'), (0xFFAB, 'M', u'ᆱ'), (0xFFAC, 'M', u'ᆲ'), (0xFFAD, 'M', u'ᆳ'), (0xFFAE, 'M', u'ᆴ'), (0xFFAF, 'M', u'ᆵ'), (0xFFB0, 'M', u'ᄚ'), (0xFFB1, 'M', u'ᄆ'), (0xFFB2, 'M', u'ᄇ'), (0xFFB3, 'M', u'ᄈ'), (0xFFB4, 'M', u'ᄡ'), (0xFFB5, 'M', u'ᄉ'), (0xFFB6, 'M', u'ᄊ'), (0xFFB7, 'M', u'ᄋ'), (0xFFB8, 'M', u'ᄌ'), (0xFFB9, 'M', u'ᄍ'), (0xFFBA, 'M', u'ᄎ'), (0xFFBB, 'M', u'ᄏ'), (0xFFBC, 'M', u'ᄐ'), (0xFFBD, 'M', u'ᄑ'), (0xFFBE, 'M', u'ᄒ'), (0xFFBF, 'X'), (0xFFC2, 'M', u'ᅡ'), (0xFFC3, 'M', u'ᅢ'), (0xFFC4, 'M', u'ᅣ'), (0xFFC5, 'M', u'ᅤ'), (0xFFC6, 'M', u'ᅥ'), (0xFFC7, 'M', u'ᅦ'), (0xFFC8, 'X'), (0xFFCA, 'M', u'ᅧ'), (0xFFCB, 'M', u'ᅨ'), (0xFFCC, 
'M', u'ᅩ'), (0xFFCD, 'M', u'ᅪ'), ] def _seg_51(): return [ (0xFFCE, 'M', u'ᅫ'), (0xFFCF, 'M', u'ᅬ'), (0xFFD0, 'X'), (0xFFD2, 'M', u'ᅭ'), (0xFFD3, 'M', u'ᅮ'), (0xFFD4, 'M', u'ᅯ'), (0xFFD5, 'M', u'ᅰ'), (0xFFD6, 'M', u'ᅱ'), (0xFFD7, 'M', u'ᅲ'), (0xFFD8, 'X'), (0xFFDA, 'M', u'ᅳ'), (0xFFDB, 'M', u'ᅴ'), (0xFFDC, 'M', u'ᅵ'), (0xFFDD, 'X'), (0xFFE0, 'M', u'¢'), (0xFFE1, 'M', u'£'), (0xFFE2, 'M', u'¬'), (0xFFE3, '3', u' ̄'), (0xFFE4, 'M', u'¦'), (0xFFE5, 'M', u'¥'), (0xFFE6, 'M', u'₩'), (0xFFE7, 'X'), (0xFFE8, 'M', u'│'), (0xFFE9, 'M', u'←'), (0xFFEA, 'M', u'↑'), (0xFFEB, 'M', u'→'), (0xFFEC, 'M', u'↓'), (0xFFED, 'M', u'■'), (0xFFEE, 'M', u'○'), (0xFFEF, 'X'), (0x10000, 'V'), (0x1000C, 'X'), (0x1000D, 'V'), (0x10027, 'X'), (0x10028, 'V'), (0x1003B, 'X'), (0x1003C, 'V'), (0x1003E, 'X'), (0x1003F, 'V'), (0x1004E, 'X'), (0x10050, 'V'), (0x1005E, 'X'), (0x10080, 'V'), (0x100FB, 'X'), (0x10100, 'V'), (0x10103, 'X'), (0x10107, 'V'), (0x10134, 'X'), (0x10137, 'V'), (0x1018B, 'X'), (0x10190, 'V'), (0x1019C, 'X'), (0x101D0, 'V'), (0x101FE, 'X'), (0x10280, 'V'), (0x1029D, 'X'), (0x102A0, 'V'), (0x102D1, 'X'), (0x10300, 'V'), (0x1031F, 'X'), (0x10320, 'V'), (0x10324, 'X'), (0x10330, 'V'), (0x1034B, 'X'), (0x10380, 'V'), (0x1039E, 'X'), (0x1039F, 'V'), (0x103C4, 'X'), (0x103C8, 'V'), (0x103D6, 'X'), (0x10400, 'M', u'𐐨'), (0x10401, 'M', u'𐐩'), (0x10402, 'M', u'𐐪'), (0x10403, 'M', u'𐐫'), (0x10404, 'M', u'𐐬'), (0x10405, 'M', u'𐐭'), (0x10406, 'M', u'𐐮'), (0x10407, 'M', u'𐐯'), (0x10408, 'M', u'𐐰'), (0x10409, 'M', u'𐐱'), (0x1040A, 'M', u'𐐲'), (0x1040B, 'M', u'𐐳'), (0x1040C, 'M', u'𐐴'), (0x1040D, 'M', u'𐐵'), (0x1040E, 'M', u'𐐶'), (0x1040F, 'M', u'𐐷'), (0x10410, 'M', u'𐐸'), (0x10411, 'M', u'𐐹'), (0x10412, 'M', u'𐐺'), (0x10413, 'M', u'𐐻'), (0x10414, 'M', u'𐐼'), (0x10415, 'M', u'𐐽'), (0x10416, 'M', u'𐐾'), (0x10417, 'M', u'𐐿'), (0x10418, 'M', u'𐑀'), (0x10419, 'M', u'𐑁'), (0x1041A, 'M', u'𐑂'), (0x1041B, 'M', u'𐑃'), (0x1041C, 'M', u'𐑄'), (0x1041D, 'M', u'𐑅'), ] def _seg_52(): return [ (0x1041E, 'M', u'𐑆'), (0x1041F, 'M', u'𐑇'), (0x10420, 'M', u'𐑈'), (0x10421, 'M', u'𐑉'), (0x10422, 'M', u'𐑊'), (0x10423, 'M', u'𐑋'), (0x10424, 'M', u'𐑌'), (0x10425, 'M', u'𐑍'), (0x10426, 'M', u'𐑎'), (0x10427, 'M', u'𐑏'), (0x10428, 'V'), (0x1049E, 'X'), (0x104A0, 'V'), (0x104AA, 'X'), (0x10800, 'V'), (0x10806, 'X'), (0x10808, 'V'), (0x10809, 'X'), (0x1080A, 'V'), (0x10836, 'X'), (0x10837, 'V'), (0x10839, 'X'), (0x1083C, 'V'), (0x1083D, 'X'), (0x1083F, 'V'), (0x10856, 'X'), (0x10857, 'V'), (0x10860, 'X'), (0x10900, 'V'), (0x1091C, 'X'), (0x1091F, 'V'), (0x1093A, 'X'), (0x1093F, 'V'), (0x10940, 'X'), (0x10980, 'V'), (0x109B8, 'X'), (0x109BE, 'V'), (0x109C0, 'X'), (0x10A00, 'V'), (0x10A04, 'X'), (0x10A05, 'V'), (0x10A07, 'X'), (0x10A0C, 'V'), (0x10A14, 'X'), (0x10A15, 'V'), (0x10A18, 'X'), (0x10A19, 'V'), (0x10A34, 'X'), (0x10A38, 'V'), (0x10A3B, 'X'), (0x10A3F, 'V'), (0x10A48, 'X'), (0x10A50, 'V'), (0x10A59, 'X'), (0x10A60, 'V'), (0x10A80, 'X'), (0x10B00, 'V'), (0x10B36, 'X'), (0x10B39, 'V'), (0x10B56, 'X'), (0x10B58, 'V'), (0x10B73, 'X'), (0x10B78, 'V'), (0x10B80, 'X'), (0x10C00, 'V'), (0x10C49, 'X'), (0x10E60, 'V'), (0x10E7F, 'X'), (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), (0x11070, 'X'), (0x11080, 'V'), (0x110BD, 'X'), (0x110BE, 'V'), (0x110C2, 'X'), (0x110D0, 'V'), (0x110E9, 'X'), (0x110F0, 'V'), (0x110FA, 'X'), (0x11100, 'V'), (0x11135, 'X'), (0x11136, 'V'), (0x11144, 'X'), (0x11180, 'V'), (0x111C9, 'X'), (0x111D0, 'V'), (0x111DA, 'X'), (0x11680, 'V'), (0x116B8, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x12000, 'V'), (0x1236F, 'X'), 
(0x12400, 'V'), (0x12463, 'X'), (0x12470, 'V'), (0x12474, 'X'), (0x13000, 'V'), (0x1342F, 'X'), ] def _seg_53(): return [ (0x16800, 'V'), (0x16A39, 'X'), (0x16F00, 'V'), (0x16F45, 'X'), (0x16F50, 'V'), (0x16F7F, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x1B000, 'V'), (0x1B002, 'X'), (0x1D000, 'V'), (0x1D0F6, 'X'), (0x1D100, 'V'), (0x1D127, 'X'), (0x1D129, 'V'), (0x1D15E, 'M', u'𝅗𝅥'), (0x1D15F, 'M', u'𝅘𝅥'), (0x1D160, 'M', u'𝅘𝅥𝅮'), (0x1D161, 'M', u'𝅘𝅥𝅯'), (0x1D162, 'M', u'𝅘𝅥𝅰'), (0x1D163, 'M', u'𝅘𝅥𝅱'), (0x1D164, 'M', u'𝅘𝅥𝅲'), (0x1D165, 'V'), (0x1D173, 'X'), (0x1D17B, 'V'), (0x1D1BB, 'M', u'𝆹𝅥'), (0x1D1BC, 'M', u'𝆺𝅥'), (0x1D1BD, 'M', u'𝆹𝅥𝅮'), (0x1D1BE, 'M', u'𝆺𝅥𝅮'), (0x1D1BF, 'M', u'𝆹𝅥𝅯'), (0x1D1C0, 'M', u'𝆺𝅥𝅯'), (0x1D1C1, 'V'), (0x1D1DE, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), (0x1D300, 'V'), (0x1D357, 'X'), (0x1D360, 'V'), (0x1D372, 'X'), (0x1D400, 'M', u'a'), (0x1D401, 'M', u'b'), (0x1D402, 'M', u'c'), (0x1D403, 'M', u'd'), (0x1D404, 'M', u'e'), (0x1D405, 'M', u'f'), (0x1D406, 'M', u'g'), (0x1D407, 'M', u'h'), (0x1D408, 'M', u'i'), (0x1D409, 'M', u'j'), (0x1D40A, 'M', u'k'), (0x1D40B, 'M', u'l'), (0x1D40C, 'M', u'm'), (0x1D40D, 'M', u'n'), (0x1D40E, 'M', u'o'), (0x1D40F, 'M', u'p'), (0x1D410, 'M', u'q'), (0x1D411, 'M', u'r'), (0x1D412, 'M', u's'), (0x1D413, 'M', u't'), (0x1D414, 'M', u'u'), (0x1D415, 'M', u'v'), (0x1D416, 'M', u'w'), (0x1D417, 'M', u'x'), (0x1D418, 'M', u'y'), (0x1D419, 'M', u'z'), (0x1D41A, 'M', u'a'), (0x1D41B, 'M', u'b'), (0x1D41C, 'M', u'c'), (0x1D41D, 'M', u'd'), (0x1D41E, 'M', u'e'), (0x1D41F, 'M', u'f'), (0x1D420, 'M', u'g'), (0x1D421, 'M', u'h'), (0x1D422, 'M', u'i'), (0x1D423, 'M', u'j'), (0x1D424, 'M', u'k'), (0x1D425, 'M', u'l'), (0x1D426, 'M', u'm'), (0x1D427, 'M', u'n'), (0x1D428, 'M', u'o'), (0x1D429, 'M', u'p'), (0x1D42A, 'M', u'q'), (0x1D42B, 'M', u'r'), (0x1D42C, 'M', u's'), (0x1D42D, 'M', u't'), (0x1D42E, 'M', u'u'), (0x1D42F, 'M', u'v'), (0x1D430, 'M', u'w'), (0x1D431, 'M', u'x'), (0x1D432, 'M', u'y'), (0x1D433, 'M', u'z'), (0x1D434, 'M', u'a'), (0x1D435, 'M', u'b'), (0x1D436, 'M', u'c'), (0x1D437, 'M', u'd'), (0x1D438, 'M', u'e'), (0x1D439, 'M', u'f'), (0x1D43A, 'M', u'g'), (0x1D43B, 'M', u'h'), (0x1D43C, 'M', u'i'), ] def _seg_54(): return [ (0x1D43D, 'M', u'j'), (0x1D43E, 'M', u'k'), (0x1D43F, 'M', u'l'), (0x1D440, 'M', u'm'), (0x1D441, 'M', u'n'), (0x1D442, 'M', u'o'), (0x1D443, 'M', u'p'), (0x1D444, 'M', u'q'), (0x1D445, 'M', u'r'), (0x1D446, 'M', u's'), (0x1D447, 'M', u't'), (0x1D448, 'M', u'u'), (0x1D449, 'M', u'v'), (0x1D44A, 'M', u'w'), (0x1D44B, 'M', u'x'), (0x1D44C, 'M', u'y'), (0x1D44D, 'M', u'z'), (0x1D44E, 'M', u'a'), (0x1D44F, 'M', u'b'), (0x1D450, 'M', u'c'), (0x1D451, 'M', u'd'), (0x1D452, 'M', u'e'), (0x1D453, 'M', u'f'), (0x1D454, 'M', u'g'), (0x1D455, 'X'), (0x1D456, 'M', u'i'), (0x1D457, 'M', u'j'), (0x1D458, 'M', u'k'), (0x1D459, 'M', u'l'), (0x1D45A, 'M', u'm'), (0x1D45B, 'M', u'n'), (0x1D45C, 'M', u'o'), (0x1D45D, 'M', u'p'), (0x1D45E, 'M', u'q'), (0x1D45F, 'M', u'r'), (0x1D460, 'M', u's'), (0x1D461, 'M', u't'), (0x1D462, 'M', u'u'), (0x1D463, 'M', u'v'), (0x1D464, 'M', u'w'), (0x1D465, 'M', u'x'), (0x1D466, 'M', u'y'), (0x1D467, 'M', u'z'), (0x1D468, 'M', u'a'), (0x1D469, 'M', u'b'), (0x1D46A, 'M', u'c'), (0x1D46B, 'M', u'd'), (0x1D46C, 'M', u'e'), (0x1D46D, 'M', u'f'), (0x1D46E, 'M', u'g'), (0x1D46F, 'M', u'h'), (0x1D470, 'M', u'i'), (0x1D471, 'M', u'j'), (0x1D472, 'M', u'k'), (0x1D473, 'M', u'l'), (0x1D474, 'M', u'm'), (0x1D475, 'M', u'n'), (0x1D476, 'M', u'o'), (0x1D477, 'M', u'p'), (0x1D478, 'M', u'q'), (0x1D479, 'M', u'r'), 
(0x1D47A, 'M', u's'), (0x1D47B, 'M', u't'), (0x1D47C, 'M', u'u'), (0x1D47D, 'M', u'v'), (0x1D47E, 'M', u'w'), (0x1D47F, 'M', u'x'), (0x1D480, 'M', u'y'), (0x1D481, 'M', u'z'), (0x1D482, 'M', u'a'), (0x1D483, 'M', u'b'), (0x1D484, 'M', u'c'), (0x1D485, 'M', u'd'), (0x1D486, 'M', u'e'), (0x1D487, 'M', u'f'), (0x1D488, 'M', u'g'), (0x1D489, 'M', u'h'), (0x1D48A, 'M', u'i'), (0x1D48B, 'M', u'j'), (0x1D48C, 'M', u'k'), (0x1D48D, 'M', u'l'), (0x1D48E, 'M', u'm'), (0x1D48F, 'M', u'n'), (0x1D490, 'M', u'o'), (0x1D491, 'M', u'p'), (0x1D492, 'M', u'q'), (0x1D493, 'M', u'r'), (0x1D494, 'M', u's'), (0x1D495, 'M', u't'), (0x1D496, 'M', u'u'), (0x1D497, 'M', u'v'), (0x1D498, 'M', u'w'), (0x1D499, 'M', u'x'), (0x1D49A, 'M', u'y'), (0x1D49B, 'M', u'z'), (0x1D49C, 'M', u'a'), (0x1D49D, 'X'), (0x1D49E, 'M', u'c'), (0x1D49F, 'M', u'd'), (0x1D4A0, 'X'), ] def _seg_55(): return [ (0x1D4A2, 'M', u'g'), (0x1D4A3, 'X'), (0x1D4A5, 'M', u'j'), (0x1D4A6, 'M', u'k'), (0x1D4A7, 'X'), (0x1D4A9, 'M', u'n'), (0x1D4AA, 'M', u'o'), (0x1D4AB, 'M', u'p'), (0x1D4AC, 'M', u'q'), (0x1D4AD, 'X'), (0x1D4AE, 'M', u's'), (0x1D4AF, 'M', u't'), (0x1D4B0, 'M', u'u'), (0x1D4B1, 'M', u'v'), (0x1D4B2, 'M', u'w'), (0x1D4B3, 'M', u'x'), (0x1D4B4, 'M', u'y'), (0x1D4B5, 'M', u'z'), (0x1D4B6, 'M', u'a'), (0x1D4B7, 'M', u'b'), (0x1D4B8, 'M', u'c'), (0x1D4B9, 'M', u'd'), (0x1D4BA, 'X'), (0x1D4BB, 'M', u'f'), (0x1D4BC, 'X'), (0x1D4BD, 'M', u'h'), (0x1D4BE, 'M', u'i'), (0x1D4BF, 'M', u'j'), (0x1D4C0, 'M', u'k'), (0x1D4C1, 'M', u'l'), (0x1D4C2, 'M', u'm'), (0x1D4C3, 'M', u'n'), (0x1D4C4, 'X'), (0x1D4C5, 'M', u'p'), (0x1D4C6, 'M', u'q'), (0x1D4C7, 'M', u'r'), (0x1D4C8, 'M', u's'), (0x1D4C9, 'M', u't'), (0x1D4CA, 'M', u'u'), (0x1D4CB, 'M', u'v'), (0x1D4CC, 'M', u'w'), (0x1D4CD, 'M', u'x'), (0x1D4CE, 'M', u'y'), (0x1D4CF, 'M', u'z'), (0x1D4D0, 'M', u'a'), (0x1D4D1, 'M', u'b'), (0x1D4D2, 'M', u'c'), (0x1D4D3, 'M', u'd'), (0x1D4D4, 'M', u'e'), (0x1D4D5, 'M', u'f'), (0x1D4D6, 'M', u'g'), (0x1D4D7, 'M', u'h'), (0x1D4D8, 'M', u'i'), (0x1D4D9, 'M', u'j'), (0x1D4DA, 'M', u'k'), (0x1D4DB, 'M', u'l'), (0x1D4DC, 'M', u'm'), (0x1D4DD, 'M', u'n'), (0x1D4DE, 'M', u'o'), (0x1D4DF, 'M', u'p'), (0x1D4E0, 'M', u'q'), (0x1D4E1, 'M', u'r'), (0x1D4E2, 'M', u's'), (0x1D4E3, 'M', u't'), (0x1D4E4, 'M', u'u'), (0x1D4E5, 'M', u'v'), (0x1D4E6, 'M', u'w'), (0x1D4E7, 'M', u'x'), (0x1D4E8, 'M', u'y'), (0x1D4E9, 'M', u'z'), (0x1D4EA, 'M', u'a'), (0x1D4EB, 'M', u'b'), (0x1D4EC, 'M', u'c'), (0x1D4ED, 'M', u'd'), (0x1D4EE, 'M', u'e'), (0x1D4EF, 'M', u'f'), (0x1D4F0, 'M', u'g'), (0x1D4F1, 'M', u'h'), (0x1D4F2, 'M', u'i'), (0x1D4F3, 'M', u'j'), (0x1D4F4, 'M', u'k'), (0x1D4F5, 'M', u'l'), (0x1D4F6, 'M', u'm'), (0x1D4F7, 'M', u'n'), (0x1D4F8, 'M', u'o'), (0x1D4F9, 'M', u'p'), (0x1D4FA, 'M', u'q'), (0x1D4FB, 'M', u'r'), (0x1D4FC, 'M', u's'), (0x1D4FD, 'M', u't'), (0x1D4FE, 'M', u'u'), (0x1D4FF, 'M', u'v'), (0x1D500, 'M', u'w'), (0x1D501, 'M', u'x'), (0x1D502, 'M', u'y'), (0x1D503, 'M', u'z'), (0x1D504, 'M', u'a'), (0x1D505, 'M', u'b'), (0x1D506, 'X'), (0x1D507, 'M', u'd'), ] def _seg_56(): return [ (0x1D508, 'M', u'e'), (0x1D509, 'M', u'f'), (0x1D50A, 'M', u'g'), (0x1D50B, 'X'), (0x1D50D, 'M', u'j'), (0x1D50E, 'M', u'k'), (0x1D50F, 'M', u'l'), (0x1D510, 'M', u'm'), (0x1D511, 'M', u'n'), (0x1D512, 'M', u'o'), (0x1D513, 'M', u'p'), (0x1D514, 'M', u'q'), (0x1D515, 'X'), (0x1D516, 'M', u's'), (0x1D517, 'M', u't'), (0x1D518, 'M', u'u'), (0x1D519, 'M', u'v'), (0x1D51A, 'M', u'w'), (0x1D51B, 'M', u'x'), (0x1D51C, 'M', u'y'), (0x1D51D, 'X'), (0x1D51E, 'M', u'a'), (0x1D51F, 'M', u'b'), (0x1D520, 
'M', u'c'), (0x1D521, 'M', u'd'), (0x1D522, 'M', u'e'), (0x1D523, 'M', u'f'), (0x1D524, 'M', u'g'), (0x1D525, 'M', u'h'), (0x1D526, 'M', u'i'), (0x1D527, 'M', u'j'), (0x1D528, 'M', u'k'), (0x1D529, 'M', u'l'), (0x1D52A, 'M', u'm'), (0x1D52B, 'M', u'n'), (0x1D52C, 'M', u'o'), (0x1D52D, 'M', u'p'), (0x1D52E, 'M', u'q'), (0x1D52F, 'M', u'r'), (0x1D530, 'M', u's'), (0x1D531, 'M', u't'), (0x1D532, 'M', u'u'), (0x1D533, 'M', u'v'), (0x1D534, 'M', u'w'), (0x1D535, 'M', u'x'), (0x1D536, 'M', u'y'), (0x1D537, 'M', u'z'), (0x1D538, 'M', u'a'), (0x1D539, 'M', u'b'), (0x1D53A, 'X'), (0x1D53B, 'M', u'd'), (0x1D53C, 'M', u'e'), (0x1D53D, 'M', u'f'), (0x1D53E, 'M', u'g'), (0x1D53F, 'X'), (0x1D540, 'M', u'i'), (0x1D541, 'M', u'j'), (0x1D542, 'M', u'k'), (0x1D543, 'M', u'l'), (0x1D544, 'M', u'm'), (0x1D545, 'X'), (0x1D546, 'M', u'o'), (0x1D547, 'X'), (0x1D54A, 'M', u's'), (0x1D54B, 'M', u't'), (0x1D54C, 'M', u'u'), (0x1D54D, 'M', u'v'), (0x1D54E, 'M', u'w'), (0x1D54F, 'M', u'x'), (0x1D550, 'M', u'y'), (0x1D551, 'X'), (0x1D552, 'M', u'a'), (0x1D553, 'M', u'b'), (0x1D554, 'M', u'c'), (0x1D555, 'M', u'd'), (0x1D556, 'M', u'e'), (0x1D557, 'M', u'f'), (0x1D558, 'M', u'g'), (0x1D559, 'M', u'h'), (0x1D55A, 'M', u'i'), (0x1D55B, 'M', u'j'), (0x1D55C, 'M', u'k'), (0x1D55D, 'M', u'l'), (0x1D55E, 'M', u'm'), (0x1D55F, 'M', u'n'), (0x1D560, 'M', u'o'), (0x1D561, 'M', u'p'), (0x1D562, 'M', u'q'), (0x1D563, 'M', u'r'), (0x1D564, 'M', u's'), (0x1D565, 'M', u't'), (0x1D566, 'M', u'u'), (0x1D567, 'M', u'v'), (0x1D568, 'M', u'w'), (0x1D569, 'M', u'x'), (0x1D56A, 'M', u'y'), (0x1D56B, 'M', u'z'), (0x1D56C, 'M', u'a'), (0x1D56D, 'M', u'b'), (0x1D56E, 'M', u'c'), ] def _seg_57(): return [ (0x1D56F, 'M', u'd'), (0x1D570, 'M', u'e'), (0x1D571, 'M', u'f'), (0x1D572, 'M', u'g'), (0x1D573, 'M', u'h'), (0x1D574, 'M', u'i'), (0x1D575, 'M', u'j'), (0x1D576, 'M', u'k'), (0x1D577, 'M', u'l'), (0x1D578, 'M', u'm'), (0x1D579, 'M', u'n'), (0x1D57A, 'M', u'o'), (0x1D57B, 'M', u'p'), (0x1D57C, 'M', u'q'), (0x1D57D, 'M', u'r'), (0x1D57E, 'M', u's'), (0x1D57F, 'M', u't'), (0x1D580, 'M', u'u'), (0x1D581, 'M', u'v'), (0x1D582, 'M', u'w'), (0x1D583, 'M', u'x'), (0x1D584, 'M', u'y'), (0x1D585, 'M', u'z'), (0x1D586, 'M', u'a'), (0x1D587, 'M', u'b'), (0x1D588, 'M', u'c'), (0x1D589, 'M', u'd'), (0x1D58A, 'M', u'e'), (0x1D58B, 'M', u'f'), (0x1D58C, 'M', u'g'), (0x1D58D, 'M', u'h'), (0x1D58E, 'M', u'i'), (0x1D58F, 'M', u'j'), (0x1D590, 'M', u'k'), (0x1D591, 'M', u'l'), (0x1D592, 'M', u'm'), (0x1D593, 'M', u'n'), (0x1D594, 'M', u'o'), (0x1D595, 'M', u'p'), (0x1D596, 'M', u'q'), (0x1D597, 'M', u'r'), (0x1D598, 'M', u's'), (0x1D599, 'M', u't'), (0x1D59A, 'M', u'u'), (0x1D59B, 'M', u'v'), (0x1D59C, 'M', u'w'), (0x1D59D, 'M', u'x'), (0x1D59E, 'M', u'y'), (0x1D59F, 'M', u'z'), (0x1D5A0, 'M', u'a'), (0x1D5A1, 'M', u'b'), (0x1D5A2, 'M', u'c'), (0x1D5A3, 'M', u'd'), (0x1D5A4, 'M', u'e'), (0x1D5A5, 'M', u'f'), (0x1D5A6, 'M', u'g'), (0x1D5A7, 'M', u'h'), (0x1D5A8, 'M', u'i'), (0x1D5A9, 'M', u'j'), (0x1D5AA, 'M', u'k'), (0x1D5AB, 'M', u'l'), (0x1D5AC, 'M', u'm'), (0x1D5AD, 'M', u'n'), (0x1D5AE, 'M', u'o'), (0x1D5AF, 'M', u'p'), (0x1D5B0, 'M', u'q'), (0x1D5B1, 'M', u'r'), (0x1D5B2, 'M', u's'), (0x1D5B3, 'M', u't'), (0x1D5B4, 'M', u'u'), (0x1D5B5, 'M', u'v'), (0x1D5B6, 'M', u'w'), (0x1D5B7, 'M', u'x'), (0x1D5B8, 'M', u'y'), (0x1D5B9, 'M', u'z'), (0x1D5BA, 'M', u'a'), (0x1D5BB, 'M', u'b'), (0x1D5BC, 'M', u'c'), (0x1D5BD, 'M', u'd'), (0x1D5BE, 'M', u'e'), (0x1D5BF, 'M', u'f'), (0x1D5C0, 'M', u'g'), (0x1D5C1, 'M', u'h'), (0x1D5C2, 'M', u'i'), (0x1D5C3, 'M', u'j'), 
(0x1D5C4, 'M', u'k'), (0x1D5C5, 'M', u'l'), (0x1D5C6, 'M', u'm'), (0x1D5C7, 'M', u'n'), (0x1D5C8, 'M', u'o'), (0x1D5C9, 'M', u'p'), (0x1D5CA, 'M', u'q'), (0x1D5CB, 'M', u'r'), (0x1D5CC, 'M', u's'), (0x1D5CD, 'M', u't'), (0x1D5CE, 'M', u'u'), (0x1D5CF, 'M', u'v'), (0x1D5D0, 'M', u'w'), (0x1D5D1, 'M', u'x'), (0x1D5D2, 'M', u'y'), ] def _seg_58(): return [ (0x1D5D3, 'M', u'z'), (0x1D5D4, 'M', u'a'), (0x1D5D5, 'M', u'b'), (0x1D5D6, 'M', u'c'), (0x1D5D7, 'M', u'd'), (0x1D5D8, 'M', u'e'), (0x1D5D9, 'M', u'f'), (0x1D5DA, 'M', u'g'), (0x1D5DB, 'M', u'h'), (0x1D5DC, 'M', u'i'), (0x1D5DD, 'M', u'j'), (0x1D5DE, 'M', u'k'), (0x1D5DF, 'M', u'l'), (0x1D5E0, 'M', u'm'), (0x1D5E1, 'M', u'n'), (0x1D5E2, 'M', u'o'), (0x1D5E3, 'M', u'p'), (0x1D5E4, 'M', u'q'), (0x1D5E5, 'M', u'r'), (0x1D5E6, 'M', u's'), (0x1D5E7, 'M', u't'), (0x1D5E8, 'M', u'u'), (0x1D5E9, 'M', u'v'), (0x1D5EA, 'M', u'w'), (0x1D5EB, 'M', u'x'), (0x1D5EC, 'M', u'y'), (0x1D5ED, 'M', u'z'), (0x1D5EE, 'M', u'a'), (0x1D5EF, 'M', u'b'), (0x1D5F0, 'M', u'c'), (0x1D5F1, 'M', u'd'), (0x1D5F2, 'M', u'e'), (0x1D5F3, 'M', u'f'), (0x1D5F4, 'M', u'g'), (0x1D5F5, 'M', u'h'), (0x1D5F6, 'M', u'i'), (0x1D5F7, 'M', u'j'), (0x1D5F8, 'M', u'k'), (0x1D5F9, 'M', u'l'), (0x1D5FA, 'M', u'm'), (0x1D5FB, 'M', u'n'), (0x1D5FC, 'M', u'o'), (0x1D5FD, 'M', u'p'), (0x1D5FE, 'M', u'q'), (0x1D5FF, 'M', u'r'), (0x1D600, 'M', u's'), (0x1D601, 'M', u't'), (0x1D602, 'M', u'u'), (0x1D603, 'M', u'v'), (0x1D604, 'M', u'w'), (0x1D605, 'M', u'x'), (0x1D606, 'M', u'y'), (0x1D607, 'M', u'z'), (0x1D608, 'M', u'a'), (0x1D609, 'M', u'b'), (0x1D60A, 'M', u'c'), (0x1D60B, 'M', u'd'), (0x1D60C, 'M', u'e'), (0x1D60D, 'M', u'f'), (0x1D60E, 'M', u'g'), (0x1D60F, 'M', u'h'), (0x1D610, 'M', u'i'), (0x1D611, 'M', u'j'), (0x1D612, 'M', u'k'), (0x1D613, 'M', u'l'), (0x1D614, 'M', u'm'), (0x1D615, 'M', u'n'), (0x1D616, 'M', u'o'), (0x1D617, 'M', u'p'), (0x1D618, 'M', u'q'), (0x1D619, 'M', u'r'), (0x1D61A, 'M', u's'), (0x1D61B, 'M', u't'), (0x1D61C, 'M', u'u'), (0x1D61D, 'M', u'v'), (0x1D61E, 'M', u'w'), (0x1D61F, 'M', u'x'), (0x1D620, 'M', u'y'), (0x1D621, 'M', u'z'), (0x1D622, 'M', u'a'), (0x1D623, 'M', u'b'), (0x1D624, 'M', u'c'), (0x1D625, 'M', u'd'), (0x1D626, 'M', u'e'), (0x1D627, 'M', u'f'), (0x1D628, 'M', u'g'), (0x1D629, 'M', u'h'), (0x1D62A, 'M', u'i'), (0x1D62B, 'M', u'j'), (0x1D62C, 'M', u'k'), (0x1D62D, 'M', u'l'), (0x1D62E, 'M', u'm'), (0x1D62F, 'M', u'n'), (0x1D630, 'M', u'o'), (0x1D631, 'M', u'p'), (0x1D632, 'M', u'q'), (0x1D633, 'M', u'r'), (0x1D634, 'M', u's'), (0x1D635, 'M', u't'), (0x1D636, 'M', u'u'), ] def _seg_59(): return [ (0x1D637, 'M', u'v'), (0x1D638, 'M', u'w'), (0x1D639, 'M', u'x'), (0x1D63A, 'M', u'y'), (0x1D63B, 'M', u'z'), (0x1D63C, 'M', u'a'), (0x1D63D, 'M', u'b'), (0x1D63E, 'M', u'c'), (0x1D63F, 'M', u'd'), (0x1D640, 'M', u'e'), (0x1D641, 'M', u'f'), (0x1D642, 'M', u'g'), (0x1D643, 'M', u'h'), (0x1D644, 'M', u'i'), (0x1D645, 'M', u'j'), (0x1D646, 'M', u'k'), (0x1D647, 'M', u'l'), (0x1D648, 'M', u'm'), (0x1D649, 'M', u'n'), (0x1D64A, 'M', u'o'), (0x1D64B, 'M', u'p'), (0x1D64C, 'M', u'q'), (0x1D64D, 'M', u'r'), (0x1D64E, 'M', u's'), (0x1D64F, 'M', u't'), (0x1D650, 'M', u'u'), (0x1D651, 'M', u'v'), (0x1D652, 'M', u'w'), (0x1D653, 'M', u'x'), (0x1D654, 'M', u'y'), (0x1D655, 'M', u'z'), (0x1D656, 'M', u'a'), (0x1D657, 'M', u'b'), (0x1D658, 'M', u'c'), (0x1D659, 'M', u'd'), (0x1D65A, 'M', u'e'), (0x1D65B, 'M', u'f'), (0x1D65C, 'M', u'g'), (0x1D65D, 'M', u'h'), (0x1D65E, 'M', u'i'), (0x1D65F, 'M', u'j'), (0x1D660, 'M', u'k'), (0x1D661, 'M', u'l'), (0x1D662, 'M', u'm'), 
(0x1D663, 'M', u'n'), (0x1D664, 'M', u'o'), (0x1D665, 'M', u'p'), (0x1D666, 'M', u'q'), (0x1D667, 'M', u'r'), (0x1D668, 'M', u's'), (0x1D669, 'M', u't'), (0x1D66A, 'M', u'u'), (0x1D66B, 'M', u'v'), (0x1D66C, 'M', u'w'), (0x1D66D, 'M', u'x'), (0x1D66E, 'M', u'y'), (0x1D66F, 'M', u'z'), (0x1D670, 'M', u'a'), (0x1D671, 'M', u'b'), (0x1D672, 'M', u'c'), (0x1D673, 'M', u'd'), (0x1D674, 'M', u'e'), (0x1D675, 'M', u'f'), (0x1D676, 'M', u'g'), (0x1D677, 'M', u'h'), (0x1D678, 'M', u'i'), (0x1D679, 'M', u'j'), (0x1D67A, 'M', u'k'), (0x1D67B, 'M', u'l'), (0x1D67C, 'M', u'm'), (0x1D67D, 'M', u'n'), (0x1D67E, 'M', u'o'), (0x1D67F, 'M', u'p'), (0x1D680, 'M', u'q'), (0x1D681, 'M', u'r'), (0x1D682, 'M', u's'), (0x1D683, 'M', u't'), (0x1D684, 'M', u'u'), (0x1D685, 'M', u'v'), (0x1D686, 'M', u'w'), (0x1D687, 'M', u'x'), (0x1D688, 'M', u'y'), (0x1D689, 'M', u'z'), (0x1D68A, 'M', u'a'), (0x1D68B, 'M', u'b'), (0x1D68C, 'M', u'c'), (0x1D68D, 'M', u'd'), (0x1D68E, 'M', u'e'), (0x1D68F, 'M', u'f'), (0x1D690, 'M', u'g'), (0x1D691, 'M', u'h'), (0x1D692, 'M', u'i'), (0x1D693, 'M', u'j'), (0x1D694, 'M', u'k'), (0x1D695, 'M', u'l'), (0x1D696, 'M', u'm'), (0x1D697, 'M', u'n'), (0x1D698, 'M', u'o'), (0x1D699, 'M', u'p'), (0x1D69A, 'M', u'q'), ] def _seg_60(): return [ (0x1D69B, 'M', u'r'), (0x1D69C, 'M', u's'), (0x1D69D, 'M', u't'), (0x1D69E, 'M', u'u'), (0x1D69F, 'M', u'v'), (0x1D6A0, 'M', u'w'), (0x1D6A1, 'M', u'x'), (0x1D6A2, 'M', u'y'), (0x1D6A3, 'M', u'z'), (0x1D6A4, 'M', u'ı'), (0x1D6A5, 'M', u'ȷ'), (0x1D6A6, 'X'), (0x1D6A8, 'M', u'α'), (0x1D6A9, 'M', u'β'), (0x1D6AA, 'M', u'γ'), (0x1D6AB, 'M', u'δ'), (0x1D6AC, 'M', u'ε'), (0x1D6AD, 'M', u'ζ'), (0x1D6AE, 'M', u'η'), (0x1D6AF, 'M', u'θ'), (0x1D6B0, 'M', u'ι'), (0x1D6B1, 'M', u'κ'), (0x1D6B2, 'M', u'λ'), (0x1D6B3, 'M', u'μ'), (0x1D6B4, 'M', u'ν'), (0x1D6B5, 'M', u'ξ'), (0x1D6B6, 'M', u'ο'), (0x1D6B7, 'M', u'π'), (0x1D6B8, 'M', u'ρ'), (0x1D6B9, 'M', u'θ'), (0x1D6BA, 'M', u'σ'), (0x1D6BB, 'M', u'τ'), (0x1D6BC, 'M', u'υ'), (0x1D6BD, 'M', u'φ'), (0x1D6BE, 'M', u'χ'), (0x1D6BF, 'M', u'ψ'), (0x1D6C0, 'M', u'ω'), (0x1D6C1, 'M', u'∇'), (0x1D6C2, 'M', u'α'), (0x1D6C3, 'M', u'β'), (0x1D6C4, 'M', u'γ'), (0x1D6C5, 'M', u'δ'), (0x1D6C6, 'M', u'ε'), (0x1D6C7, 'M', u'ζ'), (0x1D6C8, 'M', u'η'), (0x1D6C9, 'M', u'θ'), (0x1D6CA, 'M', u'ι'), (0x1D6CB, 'M', u'κ'), (0x1D6CC, 'M', u'λ'), (0x1D6CD, 'M', u'μ'), (0x1D6CE, 'M', u'ν'), (0x1D6CF, 'M', u'ξ'), (0x1D6D0, 'M', u'ο'), (0x1D6D1, 'M', u'π'), (0x1D6D2, 'M', u'ρ'), (0x1D6D3, 'M', u'σ'), (0x1D6D5, 'M', u'τ'), (0x1D6D6, 'M', u'υ'), (0x1D6D7, 'M', u'φ'), (0x1D6D8, 'M', u'χ'), (0x1D6D9, 'M', u'ψ'), (0x1D6DA, 'M', u'ω'), (0x1D6DB, 'M', u'∂'), (0x1D6DC, 'M', u'ε'), (0x1D6DD, 'M', u'θ'), (0x1D6DE, 'M', u'κ'), (0x1D6DF, 'M', u'φ'), (0x1D6E0, 'M', u'ρ'), (0x1D6E1, 'M', u'π'), (0x1D6E2, 'M', u'α'), (0x1D6E3, 'M', u'β'), (0x1D6E4, 'M', u'γ'), (0x1D6E5, 'M', u'δ'), (0x1D6E6, 'M', u'ε'), (0x1D6E7, 'M', u'ζ'), (0x1D6E8, 'M', u'η'), (0x1D6E9, 'M', u'θ'), (0x1D6EA, 'M', u'ι'), (0x1D6EB, 'M', u'κ'), (0x1D6EC, 'M', u'λ'), (0x1D6ED, 'M', u'μ'), (0x1D6EE, 'M', u'ν'), (0x1D6EF, 'M', u'ξ'), (0x1D6F0, 'M', u'ο'), (0x1D6F1, 'M', u'π'), (0x1D6F2, 'M', u'ρ'), (0x1D6F3, 'M', u'θ'), (0x1D6F4, 'M', u'σ'), (0x1D6F5, 'M', u'τ'), (0x1D6F6, 'M', u'υ'), (0x1D6F7, 'M', u'φ'), (0x1D6F8, 'M', u'χ'), (0x1D6F9, 'M', u'ψ'), (0x1D6FA, 'M', u'ω'), (0x1D6FB, 'M', u'∇'), (0x1D6FC, 'M', u'α'), (0x1D6FD, 'M', u'β'), (0x1D6FE, 'M', u'γ'), (0x1D6FF, 'M', u'δ'), (0x1D700, 'M', u'ε'), ] def _seg_61(): return [ (0x1D701, 'M', u'ζ'), (0x1D702, 'M', u'η'), (0x1D703, 'M', u'θ'), (0x1D704, 
'M', u'ι'), (0x1D705, 'M', u'κ'), (0x1D706, 'M', u'λ'), (0x1D707, 'M', u'μ'), (0x1D708, 'M', u'ν'), (0x1D709, 'M', u'ξ'), (0x1D70A, 'M', u'ο'), (0x1D70B, 'M', u'π'), (0x1D70C, 'M', u'ρ'), (0x1D70D, 'M', u'σ'), (0x1D70F, 'M', u'τ'), (0x1D710, 'M', u'υ'), (0x1D711, 'M', u'φ'), (0x1D712, 'M', u'χ'), (0x1D713, 'M', u'ψ'), (0x1D714, 'M', u'ω'), (0x1D715, 'M', u'∂'), (0x1D716, 'M', u'ε'), (0x1D717, 'M', u'θ'), (0x1D718, 'M', u'κ'), (0x1D719, 'M', u'φ'), (0x1D71A, 'M', u'ρ'), (0x1D71B, 'M', u'π'), (0x1D71C, 'M', u'α'), (0x1D71D, 'M', u'β'), (0x1D71E, 'M', u'γ'), (0x1D71F, 'M', u'δ'), (0x1D720, 'M', u'ε'), (0x1D721, 'M', u'ζ'), (0x1D722, 'M', u'η'), (0x1D723, 'M', u'θ'), (0x1D724, 'M', u'ι'), (0x1D725, 'M', u'κ'), (0x1D726, 'M', u'λ'), (0x1D727, 'M', u'μ'), (0x1D728, 'M', u'ν'), (0x1D729, 'M', u'ξ'), (0x1D72A, 'M', u'ο'), (0x1D72B, 'M', u'π'), (0x1D72C, 'M', u'ρ'), (0x1D72D, 'M', u'θ'), (0x1D72E, 'M', u'σ'), (0x1D72F, 'M', u'τ'), (0x1D730, 'M', u'υ'), (0x1D731, 'M', u'φ'), (0x1D732, 'M', u'χ'), (0x1D733, 'M', u'ψ'), (0x1D734, 'M', u'ω'), (0x1D735, 'M', u'∇'), (0x1D736, 'M', u'α'), (0x1D737, 'M', u'β'), (0x1D738, 'M', u'γ'), (0x1D739, 'M', u'δ'), (0x1D73A, 'M', u'ε'), (0x1D73B, 'M', u'ζ'), (0x1D73C, 'M', u'η'), (0x1D73D, 'M', u'θ'), (0x1D73E, 'M', u'ι'), (0x1D73F, 'M', u'κ'), (0x1D740, 'M', u'λ'), (0x1D741, 'M', u'μ'), (0x1D742, 'M', u'ν'), (0x1D743, 'M', u'ξ'), (0x1D744, 'M', u'ο'), (0x1D745, 'M', u'π'), (0x1D746, 'M', u'ρ'), (0x1D747, 'M', u'σ'), (0x1D749, 'M', u'τ'), (0x1D74A, 'M', u'υ'), (0x1D74B, 'M', u'φ'), (0x1D74C, 'M', u'χ'), (0x1D74D, 'M', u'ψ'), (0x1D74E, 'M', u'ω'), (0x1D74F, 'M', u'∂'), (0x1D750, 'M', u'ε'), (0x1D751, 'M', u'θ'), (0x1D752, 'M', u'κ'), (0x1D753, 'M', u'φ'), (0x1D754, 'M', u'ρ'), (0x1D755, 'M', u'π'), (0x1D756, 'M', u'α'), (0x1D757, 'M', u'β'), (0x1D758, 'M', u'γ'), (0x1D759, 'M', u'δ'), (0x1D75A, 'M', u'ε'), (0x1D75B, 'M', u'ζ'), (0x1D75C, 'M', u'η'), (0x1D75D, 'M', u'θ'), (0x1D75E, 'M', u'ι'), (0x1D75F, 'M', u'κ'), (0x1D760, 'M', u'λ'), (0x1D761, 'M', u'μ'), (0x1D762, 'M', u'ν'), (0x1D763, 'M', u'ξ'), (0x1D764, 'M', u'ο'), (0x1D765, 'M', u'π'), (0x1D766, 'M', u'ρ'), ] def _seg_62(): return [ (0x1D767, 'M', u'θ'), (0x1D768, 'M', u'σ'), (0x1D769, 'M', u'τ'), (0x1D76A, 'M', u'υ'), (0x1D76B, 'M', u'φ'), (0x1D76C, 'M', u'χ'), (0x1D76D, 'M', u'ψ'), (0x1D76E, 'M', u'ω'), (0x1D76F, 'M', u'∇'), (0x1D770, 'M', u'α'), (0x1D771, 'M', u'β'), (0x1D772, 'M', u'γ'), (0x1D773, 'M', u'δ'), (0x1D774, 'M', u'ε'), (0x1D775, 'M', u'ζ'), (0x1D776, 'M', u'η'), (0x1D777, 'M', u'θ'), (0x1D778, 'M', u'ι'), (0x1D779, 'M', u'κ'), (0x1D77A, 'M', u'λ'), (0x1D77B, 'M', u'μ'), (0x1D77C, 'M', u'ν'), (0x1D77D, 'M', u'ξ'), (0x1D77E, 'M', u'ο'), (0x1D77F, 'M', u'π'), (0x1D780, 'M', u'ρ'), (0x1D781, 'M', u'σ'), (0x1D783, 'M', u'τ'), (0x1D784, 'M', u'υ'), (0x1D785, 'M', u'φ'), (0x1D786, 'M', u'χ'), (0x1D787, 'M', u'ψ'), (0x1D788, 'M', u'ω'), (0x1D789, 'M', u'∂'), (0x1D78A, 'M', u'ε'), (0x1D78B, 'M', u'θ'), (0x1D78C, 'M', u'κ'), (0x1D78D, 'M', u'φ'), (0x1D78E, 'M', u'ρ'), (0x1D78F, 'M', u'π'), (0x1D790, 'M', u'α'), (0x1D791, 'M', u'β'), (0x1D792, 'M', u'γ'), (0x1D793, 'M', u'δ'), (0x1D794, 'M', u'ε'), (0x1D795, 'M', u'ζ'), (0x1D796, 'M', u'η'), (0x1D797, 'M', u'θ'), (0x1D798, 'M', u'ι'), (0x1D799, 'M', u'κ'), (0x1D79A, 'M', u'λ'), (0x1D79B, 'M', u'μ'), (0x1D79C, 'M', u'ν'), (0x1D79D, 'M', u'ξ'), (0x1D79E, 'M', u'ο'), (0x1D79F, 'M', u'π'), (0x1D7A0, 'M', u'ρ'), (0x1D7A1, 'M', u'θ'), (0x1D7A2, 'M', u'σ'), (0x1D7A3, 'M', u'τ'), (0x1D7A4, 'M', u'υ'), (0x1D7A5, 'M', u'φ'), (0x1D7A6, 'M', u'χ'), (0x1D7A7, 'M', 
u'ψ'), (0x1D7A8, 'M', u'ω'), (0x1D7A9, 'M', u'∇'), (0x1D7AA, 'M', u'α'), (0x1D7AB, 'M', u'β'), (0x1D7AC, 'M', u'γ'), (0x1D7AD, 'M', u'δ'), (0x1D7AE, 'M', u'ε'), (0x1D7AF, 'M', u'ζ'), (0x1D7B0, 'M', u'η'), (0x1D7B1, 'M', u'θ'), (0x1D7B2, 'M', u'ι'), (0x1D7B3, 'M', u'κ'), (0x1D7B4, 'M', u'λ'), (0x1D7B5, 'M', u'μ'), (0x1D7B6, 'M', u'ν'), (0x1D7B7, 'M', u'ξ'), (0x1D7B8, 'M', u'ο'), (0x1D7B9, 'M', u'π'), (0x1D7BA, 'M', u'ρ'), (0x1D7BB, 'M', u'σ'), (0x1D7BD, 'M', u'τ'), (0x1D7BE, 'M', u'υ'), (0x1D7BF, 'M', u'φ'), (0x1D7C0, 'M', u'χ'), (0x1D7C1, 'M', u'ψ'), (0x1D7C2, 'M', u'ω'), (0x1D7C3, 'M', u'∂'), (0x1D7C4, 'M', u'ε'), (0x1D7C5, 'M', u'θ'), (0x1D7C6, 'M', u'κ'), (0x1D7C7, 'M', u'φ'), (0x1D7C8, 'M', u'ρ'), (0x1D7C9, 'M', u'π'), (0x1D7CA, 'M', u'ϝ'), (0x1D7CC, 'X'), (0x1D7CE, 'M', u'0'), ] def _seg_63(): return [ (0x1D7CF, 'M', u'1'), (0x1D7D0, 'M', u'2'), (0x1D7D1, 'M', u'3'), (0x1D7D2, 'M', u'4'), (0x1D7D3, 'M', u'5'), (0x1D7D4, 'M', u'6'), (0x1D7D5, 'M', u'7'), (0x1D7D6, 'M', u'8'), (0x1D7D7, 'M', u'9'), (0x1D7D8, 'M', u'0'), (0x1D7D9, 'M', u'1'), (0x1D7DA, 'M', u'2'), (0x1D7DB, 'M', u'3'), (0x1D7DC, 'M', u'4'), (0x1D7DD, 'M', u'5'), (0x1D7DE, 'M', u'6'), (0x1D7DF, 'M', u'7'), (0x1D7E0, 'M', u'8'), (0x1D7E1, 'M', u'9'), (0x1D7E2, 'M', u'0'), (0x1D7E3, 'M', u'1'), (0x1D7E4, 'M', u'2'), (0x1D7E5, 'M', u'3'), (0x1D7E6, 'M', u'4'), (0x1D7E7, 'M', u'5'), (0x1D7E8, 'M', u'6'), (0x1D7E9, 'M', u'7'), (0x1D7EA, 'M', u'8'), (0x1D7EB, 'M', u'9'), (0x1D7EC, 'M', u'0'), (0x1D7ED, 'M', u'1'), (0x1D7EE, 'M', u'2'), (0x1D7EF, 'M', u'3'), (0x1D7F0, 'M', u'4'), (0x1D7F1, 'M', u'5'), (0x1D7F2, 'M', u'6'), (0x1D7F3, 'M', u'7'), (0x1D7F4, 'M', u'8'), (0x1D7F5, 'M', u'9'), (0x1D7F6, 'M', u'0'), (0x1D7F7, 'M', u'1'), (0x1D7F8, 'M', u'2'), (0x1D7F9, 'M', u'3'), (0x1D7FA, 'M', u'4'), (0x1D7FB, 'M', u'5'), (0x1D7FC, 'M', u'6'), (0x1D7FD, 'M', u'7'), (0x1D7FE, 'M', u'8'), (0x1D7FF, 'M', u'9'), (0x1D800, 'X'), (0x1EE00, 'M', u'ا'), (0x1EE01, 'M', u'ب'), (0x1EE02, 'M', u'ج'), (0x1EE03, 'M', u'د'), (0x1EE04, 'X'), (0x1EE05, 'M', u'و'), (0x1EE06, 'M', u'ز'), (0x1EE07, 'M', u'ح'), (0x1EE08, 'M', u'ط'), (0x1EE09, 'M', u'ي'), (0x1EE0A, 'M', u'ك'), (0x1EE0B, 'M', u'ل'), (0x1EE0C, 'M', u'م'), (0x1EE0D, 'M', u'ن'), (0x1EE0E, 'M', u'س'), (0x1EE0F, 'M', u'ع'), (0x1EE10, 'M', u'ف'), (0x1EE11, 'M', u'ص'), (0x1EE12, 'M', u'ق'), (0x1EE13, 'M', u'ر'), (0x1EE14, 'M', u'ش'), (0x1EE15, 'M', u'ت'), (0x1EE16, 'M', u'ث'), (0x1EE17, 'M', u'خ'), (0x1EE18, 'M', u'ذ'), (0x1EE19, 'M', u'ض'), (0x1EE1A, 'M', u'ظ'), (0x1EE1B, 'M', u'غ'), (0x1EE1C, 'M', u'ٮ'), (0x1EE1D, 'M', u'ں'), (0x1EE1E, 'M', u'ڡ'), (0x1EE1F, 'M', u'ٯ'), (0x1EE20, 'X'), (0x1EE21, 'M', u'ب'), (0x1EE22, 'M', u'ج'), (0x1EE23, 'X'), (0x1EE24, 'M', u'ه'), (0x1EE25, 'X'), (0x1EE27, 'M', u'ح'), (0x1EE28, 'X'), (0x1EE29, 'M', u'ي'), (0x1EE2A, 'M', u'ك'), (0x1EE2B, 'M', u'ل'), (0x1EE2C, 'M', u'م'), (0x1EE2D, 'M', u'ن'), (0x1EE2E, 'M', u'س'), (0x1EE2F, 'M', u'ع'), (0x1EE30, 'M', u'ف'), (0x1EE31, 'M', u'ص'), (0x1EE32, 'M', u'ق'), ] def _seg_64(): return [ (0x1EE33, 'X'), (0x1EE34, 'M', u'ش'), (0x1EE35, 'M', u'ت'), (0x1EE36, 'M', u'ث'), (0x1EE37, 'M', u'خ'), (0x1EE38, 'X'), (0x1EE39, 'M', u'ض'), (0x1EE3A, 'X'), (0x1EE3B, 'M', u'غ'), (0x1EE3C, 'X'), (0x1EE42, 'M', u'ج'), (0x1EE43, 'X'), (0x1EE47, 'M', u'ح'), (0x1EE48, 'X'), (0x1EE49, 'M', u'ي'), (0x1EE4A, 'X'), (0x1EE4B, 'M', u'ل'), (0x1EE4C, 'X'), (0x1EE4D, 'M', u'ن'), (0x1EE4E, 'M', u'س'), (0x1EE4F, 'M', u'ع'), (0x1EE50, 'X'), (0x1EE51, 'M', u'ص'), (0x1EE52, 'M', u'ق'), (0x1EE53, 'X'), (0x1EE54, 'M', u'ش'), (0x1EE55, 'X'), (0x1EE57, 'M', 
u'خ'), (0x1EE58, 'X'), (0x1EE59, 'M', u'ض'), (0x1EE5A, 'X'), (0x1EE5B, 'M', u'غ'), (0x1EE5C, 'X'), (0x1EE5D, 'M', u'ں'), (0x1EE5E, 'X'), (0x1EE5F, 'M', u'ٯ'), (0x1EE60, 'X'), (0x1EE61, 'M', u'ب'), (0x1EE62, 'M', u'ج'), (0x1EE63, 'X'), (0x1EE64, 'M', u'ه'), (0x1EE65, 'X'), (0x1EE67, 'M', u'ح'), (0x1EE68, 'M', u'ط'), (0x1EE69, 'M', u'ي'), (0x1EE6A, 'M', u'ك'), (0x1EE6B, 'X'), (0x1EE6C, 'M', u'م'), (0x1EE6D, 'M', u'ن'), (0x1EE6E, 'M', u'س'), (0x1EE6F, 'M', u'ع'), (0x1EE70, 'M', u'ف'), (0x1EE71, 'M', u'ص'), (0x1EE72, 'M', u'ق'), (0x1EE73, 'X'), (0x1EE74, 'M', u'ش'), (0x1EE75, 'M', u'ت'), (0x1EE76, 'M', u'ث'), (0x1EE77, 'M', u'خ'), (0x1EE78, 'X'), (0x1EE79, 'M', u'ض'), (0x1EE7A, 'M', u'ظ'), (0x1EE7B, 'M', u'غ'), (0x1EE7C, 'M', u'ٮ'), (0x1EE7D, 'X'), (0x1EE7E, 'M', u'ڡ'), (0x1EE7F, 'X'), (0x1EE80, 'M', u'ا'), (0x1EE81, 'M', u'ب'), (0x1EE82, 'M', u'ج'), (0x1EE83, 'M', u'د'), (0x1EE84, 'M', u'ه'), (0x1EE85, 'M', u'و'), (0x1EE86, 'M', u'ز'), (0x1EE87, 'M', u'ح'), (0x1EE88, 'M', u'ط'), (0x1EE89, 'M', u'ي'), (0x1EE8A, 'X'), (0x1EE8B, 'M', u'ل'), (0x1EE8C, 'M', u'م'), (0x1EE8D, 'M', u'ن'), (0x1EE8E, 'M', u'س'), (0x1EE8F, 'M', u'ع'), (0x1EE90, 'M', u'ف'), (0x1EE91, 'M', u'ص'), (0x1EE92, 'M', u'ق'), (0x1EE93, 'M', u'ر'), (0x1EE94, 'M', u'ش'), (0x1EE95, 'M', u'ت'), (0x1EE96, 'M', u'ث'), (0x1EE97, 'M', u'خ'), (0x1EE98, 'M', u'ذ'), (0x1EE99, 'M', u'ض'), (0x1EE9A, 'M', u'ظ'), (0x1EE9B, 'M', u'غ'), (0x1EE9C, 'X'), (0x1EEA1, 'M', u'ب'), (0x1EEA2, 'M', u'ج'), (0x1EEA3, 'M', u'د'), (0x1EEA4, 'X'), ] def _seg_65(): return [ (0x1EEA5, 'M', u'و'), (0x1EEA6, 'M', u'ز'), (0x1EEA7, 'M', u'ح'), (0x1EEA8, 'M', u'ط'), (0x1EEA9, 'M', u'ي'), (0x1EEAA, 'X'), (0x1EEAB, 'M', u'ل'), (0x1EEAC, 'M', u'م'), (0x1EEAD, 'M', u'ن'), (0x1EEAE, 'M', u'س'), (0x1EEAF, 'M', u'ع'), (0x1EEB0, 'M', u'ف'), (0x1EEB1, 'M', u'ص'), (0x1EEB2, 'M', u'ق'), (0x1EEB3, 'M', u'ر'), (0x1EEB4, 'M', u'ش'), (0x1EEB5, 'M', u'ت'), (0x1EEB6, 'M', u'ث'), (0x1EEB7, 'M', u'خ'), (0x1EEB8, 'M', u'ذ'), (0x1EEB9, 'M', u'ض'), (0x1EEBA, 'M', u'ظ'), (0x1EEBB, 'M', u'غ'), (0x1EEBC, 'X'), (0x1EEF0, 'V'), (0x1EEF2, 'X'), (0x1F000, 'V'), (0x1F02C, 'X'), (0x1F030, 'V'), (0x1F094, 'X'), (0x1F0A0, 'V'), (0x1F0AF, 'X'), (0x1F0B1, 'V'), (0x1F0BF, 'X'), (0x1F0C1, 'V'), (0x1F0D0, 'X'), (0x1F0D1, 'V'), (0x1F0E0, 'X'), (0x1F101, '3', u'0,'), (0x1F102, '3', u'1,'), (0x1F103, '3', u'2,'), (0x1F104, '3', u'3,'), (0x1F105, '3', u'4,'), (0x1F106, '3', u'5,'), (0x1F107, '3', u'6,'), (0x1F108, '3', u'7,'), (0x1F109, '3', u'8,'), (0x1F10A, '3', u'9,'), (0x1F10B, 'X'), (0x1F110, '3', u'(a)'), (0x1F111, '3', u'(b)'), (0x1F112, '3', u'(c)'), (0x1F113, '3', u'(d)'), (0x1F114, '3', u'(e)'), (0x1F115, '3', u'(f)'), (0x1F116, '3', u'(g)'), (0x1F117, '3', u'(h)'), (0x1F118, '3', u'(i)'), (0x1F119, '3', u'(j)'), (0x1F11A, '3', u'(k)'), (0x1F11B, '3', u'(l)'), (0x1F11C, '3', u'(m)'), (0x1F11D, '3', u'(n)'), (0x1F11E, '3', u'(o)'), (0x1F11F, '3', u'(p)'), (0x1F120, '3', u'(q)'), (0x1F121, '3', u'(r)'), (0x1F122, '3', u'(s)'), (0x1F123, '3', u'(t)'), (0x1F124, '3', u'(u)'), (0x1F125, '3', u'(v)'), (0x1F126, '3', u'(w)'), (0x1F127, '3', u'(x)'), (0x1F128, '3', u'(y)'), (0x1F129, '3', u'(z)'), (0x1F12A, 'M', u'〔s〕'), (0x1F12B, 'M', u'c'), (0x1F12C, 'M', u'r'), (0x1F12D, 'M', u'cd'), (0x1F12E, 'M', u'wz'), (0x1F12F, 'X'), (0x1F130, 'M', u'a'), (0x1F131, 'M', u'b'), (0x1F132, 'M', u'c'), (0x1F133, 'M', u'd'), (0x1F134, 'M', u'e'), (0x1F135, 'M', u'f'), (0x1F136, 'M', u'g'), (0x1F137, 'M', u'h'), (0x1F138, 'M', u'i'), (0x1F139, 'M', u'j'), (0x1F13A, 'M', u'k'), (0x1F13B, 'M', u'l'), (0x1F13C, 'M', u'm'), 
(0x1F13D, 'M', u'n'), (0x1F13E, 'M', u'o'), (0x1F13F, 'M', u'p'), (0x1F140, 'M', u'q'), (0x1F141, 'M', u'r'), (0x1F142, 'M', u's'), ] def _seg_66(): return [ (0x1F143, 'M', u't'), (0x1F144, 'M', u'u'), (0x1F145, 'M', u'v'), (0x1F146, 'M', u'w'), (0x1F147, 'M', u'x'), (0x1F148, 'M', u'y'), (0x1F149, 'M', u'z'), (0x1F14A, 'M', u'hv'), (0x1F14B, 'M', u'mv'), (0x1F14C, 'M', u'sd'), (0x1F14D, 'M', u'ss'), (0x1F14E, 'M', u'ppv'), (0x1F14F, 'M', u'wc'), (0x1F150, 'V'), (0x1F16A, 'M', u'mc'), (0x1F16B, 'M', u'md'), (0x1F16C, 'X'), (0x1F170, 'V'), (0x1F190, 'M', u'dj'), (0x1F191, 'V'), (0x1F19B, 'X'), (0x1F1E6, 'V'), (0x1F200, 'M', u'ほか'), (0x1F201, 'M', u'ココ'), (0x1F202, 'M', u'サ'), (0x1F203, 'X'), (0x1F210, 'M', u'手'), (0x1F211, 'M', u'字'), (0x1F212, 'M', u'双'), (0x1F213, 'M', u'デ'), (0x1F214, 'M', u'二'), (0x1F215, 'M', u'多'), (0x1F216, 'M', u'解'), (0x1F217, 'M', u'天'), (0x1F218, 'M', u'交'), (0x1F219, 'M', u'映'), (0x1F21A, 'M', u'無'), (0x1F21B, 'M', u'料'), (0x1F21C, 'M', u'前'), (0x1F21D, 'M', u'後'), (0x1F21E, 'M', u'再'), (0x1F21F, 'M', u'新'), (0x1F220, 'M', u'初'), (0x1F221, 'M', u'終'), (0x1F222, 'M', u'生'), (0x1F223, 'M', u'販'), (0x1F224, 'M', u'声'), (0x1F225, 'M', u'吹'), (0x1F226, 'M', u'演'), (0x1F227, 'M', u'投'), (0x1F228, 'M', u'捕'), (0x1F229, 'M', u'一'), (0x1F22A, 'M', u'三'), (0x1F22B, 'M', u'遊'), (0x1F22C, 'M', u'左'), (0x1F22D, 'M', u'中'), (0x1F22E, 'M', u'右'), (0x1F22F, 'M', u'指'), (0x1F230, 'M', u'走'), (0x1F231, 'M', u'打'), (0x1F232, 'M', u'禁'), (0x1F233, 'M', u'空'), (0x1F234, 'M', u'合'), (0x1F235, 'M', u'満'), (0x1F236, 'M', u'有'), (0x1F237, 'M', u'月'), (0x1F238, 'M', u'申'), (0x1F239, 'M', u'割'), (0x1F23A, 'M', u'営'), (0x1F23B, 'X'), (0x1F240, 'M', u'〔本〕'), (0x1F241, 'M', u'〔三〕'), (0x1F242, 'M', u'〔二〕'), (0x1F243, 'M', u'〔安〕'), (0x1F244, 'M', u'〔点〕'), (0x1F245, 'M', u'〔打〕'), (0x1F246, 'M', u'〔盗〕'), (0x1F247, 'M', u'〔勝〕'), (0x1F248, 'M', u'〔敗〕'), (0x1F249, 'X'), (0x1F250, 'M', u'得'), (0x1F251, 'M', u'可'), (0x1F252, 'X'), (0x1F300, 'V'), (0x1F321, 'X'), (0x1F330, 'V'), (0x1F336, 'X'), (0x1F337, 'V'), (0x1F37D, 'X'), (0x1F380, 'V'), (0x1F394, 'X'), (0x1F3A0, 'V'), (0x1F3C5, 'X'), (0x1F3C6, 'V'), (0x1F3CB, 'X'), (0x1F3E0, 'V'), (0x1F3F1, 'X'), (0x1F400, 'V'), (0x1F43F, 'X'), (0x1F440, 'V'), ] def _seg_67(): return [ (0x1F441, 'X'), (0x1F442, 'V'), (0x1F4F8, 'X'), (0x1F4F9, 'V'), (0x1F4FD, 'X'), (0x1F500, 'V'), (0x1F53E, 'X'), (0x1F540, 'V'), (0x1F544, 'X'), (0x1F550, 'V'), (0x1F568, 'X'), (0x1F5FB, 'V'), (0x1F641, 'X'), (0x1F645, 'V'), (0x1F650, 'X'), (0x1F680, 'V'), (0x1F6C6, 'X'), (0x1F700, 'V'), (0x1F774, 'X'), (0x20000, 'V'), (0x2A6D7, 'X'), (0x2A700, 'V'), (0x2B735, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2F800, 'M', u'丽'), (0x2F801, 'M', u'丸'), (0x2F802, 'M', u'乁'), (0x2F803, 'M', u'𠄢'), (0x2F804, 'M', u'你'), (0x2F805, 'M', u'侮'), (0x2F806, 'M', u'侻'), (0x2F807, 'M', u'倂'), (0x2F808, 'M', u'偺'), (0x2F809, 'M', u'備'), (0x2F80A, 'M', u'僧'), (0x2F80B, 'M', u'像'), (0x2F80C, 'M', u'㒞'), (0x2F80D, 'M', u'𠘺'), (0x2F80E, 'M', u'免'), (0x2F80F, 'M', u'兔'), (0x2F810, 'M', u'兤'), (0x2F811, 'M', u'具'), (0x2F812, 'M', u'𠔜'), (0x2F813, 'M', u'㒹'), (0x2F814, 'M', u'內'), (0x2F815, 'M', u'再'), (0x2F816, 'M', u'𠕋'), (0x2F817, 'M', u'冗'), (0x2F818, 'M', u'冤'), (0x2F819, 'M', u'仌'), (0x2F81A, 'M', u'冬'), (0x2F81B, 'M', u'况'), (0x2F81C, 'M', u'𩇟'), (0x2F81D, 'M', u'凵'), (0x2F81E, 'M', u'刃'), (0x2F81F, 'M', u'㓟'), (0x2F820, 'M', u'刻'), (0x2F821, 'M', u'剆'), (0x2F822, 'M', u'割'), (0x2F823, 'M', u'剷'), (0x2F824, 'M', u'㔕'), (0x2F825, 'M', u'勇'), (0x2F826, 'M', u'勉'), (0x2F827, 'M', u'勤'), (0x2F828, 'M', u'勺'), 
(0x2F829, 'M', u'包'), (0x2F82A, 'M', u'匆'), (0x2F82B, 'M', u'北'), (0x2F82C, 'M', u'卉'), (0x2F82D, 'M', u'卑'), (0x2F82E, 'M', u'博'), (0x2F82F, 'M', u'即'), (0x2F830, 'M', u'卽'), (0x2F831, 'M', u'卿'), (0x2F834, 'M', u'𠨬'), (0x2F835, 'M', u'灰'), (0x2F836, 'M', u'及'), (0x2F837, 'M', u'叟'), (0x2F838, 'M', u'𠭣'), (0x2F839, 'M', u'叫'), (0x2F83A, 'M', u'叱'), (0x2F83B, 'M', u'吆'), (0x2F83C, 'M', u'咞'), (0x2F83D, 'M', u'吸'), (0x2F83E, 'M', u'呈'), (0x2F83F, 'M', u'周'), (0x2F840, 'M', u'咢'), (0x2F841, 'M', u'哶'), (0x2F842, 'M', u'唐'), (0x2F843, 'M', u'啓'), (0x2F844, 'M', u'啣'), (0x2F845, 'M', u'善'), (0x2F847, 'M', u'喙'), (0x2F848, 'M', u'喫'), (0x2F849, 'M', u'喳'), (0x2F84A, 'M', u'嗂'), (0x2F84B, 'M', u'圖'), (0x2F84C, 'M', u'嘆'), (0x2F84D, 'M', u'圗'), ] def _seg_68(): return [ (0x2F84E, 'M', u'噑'), (0x2F84F, 'M', u'噴'), (0x2F850, 'M', u'切'), (0x2F851, 'M', u'壮'), (0x2F852, 'M', u'城'), (0x2F853, 'M', u'埴'), (0x2F854, 'M', u'堍'), (0x2F855, 'M', u'型'), (0x2F856, 'M', u'堲'), (0x2F857, 'M', u'報'), (0x2F858, 'M', u'墬'), (0x2F859, 'M', u'𡓤'), (0x2F85A, 'M', u'売'), (0x2F85B, 'M', u'壷'), (0x2F85C, 'M', u'夆'), (0x2F85D, 'M', u'多'), (0x2F85E, 'M', u'夢'), (0x2F85F, 'M', u'奢'), (0x2F860, 'M', u'𡚨'), (0x2F861, 'M', u'𡛪'), (0x2F862, 'M', u'姬'), (0x2F863, 'M', u'娛'), (0x2F864, 'M', u'娧'), (0x2F865, 'M', u'姘'), (0x2F866, 'M', u'婦'), (0x2F867, 'M', u'㛮'), (0x2F868, 'X'), (0x2F869, 'M', u'嬈'), (0x2F86A, 'M', u'嬾'), (0x2F86C, 'M', u'𡧈'), (0x2F86D, 'M', u'寃'), (0x2F86E, 'M', u'寘'), (0x2F86F, 'M', u'寧'), (0x2F870, 'M', u'寳'), (0x2F871, 'M', u'𡬘'), (0x2F872, 'M', u'寿'), (0x2F873, 'M', u'将'), (0x2F874, 'X'), (0x2F875, 'M', u'尢'), (0x2F876, 'M', u'㞁'), (0x2F877, 'M', u'屠'), (0x2F878, 'M', u'屮'), (0x2F879, 'M', u'峀'), (0x2F87A, 'M', u'岍'), (0x2F87B, 'M', u'𡷤'), (0x2F87C, 'M', u'嵃'), (0x2F87D, 'M', u'𡷦'), (0x2F87E, 'M', u'嵮'), (0x2F87F, 'M', u'嵫'), (0x2F880, 'M', u'嵼'), (0x2F881, 'M', u'巡'), (0x2F882, 'M', u'巢'), (0x2F883, 'M', u'㠯'), (0x2F884, 'M', u'巽'), (0x2F885, 'M', u'帨'), (0x2F886, 'M', u'帽'), (0x2F887, 'M', u'幩'), (0x2F888, 'M', u'㡢'), (0x2F889, 'M', u'𢆃'), (0x2F88A, 'M', u'㡼'), (0x2F88B, 'M', u'庰'), (0x2F88C, 'M', u'庳'), (0x2F88D, 'M', u'庶'), (0x2F88E, 'M', u'廊'), (0x2F88F, 'M', u'𪎒'), (0x2F890, 'M', u'廾'), (0x2F891, 'M', u'𢌱'), (0x2F893, 'M', u'舁'), (0x2F894, 'M', u'弢'), (0x2F896, 'M', u'㣇'), (0x2F897, 'M', u'𣊸'), (0x2F898, 'M', u'𦇚'), (0x2F899, 'M', u'形'), (0x2F89A, 'M', u'彫'), (0x2F89B, 'M', u'㣣'), (0x2F89C, 'M', u'徚'), (0x2F89D, 'M', u'忍'), (0x2F89E, 'M', u'志'), (0x2F89F, 'M', u'忹'), (0x2F8A0, 'M', u'悁'), (0x2F8A1, 'M', u'㤺'), (0x2F8A2, 'M', u'㤜'), (0x2F8A3, 'M', u'悔'), (0x2F8A4, 'M', u'𢛔'), (0x2F8A5, 'M', u'惇'), (0x2F8A6, 'M', u'慈'), (0x2F8A7, 'M', u'慌'), (0x2F8A8, 'M', u'慎'), (0x2F8A9, 'M', u'慌'), (0x2F8AA, 'M', u'慺'), (0x2F8AB, 'M', u'憎'), (0x2F8AC, 'M', u'憲'), (0x2F8AD, 'M', u'憤'), (0x2F8AE, 'M', u'憯'), (0x2F8AF, 'M', u'懞'), (0x2F8B0, 'M', u'懲'), (0x2F8B1, 'M', u'懶'), (0x2F8B2, 'M', u'成'), (0x2F8B3, 'M', u'戛'), (0x2F8B4, 'M', u'扝'), ] def _seg_69(): return [ (0x2F8B5, 'M', u'抱'), (0x2F8B6, 'M', u'拔'), (0x2F8B7, 'M', u'捐'), (0x2F8B8, 'M', u'𢬌'), (0x2F8B9, 'M', u'挽'), (0x2F8BA, 'M', u'拼'), (0x2F8BB, 'M', u'捨'), (0x2F8BC, 'M', u'掃'), (0x2F8BD, 'M', u'揤'), (0x2F8BE, 'M', u'𢯱'), (0x2F8BF, 'M', u'搢'), (0x2F8C0, 'M', u'揅'), (0x2F8C1, 'M', u'掩'), (0x2F8C2, 'M', u'㨮'), (0x2F8C3, 'M', u'摩'), (0x2F8C4, 'M', u'摾'), (0x2F8C5, 'M', u'撝'), (0x2F8C6, 'M', u'摷'), (0x2F8C7, 'M', u'㩬'), (0x2F8C8, 'M', u'敏'), (0x2F8C9, 'M', u'敬'), (0x2F8CA, 'M', u'𣀊'), (0x2F8CB, 'M', u'旣'), (0x2F8CC, 'M', u'書'), (0x2F8CD, 'M', u'晉'), (0x2F8CE, 'M', 
u'㬙'), (0x2F8CF, 'M', u'暑'), (0x2F8D0, 'M', u'㬈'), (0x2F8D1, 'M', u'㫤'), (0x2F8D2, 'M', u'冒'), (0x2F8D3, 'M', u'冕'), (0x2F8D4, 'M', u'最'), (0x2F8D5, 'M', u'暜'), (0x2F8D6, 'M', u'肭'), (0x2F8D7, 'M', u'䏙'), (0x2F8D8, 'M', u'朗'), (0x2F8D9, 'M', u'望'), (0x2F8DA, 'M', u'朡'), (0x2F8DB, 'M', u'杞'), (0x2F8DC, 'M', u'杓'), (0x2F8DD, 'M', u'𣏃'), (0x2F8DE, 'M', u'㭉'), (0x2F8DF, 'M', u'柺'), (0x2F8E0, 'M', u'枅'), (0x2F8E1, 'M', u'桒'), (0x2F8E2, 'M', u'梅'), (0x2F8E3, 'M', u'𣑭'), (0x2F8E4, 'M', u'梎'), (0x2F8E5, 'M', u'栟'), (0x2F8E6, 'M', u'椔'), (0x2F8E7, 'M', u'㮝'), (0x2F8E8, 'M', u'楂'), (0x2F8E9, 'M', u'榣'), (0x2F8EA, 'M', u'槪'), (0x2F8EB, 'M', u'檨'), (0x2F8EC, 'M', u'𣚣'), (0x2F8ED, 'M', u'櫛'), (0x2F8EE, 'M', u'㰘'), (0x2F8EF, 'M', u'次'), (0x2F8F0, 'M', u'𣢧'), (0x2F8F1, 'M', u'歔'), (0x2F8F2, 'M', u'㱎'), (0x2F8F3, 'M', u'歲'), (0x2F8F4, 'M', u'殟'), (0x2F8F5, 'M', u'殺'), (0x2F8F6, 'M', u'殻'), (0x2F8F7, 'M', u'𣪍'), (0x2F8F8, 'M', u'𡴋'), (0x2F8F9, 'M', u'𣫺'), (0x2F8FA, 'M', u'汎'), (0x2F8FB, 'M', u'𣲼'), (0x2F8FC, 'M', u'沿'), (0x2F8FD, 'M', u'泍'), (0x2F8FE, 'M', u'汧'), (0x2F8FF, 'M', u'洖'), (0x2F900, 'M', u'派'), (0x2F901, 'M', u'海'), (0x2F902, 'M', u'流'), (0x2F903, 'M', u'浩'), (0x2F904, 'M', u'浸'), (0x2F905, 'M', u'涅'), (0x2F906, 'M', u'𣴞'), (0x2F907, 'M', u'洴'), (0x2F908, 'M', u'港'), (0x2F909, 'M', u'湮'), (0x2F90A, 'M', u'㴳'), (0x2F90B, 'M', u'滋'), (0x2F90C, 'M', u'滇'), (0x2F90D, 'M', u'𣻑'), (0x2F90E, 'M', u'淹'), (0x2F90F, 'M', u'潮'), (0x2F910, 'M', u'𣽞'), (0x2F911, 'M', u'𣾎'), (0x2F912, 'M', u'濆'), (0x2F913, 'M', u'瀹'), (0x2F914, 'M', u'瀞'), (0x2F915, 'M', u'瀛'), (0x2F916, 'M', u'㶖'), (0x2F917, 'M', u'灊'), (0x2F918, 'M', u'災'), ] def _seg_70(): return [ (0x2F919, 'M', u'灷'), (0x2F91A, 'M', u'炭'), (0x2F91B, 'M', u'𠔥'), (0x2F91C, 'M', u'煅'), (0x2F91D, 'M', u'𤉣'), (0x2F91E, 'M', u'熜'), (0x2F91F, 'X'), (0x2F920, 'M', u'爨'), (0x2F921, 'M', u'爵'), (0x2F922, 'M', u'牐'), (0x2F923, 'M', u'𤘈'), (0x2F924, 'M', u'犀'), (0x2F925, 'M', u'犕'), (0x2F926, 'M', u'𤜵'), (0x2F927, 'M', u'𤠔'), (0x2F928, 'M', u'獺'), (0x2F929, 'M', u'王'), (0x2F92A, 'M', u'㺬'), (0x2F92B, 'M', u'玥'), (0x2F92C, 'M', u'㺸'), (0x2F92E, 'M', u'瑇'), (0x2F92F, 'M', u'瑜'), (0x2F930, 'M', u'瑱'), (0x2F931, 'M', u'璅'), (0x2F932, 'M', u'瓊'), (0x2F933, 'M', u'㼛'), (0x2F934, 'M', u'甤'), (0x2F935, 'M', u'𤰶'), (0x2F936, 'M', u'甾'), (0x2F937, 'M', u'𤲒'), (0x2F938, 'M', u'異'), (0x2F939, 'M', u'𢆟'), (0x2F93A, 'M', u'瘐'), (0x2F93B, 'M', u'𤾡'), (0x2F93C, 'M', u'𤾸'), (0x2F93D, 'M', u'𥁄'), (0x2F93E, 'M', u'㿼'), (0x2F93F, 'M', u'䀈'), (0x2F940, 'M', u'直'), (0x2F941, 'M', u'𥃳'), (0x2F942, 'M', u'𥃲'), (0x2F943, 'M', u'𥄙'), (0x2F944, 'M', u'𥄳'), (0x2F945, 'M', u'眞'), (0x2F946, 'M', u'真'), (0x2F948, 'M', u'睊'), (0x2F949, 'M', u'䀹'), (0x2F94A, 'M', u'瞋'), (0x2F94B, 'M', u'䁆'), (0x2F94C, 'M', u'䂖'), (0x2F94D, 'M', u'𥐝'), (0x2F94E, 'M', u'硎'), (0x2F94F, 'M', u'碌'), (0x2F950, 'M', u'磌'), (0x2F951, 'M', u'䃣'), (0x2F952, 'M', u'𥘦'), (0x2F953, 'M', u'祖'), (0x2F954, 'M', u'𥚚'), (0x2F955, 'M', u'𥛅'), (0x2F956, 'M', u'福'), (0x2F957, 'M', u'秫'), (0x2F958, 'M', u'䄯'), (0x2F959, 'M', u'穀'), (0x2F95A, 'M', u'穊'), (0x2F95B, 'M', u'穏'), (0x2F95C, 'M', u'𥥼'), (0x2F95D, 'M', u'𥪧'), (0x2F95F, 'X'), (0x2F960, 'M', u'䈂'), (0x2F961, 'M', u'𥮫'), (0x2F962, 'M', u'篆'), (0x2F963, 'M', u'築'), (0x2F964, 'M', u'䈧'), (0x2F965, 'M', u'𥲀'), (0x2F966, 'M', u'糒'), (0x2F967, 'M', u'䊠'), (0x2F968, 'M', u'糨'), (0x2F969, 'M', u'糣'), (0x2F96A, 'M', u'紀'), (0x2F96B, 'M', u'𥾆'), (0x2F96C, 'M', u'絣'), (0x2F96D, 'M', u'䌁'), (0x2F96E, 'M', u'緇'), (0x2F96F, 'M', u'縂'), (0x2F970, 'M', u'繅'), (0x2F971, 'M', u'䌴'), (0x2F972, 
'M', u'𦈨'), (0x2F973, 'M', u'𦉇'), (0x2F974, 'M', u'䍙'), (0x2F975, 'M', u'𦋙'), (0x2F976, 'M', u'罺'), (0x2F977, 'M', u'𦌾'), (0x2F978, 'M', u'羕'), (0x2F979, 'M', u'翺'), (0x2F97A, 'M', u'者'), (0x2F97B, 'M', u'𦓚'), (0x2F97C, 'M', u'𦔣'), (0x2F97D, 'M', u'聠'), (0x2F97E, 'M', u'𦖨'), (0x2F97F, 'M', u'聰'), ] def _seg_71(): return [ (0x2F980, 'M', u'𣍟'), (0x2F981, 'M', u'䏕'), (0x2F982, 'M', u'育'), (0x2F983, 'M', u'脃'), (0x2F984, 'M', u'䐋'), (0x2F985, 'M', u'脾'), (0x2F986, 'M', u'媵'), (0x2F987, 'M', u'𦞧'), (0x2F988, 'M', u'𦞵'), (0x2F989, 'M', u'𣎓'), (0x2F98A, 'M', u'𣎜'), (0x2F98B, 'M', u'舁'), (0x2F98C, 'M', u'舄'), (0x2F98D, 'M', u'辞'), (0x2F98E, 'M', u'䑫'), (0x2F98F, 'M', u'芑'), (0x2F990, 'M', u'芋'), (0x2F991, 'M', u'芝'), (0x2F992, 'M', u'劳'), (0x2F993, 'M', u'花'), (0x2F994, 'M', u'芳'), (0x2F995, 'M', u'芽'), (0x2F996, 'M', u'苦'), (0x2F997, 'M', u'𦬼'), (0x2F998, 'M', u'若'), (0x2F999, 'M', u'茝'), (0x2F99A, 'M', u'荣'), (0x2F99B, 'M', u'莭'), (0x2F99C, 'M', u'茣'), (0x2F99D, 'M', u'莽'), (0x2F99E, 'M', u'菧'), (0x2F99F, 'M', u'著'), (0x2F9A0, 'M', u'荓'), (0x2F9A1, 'M', u'菊'), (0x2F9A2, 'M', u'菌'), (0x2F9A3, 'M', u'菜'), (0x2F9A4, 'M', u'𦰶'), (0x2F9A5, 'M', u'𦵫'), (0x2F9A6, 'M', u'𦳕'), (0x2F9A7, 'M', u'䔫'), (0x2F9A8, 'M', u'蓱'), (0x2F9A9, 'M', u'蓳'), (0x2F9AA, 'M', u'蔖'), (0x2F9AB, 'M', u'𧏊'), (0x2F9AC, 'M', u'蕤'), (0x2F9AD, 'M', u'𦼬'), (0x2F9AE, 'M', u'䕝'), (0x2F9AF, 'M', u'䕡'), (0x2F9B0, 'M', u'𦾱'), (0x2F9B1, 'M', u'𧃒'), (0x2F9B2, 'M', u'䕫'), (0x2F9B3, 'M', u'虐'), (0x2F9B4, 'M', u'虜'), (0x2F9B5, 'M', u'虧'), (0x2F9B6, 'M', u'虩'), (0x2F9B7, 'M', u'蚩'), (0x2F9B8, 'M', u'蚈'), (0x2F9B9, 'M', u'蜎'), (0x2F9BA, 'M', u'蛢'), (0x2F9BB, 'M', u'蝹'), (0x2F9BC, 'M', u'蜨'), (0x2F9BD, 'M', u'蝫'), (0x2F9BE, 'M', u'螆'), (0x2F9BF, 'X'), (0x2F9C0, 'M', u'蟡'), (0x2F9C1, 'M', u'蠁'), (0x2F9C2, 'M', u'䗹'), (0x2F9C3, 'M', u'衠'), (0x2F9C4, 'M', u'衣'), (0x2F9C5, 'M', u'𧙧'), (0x2F9C6, 'M', u'裗'), (0x2F9C7, 'M', u'裞'), (0x2F9C8, 'M', u'䘵'), (0x2F9C9, 'M', u'裺'), (0x2F9CA, 'M', u'㒻'), (0x2F9CB, 'M', u'𧢮'), (0x2F9CC, 'M', u'𧥦'), (0x2F9CD, 'M', u'䚾'), (0x2F9CE, 'M', u'䛇'), (0x2F9CF, 'M', u'誠'), (0x2F9D0, 'M', u'諭'), (0x2F9D1, 'M', u'變'), (0x2F9D2, 'M', u'豕'), (0x2F9D3, 'M', u'𧲨'), (0x2F9D4, 'M', u'貫'), (0x2F9D5, 'M', u'賁'), (0x2F9D6, 'M', u'贛'), (0x2F9D7, 'M', u'起'), (0x2F9D8, 'M', u'𧼯'), (0x2F9D9, 'M', u'𠠄'), (0x2F9DA, 'M', u'跋'), (0x2F9DB, 'M', u'趼'), (0x2F9DC, 'M', u'跰'), (0x2F9DD, 'M', u'𠣞'), (0x2F9DE, 'M', u'軔'), (0x2F9DF, 'M', u'輸'), (0x2F9E0, 'M', u'𨗒'), (0x2F9E1, 'M', u'𨗭'), (0x2F9E2, 'M', u'邔'), (0x2F9E3, 'M', u'郱'), ] def _seg_72(): return [ (0x2F9E4, 'M', u'鄑'), (0x2F9E5, 'M', u'𨜮'), (0x2F9E6, 'M', u'鄛'), (0x2F9E7, 'M', u'鈸'), (0x2F9E8, 'M', u'鋗'), (0x2F9E9, 'M', u'鋘'), (0x2F9EA, 'M', u'鉼'), (0x2F9EB, 'M', u'鏹'), (0x2F9EC, 'M', u'鐕'), (0x2F9ED, 'M', u'𨯺'), (0x2F9EE, 'M', u'開'), (0x2F9EF, 'M', u'䦕'), (0x2F9F0, 'M', u'閷'), (0x2F9F1, 'M', u'𨵷'), (0x2F9F2, 'M', u'䧦'), (0x2F9F3, 'M', u'雃'), (0x2F9F4, 'M', u'嶲'), (0x2F9F5, 'M', u'霣'), (0x2F9F6, 'M', u'𩅅'), (0x2F9F7, 'M', u'𩈚'), (0x2F9F8, 'M', u'䩮'), (0x2F9F9, 'M', u'䩶'), (0x2F9FA, 'M', u'韠'), (0x2F9FB, 'M', u'𩐊'), (0x2F9FC, 'M', u'䪲'), (0x2F9FD, 'M', u'𩒖'), (0x2F9FE, 'M', u'頋'), (0x2FA00, 'M', u'頩'), (0x2FA01, 'M', u'𩖶'), (0x2FA02, 'M', u'飢'), (0x2FA03, 'M', u'䬳'), (0x2FA04, 'M', u'餩'), (0x2FA05, 'M', u'馧'), (0x2FA06, 'M', u'駂'), (0x2FA07, 'M', u'駾'), (0x2FA08, 'M', u'䯎'), (0x2FA09, 'M', u'𩬰'), (0x2FA0A, 'M', u'鬒'), (0x2FA0B, 'M', u'鱀'), (0x2FA0C, 'M', u'鳽'), (0x2FA0D, 'M', u'䳎'), (0x2FA0E, 'M', u'䳭'), (0x2FA0F, 'M', u'鵧'), (0x2FA10, 'M', u'𪃎'), (0x2FA11, 'M', u'䳸'), (0x2FA12, 'M', 
u'𪄅'), (0x2FA13, 'M', u'𪈎'), (0x2FA14, 'M', u'𪊑'), (0x2FA15, 'M', u'麻'), (0x2FA16, 'M', u'䵖'), (0x2FA17, 'M', u'黹'), (0x2FA18, 'M', u'黾'), (0x2FA19, 'M', u'鼅'), (0x2FA1A, 'M', u'鼏'), (0x2FA1B, 'M', u'鼖'), (0x2FA1C, 'M', u'鼻'), (0x2FA1D, 'M', u'𪘀'), (0x2FA1E, 'X'), (0xE0100, 'I'), (0xE01F0, 'X'), ] uts46data = tuple( _seg_0() + _seg_1() + _seg_2() + _seg_3() + _seg_4() + _seg_5() + _seg_6() + _seg_7() + _seg_8() + _seg_9() + _seg_10() + _seg_11() + _seg_12() + _seg_13() + _seg_14() + _seg_15() + _seg_16() + _seg_17() + _seg_18() + _seg_19() + _seg_20() + _seg_21() + _seg_22() + _seg_23() + _seg_24() + _seg_25() + _seg_26() + _seg_27() + _seg_28() + _seg_29() + _seg_30() + _seg_31() + _seg_32() + _seg_33() + _seg_34() + _seg_35() + _seg_36() + _seg_37() + _seg_38() + _seg_39() + _seg_40() + _seg_41() + _seg_42() + _seg_43() + _seg_44() + _seg_45() + _seg_46() + _seg_47() + _seg_48() + _seg_49() + _seg_50() + _seg_51() + _seg_52() + _seg_53() + _seg_54() + _seg_55() + _seg_56() + _seg_57() + _seg_58() + _seg_59() + _seg_60() + _seg_61() + _seg_62() + _seg_63() + _seg_64() + _seg_65() + _seg_66() + _seg_67() + _seg_68() + _seg_69() + _seg_70() + _seg_71() + _seg_72() )
mit
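The _seg_* functions above flatten into uts46data, a tuple of (codepoint, status[, mapping]) rows sorted by start codepoint. A minimal sketch of how a consumer could find the row governing an arbitrary codepoint with bisect; the helper name uts46_row is illustrative and not part of the module, and it assumes code_point is at least the first row's start (the real table begins at 0x0):

from bisect import bisect_right

def uts46_row(code_point, table):
    # Rows sort by start codepoint; the governing row is the last one whose
    # start is <= code_point. 'Z' sorts after every status letter used in the
    # table ('3', 'I', 'M', 'V', 'X'), so the probe lands past equal keys.
    idx = bisect_right(table, (code_point, 'Z'))
    return table[idx - 1]

# uts46_row(0x1D7D0, uts46data) -> (0x1D7D0, 'M', u'2'): mapped to '2'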
nddsg/TreeDecomps
xplodnTree/tdec/b2CliqueTreeRules.py
1
3569
#!/usr/bin/env python __author__ = 'saguinag' + '@' + 'nd.edu' __version__ = "0.1.0" ## ## fname "b2CliqueTreeRules.py" ## ## TODO: some todo list ## VersionLog: import net_metrics as metrics import pandas as pd import argparse, traceback import os, sys import networkx as nx import re from collections import deque, defaultdict, Counter import tree_decomposition as td import PHRG as phrg import probabilistic_cfg as pcfg import exact_phrg as xphrg import a1_hrg_cliq_tree as nfld from a1_hrg_cliq_tree import load_edgelist DEBUG = False def get_parser (): parser = argparse.ArgumentParser(description='b2CliqueTreeRules.py: given a tree derive grammar rules') parser.add_argument('-t', '--treedecomp', required=True, help='input tree decomposition (dimacs file format)') parser.add_argument('--version', action='version', version=__version__) return parser def dimacs_td_ct (tdfname): """ tree decomp to clique-tree """ print '... input file:', tdfname fname = tdfname graph_name = os.path.basename(fname) gname = graph_name.split('.')[0] gfname = "datasets/out." + gname tdh = os.path.basename(fname).split('.')[1] # tree decomp heuristic tfname = gname+"."+tdh G = load_edgelist(gfname) if DEBUG: print nx.info(G) print with open(fname, 'r') as f: # read tree decomp from inddgo lines = f.readlines() lines = [x.rstrip('\r\n') for x in lines] cbags = {} bags = [x.split() for x in lines if x.startswith('B')] for b in bags: cbags[int(b[1])] = [int(x) for x in b[3:]] # what to do with bag size? edges = [x.split()[1:] for x in lines if x.startswith('e')] edges = [[int(k) for k in x] for x in edges] tree = defaultdict(set) for s, t in edges: tree[frozenset(cbags[s])].add(frozenset(cbags[t])) if DEBUG: print '.. # of keys in `tree`:', len(tree.keys()) if DEBUG: print tree.keys() root = list(tree)[0] if DEBUG: print '.. Root:', root root = frozenset(cbags[1]) if DEBUG: print '.. Root:', root T = td.make_rooted(tree, root) if DEBUG: print '.. T rooted:', len(T) # nfld.unfold_2wide_tuple(T) # lets me display the tree's frozen sets T = phrg.binarize(T) prod_rules = {} td.new_visit(T, G, prod_rules) if DEBUG: print "--------------------" if DEBUG: print "- Production Rules -" if DEBUG: print "--------------------" for k in prod_rules.iterkeys(): if DEBUG: print k s = 0 for d in prod_rules[k]: s += prod_rules[k][d] for d in prod_rules[k]: prod_rules[k][d] = float(prod_rules[k][d]) / float(s) # normalization step to turn counts into probabilities. if DEBUG: print '\t -> ', d, prod_rules[k][d] rules = [] id = 0 for k, v in prod_rules.iteritems(): sid = 0 for x in prod_rules[k]: rhs = re.findall("[^()]+", x) rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])) if DEBUG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]) sid += 1 id += 1 df = pd.DataFrame(rules) outdf_fname = "./ProdRules/"+tfname+".prules" if not os.path.isfile(outdf_fname+".bz2"): print '...', outdf_fname, "written" df.to_csv(outdf_fname+".bz2", compression="bz2") else: print '...', outdf_fname, "file exists" return def main (): parser = get_parser() args = vars(parser.parse_args()) dimacs_td_ct(args['treedecomp']) # gen synth graph if __name__ == '__main__': try: main() except Exception, e: print str(e) traceback.print_exc() sys.exit(1) sys.exit(0)
mit
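The core of dimacs_td_ct() above is the bag/edge parse that turns an INDDGO-style tree decomposition into a clique tree keyed by frozensets. A self-contained sketch of just that step on a two-bag toy input, assuming the same file layout the code reads ("B <id> <size> <vertices...>" bag lines and "e <s> <t>" tree edges):

from collections import defaultdict

lines = ["B 1 3 1 2 3", "B 2 2 2 4", "e 1 2"]
cbags = {}
for b in (x.split() for x in lines if x.startswith('B')):
    cbags[int(b[1])] = [int(v) for v in b[3:]]   # b[2] is the bag size
edges = [[int(k) for k in x.split()[1:]] for x in lines if x.startswith('e')]
tree = defaultdict(set)
for s, t in edges:
    tree[frozenset(cbags[s])].add(frozenset(cbags[t]))
# tree == {frozenset({1, 2, 3}): {frozenset({2, 4})}}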
ar4s/django
django/db/models/sql/expressions.py
3
4373
import copy from django.core.exceptions import FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import FieldDoesNotExist class SQLEvaluator(object): def __init__(self, expression, query, allow_joins=True, reuse=None): self.expression = expression self.opts = query.get_meta() self.reuse = reuse self.cols = [] self.expression.prepare(self, query, allow_joins) def relabeled_clone(self, change_map): clone = copy.copy(self) clone.cols = [] for node, col in self.cols: if hasattr(col, 'relabeled_clone'): clone.cols.append((node, col.relabeled_clone(change_map))) else: clone.cols.append((node, (change_map.get(col[0], col[0]), col[1]))) return clone def get_cols(self): cols = [] for node, col in self.cols: if hasattr(node, 'get_cols'): cols.extend(node.get_cols()) elif isinstance(col, tuple): cols.append(col) return cols def prepare(self): return self def as_sql(self, qn, connection): return self.expression.evaluate(self, qn, connection) ###################################################### # Visitor methods for initial expression preparation # ###################################################### def prepare_node(self, node, query, allow_joins): for child in node.children: if hasattr(child, 'prepare'): child.prepare(self, query, allow_joins) def prepare_leaf(self, node, query, allow_joins): if not allow_joins and LOOKUP_SEP in node.name: raise FieldError("Joined field references are not permitted in this query") field_list = node.name.split(LOOKUP_SEP) if node.name in query.aggregates: self.cols.append((node, query.aggregate_select[node.name])) else: try: field, sources, opts, join_list, path = query.setup_joins( field_list, query.get_meta(), query.get_initial_alias(), self.reuse) targets, _, join_list = query.trim_joins(sources, join_list, path) if self.reuse is not None: self.reuse.update(join_list) for t in targets: self.cols.append((node, (join_list[-1], t.column))) except FieldDoesNotExist: raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (node.name, [f.name for f in self.opts.fields])) ################################################### # Visitor methods for final expression evaluation # ################################################### def evaluate_node(self, node, qn, connection): expressions = [] expression_params = [] for child in node.children: if hasattr(child, 'evaluate'): sql, params = child.evaluate(self, qn, connection) else: sql, params = '%s', (child,) if len(getattr(child, 'children', [])) > 1: format = '(%s)' else: format = '%s' if sql: expressions.append(format % sql) expression_params.extend(params) return connection.ops.combine_expression(node.connector, expressions), expression_params def evaluate_leaf(self, node, qn, connection): col = None for n, c in self.cols: if n is node: col = c break if col is None: raise ValueError("Given node not found") if hasattr(col, 'as_sql'): return col.as_sql(qn, connection) else: return '%s.%s' % (qn(col[0]), qn(col[1])), [] def evaluate_date_modifier_node(self, node, qn, connection): timedelta = node.children.pop() sql, params = self.evaluate_node(node, qn, connection) if (timedelta.days == timedelta.seconds == timedelta.microseconds == 0): return sql, params return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
bsd-3-clause
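SQLEvaluator above is one half of a double-dispatch pair: expression nodes receive the evaluator and call back into its prepare_*/evaluate_* methods. These stand-in classes are illustrative only (not Django's ExpressionNode/F) and show the node-side protocol the evaluator assumes; a real run would also need prepare() to have populated evaluator.cols first:

class Leaf(object):
    def __init__(self, name):
        self.name = name
    def evaluate(self, evaluator, qn, connection):
        # dispatch back into the evaluator for column lookup / SQL rendering
        return evaluator.evaluate_leaf(self, qn, connection)

class Node(object):
    def __init__(self, connector, children):
        self.connector, self.children = connector, children
    def evaluate(self, evaluator, qn, connection):
        # dispatch back into the evaluator, which recurses over children
        return evaluator.evaluate_node(self, qn, connection)

# e.g. Node('+', [Leaf('price'), Leaf('tax')]).evaluate(evaluator, qn, connection)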
hynnet/openwrt-mt7620
staging_dir/host/lib/python2.7/ctypes/test/test_struct_fields.py
264
1503
import unittest from ctypes import * class StructFieldsTestCase(unittest.TestCase): # Structure/Union classes must get 'finalized' sooner or # later, when one of these things happens: # # 1. _fields_ is set. # 2. An instance is created. # 3. The type is used as a field of another Structure/Union. # 4. The type is subclassed. # # Once they are finalized, assigning _fields_ is no longer allowed. def test_1_A(self): class X(Structure): pass self.assertEqual(sizeof(X), 0) # not finalized X._fields_ = [] # finalized self.assertRaises(AttributeError, setattr, X, "_fields_", []) def test_1_B(self): class X(Structure): _fields_ = [] # finalized self.assertRaises(AttributeError, setattr, X, "_fields_", []) def test_2(self): class X(Structure): pass X() self.assertRaises(AttributeError, setattr, X, "_fields_", []) def test_3(self): class X(Structure): pass class Y(Structure): _fields_ = [("x", X)] # finalizes X self.assertRaises(AttributeError, setattr, X, "_fields_", []) def test_4(self): class X(Structure): pass class Y(X): pass self.assertRaises(AttributeError, setattr, X, "_fields_", []) Y._fields_ = [] self.assertRaises(AttributeError, setattr, X, "_fields_", []) if __name__ == "__main__": unittest.main()
gpl-2.0
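The finalization rule these tests pin down is easy to see interactively; a runnable distillation:

from ctypes import Structure, c_int, sizeof

class Point(Structure):
    pass

print(sizeof(Point))                           # 0 -- not finalized yet
Point._fields_ = [("x", c_int), ("y", c_int)]  # finalizes the type
try:
    Point._fields_ = []
except AttributeError as e:
    print("reassignment refused: %s" % e)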
iodoom-gitorious/enhanced-iodoom3
neo/sys/linux/runner/runner_lib.py
61
6759
# run doom process on a series of maps # can be used for regression testing, or to fetch media # keeps a log of each run ( see getLogfile ) # currently uses a basic stdout activity timeout to decide when to move on # using a periodic check of /proc/<pid>/status SleepAVG # when the sleep average reaches 0, issue a 'quit' to stdout # keeps serialized run status in runner.pickle # NOTE: can be used, for instance, to initiate runs only on previously failed maps # TODO: use the serialized status and not the logs to sort the run order # TODO: better logging. Use idLogger? # TODO: configurable event when the process is found interactive # instead of emitting a quit, perform some warning action? import sys, os, commands, string, time, traceback, pickle from twisted.application import internet, service from twisted.internet import protocol, reactor, utils, defer from twisted.internet.task import LoopingCall class doomClientProtocol( protocol.ProcessProtocol ): # ProcessProtocol API def connectionMade( self ): self.logfile.write( 'connectionMade\n' ) def outReceived( self, data ): print data self.logfile.write( data ) def errReceived( self, data ): print 'stderr: ' + data self.logfile.write( 'stderr: ' + data ) def inConnectionLost( self ): self.logfile.write( 'inConnectionLost\n' ) def outConnectionLost( self ): self.logfile.write( 'outConnectionLost\n' ) def errConnectionLost( self ): self.logfile.write( 'errConnectionLost\n' ) def processEnded( self, status_object ): self.logfile.write( 'processEnded %s\n' % repr( status_object ) ) self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' ) self.logfile.close() self.deferred.callback( None ) # mac management def __init__( self, logfilename, deferred ): self.logfilename = logfilename self.logfile = open( logfilename, 'a' ) self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' ) self.deferred = deferred class doomService( service.Service ): # current monitoring state # 0: nothing running # 1: we have a process running, we're monitoring its CPU usage # 2: we issued a 'quit' to the process's stdin # either going to get a processEnded, or a timeout # 3: we forced a kill because of error, timeout etc. state = 0 # load check period check_period = 10 # pickled status file pickle_file = 'runner.pickle' # stores status indexed by map name # { 'mapname' : ( state, last_update ), .. 
} status = {} # start the maps as multiplayer server multiplayer = 0 def __init__( self, bin, cmdline, maps, sort = 0, multiplayer = 0, blank_run = 0 ): self.p_transport = None self.multiplayer = multiplayer self.blank_run = blank_run if ( self.multiplayer ): print 'Operate in multiplayer mode' self.bin = os.path.abspath( bin ) if ( type( cmdline ) is type( '' ) ): self.cmdline = string.split( cmdline, ' ' ) else: self.cmdline = cmdline self.maps = maps if ( os.path.exists( self.pickle_file ) ): print 'Loading pickled status %s' % self.pickle_file handle = open( self.pickle_file, 'r' ) self.status = pickle.load( handle ) handle.close() if ( sort ): print 'Sorting maps oldest runs first' maps_sorted = [ ] for i in self.maps: i_log = self.getLogfile( i ) if ( os.path.exists( i_log ) ): maps_sorted.append( ( i, os.path.getmtime( i_log ) ) ) else: maps_sorted.append( ( i, 0 ) ) maps_sorted.sort( lambda x,y : cmp( x[1], y[1] ) ) self.maps = [ ] if ( blank_run ): self.maps.append( 'blankrun' ) for i in maps_sorted: self.maps.append( i[ 0 ] ) print 'Sorted as: %s\n' % repr( self.maps ) def getLogfile( self, name ): return 'logs/' + string.translate( name, string.maketrans( '/', '-' ) ) + '.log' # deferred call when child process dies def processEnded( self, val ): print 'child has died - state %d' % self.state self.status[ self.maps[ self.i_map ] ] = ( self.state, time.time() ) self.i_map += 1 if ( self.i_map >= len( self.maps ) ): reactor.stop() else: self.nextMap() def processTimeout( self ): self.p_transport.signalProcess( "KILL" ) def sleepAVGReply( self, val ): try: s = val[10:][:-2] print 'sleepAVGReply %s%%' % s if ( s == '0' ): # need twice in a row if ( self.state == 2 ): print 'child process is interactive' self.p_transport.write( 'quit\n' ) else: self.state = 2 else: self.state = 1 # else: # reactor.callLater( self.check_period, self.checkCPU ) except: print traceback.format_tb( sys.exc_info()[2] ) print sys.exc_info()[0] print 'exception raised in sleepAVGReply - killing process' self.state = 3 self.p_transport.signalProcess( 'KILL' ) def sleepAVGTimeout( self ): print 'sleepAVGTimeout - killing process' self.state = 3 self.p_transport.signalProcess( 'KILL' ) # called at regular intervals to monitor the sleep average of the child process # when sleep reaches 0, it means the map is loaded and interactive def checkCPU( self ): if ( self.state == 0 or self.p_transport is None or self.p_transport.pid is None ): print 'checkCPU: no child process atm' return defer = utils.getProcessOutput( '/bin/bash', [ '-c', 'cat /proc/%d/status | grep SleepAVG' % self.p_transport.pid ] ) defer.addCallback( self.sleepAVGReply ) defer.setTimeout( 2, self.sleepAVGTimeout ) def nextMap( self ): self.state = 0 name = self.maps[ self.i_map ] print 'Starting map: ' + name logfile = self.getLogfile( name ) print 'Logging to: ' + logfile if ( self.multiplayer ): cmdline = [ self.bin ] + self.cmdline + [ '+set', 'si_map', name ] if ( name != 'blankrun' ): cmdline.append( '+spawnServer' ) else: cmdline = [ self.bin ] + self.cmdline if ( name != 'blankrun' ): cmdline += [ '+devmap', name ] print 'Command line: ' + repr( cmdline ) self.deferred = defer.Deferred() self.deferred.addCallback( self.processEnded ) self.p_transport = reactor.spawnProcess( doomClientProtocol( logfile, self.deferred ), self.bin, cmdline , path = os.path.dirname( self.bin ), env = os.environ ) self.state = 1 # # setup the CPU usage loop # reactor.callLater( self.check_period, self.checkCPU ) def startService( self ): print 'doomService 
startService' loop = LoopingCall( self.checkCPU ) loop.start( self.check_period ) self.i_map = 0 self.nextMap() def stopService( self ): print 'doomService stopService' if ( not self.p_transport.pid is None ): self.p_transport.signalProcess( 'KILL' ) # serialize print 'saving status to %s' % self.pickle_file handle = open( self.pickle_file, 'w+' ) pickle.dump( self.status, handle ) handle.close()
gpl-3.0
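The SleepAVG probe that drives doomService.checkCPU() boils down to reading one line of /proc/<pid>/status; a twisted-free sketch of that idea. Note the SleepAVG field only exists on old Linux kernels (it was dropped in the 2.6.2x era), so treat this as historical:

def sleep_avg(pid):
    # Returns e.g. '98%', or None if the field is absent on this kernel.
    with open('/proc/%d/status' % pid) as f:
        for line in f:
            if line.startswith('SleepAVG'):
                return line.split(':')[1].strip()
    return None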
igel-kun/pyload
module/plugins/hooks/CloudFlareDdos.py
1
11909
# -*- coding: utf-8 -*- import inspect import re import urlparse from module.network.HTTPRequest import BadHeader from ..captcha.ReCaptcha import ReCaptcha from ..internal.Addon import Addon from ..internal.misc import parse_html_header def plugin_id(plugin): return ("<%(plugintype)s %(pluginname)s%(id)s>" % {'plugintype': plugin.__type__.upper(), 'pluginname': plugin.__name__, 'id': "[%s]" % plugin.pyfile.id if plugin.pyfile else ""}) def is_simple_plugin(obj): return any(k.__name__ in ("SimpleHoster", "SimpleCrypter") for k in inspect.getmro(type(obj))) def get_plugin_last_header(plugin): # @NOTE: req can be a HTTPRequest or a Browser object return plugin.req.http.header if hasattr(plugin.req, "http") else plugin.req.header class CloudFlare(object): @staticmethod def handle_function(addon_plugin, owner_plugin, func_name, orig_func, args): addon_plugin.log_debug("Calling %s() of %s" % (func_name, plugin_id(owner_plugin))) try: data = orig_func(*args[0], **args[1]) addon_plugin.log_debug("%s() returned successfully" % func_name) return data except BadHeader, e: addon_plugin.log_debug("%s(): got BadHeader exception %s" % (func_name, e.code)) header = parse_html_header(e.header) if "cloudflare" in header.get('server', ""): if e.code == 403: data = CloudFlare._solve_cf_security_check(addon_plugin, owner_plugin, e.content) elif e.code == 503: for _i in range(3): try: data = CloudFlare._solve_cf_ddos_challenge(addon_plugin, owner_plugin, e.content) break except BadHeader, e: #: Possibly we got another ddos challenge addon_plugin.log_debug("%s(): got BadHeader exception %s" % (func_name, e.code)) header = parse_html_header(e.header) if e.code == 503 and "cloudflare" in header.get('server', ""): continue #: Yes, it's a ddos challenge again.. else: data = None # Tell the exception handler to re-throw the exception break else: addon_plugin.log_error("%s(): Max solve retries reached" % func_name) data = None # Tell the exception handler to re-throw the exception else: addon_plugin.log_warning(_("Unknown CloudFlare response code %s") % e.code) raise if data is None: raise e else: return data else: raise @staticmethod def _solve_cf_ddos_challenge(addon_plugin, owner_plugin, data): try: addon_plugin.log_info(_("Detected CloudFlare's DDoS protection page")) # Cloudflare requires a delay before solving the challenge wait_time = (int(re.search('submit\(\);\r?\n\s*},\s*([0-9]+)', data).group(1)) + 999) / 1000 owner_plugin.set_wait(wait_time) last_url = owner_plugin.req.lastEffectiveURL urlp = urlparse.urlparse(last_url) domain = urlp.netloc submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (urlp.scheme, domain) get_params = {} try: get_params['jschl_vc'] = re.search(r'name="jschl_vc" value="(\w+)"', data).group(1) get_params['pass'] = re.search(r'name="pass" value="(.+?)"', data).group(1) get_params['s'] = re.search(r'name="s" value="(.+?)"', data).group(1) # Extract the arithmetic operation js = re.search(r'setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n', data).group(1) js = re.sub(r'a\.value = (.+\.toFixed\(10\);).+', r'\1', js) solution_name = re.search(r's,t,o,p,b,r,e,a,k,i,n,g,f,\s*(.+)\s*=', js).group(1) g = re.search(r'(.*};)\n\s*(t\s*=(.+))\n\s*(;%s.*)' % (solution_name), js, re.M | re.I | re.S).groups() js = g[0] + g[-1] js = re.sub(r"[\n\\']", "", js) except Exception: # Something is wrong with the page. # This may indicate CloudFlare has changed their anti-bot # technique. 
owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page")) return None # Tell the exception handler to re-throw the exception if "toFixed" not in js: owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page")) return None # Tell the exception handler to re-throw the exception atob = 'var atob = function(str) {return Buffer.from(str, "base64").toString("binary");}' try: k = re.search(r'k\s*=\s*\'(.+?)\';', data).group(1) v = re.search(r'<div(?:.*)id="%s"(?:.*)>(.*)</div>' % k, data).group(1) doc = 'var document= {getElementById: function(x) { return {innerHTML:"%s"};}}' % v except (AttributeError, IndexError): doc = '' js = '%s;%s;var t="%s";%s' % (doc, atob, domain, js) # Safely evaluate the Javascript expression res = owner_plugin.js.eval(js) try: get_params['jschl_answer'] = str(float(res)) except ValueError: owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page")) return None # Tell the exception handler to re-throw the exception owner_plugin.wait() # Do the actual wait return owner_plugin.load(submit_url, get=get_params, ref=last_url) except BadHeader, e: raise e #: Huston, we have a BadHeader! except Exception, e: addon_plugin.log_error(e) return None # Tell the exception handler to re-throw the exception @staticmethod def _solve_cf_security_check(addon_plugin, owner_plugin, data): try: last_url = owner_plugin.req.lastEffectiveURL captcha = ReCaptcha(owner_plugin.pyfile) captcha_key = captcha.detect_key(data) if captcha_key: addon_plugin.log_info(_("Detected CloudFlare's security check page")) response, challenge = captcha.challenge(captcha_key, data) return owner_plugin.load(owner_plugin.fixurl("/cdn-cgi/l/chk_captcha"), get={'g-recaptcha-response': response}, ref=last_url) else: addon_plugin.log_warning(_("Got unexpected CloudFlare html page")) return None # Tell the exception handler to re-throw the exception except Exception, e: addon_plugin.log_error(e) return None # Tell the exception handler to re-throw the exception class PreloadStub(object): def __init__(self, addon_plugin, owner_plugin): self.addon_plugin = addon_plugin self.owner_plugin = owner_plugin self.old_preload = owner_plugin._preload def my_preload(self, *args, **kwargs): data = CloudFlare.handle_function(self.addon_plugin, self.owner_plugin, "_preload", self.old_preload, (args, kwargs)) if data is not None: self.owner_plugin.data = data def __repr__(self): return "<PreloadStub object at %s>" % hex(id(self)) class CloudFlareDdos(Addon): __name__ = "CloudFlareDdos" __type__ = "hook" __version__ = "0.16" __status__ = "testing" __config__ = [("activated", "bool", "Activated", False)] __description__ = """CloudFlare DDoS protection support""" __license__ = "GPLv3" __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")] def activate(self): self.stubs = {} self._override_get_url() def deactivate(self): while len(self.stubs): stub = next(self.stubs.itervalues()) self._unoverride_preload(stub.owner_plugin) self._unoverride_get_url() def _unoverride_preload(self, plugin): if id(plugin) in self.stubs: self.log_debug("Unoverriding _preload() for %s" % plugin_id(plugin)) stub = self.stubs.pop(id(plugin)) stub.owner_plugin._preload = stub.old_preload else: self.log_warning(_("No _preload() override found for %s, cannot un-override>") % plugin_id(plugin)) def _override_preload(self, plugin): if id(plugin) not in self.stubs: stub = PreloadStub(self, plugin) self.stubs[id(plugin)] = stub self.log_debug("Overriding _preload() for %s" % plugin_id(plugin)) 
plugin._preload = stub.my_preload else: self.log_warning(_("Already overridden _preload() for %s") % plugin_id(plugin)) def _override_get_url(self): self.log_debug("Overriding get_url()") self.old_get_url = self.pyload.requestFactory.getURL self.pyload.requestFactory.getURL = self.my_get_url def _unoverride_get_url(self): self.log_debug("Unoverriding get_url()") self.pyload.requestFactory.getURL = self.old_get_url def _find_owner_plugin(self): """ Walk the callstack until we find a SimpleHoster or SimpleCrypter class. Dirty, but works. """ f = frame = inspect.currentframe() try: while True: if f is None: return None elif 'self' in f.f_locals and is_simple_plugin(f.f_locals['self']): return f.f_locals['self'] else: f = f.f_back finally: del frame def download_preparing(self, pyfile): #: Only SimpleHoster and SimpleCrypter based plugins are supported if not is_simple_plugin(pyfile.plugin): self.log_debug("Skipping plugin %s" % plugin_id(pyfile.plugin)) return attr = getattr(pyfile.plugin, "_preload", None) if not attr and not callable(attr): self.log_error(_("%s is missing _preload() function, cannot override!") % plugin_id(pyfile.plugin)) return self._override_preload(pyfile.plugin) def download_processed(self, pyfile): if id(pyfile.plugin) in self.stubs: self._unoverride_preload(pyfile.plugin) def my_get_url(self, *args, **kwargs): owner_plugin = self._find_owner_plugin() if owner_plugin is None: self.log_warning(_("Owner plugin not found, cannot process")) return self.old_get_url(*args, **kwargs) else: #@NOTE: Better use owner_plugin.load() instead of get_url() so cookies are saved and so captcha credits #@NOTE: Also that way we can use 'owner_plugin.req.header' to get the headers, otherwise we cannot get them res = CloudFlare.handle_function(self, owner_plugin, "get_url", owner_plugin.load, (args, kwargs)) if kwargs.get('just_header', False): # @NOTE: SimpleHoster/SimpleCrypter returns a dict while get_url() returns raw headers string, # make sure we return a string for get_url('just_header'=True) res = get_plugin_last_header(owner_plugin) return res
gpl-3.0
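The save/wrap/restore pattern PreloadStub applies to _preload(), reduced to its essentials with a hypothetical target object (no pyload types involved):

class Stub(object):
    def __init__(self, owner, attr):
        self.owner, self.attr = owner, attr
        self.original = getattr(owner, attr)     # keep the real method
        setattr(owner, attr, self.wrapper)       # splice in the wrapper
    def wrapper(self, *args, **kwargs):
        result = self.original(*args, **kwargs)  # extra handling goes here
        return result
    def restore(self):
        setattr(self.owner, self.attr, self.original)

# stub = Stub(obj, 'method'); obj.method(...) now routes through wrapper();
# stub.restore() puts the original bound method back.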
chrisndodge/edx-platform
lms/djangoapps/student_profile/test/test_views.py
113
3370
# -*- coding: utf-8 -*- """ Tests for student profile views. """ from django.conf import settings from django.core.urlresolvers import reverse from django.test import TestCase from django.test.client import RequestFactory from util.testing import UrlResetMixin from student.tests.factories import UserFactory from student_profile.views import learner_profile_context class LearnerProfileViewTest(UrlResetMixin, TestCase): """ Tests for the student profile view. """ USERNAME = "username" PASSWORD = "password" CONTEXT_DATA = [ 'default_public_account_fields', 'accounts_api_url', 'preferences_api_url', 'account_settings_page_url', 'has_preferences_access', 'own_profile', 'country_options', 'language_options', 'account_settings_data', 'preferences_data', ] def setUp(self): super(LearnerProfileViewTest, self).setUp() self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) self.client.login(username=self.USERNAME, password=self.PASSWORD) def test_context(self): """ Verify learner profile page context data. """ request = RequestFactory().get('/url') request.user = self.user context = learner_profile_context(request, self.USERNAME, self.user.is_staff) self.assertEqual( context['data']['default_public_account_fields'], settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'] ) self.assertEqual( context['data']['accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username}) ) self.assertEqual( context['data']['preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username}) ) self.assertEqual( context['data']['profile_image_upload_url'], reverse("profile_image_upload", kwargs={'username': self.user.username}) ) self.assertEqual( context['data']['profile_image_remove_url'], reverse('profile_image_remove', kwargs={'username': self.user.username}) ) self.assertEqual( context['data']['profile_image_max_bytes'], settings.PROFILE_IMAGE_MAX_BYTES ) self.assertEqual( context['data']['profile_image_min_bytes'], settings.PROFILE_IMAGE_MIN_BYTES ) self.assertEqual(context['data']['account_settings_page_url'], reverse('account_settings')) for attribute in self.CONTEXT_DATA: self.assertIn(attribute, context['data']) def test_view(self): """ Verify learner profile page view. """ profile_path = reverse('learner_profile', kwargs={'username': self.USERNAME}) response = self.client.get(path=profile_path) for attribute in self.CONTEXT_DATA: self.assertIn(attribute, response.content) def test_undefined_profile_page(self): """ Verify that a 404 is returned for a non-existent profile page. """ profile_path = reverse('learner_profile', kwargs={'username': "no_such_user"}) response = self.client.get(path=profile_path) self.assertEqual(404, response.status_code)
agpl-3.0
ProfessorX/Config
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KShortcutWidget.py
1
1269
# encoding: utf-8 # module PyKDE4.kdeui # from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so # by generator 1.135 # no doc # imports import PyKDE4.kdecore as __PyKDE4_kdecore import PyQt4.QtCore as __PyQt4_QtCore import PyQt4.QtGui as __PyQt4_QtGui import PyQt4.QtSvg as __PyQt4_QtSvg class KShortcutWidget(__PyQt4_QtGui.QWidget): # no doc def applyStealShortcut(self, *args, **kwargs): # real signature unknown pass def clearShortcut(self, *args, **kwargs): # real signature unknown pass def isModifierlessAllowed(self, *args, **kwargs): # real signature unknown pass def setCheckActionCollections(self, *args, **kwargs): # real signature unknown pass def setCheckActionList(self, *args, **kwargs): # real signature unknown pass def setClearButtonsShown(self, *args, **kwargs): # real signature unknown pass def setModifierlessAllowed(self, *args, **kwargs): # real signature unknown pass def setShortcut(self, *args, **kwargs): # real signature unknown pass def shortcutChanged(self, *args, **kwargs): # real signature unknown pass def __init__(self, *args, **kwargs): # real signature unknown pass
gpl-2.0
Stavitsky/nova
nova/tests/unit/scheduler/test_scheduler_utils.py
10
15657
# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Utils """ import contextlib import uuid import mock from mox3 import mox from oslo_config import cfg from nova.compute import flavors from nova.compute import utils as compute_utils from nova import db from nova import exception from nova import objects from nova import rpc from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_flavor CONF = cfg.CONF class SchedulerUtilsTestCase(test.NoDBTestCase): """Test case for scheduler utils methods.""" def setUp(self): super(SchedulerUtilsTestCase, self).setUp() self.context = 'fake-context' @mock.patch('nova.objects.Flavor.get_by_flavor_id') def test_build_request_spec_without_image(self, mock_get): image = None instance = {'uuid': 'fake-uuid'} instance_type = objects.Flavor(**test_flavor.fake_flavor) mock_get.return_value = objects.Flavor(extra_specs={}) self.mox.StubOutWithMock(flavors, 'extract_flavor') flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type) self.mox.ReplayAll() request_spec = scheduler_utils.build_request_spec(self.context, image, [instance]) self.assertEqual({}, request_spec['image']) def test_build_request_spec_with_object(self): instance_type = objects.Flavor() instance = fake_instance.fake_instance_obj(self.context) with mock.patch.object(instance, 'get_flavor') as mock_get: mock_get.return_value = instance_type request_spec = scheduler_utils.build_request_spec(self.context, None, [instance]) mock_get.assert_called_once_with() self.assertIsInstance(request_spec['instance_properties'], dict) @mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock()) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(objects.Instance, 'save') def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get): expected_uuid = 'fake-uuid' request_spec = dict(instance_properties=dict(uuid='other-uuid')) updates = dict(vm_state='fake-vm-state') service = 'fake-service' method = 'fake-method' exc_info = 'exc_info' payload = dict(request_spec=request_spec, instance_properties=request_spec.get( 'instance_properties', {}), instance_id=expected_uuid, state='fake-vm-state', method=method, reason=exc_info) event_type = '%s.%s' % (service, method) scheduler_utils.set_vm_state_and_notify(self.context, expected_uuid, service, method, updates, exc_info, request_spec, db) mock_save.assert_called_once_with() mock_add.assert_called_once_with(self.context, mock.ANY, exc_info, mock.ANY) self.assertIsInstance(mock_add.call_args[0][1], objects.Instance) self.assertIsInstance(mock_add.call_args[0][3], tuple) mock_get.return_value.error.assert_called_once_with(self.context, event_type, payload) def _test_populate_filter_props(self, host_state_obj=True, with_retry=True, force_hosts=None, force_nodes=None): if force_hosts is None: force_hosts = [] if force_nodes is None: force_nodes = [] if 
with_retry: if ((len(force_hosts) == 1 and len(force_nodes) <= 1) or (len(force_nodes) == 1 and len(force_hosts) <= 1)): filter_properties = dict(force_hosts=force_hosts, force_nodes=force_nodes) elif len(force_hosts) > 1 or len(force_nodes) > 1: filter_properties = dict(retry=dict(hosts=[]), force_hosts=force_hosts, force_nodes=force_nodes) else: filter_properties = dict(retry=dict(hosts=[])) else: filter_properties = dict() if host_state_obj: class host_state(object): host = 'fake-host' nodename = 'fake-node' limits = 'fake-limits' else: host_state = dict(host='fake-host', nodename='fake-node', limits='fake-limits') scheduler_utils.populate_filter_properties(filter_properties, host_state) enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1 enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1 if with_retry or enable_retry_force_hosts or enable_retry_force_nodes: # So we can check for 2 hosts scheduler_utils.populate_filter_properties(filter_properties, host_state) if force_hosts: expected_limits = None else: expected_limits = 'fake-limits' self.assertEqual(expected_limits, filter_properties.get('limits')) if (with_retry and enable_retry_force_hosts and enable_retry_force_nodes): self.assertEqual([['fake-host', 'fake-node'], ['fake-host', 'fake-node']], filter_properties['retry']['hosts']) else: self.assertNotIn('retry', filter_properties) def test_populate_filter_props(self): self._test_populate_filter_props() def test_populate_filter_props_host_dict(self): self._test_populate_filter_props(host_state_obj=False) def test_populate_filter_props_no_retry(self): self._test_populate_filter_props(with_retry=False) def test_populate_filter_props_force_hosts_no_retry(self): self._test_populate_filter_props(force_hosts=['force-host']) def test_populate_filter_props_force_nodes_no_retry(self): self._test_populate_filter_props(force_nodes=['force-node']) def test_populate_filter_props_multi_force_hosts_with_retry(self): self._test_populate_filter_props(force_hosts=['force-host1', 'force-host2']) def test_populate_filter_props_multi_force_nodes_with_retry(self): self._test_populate_filter_props(force_nodes=['force-node1', 'force-node2']) @mock.patch.object(scheduler_utils, '_max_attempts') def test_populate_retry_exception_at_max_attempts(self, _max_attempts): _max_attempts.return_value = 2 msg = 'The exception text was preserved!' 
filter_properties = dict(retry=dict(num_attempts=2, hosts=[], exc=[msg])) nvh = self.assertRaises(exception.NoValidHost, scheduler_utils.populate_retry, filter_properties, 'fake-uuid') # make sure 'msg' is a substring of the complete exception text self.assertIn(msg, nvh.message) def _check_parse_options(self, opts, sep, converter, expected): good = scheduler_utils.parse_options(opts, sep=sep, converter=converter) for item in expected: self.assertIn(item, good) def test_parse_options(self): # check normal self._check_parse_options(['foo=1', 'bar=-2.1'], '=', float, [('foo', 1.0), ('bar', -2.1)]) # check convert error self._check_parse_options(['foo=a1', 'bar=-2.1'], '=', float, [('bar', -2.1)]) # check separator missing self._check_parse_options(['foo', 'bar=-2.1'], '=', float, [('bar', -2.1)]) # check key missing self._check_parse_options(['=5', 'bar=-2.1'], '=', float, [('bar', -2.1)]) def test_validate_filters_configured(self): self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2') self.assertTrue(scheduler_utils.validate_filter('FakeFilter1')) self.assertTrue(scheduler_utils.validate_filter('FakeFilter2')) self.assertFalse(scheduler_utils.validate_filter('FakeFilter3')) def _create_server_group(self, policy='anti-affinity'): instance = fake_instance.fake_instance_obj(self.context, params={'host': 'hostA'}) group = objects.InstanceGroup() group.name = 'pele' group.uuid = str(uuid.uuid4()) group.members = [instance.uuid] group.policies = [policy] return group def _get_group_details(self, group, policy=None): group_hosts = ['hostB'] with contextlib.nested( mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid', return_value=group), mock.patch.object(objects.InstanceGroup, 'get_hosts', return_value=['hostA']), ) as (get_group, get_hosts): scheduler_utils._SUPPORTS_ANTI_AFFINITY = None scheduler_utils._SUPPORTS_AFFINITY = None group_info = scheduler_utils._get_group_details( self.context, 'fake_uuid', group_hosts) self.assertEqual( (set(['hostA', 'hostB']), [policy]), group_info) def test_get_group_details(self): for policy in ['affinity', 'anti-affinity']: group = self._create_server_group(policy) self._get_group_details(group, policy=policy) def test_get_group_details_with_no_affinity_filters(self): self.flags(scheduler_default_filters=['fake']) scheduler_utils._SUPPORTS_ANTI_AFFINITY = None scheduler_utils._SUPPORTS_AFFINITY = None group_info = scheduler_utils._get_group_details(self.context, 'fake-uuid') self.assertIsNone(group_info) def test_get_group_details_with_no_instance_uuid(self): self.flags(scheduler_default_filters=['fake']) scheduler_utils._SUPPORTS_ANTI_AFFINITY = None scheduler_utils._SUPPORTS_AFFINITY = None group_info = scheduler_utils._get_group_details(self.context, None) self.assertIsNone(group_info) def _get_group_details_with_filter_not_configured(self, policy): wrong_filter = { 'affinity': 'ServerGroupAntiAffinityFilter', 'anti-affinity': 'ServerGroupAffinityFilter', } self.flags(scheduler_default_filters=[wrong_filter[policy]]) instance = fake_instance.fake_instance_obj(self.context, params={'host': 'hostA'}) group = objects.InstanceGroup() group.uuid = str(uuid.uuid4()) group.members = [instance.uuid] group.policies = [policy] with contextlib.nested( mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid', return_value=group), mock.patch.object(objects.InstanceGroup, 'get_hosts', return_value=['hostA']), ) as (get_group, get_hosts): scheduler_utils._SUPPORTS_ANTI_AFFINITY = None scheduler_utils._SUPPORTS_AFFINITY = None 
self.assertRaises(exception.UnsupportedPolicyException, scheduler_utils._get_group_details, self.context, 'fake-uuid') def test_get_group_details_with_filter_not_configured(self): policies = ['anti-affinity', 'affinity'] for policy in policies: self._get_group_details_with_filter_not_configured(policy) @mock.patch.object(scheduler_utils, '_get_group_details') def test_setup_instance_group_in_filter_properties(self, mock_ggd): mock_ggd.return_value = scheduler_utils.GroupDetails( hosts=set(['hostA', 'hostB']), policies=['policy']) spec = {'instance_properties': {'uuid': 'fake-uuid'}} filter_props = {'group_hosts': ['hostC']} scheduler_utils.setup_instance_group(self.context, spec, filter_props) mock_ggd.assert_called_once_with(self.context, 'fake-uuid', ['hostC']) expected_filter_props = {'group_updated': True, 'group_hosts': set(['hostA', 'hostB']), 'group_policies': ['policy']} self.assertEqual(expected_filter_props, filter_props) @mock.patch.object(scheduler_utils, '_get_group_details') def test_setup_instance_group_with_no_group(self, mock_ggd): mock_ggd.return_value = None spec = {'instance_properties': {'uuid': 'fake-uuid'}} filter_props = {'group_hosts': ['hostC']} scheduler_utils.setup_instance_group(self.context, spec, filter_props) mock_ggd.assert_called_once_with(self.context, 'fake-uuid', ['hostC']) self.assertNotIn('group_updated', filter_props) self.assertNotIn('group_policies', filter_props) self.assertEqual(['hostC'], filter_props['group_hosts']) @mock.patch.object(scheduler_utils, '_get_group_details') def test_setup_instance_group_with_filter_not_configured(self, mock_ggd): mock_ggd.side_effect = exception.NoValidHost(reason='whatever') spec = {'instance_properties': {'uuid': 'fake-uuid'}} filter_props = {'group_hosts': ['hostC']} self.assertRaises(exception.NoValidHost, scheduler_utils.setup_instance_group, self.context, spec, filter_props)
apache-2.0
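For reference, the behaviour _check_parse_options() exercises can be reproduced in a few lines; this sketch mirrors, but is not, nova's scheduler_utils.parse_options. Malformed items ('foo' with no separator, '=5' with no key, 'foo=a1' failing conversion) are simply dropped:

def parse_options(opts, sep='=', converter=float):
    good = []
    for opt in opts:
        if sep not in opt:
            continue                      # separator missing
        key, _, value = opt.partition(sep)
        if not key.strip():
            continue                      # key missing
        try:
            good.append((key.strip(), converter(value)))
        except ValueError:
            continue                      # conversion failed
    return good

# parse_options(['foo=1', 'bar=-2.1', 'baz=a1']) -> [('foo', 1.0), ('bar', -2.1)]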
einaru/luma
luma/plugins/browser/AddAttributeWizard.py
3
8525
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011
#     Per Ove Ringdal
#
# Copyright (C) 2004
#     Wido Depping, <widod@users.sourceforge.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses/

import os.path
import copy

import PyQt4
from PyQt4.QtCore import QString, pyqtSlot
from PyQt4.QtGui import QWizard

from .gui.AddAttributeWizardDesign import Ui_AddAttributeWizardDesign
from base.backend.ObjectClassAttributeInfo import ObjectClassAttributeInfo
from base.util.IconTheme import pixmapFromTheme


class AddAttributeWizard(QWizard, Ui_AddAttributeWizardDesign):

    def __init__(self, parent=None, flags=PyQt4.QtCore.Qt.Widget):
        QWizard.__init__(self, parent, flags)
        self.setupUi(self)

        # need to initialize the pages before connecting signals
        self.restart()

        attributePixmap = pixmapFromTheme(
            "addattribute", ":/icons/64/add-attribute")
        objectclassPixmap = pixmapFromTheme(
            "objectclass", ":/icons/64/objectclass")

        self.imageLabel.setPixmap(attributePixmap)
        self.objectclassLabel.setPixmap(objectclassPixmap)

        self.enableAllBox.toggled.connect(self.initAttributeBox)
        self.attributeBox.activated[str].connect(self.newSelection)
        self.classBox.itemSelectionChanged.connect(self.classSelection)

        # attribute values of the current ldap object
        self.OBJECTVALUES = None
        # schema information for the ldap server
        self.SCHEMAINFO = None
        # set of attributes which are possible with the current objectclasses
        self.possibleAttributes = None
        # set of all attributes which are supported by the server
        self.allPossibleAttributes = None

###############################################################################

    def setData(self, smartObject):
        """Sets the current object data and schema information, and
        initializes the attribute box and wizard buttons.
        """
        self.smartObject = smartObject
        self.SCHEMAINFO = ObjectClassAttributeInfo(
            self.smartObject.getServerMeta())

        self.processData()
        self.initAttributeBox()

        currentPageWidget = self.page(0)
        #self.button(QWizard.FinishButton).setDisabled(False)
        #self.button(QWizard.NextButton).setDisabled(True)

###############################################################################

    def processData(self):
        """Compute all attributes which can be added according to the data
        of the object. Single-valued attributes which are already given are
        sorted out.
        """
        possibleMust, possibleMay = self.smartObject.getPossibleAttributes()

        # attributes used by the current objectClass
        #usedAttributes = set(objectAttributes).difference(set(['objectClass']))
        usedAttributes = self.smartObject.getAttributeList()

        # set of attributes which are used and have to be single
        singleAttributes = set(filter(self.SCHEMAINFO.isSingle, usedAttributes))

        # create a set of attributes which may be added
        self.possibleAttributes = (possibleMust.union(possibleMay)).difference(singleAttributes)
        self.possibleAttributes = map(lambda x: x.lower(), self.possibleAttributes)

        # create a set of attributes which are supported by the server
        self.allPossibleAttributes = set(self.SCHEMAINFO.attributeDict.keys()).difference(singleAttributes)

###############################################################################

    def initAttributeBox(self):
        self.attributeBox.clear()

        currentPageWidget = self.currentPage()
        showAll = self.enableAllBox.isChecked()

        currentPageWidget.setFinalPage(True)
        currentPageWidget.setCommitPage(False)
        #self.button(QWizard.FinishButton).setDisabled(False)

        if showAll:
            tmpList = copy.deepcopy(self.allPossibleAttributes)
        else:
            tmpList = copy.deepcopy(self.possibleAttributes)

        structuralClass = self.smartObject.getStructuralClasses()
        # only show attributes whose objectclass combinations don't violate
        # the objectclass chain (not two structural classes)
        if len(structuralClass) > 0:
            classList = filter(lambda x: not self.SCHEMAINFO.isStructural(x),
                               self.SCHEMAINFO.getObjectClasses())
            for x in structuralClass:
                classList += self.SCHEMAINFO.getParents(x)
            for x in self.smartObject.getObjectClasses():
                if not (x in classList):
                    classList.append(x)
            mustAttributes, mayAttributes = self.SCHEMAINFO.getAllAttributes(classList)
            attributeList = mustAttributes.union(mayAttributes)
            tmpList = filter(lambda x: x.lower() in tmpList, attributeList)
        else:
            self.enableAllBox.setChecked(True)
            self.enableAllBox.setEnabled(False)
            tmpList = list(self.allPossibleAttributes)

        tmpList.sort()
        tmpList = filter(lambda x: not (x.lower() == "objectclass"), tmpList)
        map(self.attributeBox.addItem, tmpList)

        self.newSelection(self.attributeBox.currentText())

###############################################################################

    @pyqtSlot("QString")
    def newSelection(self, attribute):
        attribute = str(attribute).lower()
        currentPageWidget = self.currentPage()

        mustSet, maySet = self.SCHEMAINFO.getAllObjectclassesForAttr(attribute)
        tmpSet = mustSet.union(maySet)

        if (attribute in self.possibleAttributes) or (len(tmpSet) == 0):
            currentPageWidget.setFinalPage(True)
            #self.button(QWizard.FinishButton).setDisabled(False)
            self.button(QWizard.NextButton).setDisabled(True)
        else:
            currentPageWidget.setFinalPage(False)
            #self.button(QWizard.FinishButton).setDisabled(True)
            self.button(QWizard.NextButton).setDisabled(False)

###############################################################################

    def initClassPage(self):
        currentPageWidget = self.currentPage()
        #self.button(QWizard.FinishButton).setDisabled(True)

        self.classBox.clear()
        self.mustAttributeBox.clear()

        attribute = str(self.attributeBox.currentText())
        mustSet, maySet = self.SCHEMAINFO.getAllObjectclassesForAttr(attribute)
        classList = mustSet.union(maySet)

        if self.smartObject.hasStructuralClass():
            structList = filter(lambda x: self.SCHEMAINFO.isStructural(x), classList)
            classList = filter(lambda x: not self.SCHEMAINFO.isStructural(x), classList)
            for x in structList:
                for y in self.smartObject.getObjectClasses():
                    if self.SCHEMAINFO.sameObjectClassChain(x, y):
                        classList.append(x)
        else:
            classList = list(classList)

        classList.sort()
        map(self.classBox.addItem, classList)
        self.classBox.setCurrentRow(0)

###############################################################################

    def classSelection(self):
        self.mustAttributeBox.clear()

        objectclass = str(self.classBox.currentItem().text())
        mustAttributes = self.SCHEMAINFO.getAllMusts([objectclass])
        attribute = set([str(self.attributeBox.currentText())])
        map(self.mustAttributeBox.addItem, mustAttributes.difference(attribute))

        currentPageWidget = self.currentPage()
        #self.button(QWizard.FinishButton).setDisabled(False)

###############################################################################

    def initializePage(self, id):
        if id == 1:
            self.initClassPage()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
gpl-2.0
SyndicateLtd/SyndicateQT
test/functional/wallet_zapwallettxes.py
3
2839
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.

- start two bitcoind nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
  transactions are still available.
- restart node 0 with zapwallettxes and verify that the confirmed
  transactions are still available, but that the unconfirmed transaction has
  been zapped.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    assert_raises_rpc_error,
)


class ZapWalletTXesTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 250)

        # This transaction will be confirmed.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)

        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # This transaction will not be confirmed.
        txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)

        # Confirmed and unconfirmed transactions are now in the wallet.
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)

        # Stop-start node0. Both confirmed and unconfirmed transactions remain
        # in the wallet.
        self.stop_node(0)
        self.start_node(0)

        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)

        # Stop node0 and restart with zapwallettxes. The unconfirmed
        # transaction is zapped from the wallet.
        self.stop_node(0)
        self.start_node(0, ["-zapwallettxes=2"])

        # tx1 is still available because it was confirmed.
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)

        # This will raise an exception because the unconfirmed transaction
        # has been zapped.
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id',
                                self.nodes[0].gettransaction, txid2)


if __name__ == '__main__':
    ZapWalletTXesTest().main()
mit
ema/conpaas
conpaas-services/src/conpaas/services/htc/manager/get_run_time.py
2
6391
#import os
import sys
import time

import xmltodict
import pprint

pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)

testing = False


# def poll_condor(jobnr, bagnr):
def poll_condor(filename):
    # filename = "hist-%d-%d.xml" % (jobnr, bagnr)
    # command = "condor_history -constraint 'HtcJob == %d && HtcBag == %d' -xml > %s" % (jobnr, bagnr, filename)
    # os.system(command)
    tries = 0
    poll_dict = {}
    while tries < 4:
        tries += 1
        _trystr = "Try %d (%s) :" % (tries, filename)
        xml = open(filename).read()
        xmldict = xmltodict.parse(xml)
        print >> sys.stderr, "type(xmldict) = ", type(xmldict)
        if not (type(xmldict) == dict and xmldict.has_key('classads')):
            print >> sys.stderr, _trystr, "No classads, wait a little until the first results come in"
            time.sleep(2)
            continue
        print >> sys.stderr, "type(xmldict['classads']) = ", type(xmldict['classads'])
        if not (type(xmldict['classads']) == dict and xmldict['classads'].has_key('c')):
            print >> sys.stderr, _trystr, "No classads <c> entries, wait a little until the first results come in"
            time.sleep(2)
            continue
        print >> sys.stderr, "type(xmldict['classads']['c']) = ", type(xmldict['classads']['c'])
        if not (type(xmldict['classads']['c']) == list and xmldict['classads']['c'][0].has_key('a')):
            print >> sys.stderr, _trystr, "No classads attributes, wait a little until the first results come in"
            time.sleep(2)
            continue
        poll_dict = get_poll_dict(xmldict)
        break

    # if poll_dict['CompletedTasks'] == poll_dict['TotalTask']:
    #pp.pprint(xmldict)
    return poll_dict


def get_poll_dict(xmldict):
    if testing:
        print >> sys.stderr, "selecting info from file %s, job %s, bag %s" % (filename, jobnr, bagnr)
    res_dict = {}
    for c in xmldict['classads']['c']:
        tempdict = {}
        attrs = c['a']
        for d in attrs:
            v = None
            # get rid of unicode from xmltodict
            k = d['@n'].encode('ascii', 'ignore')
            # handle float
            if d.has_key('r'):
                v = float(d['r'].encode('ascii', 'ignore'))
            # handle int
            if d.has_key('i'):
                v = int(d['i'].encode('ascii', 'ignore'))
            # handle string
            if d.has_key('s'):
                if d['s'] == None:
                    v = 'None'
                else:
                    v = d['s'].encode('ascii', 'ignore')
            # handle boolean
            if d.has_key('b'):
                v = 'True' if d['b']['@v'] == 't' else 'False'
            # handle expression
            if d.has_key('e'):
                v = d['e'].encode('ascii', 'ignore')
            if v != None:
                tempdict[k] = v
            else:
                print "unknown datatype in "
                pp.pprint(d)

        attrdict = {}
        for k in ['HtcJob', 'HtcBag', 'HtcTask', 'RemoteWallClockTime', 'Cmd',
                  'MATCH_EXP_MachineCloudMachineType']:
            if tempdict.has_key(k):
                attrdict[k] = tempdict[k]

        # cur_jobnr = "%(HtcJob)s" % tempdict
        # if not (jobnr == None or jobnr == cur_jobnr):
        #     continue
        # cur_bagnr = "%(HtcBag)s" % tempdict
        # if not (bagnr == None or bagnr == cur_bagnr):
        #     continue
        taskid = "%(HtcJob)s.%(HtcBag)s.%(HtcTask)s" % tempdict
        if res_dict.has_key(taskid):
            res_dict[taskid].append(attrdict)
        else:
            res_dict[taskid] = [attrdict]

    if testing:
        print >> sys.stderr, "====== res_dict ======"
        pp.pprint(res_dict)
        print >> sys.stderr, "------ res_dict ------"
    return res_dict


"""
{
    'tasks': {
        taskid: [
            {attr1: val1, attrn: valn},
            {attr1: val1, attrn: valn}
        ]
    }
}
"""


def do_test(filename):
    poll_dict = poll_condor(filename)
    completed_tasks = 0
    for _ in poll_dict.keys():
        completed_tasks += len(poll_dict[_])
    completed_task_sets = poll_dict.keys().__len__()
    print >> sys.stderr, "Found %d completed tasks in %d sets" % (completed_tasks, completed_task_sets)
    if False:
        pp.pprint(poll_dict)


if __name__ == "__main__":
    pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
    testing = True
    usage = "usage : %s ClassAd_XML_file [ jobnr [ bagnr ] ]" % sys.argv[0]
    argc = len(sys.argv)
    jobnr = None
    bagnr = None
    print "%d args" % argc
    if argc <= 1:
        print usage
        filename = "test3.xml"
    if argc >= 2:
        filename = sys.argv[1]
        print "file = %s" % filename
    if argc >= 3:
        jobnr = sys.argv[2]
        print "job = %s" % jobnr
    if argc >= 4:
        bagnr = sys.argv[3]
        print "bag = %s" % bagnr
    for _ in ["test1.xml", "test2.xml", "test3.xml", "test4.xml"]:
        do_test(_)
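To make the type checks in poll_condor concrete, here is a hypothetical two-classad snippet and the shapes xmltodict produces from it (the attribute values are invented for the example):

# Sketch of the condor_history -xml structure this parser expects. xmltodict
# maps child tags to dict keys and XML attributes to '@'-prefixed keys, and
# only yields a list for <c> when several are present (hence the type checks
# above).
import xmltodict

sample = """
<classads>
  <c><a n="HtcJob"><i>1</i></a><a n="HtcBag"><i>2</i></a></c>
  <c><a n="HtcJob"><i>1</i></a><a n="HtcBag"><i>3</i></a></c>
</classads>
"""

doc = xmltodict.parse(sample)
cads = doc['classads']['c']        # a list because there are two <c> elements
attrs = cads[0]['a']
assert attrs[0]['@n'] == 'HtcJob'  # '@n' carries the n="..." XML attribute
assert attrs[0]['i'] == '1'        # scalar values come back as strings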
bsd-3-clause
leiferikb/bitpop
depot_tools/third_party/boto/mashups/interactive.py
119
2737
# Copyright (C) 2003-2007  Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.

import socket
import sys

# windows does not have termios...
try:
    import termios
    import tty
    has_termios = True
except ImportError:
    has_termios = False


def interactive_shell(chan):
    if has_termios:
        posix_shell(chan)
    else:
        windows_shell(chan)


def posix_shell(chan):
    import select

    oldtty = termios.tcgetattr(sys.stdin)
    try:
        tty.setraw(sys.stdin.fileno())
        tty.setcbreak(sys.stdin.fileno())
        chan.settimeout(0.0)

        while True:
            r, w, e = select.select([chan, sys.stdin], [], [])
            if chan in r:
                try:
                    x = chan.recv(1024)
                    if len(x) == 0:
                        print '\r\n*** EOF\r\n',
                        break
                    sys.stdout.write(x)
                    sys.stdout.flush()
                except socket.timeout:
                    pass
            if sys.stdin in r:
                x = sys.stdin.read(1)
                if len(x) == 0:
                    break
                chan.send(x)
    finally:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)


# thanks to Mike Looijmans for this code
def windows_shell(chan):
    import threading

    sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")

    def writeall(sock):
        while True:
            data = sock.recv(256)
            if not data:
                sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
                sys.stdout.flush()
                break
            sys.stdout.write(data)
            sys.stdout.flush()

    writer = threading.Thread(target=writeall, args=(chan,))
    writer.start()

    try:
        while True:
            d = sys.stdin.read(1)
            if not d:
                break
            chan.send(d)
    except EOFError:
        # user hit ^Z or F6
        pass
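A minimal usage sketch for this helper, assuming a reachable SSH host and valid credentials (the host name, user, and password below are placeholders, and the import path assumes this module is importable as `interactive`):

# Hypothetical driver: open an SSH session with paramiko and hand the channel
# to interactive_shell(). Host and credentials are placeholders.
import paramiko
from interactive import interactive_shell

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('example.com', username='user', password='secret')

chan = client.invoke_shell()   # PTY-backed channel, as posix_shell expects
interactive_shell(chan)
client.close()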
gpl-3.0
eeshangarg/zulip
zilencer/management/commands/add_new_realm.py
6
1137
from typing import Any

from zerver.lib.actions import bulk_add_subscriptions, do_create_realm, do_create_user
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.onboarding import send_initial_realm_messages
from zerver.models import Realm, UserProfile


class Command(ZulipBaseCommand):
    help = """Add a new realm and initial user for manual testing of the onboarding process."""

    def handle(self, **options: Any) -> None:
        string_id = "realm{:02}".format(
            Realm.objects.filter(string_id__startswith="realm").count())
        realm = do_create_realm(string_id, string_id)

        name = "{:02}-user".format(
            UserProfile.objects.filter(email__contains="user@").count())
        user = do_create_user(
            f"{name}@{string_id}.zulip.com",
            "password",
            realm,
            name,
            role=UserProfile.ROLE_REALM_ADMINISTRATOR,
            acting_user=None,
        )
        assert realm.signup_notifications_stream is not None
        bulk_add_subscriptions(
            realm, [realm.signup_notifications_stream], [user], acting_user=None)

        send_initial_realm_messages(realm)
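A small sketch of the naming scheme handle() produces; the count below is an illustrative stand-in for the two queryset counts (in the real command the user count comes from UserProfile, not Realm, so the two numbers can differ):

# Sketch: the Nth invocation (zero-based) yields "realmNN" and "NN-user".
existing = 3                                       # placeholder count
string_id = "realm{:02}".format(existing)          # 'realm03'
name = "{:02}-user".format(existing)               # '03-user'
email = f"{name}@{string_id}.zulip.com"
assert email == "03-user@realm03.zulip.com"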
apache-2.0
jbzdak/edx-platform
lms/djangoapps/shoppingcart/processors/tests/test_CyberSource2.py
164
18446
# -*- coding: utf-8 -*- """ Tests for the newer CyberSource API implementation. """ from mock import patch from django.test import TestCase from django.conf import settings import ddt from student.tests.factories import UserFactory from shoppingcart.models import Order, OrderItem from shoppingcart.processors.CyberSource2 import ( processor_hash, process_postpay_callback, render_purchase_form_html, get_signed_purchase_params, _get_processor_exception_html ) from shoppingcart.processors.exceptions import ( CCProcessorSignatureException, CCProcessorDataException, CCProcessorWrongAmountException ) @ddt.ddt class CyberSource2Test(TestCase): """ Test the CyberSource API implementation. As much as possible, this test case should use ONLY the public processor interface (defined in shoppingcart.processors.__init__.py). Some of the tests in this suite rely on Django settings to be configured a certain way. """ COST = "10.00" CALLBACK_URL = "/test_callback_url" FAILED_DECISIONS = ["DECLINE", "CANCEL", "ERROR"] def setUp(self): """ Create a user and an order. """ super(CyberSource2Test, self).setUp() self.user = UserFactory() self.order = Order.get_cart_for_user(self.user) self.order_item = OrderItem.objects.create( order=self.order, user=self.user, unit_cost=self.COST, line_cost=self.COST ) def assert_dump_recorded(self, order): """ Verify that this order does have a dump of information from the payment processor. """ self.assertNotEqual(order.processor_reply_dump, '') def test_render_purchase_form_html(self): # Verify that the HTML form renders with the payment URL specified # in the test settings. # This does NOT test that all the form parameters are correct; # we verify that by testing `get_signed_purchase_params()` directly. html = render_purchase_form_html(self.order, callback_url=self.CALLBACK_URL) self.assertIn('<form action="/shoppingcart/payment_fake" method="post">', html) self.assertIn('transaction_uuid', html) self.assertIn('signature', html) self.assertIn(self.CALLBACK_URL, html) def test_get_signed_purchase_params(self): params = get_signed_purchase_params(self.order, callback_url=self.CALLBACK_URL) # Check the callback URL override self.assertEqual(params['override_custom_receipt_page'], self.CALLBACK_URL) # Parameters determined by the order model self.assertEqual(params['amount'], '10.00') self.assertEqual(params['currency'], 'usd') self.assertEqual(params['orderNumber'], 'OrderId: {order_id}'.format(order_id=self.order.id)) self.assertEqual(params['reference_number'], self.order.id) # Parameters determined by the Django (test) settings self.assertEqual(params['access_key'], '0123456789012345678901') self.assertEqual(params['profile_id'], 'edx') # Some fields will change depending on when the test runs, # so we just check that they're set to a non-empty string self.assertGreater(len(params['signed_date_time']), 0) self.assertGreater(len(params['transaction_uuid']), 0) # Constant parameters self.assertEqual(params['transaction_type'], 'sale') self.assertEqual(params['locale'], 'en') self.assertEqual(params['payment_method'], 'card') self.assertEqual( params['signed_field_names'], ",".join([ 'amount', 'currency', 'orderNumber', 'access_key', 'profile_id', 'reference_number', 'transaction_type', 'locale', 'signed_date_time', 'signed_field_names', 'unsigned_field_names', 'transaction_uuid', 'payment_method', 'override_custom_receipt_page', 'override_custom_cancel_page', ]) ) self.assertEqual(params['unsigned_field_names'], '') # Check the signature self.assertEqual(params['signature'], 
self._signature(params)) # We patch the purchased callback because # we're using the OrderItem base class, which throws an exception # when item doest not have a course id associated @patch.object(OrderItem, 'purchased_callback') def test_process_payment_raises_exception(self, purchased_callback): # pylint: disable=unused-argument self.order.clear() OrderItem.objects.create( order=self.order, user=self.user, unit_cost=self.COST, line_cost=self.COST, ) params = self._signed_callback_params(self.order.id, self.COST, self.COST) process_postpay_callback(params) # We patch the purchased callback because # (a) we're using the OrderItem base class, which doesn't implement this method, and # (b) we want to verify that the method gets called on success. @patch.object(OrderItem, 'purchased_callback') @patch.object(OrderItem, 'pdf_receipt_display_name') def test_process_payment_success(self, pdf_receipt_display_name, purchased_callback): # pylint: disable=unused-argument # Simulate a callback from CyberSource indicating that payment was successful params = self._signed_callback_params(self.order.id, self.COST, self.COST) result = process_postpay_callback(params) # Expect that we processed the payment successfully self.assertTrue( result['success'], msg="Payment was not successful: {error}".format(error=result.get('error_html')) ) self.assertEqual(result['error_html'], '') # Expect that the item's purchased callback was invoked purchased_callback.assert_called_with() # Expect that the order has been marked as purchased self.assertEqual(result['order'].status, 'purchased') self.assert_dump_recorded(result['order']) def test_process_payment_rejected(self): # Simulate a callback from CyberSource indicating that the payment was rejected params = self._signed_callback_params(self.order.id, self.COST, self.COST, decision='REJECT') result = process_postpay_callback(params) # Expect that we get an error message self.assertFalse(result['success']) self.assertIn(u"did not accept your payment", result['error_html']) self.assert_dump_recorded(result['order']) def test_process_payment_invalid_signature(self): # Simulate a callback from CyberSource indicating that the payment was rejected params = self._signed_callback_params(self.order.id, self.COST, self.COST, signature="invalid!") result = process_postpay_callback(params) # Expect that we get an error message self.assertFalse(result['success']) self.assertIn(u"corrupted message regarding your charge", result['error_html']) def test_process_payment_invalid_order(self): # Use an invalid order ID params = self._signed_callback_params("98272", self.COST, self.COST) result = process_postpay_callback(params) # Expect an error self.assertFalse(result['success']) self.assertIn(u"inconsistent data", result['error_html']) def test_process_invalid_payment_amount(self): # Change the payment amount (no longer matches the database order record) params = self._signed_callback_params(self.order.id, "145.00", "145.00") result = process_postpay_callback(params) # Expect an error self.assertFalse(result['success']) self.assertIn(u"different amount than the order total", result['error_html']) # refresh data for current order order = Order.objects.get(id=self.order.id) self.assert_dump_recorded(order) def test_process_amount_paid_not_decimal(self): # Change the payment amount to a non-decimal params = self._signed_callback_params(self.order.id, self.COST, "abcd") result = process_postpay_callback(params) # Expect an error self.assertFalse(result['success']) 
self.assertIn(u"badly-typed value", result['error_html']) def test_process_user_cancelled(self): # Change the payment amount to a non-decimal params = self._signed_callback_params(self.order.id, self.COST, "abcd") params['decision'] = u'CANCEL' result = process_postpay_callback(params) # Expect an error self.assertFalse(result['success']) self.assertIn(u"you have cancelled this transaction", result['error_html']) @patch.object(OrderItem, 'purchased_callback') @patch.object(OrderItem, 'pdf_receipt_display_name') def test_process_no_credit_card_digits(self, pdf_receipt_display_name, purchased_callback): # pylint: disable=unused-argument # Use a credit card number with no digits provided params = self._signed_callback_params( self.order.id, self.COST, self.COST, card_number='nodigits' ) result = process_postpay_callback(params) # Expect that we processed the payment successfully self.assertTrue( result['success'], msg="Payment was not successful: {error}".format(error=result.get('error_html')) ) self.assertEqual(result['error_html'], '') self.assert_dump_recorded(result['order']) # Expect that the order has placeholders for the missing credit card digits self.assertEqual(result['order'].bill_to_ccnum, '####') @ddt.data('req_reference_number', 'req_currency', 'decision', 'auth_amount') def test_process_missing_parameters(self, missing_param): # Remove a required parameter params = self._signed_callback_params(self.order.id, self.COST, self.COST) del params[missing_param] # Recalculate the signature with no signed fields so we can get past # signature validation. params['signed_field_names'] = 'reason_code,message' params['signature'] = self._signature(params) result = process_postpay_callback(params) # Expect an error self.assertFalse(result['success']) self.assertIn(u"did not return a required parameter", result['error_html']) @patch.object(OrderItem, 'purchased_callback') @patch.object(OrderItem, 'pdf_receipt_display_name') def test_sign_then_verify_unicode(self, pdf_receipt_display_name, purchased_callback): # pylint: disable=unused-argument params = self._signed_callback_params( self.order.id, self.COST, self.COST, first_name=u'\u2699' ) # Verify that this executes without a unicode error result = process_postpay_callback(params) self.assertTrue(result['success']) self.assert_dump_recorded(result['order']) @ddt.data('string', u'üñîçø∂é') def test_get_processor_exception_html(self, error_string): """ Tests the processor exception html message """ for exception_type in [CCProcessorSignatureException, CCProcessorWrongAmountException, CCProcessorDataException]: error_msg = error_string exception = exception_type(error_msg) html = _get_processor_exception_html(exception) self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html) self.assertIn('Sorry!', html) self.assertIn(error_msg, html) def _signed_callback_params( self, order_id, order_amount, paid_amount, decision='ACCEPT', signature=None, card_number='xxxxxxxxxxxx1111', first_name='John' ): """ Construct parameters that could be returned from CyberSource to our payment callback. Some values can be overridden to simulate different test scenarios, but most are fake values captured from interactions with a CyberSource test account. Args: order_id (string or int): The ID of the `Order` model. order_amount (string): The cost of the order. paid_amount (string): The amount the user paid using CyberSource. Keyword Args: decision (string): Whether the payment was accepted or rejected or declined. 
signature (string): If provided, use this value instead of calculating the signature. card_numer (string): If provided, use this value instead of the default credit card number. first_name (string): If provided, the first name of the user. Returns: dict """ # Parameters sent from CyberSource to our callback implementation # These were captured from the CC test server. signed_field_names = ["transaction_id", "decision", "req_access_key", "req_profile_id", "req_transaction_uuid", "req_transaction_type", "req_reference_number", "req_amount", "req_currency", "req_locale", "req_payment_method", "req_override_custom_receipt_page", "req_bill_to_forename", "req_bill_to_surname", "req_bill_to_email", "req_bill_to_address_line1", "req_bill_to_address_city", "req_bill_to_address_state", "req_bill_to_address_country", "req_bill_to_address_postal_code", "req_card_number", "req_card_type", "req_card_expiry_date", "message", "reason_code", "auth_avs_code", "auth_avs_code_raw", "auth_response", "auth_amount", "auth_code", "auth_trans_ref_no", "auth_time", "bill_trans_ref_no", "signed_field_names", "signed_date_time"] # if decision is in FAILED_DECISIONS list then remove auth_amount from # signed_field_names list. if decision in self.FAILED_DECISIONS: signed_field_names.remove("auth_amount") params = { # Parameters that change based on the test "decision": decision, "req_reference_number": str(order_id), "req_amount": order_amount, "auth_amount": paid_amount, "req_card_number": card_number, # Stub values "utf8": u"✓", "req_bill_to_address_country": "US", "auth_avs_code": "X", "req_card_expiry_date": "01-2018", "bill_trans_ref_no": "85080648RYI23S6I", "req_bill_to_address_state": "MA", "signed_field_names": ",".join(signed_field_names), "req_payment_method": "card", "req_transaction_type": "sale", "auth_code": "888888", "req_locale": "en", "reason_code": "100", "req_bill_to_address_postal_code": "02139", "req_bill_to_address_line1": "123 Fake Street", "req_card_type": "001", "req_bill_to_address_city": "Boston", "signed_date_time": "2014-08-18T14:07:10Z", "req_currency": "usd", "auth_avs_code_raw": "I1", "transaction_id": "4083708299660176195663", "auth_time": "2014-08-18T140710Z", "message": "Request was processed successfully.", "auth_response": "100", "req_profile_id": "0000001", "req_transaction_uuid": "ddd9935b82dd403f9aa4ba6ecf021b1f", "auth_trans_ref_no": "85080648RYI23S6I", "req_bill_to_surname": "Doe", "req_bill_to_forename": first_name, "req_bill_to_email": "john@example.com", "req_override_custom_receipt_page": "http://localhost:8000/shoppingcart/postpay_callback/", "req_access_key": "abcd12345", } # if decision is in FAILED_DECISIONS list then remove the auth_amount from params dict if decision in self.FAILED_DECISIONS: del params["auth_amount"] # Calculate the signature params['signature'] = signature if signature is not None else self._signature(params) return params def _signature(self, params): """ Calculate the signature from a dictionary of params. NOTE: This method uses the processor's hashing method. That method is a thin wrapper of standard library calls, and it seemed overly complex to rewrite that code in the test suite. Args: params (dict): Dictionary with a key 'signed_field_names', which is a comma-separated list of keys in the dictionary to include in the signature. 
Returns: string """ return processor_hash( ",".join([ u"{0}={1}".format(signed_field, params[signed_field]) for signed_field in params['signed_field_names'].split(u",") ]) ) def test_process_payment_declined(self): # Simulate a callback from CyberSource indicating that the payment was declined params = self._signed_callback_params(self.order.id, self.COST, self.COST, decision='DECLINE') result = process_postpay_callback(params) # Expect that we get an error message self.assertFalse(result['success']) self.assertIn(u"payment was declined", result['error_html'])
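The _signature helper above hashes the comma-joined "field=value" pairs listed in signed_field_names; here is a self-contained sketch, assuming the HMAC-SHA256-then-base64 scheme that CyberSource Secure Acceptance documents (the secret key and field set below are placeholders, and the real processor_hash may differ in detail):

# Sketch of signing a CyberSource-style parameter set. Note that
# signed_field_names conventionally includes itself in the signed set.
import base64
import hashlib
import hmac

def sign(params, secret_key):
    # Join "field=value" pairs in the order given by signed_field_names,
    # mirroring what _signature() does above.
    msg = ",".join(
        "{0}={1}".format(field, params[field])
        for field in params["signed_field_names"].split(","))
    digest = hmac.new(secret_key.encode("utf-8"), msg.encode("utf-8"),
                      hashlib.sha256).digest()
    return base64.b64encode(digest).decode("ascii")

params = {"decision": "ACCEPT", "reason_code": "100",
          "signed_field_names": "decision,reason_code,signed_field_names"}
params["signature"] = sign(params, "placeholder-secret")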
agpl-3.0
SOKP/kernel_motorola_msm8226
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for
#               traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"


class RootFrame(wx.Frame):
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent=None, id=-1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
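The us_to_px/px_to_us pair above is the heart of the zoom logic; a minimal sketch with the zoom factored out as a parameter, showing the round trip and the scaling:

# Round-trip sketch of RootFrame's time<->pixel mapping as plain functions:
# px = t / 1000 * zoom, and back. Doubling zoom doubles on-screen width.
def us_to_px(val, zoom):
    return val / (10 ** 3) * zoom

def px_to_us(val, zoom):
    return (val / zoom) * (10 ** 3)

zoom = 0.5
t = 250000.0                                 # a time delta in tracer units
assert us_to_px(t, zoom) == 125.0
assert px_to_us(us_to_px(t, zoom), zoom) == t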
gpl-2.0
vponomaryov/rally
rally/plugins/openstack/context/existing_users.py
1
2614
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from rally.benchmark import context
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
from rally import objects
from rally import osclients


LOG = logging.getLogger(__name__)


# NOTE(boris-42): This context should be hidden for now and used only by
#                 benchmark engine. In future during various refactoring of
#                 validation system and rally CI testing we will make it public

@context.context(name="existing_users", order=99, hidden=True)
class ExistingUsers(context.Context):
    """This context supports using existing users in Rally.

    It uses information about deployment to properly
    initialize context["users"] and context["tenants"]

    So there won't be big difference between usage of "users" and
    "existing_users" context.
    """

    # NOTE(boris-42): We don't need to check config schema because
    #                 this is used only by benchmark engine
    CONFIG_SCHEMA = {}

    def __init__(self, ctx):
        super(ExistingUsers, self).__init__(ctx)
        self.context["users"] = []
        self.context["tenants"] = {}

    @rutils.log_task_wrapper(LOG.info, _("Enter context: `existing_users`"))
    def setup(self):
        for user in self.config:
            user_endpoint = objects.Endpoint(**user)
            user_kclient = osclients.Clients(user_endpoint).keystone()

            if user_kclient.tenant_id not in self.context["tenants"]:
                self.context["tenants"][user_kclient.tenant_id] = {
                    "id": user_kclient.tenant_id,
                    "name": user_kclient.tenant_name
                }

            self.context["users"].append({
                "endpoint": user_endpoint,
                "id": user_kclient.user_id,
                "tenant_id": user_kclient.tenant_id
            })

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `existing_users`"))
    def cleanup(self):
        """These users are not managed by Rally, so don't touch them."""
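For orientation, a sketch of the shape setup() leaves in the context; all IDs and credentials below are invented placeholders, and plain dicts stand in for the objects.Endpoint instances:

# Hypothetical post-setup() context. One tenant entry is created per distinct
# tenant_id, and each configured user becomes one entry in context["users"].
config = [
    {"username": "alice", "password": "secret", "tenant_name": "demo",
     "auth_url": "http://keystone.example:5000/v2.0"},
]

ctx = {
    "tenants": {
        "tenant-id-1": {"id": "tenant-id-1", "name": "demo"},
    },
    "users": [
        {"endpoint": config[0],   # stands in for objects.Endpoint(**user)
         "id": "user-id-1",
         "tenant_id": "tenant-id-1"},
    ],
}
assert ctx["users"][0]["tenant_id"] in ctx["tenants"]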
apache-2.0
shinfan/api-client-staging
generated/python/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/text_annotation_pb2.py
8
24454
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/proto/vision/v1/text_annotation.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.proto.vision.v1 import geometry_pb2 as google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='google/cloud/proto/vision/v1/text_annotation.proto', package='google.cloud.vision.v1', syntax='proto3', serialized_pb=_b('\n2google/cloud/proto/vision/v1/text_annotation.proto\x12\x16google.cloud.vision.v1\x1a\x1cgoogle/api/annotations.proto\x1a+google/cloud/proto/vision/v1/geometry.proto\"\x96\x04\n\x0eTextAnnotation\x12+\n\x05pages\x18\x01 \x03(\x0b\x32\x1c.google.cloud.vision.v1.Page\x12\x0c\n\x04text\x18\x02 \x01(\t\x1a=\n\x10\x44\x65tectedLanguage\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x1a\xd5\x01\n\rDetectedBreak\x12L\n\x04type\x18\x01 \x01(\x0e\x32>.google.cloud.vision.v1.TextAnnotation.DetectedBreak.BreakType\x12\x11\n\tis_prefix\x18\x02 \x01(\x08\"c\n\tBreakType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05SPACE\x10\x01\x12\x0e\n\nSURE_SPACE\x10\x02\x12\x12\n\x0e\x45OL_SURE_SPACE\x10\x03\x12\n\n\x06HYPHEN\x10\x04\x12\x0e\n\nLINE_BREAK\x10\x05\x1a\xb1\x01\n\x0cTextProperty\x12S\n\x12\x64\x65tected_languages\x18\x01 \x03(\x0b\x32\x37.google.cloud.vision.v1.TextAnnotation.DetectedLanguage\x12L\n\x0e\x64\x65tected_break\x18\x02 \x01(\x0b\x32\x34.google.cloud.vision.v1.TextAnnotation.DetectedBreak\"\x9b\x01\n\x04Page\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12-\n\x06\x62locks\x18\x04 \x03(\x0b\x32\x1d.google.cloud.vision.v1.Block\"\xd2\x02\n\x05\x42lock\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x35\n\nparagraphs\x18\x03 \x03(\x0b\x32!.google.cloud.vision.v1.Paragraph\x12;\n\nblock_type\x18\x04 \x01(\x0e\x32\'.google.cloud.vision.v1.Block.BlockType\"R\n\tBlockType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04TEXT\x10\x01\x12\t\n\x05TABLE\x10\x02\x12\x0b\n\x07PICTURE\x10\x03\x12\t\n\x05RULER\x10\x04\x12\x0b\n\x07\x42\x41RCODE\x10\x05\"\xbb\x01\n\tParagraph\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12+\n\x05words\x18\x03 \x03(\x0b\x32\x1c.google.cloud.vision.v1.Word\"\xba\x01\n\x04Word\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12/\n\x07symbols\x18\x03 \x03(\x0b\x32\x1e.google.cloud.vision.v1.Symbol\"\x99\x01\n\x06Symbol\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x0c\n\x04text\x18\x03 
\x01(\tBt\n\x1a\x63om.google.cloud.vision.v1B\x13TextAnnotationProtoP\x01Z<google.golang.org/genproto/googleapis/cloud/vision/v1;vision\xf8\x01\x01\x62\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _TEXTANNOTATION_DETECTEDBREAK_BREAKTYPE = _descriptor.EnumDescriptor( name='BreakType', full_name='google.cloud.vision.v1.TextAnnotation.DetectedBreak.BreakType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SPACE', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='SURE_SPACE', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='EOL_SURE_SPACE', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='HYPHEN', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='LINE_BREAK', index=5, number=5, options=None, type=None), ], containing_type=None, options=None, serialized_start=409, serialized_end=508, ) _sym_db.RegisterEnumDescriptor(_TEXTANNOTATION_DETECTEDBREAK_BREAKTYPE) _BLOCK_BLOCKTYPE = _descriptor.EnumDescriptor( name='BlockType', full_name='google.cloud.vision.v1.Block.BlockType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='TEXT', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='TABLE', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='PICTURE', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='RULER', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='BARCODE', index=5, number=5, options=None, type=None), ], containing_type=None, options=None, serialized_start=1105, serialized_end=1187, ) _sym_db.RegisterEnumDescriptor(_BLOCK_BLOCKTYPE) _TEXTANNOTATION_DETECTEDLANGUAGE = _descriptor.Descriptor( name='DetectedLanguage', full_name='google.cloud.vision.v1.TextAnnotation.DetectedLanguage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='language_code', full_name='google.cloud.vision.v1.TextAnnotation.DetectedLanguage.language_code', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='confidence', full_name='google.cloud.vision.v1.TextAnnotation.DetectedLanguage.confidence', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=231, serialized_end=292, ) _TEXTANNOTATION_DETECTEDBREAK = _descriptor.Descriptor( name='DetectedBreak', full_name='google.cloud.vision.v1.TextAnnotation.DetectedBreak', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='type', full_name='google.cloud.vision.v1.TextAnnotation.DetectedBreak.type', index=0, 
number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='is_prefix', full_name='google.cloud.vision.v1.TextAnnotation.DetectedBreak.is_prefix', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _TEXTANNOTATION_DETECTEDBREAK_BREAKTYPE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=295, serialized_end=508, ) _TEXTANNOTATION_TEXTPROPERTY = _descriptor.Descriptor( name='TextProperty', full_name='google.cloud.vision.v1.TextAnnotation.TextProperty', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detected_languages', full_name='google.cloud.vision.v1.TextAnnotation.TextProperty.detected_languages', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='detected_break', full_name='google.cloud.vision.v1.TextAnnotation.TextProperty.detected_break', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=511, serialized_end=688, ) _TEXTANNOTATION = _descriptor.Descriptor( name='TextAnnotation', full_name='google.cloud.vision.v1.TextAnnotation', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pages', full_name='google.cloud.vision.v1.TextAnnotation.pages', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='text', full_name='google.cloud.vision.v1.TextAnnotation.text', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_TEXTANNOTATION_DETECTEDLANGUAGE, _TEXTANNOTATION_DETECTEDBREAK, _TEXTANNOTATION_TEXTPROPERTY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=154, serialized_end=688, ) _PAGE = _descriptor.Descriptor( name='Page', full_name='google.cloud.vision.v1.Page', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='property', full_name='google.cloud.vision.v1.Page.property', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='width', full_name='google.cloud.vision.v1.Page.width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='height', full_name='google.cloud.vision.v1.Page.height', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='blocks', full_name='google.cloud.vision.v1.Page.blocks', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=691, serialized_end=846, ) _BLOCK = _descriptor.Descriptor( name='Block', full_name='google.cloud.vision.v1.Block', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='property', full_name='google.cloud.vision.v1.Block.property', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bounding_box', full_name='google.cloud.vision.v1.Block.bounding_box', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='paragraphs', full_name='google.cloud.vision.v1.Block.paragraphs', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='block_type', full_name='google.cloud.vision.v1.Block.block_type', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _BLOCK_BLOCKTYPE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=849, serialized_end=1187, ) _PARAGRAPH = _descriptor.Descriptor( name='Paragraph', full_name='google.cloud.vision.v1.Paragraph', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='property', full_name='google.cloud.vision.v1.Paragraph.property', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bounding_box', full_name='google.cloud.vision.v1.Paragraph.bounding_box', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='words', full_name='google.cloud.vision.v1.Paragraph.words', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1190, serialized_end=1377, ) _WORD = _descriptor.Descriptor( name='Word', full_name='google.cloud.vision.v1.Word', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='property', full_name='google.cloud.vision.v1.Word.property', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bounding_box', full_name='google.cloud.vision.v1.Word.bounding_box', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='symbols', full_name='google.cloud.vision.v1.Word.symbols', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1380, serialized_end=1566, ) _SYMBOL = _descriptor.Descriptor( name='Symbol', full_name='google.cloud.vision.v1.Symbol', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='property', full_name='google.cloud.vision.v1.Symbol.property', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bounding_box', full_name='google.cloud.vision.v1.Symbol.bounding_box', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='text', full_name='google.cloud.vision.v1.Symbol.text', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1569, serialized_end=1722, ) _TEXTANNOTATION_DETECTEDLANGUAGE.containing_type = _TEXTANNOTATION _TEXTANNOTATION_DETECTEDBREAK.fields_by_name['type'].enum_type = _TEXTANNOTATION_DETECTEDBREAK_BREAKTYPE _TEXTANNOTATION_DETECTEDBREAK.containing_type = _TEXTANNOTATION _TEXTANNOTATION_DETECTEDBREAK_BREAKTYPE.containing_type = _TEXTANNOTATION_DETECTEDBREAK _TEXTANNOTATION_TEXTPROPERTY.fields_by_name['detected_languages'].message_type = _TEXTANNOTATION_DETECTEDLANGUAGE _TEXTANNOTATION_TEXTPROPERTY.fields_by_name['detected_break'].message_type = _TEXTANNOTATION_DETECTEDBREAK _TEXTANNOTATION_TEXTPROPERTY.containing_type = _TEXTANNOTATION _TEXTANNOTATION.fields_by_name['pages'].message_type = _PAGE _PAGE.fields_by_name['property'].message_type = _TEXTANNOTATION_TEXTPROPERTY _PAGE.fields_by_name['blocks'].message_type = _BLOCK 
_BLOCK.fields_by_name['property'].message_type = _TEXTANNOTATION_TEXTPROPERTY _BLOCK.fields_by_name['bounding_box'].message_type = google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2._BOUNDINGPOLY _BLOCK.fields_by_name['paragraphs'].message_type = _PARAGRAPH _BLOCK.fields_by_name['block_type'].enum_type = _BLOCK_BLOCKTYPE _BLOCK_BLOCKTYPE.containing_type = _BLOCK _PARAGRAPH.fields_by_name['property'].message_type = _TEXTANNOTATION_TEXTPROPERTY _PARAGRAPH.fields_by_name['bounding_box'].message_type = google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2._BOUNDINGPOLY _PARAGRAPH.fields_by_name['words'].message_type = _WORD _WORD.fields_by_name['property'].message_type = _TEXTANNOTATION_TEXTPROPERTY _WORD.fields_by_name['bounding_box'].message_type = google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2._BOUNDINGPOLY _WORD.fields_by_name['symbols'].message_type = _SYMBOL _SYMBOL.fields_by_name['property'].message_type = _TEXTANNOTATION_TEXTPROPERTY _SYMBOL.fields_by_name['bounding_box'].message_type = google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2._BOUNDINGPOLY DESCRIPTOR.message_types_by_name['TextAnnotation'] = _TEXTANNOTATION DESCRIPTOR.message_types_by_name['Page'] = _PAGE DESCRIPTOR.message_types_by_name['Block'] = _BLOCK DESCRIPTOR.message_types_by_name['Paragraph'] = _PARAGRAPH DESCRIPTOR.message_types_by_name['Word'] = _WORD DESCRIPTOR.message_types_by_name['Symbol'] = _SYMBOL TextAnnotation = _reflection.GeneratedProtocolMessageType('TextAnnotation', (_message.Message,), dict( DetectedLanguage = _reflection.GeneratedProtocolMessageType('DetectedLanguage', (_message.Message,), dict( DESCRIPTOR = _TEXTANNOTATION_DETECTEDLANGUAGE, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.TextAnnotation.DetectedLanguage) )) , DetectedBreak = _reflection.GeneratedProtocolMessageType('DetectedBreak', (_message.Message,), dict( DESCRIPTOR = _TEXTANNOTATION_DETECTEDBREAK, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.TextAnnotation.DetectedBreak) )) , TextProperty = _reflection.GeneratedProtocolMessageType('TextProperty', (_message.Message,), dict( DESCRIPTOR = _TEXTANNOTATION_TEXTPROPERTY, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.TextAnnotation.TextProperty) )) , DESCRIPTOR = _TEXTANNOTATION, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.TextAnnotation) )) _sym_db.RegisterMessage(TextAnnotation) _sym_db.RegisterMessage(TextAnnotation.DetectedLanguage) _sym_db.RegisterMessage(TextAnnotation.DetectedBreak) _sym_db.RegisterMessage(TextAnnotation.TextProperty) Page = _reflection.GeneratedProtocolMessageType('Page', (_message.Message,), dict( DESCRIPTOR = _PAGE, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.Page) )) _sym_db.RegisterMessage(Page) Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), dict( DESCRIPTOR = _BLOCK, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.Block) )) _sym_db.RegisterMessage(Block) Paragraph = _reflection.GeneratedProtocolMessageType('Paragraph', (_message.Message,), dict( DESCRIPTOR = _PARAGRAPH, __module__ = 
'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.Paragraph) )) _sym_db.RegisterMessage(Paragraph) Word = _reflection.GeneratedProtocolMessageType('Word', (_message.Message,), dict( DESCRIPTOR = _WORD, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.Word) )) _sym_db.RegisterMessage(Word) Symbol = _reflection.GeneratedProtocolMessageType('Symbol', (_message.Message,), dict( DESCRIPTOR = _SYMBOL, __module__ = 'google.cloud.proto.vision.v1.text_annotation_pb2' # @@protoc_insertion_point(class_scope:google.cloud.vision.v1.Symbol) )) _sym_db.RegisterMessage(Symbol) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\032com.google.cloud.vision.v1B\023TextAnnotationProtoP\001Z<google.golang.org/genproto/googleapis/cloud/vision/v1;vision\370\001\001')) try: # THESE ELEMENTS WILL BE DEPRECATED. # Please use the generated *_pb2_grpc.py files instead. import grpc from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces except ImportError: pass # @@protoc_insertion_point(module_scope)
bsd-3-clause
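A minimal sketch of exercising the generated classes registered above, assuming the compiled module is importable as google.cloud.proto.vision.v1.text_annotation_pb2 (the exact import path depends on how the package was built). Only fields confirmed by the descriptor linking above are used.

# Sketch only: builds the Page > Block > Paragraph > Word > Symbol hierarchy
# wired up by the descriptor linking above, then round-trips it through the
# protobuf wire format.
from google.cloud.proto.vision.v1 import text_annotation_pb2  # assumed import path

annotation = text_annotation_pb2.TextAnnotation()
page = annotation.pages.add()          # TextAnnotation.pages is repeated Page
block = page.blocks.add()              # Page.blocks is repeated Block
paragraph = block.paragraphs.add()     # Block.paragraphs is repeated Paragraph
word = paragraph.words.add()           # Paragraph.words is repeated Word
symbol = word.symbols.add()            # Word.symbols is repeated Symbol
symbol.text = u'H'                     # Symbol.text is a proto3 string field

data = annotation.SerializeToString()
round_trip = text_annotation_pb2.TextAnnotation.FromString(data)
assert round_trip.pages[0].blocks[0].paragraphs[0].words[0].symbols[0].text == u'H'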
robwebset/screensaver.weather
resources/lib/settings.py
1
1308
# -*- coding: utf-8 -*-
import xbmc
import xbmcaddon

ADDON = xbmcaddon.Addon(id='screensaver.weather')
ADDON_ID = ADDON.getAddonInfo('id')


# Common logging module
def log(txt, loglevel=xbmc.LOGDEBUG):
    if (ADDON.getSetting("logEnabled") == "true") or (loglevel != xbmc.LOGDEBUG):
        if isinstance(txt, str):
            txt = txt.decode("utf-8")
        message = u'%s: %s' % (ADDON_ID, txt)
        xbmc.log(msg=message.encode("utf-8"), level=loglevel)


##############################
# Stores Various Settings
##############################
class Settings():
    DIM_LEVEL = (
        '00000000',
        '11000000',
        '22000000',
        '33000000',
        '44000000',
        '55000000',
        '66000000',
        '77000000',
        '88000000',
        '99000000',
        'AA000000',
        'BB000000',
        'CC000000',
        'DD000000',
        'EE000000'
    )

    @staticmethod
    def getDimValue():
        # The dim level is an ARGB hex overlay: '00000000' leaves the screen
        # unchanged, and each step adds more alpha, giving 15 usable options.
        # 'FF000000' would be completely black, so it is not offered.
        if ADDON.getSetting("dimLevel"):
            return Settings.DIM_LEVEL[int(ADDON.getSetting("dimLevel"))]
        else:
            return '00000000'
gpl-2.0
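The dim-overlay table above is just alpha steps of 0x11; a standalone restatement (no Kodi runtime needed) that reproduces the same tuple:

# Reproduces Settings.DIM_LEVEL without the xbmc/xbmcaddon runtime: the alpha
# byte grows by 0x11 per index, and fully opaque 'FF000000' is excluded.
DIM_LEVEL = tuple('%02X000000' % (0x11 * i) for i in range(15))
assert DIM_LEVEL[0] == '00000000'
assert DIM_LEVEL[14] == 'EE000000'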
niekas/dakis
dakis/website/migrations/openid/0001_initial.py
5
1240
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='OpenIDNonce',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('server_url', models.CharField(max_length=255)),
                ('timestamp', models.IntegerField()),
                ('salt', models.CharField(max_length=255)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='OpenIDStore',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('server_url', models.CharField(max_length=255)),
                ('handle', models.CharField(max_length=255)),
                ('secret', models.TextField()),
                ('issued', models.IntegerField()),
                ('lifetime', models.IntegerField()),
                ('assoc_type', models.TextField()),
            ],
        ),
    ]
agpl-3.0
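For reference, a model definition that would produce this initial migration looks roughly like the sketch below (Django adds the id AutoField implicitly); this is reconstructed from the migration's field list, not taken verbatim from the project.

from django.db import models


class OpenIDNonce(models.Model):
    server_url = models.CharField(max_length=255)
    timestamp = models.IntegerField()
    salt = models.CharField(max_length=255)
    date_created = models.DateTimeField(auto_now_add=True)


class OpenIDStore(models.Model):
    server_url = models.CharField(max_length=255)
    handle = models.CharField(max_length=255)
    secret = models.TextField()
    issued = models.IntegerField()
    lifetime = models.IntegerField()
    assoc_type = models.TextField()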
Jonbean/DSSM
classification/utils.py
8
5542
''' Author: Jon Tsai Created: May 29 2016 ''' import numpy as np import theano from time import sleep import sys def progress_bar(percent, speed): i = int(percent)/2 sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("[%-50s] %d%% %f instances/s" % ('='*i, percent, speed)) sys.stdout.flush() def combine_sents(sent_set): ''' parameter: sent_set ==> 2D sentences set ==> type: list[list[list]] return: sents1D ==> 1D sentences set ==> type: list[list] This function will combine 2D sentence set into 1D sentence set. e.g. [ [[sent1], [sent2], [sent3], ..., [sentn]] ... [[sent1], [sent2], [sent3], ..., [sentn]] ] ==> [ [sentences1], ... [sentencesn] ] ''' sents1D = [] for doc in sent_set: combine_sent = np.array([]) for sent in doc: combine_sent = np.concatenate((combine_sent,sent)) sents1D.append(combine_sent) return sents1D def shuffle_index(length_of_indices_ls): ''' ---------- parameter: ---------- length_of_indices_ls: type = int ---------- return: ---------- a shuffled numpy array of indices ''' ls = np.arange(length_of_indices_ls) np.random.shuffle(ls) return ls def padding(batch_input_list): ''' ---------- parameter: ---------- batch_input_list: type = list(list) ---------- return: ---------- numpy.ndarray: shape == (n_batch, max_time_step) ''' n_batch = len(batch_input_list) max_time_step = max([len(batch_input_list[i]) for i in range(n_batch)]) padding_result = np.zeros((n_batch, max_time_step)) for batch in range(n_batch): padding_result[batch] = np.concatenate((np.asarray(batch_input_list[batch]), np.zeros(max_time_step - len(batch_input_list[batch])))) return padding_result.astype('int64') def mask_generator(indices_matrix): ''' ---------- parameter: ---------- indices_matrix: type = list[list] ---------- return: ---------- mask : type = np.ndarray a mask matrix of a batch of varied length instances ''' n_batch = len(indices_matrix) len_ls = [len(sent) for sent in indices_matrix] max_len = max(len_ls) mask = np.zeros((n_batch, max_len)) for i in range(n_batch): for j in range(len(indices_matrix[i])): mask[i][j] = 1 return mask def mlp_mask_generator(indices_matrix, wemb_size): ''' ---------- parameter: ---------- indices_matrix: type = list[list] ---------- return: ---------- mask : type = np.ndarray mask.shape = (n_batch, wemb_size) ''' n_batch = len(indices_matrix) len_ls = [len(sent) for sent in indices_matrix] mask = np.ones((n_batch, wemb_size)) for i in range(n_batch): mask[i] = mask[i] * len_ls[i] return mask def fake_input_generator(max_index, batch_number, length_range): ''' ---------- parameter: ---------- max_index: type = int batch_number: type = int length_range: tuple(int), len(length_range) = 2 e.g. 
(50, 70) ---------- return: ---------- fake_data: type = list[list] format: fake_data.shape[0] = batch_number length_range[0] <= len(fake_data[i]) <= length_range[1] 0 <= fake_data[i][j] <= max_index ''' max_time_step = length_range[0] + np.random.randint(length_range[1] - length_range[0] + 1) fake_data = np.zeros((batch_number, max_time_step)) mask = np.zeros((batch_number, max_time_step)).astype(theano.config.floatX) len_range = max_time_step - length_range[0] assert len_range >= 0 #pick a row to be the max length row row = np.random.randint(batch_number) fake_data[row] = np.random.randint(max_index+1, size = (max_time_step,)) mask[row] = np.ones(max_time_step) for batch in range(batch_number): if batch == row: continue length = length_range[0]+np.random.randint(len_range) fake_data[batch] = np.concatenate((np.random.randint(max_index+1 ,size = (length,)), np.zeros(max_time_step - length))) mask[batch] = np.concatenate((np.ones(length), np.zeros(max_time_step - length))) return (fake_data.astype('int32'), mask) def fake_data(max_index, batch_number, max_time_step, min_time_step): fake_data = np.zeros((batch_number, max_time_step)) mask = np.zeros((batch_number, max_time_step)).astype(theano.config.floatX) len_range = max_time_step - min_time_step assert len_range >= 0 #pick a row to be the max length row row = np.random.randint(batch_number) fake_data[row] = np.random.randint(max_index+1, size = (max_time_step,)) mask[row] = np.ones(max_time_step) for batch in range(batch_number): if batch == row: continue length = min_time_step+np.random.randint(len_range) fake_data[batch] = np.concatenate((np.random.randint(max_index+1 ,size = (length,)), np.zeros(max_time_step - length))) mask[batch] = np.concatenate((np.ones(length), np.zeros(max_time_step - length))) return (fake_data.astype('int32'), mask)
gpl-3.0
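A small usage sketch for the batching helpers above, assuming the file is importable as utils; the batch contents are made up.

import numpy as np
from utils import padding, mask_generator  # assumes the module above is saved as utils.py

batch = [[4, 2, 7], [1], [3, 3, 3, 3]]  # hypothetical variable-length index sequences
padded = padding(batch)                 # shape (3, 4), int64, zero-padded on the right
mask = mask_generator(batch)            # shape (3, 4), ones over the real positions
assert padded.shape == (3, 4)
assert padded.dtype == np.int64
assert int(mask.sum()) == 3 + 1 + 4     # one mask entry per real token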
Treeki/NewerSMBW
Koopatlas/src/editorui/objects.py
1
4434
from common import * from editorcommon import * import weakref class KPEditorObject(KPEditorItem): SNAP_TO = (24,24) def __init__(self, obj, layer): KPEditorItem.__init__(self) obj.qtItem = self self._objRef = weakref.ref(obj) self._layerRef = weakref.ref(layer) self._updatePosition() self._updateSize() self.setAcceptHoverEvents(True) self.resizing = None if not hasattr(KPEditorObject, 'SELECTION_PEN'): KPEditorObject.SELECTION_PEN = QtGui.QPen(Qt.green, 1, Qt.DotLine) # I don't bother setting the ZValue because it doesn't quite matter: # only one layer's objects are ever clickable, and drawBackground takes # care of the layered drawing def _updatePosition(self): self.ignoreMovement = True x,y = self._objRef().position self.setPos(x*24, y*24) self.ignoreMovement = False def _updateSize(self): self.prepareGeometryChange() obj = self._objRef() w,h = obj.size self._boundingRect = QtCore.QRectF(0, 0, w*24, h*24) self._selectionRect = QtCore.QRectF(0, 0, w*24-1, h*24-1) self._resizerEndXY = (w*24-5, h*24-5) def paint(self, painter, option, widget): if self.isSelected(): painter.setPen(self.SELECTION_PEN) painter.drawRect(self._selectionRect) def hoverMoveEvent(self, event): if self._layerRef() != KP.mapScene.currentLayer: self.setCursor(Qt.ArrowCursor) return pos = event.pos() bit = self.resizerPortionAt(pos.x(), pos.y()) if bit == 1 or bit == 4: self.setCursor(Qt.SizeFDiagCursor) elif bit == 2 or bit == 3: self.setCursor(Qt.SizeBDiagCursor) elif bit == 7 or bit == 8: self.setCursor(Qt.SizeHorCursor) elif bit == 5 or bit == 6: self.setCursor(Qt.SizeVerCursor) else: self.setCursor(Qt.ArrowCursor) def mousePressEvent(self, event): if event.button() == Qt.LeftButton: pos = event.pos() bit = self.resizerPortionAt(pos.x(), pos.y()) if self._layerRef() == KP.mapScene.currentLayer and bit: # if bit: event.accept() x, xSide, y, ySide = False, None, False, None if bit == 1 or bit == 7 or bit == 3: x, xSide = True, 1 elif bit == 2 or bit == 4 or bit == 8: x, xSide = True, 0 if bit == 1 or bit == 2 or bit == 5: y, ySide = True, 1 elif bit == 3 or bit == 4 or bit == 6: y, ySide = True, 0 self.resizing = (x, xSide, y, ySide) return KPEditorItem.mousePressEvent(self, event) def _tryAndResize(self, obj, axisIndex, mousePosition, stationarySide): objPosition = obj.position[axisIndex] objSize = obj.size[axisIndex] if stationarySide == 0: # Resize the right/bottom side relativeMousePosition = mousePosition - objPosition newSize = relativeMousePosition + 1 if newSize == objSize or newSize < 1: return False if axisIndex == 1: obj.size = (obj.size[0], newSize) else: obj.size = (newSize, obj.size[1]) else: # Resize the left/top side rightSide = objPosition + objSize - 1 newLeftSide = mousePosition newPosition = newLeftSide newSize = rightSide - newLeftSide + 1 if newSize < 1: return False if newPosition == objPosition and newSize == objSize: return False if axisIndex == 1: obj.position = (obj.position[0], newPosition) obj.size = (obj.size[0], newSize) else: obj.position = (newPosition, obj.position[1]) obj.size = (newSize, obj.size[1]) return True def mouseMoveEvent(self, event): if self.resizing: obj = self._objRef() scenePos = event.scenePos() hasChanged = False resizeX, xSide, resizeY, ySide = self.resizing if resizeX: hasChanged |= self._tryAndResize(obj, 0, int(scenePos.x() / 24), xSide) if resizeY: hasChanged |= self._tryAndResize(obj, 1, int(scenePos.y() / 24), ySide) if hasChanged: obj.updateCache() self._layerRef().updateCache() self._updatePosition() self._updateSize() else: 
KPEditorItem.mouseMoveEvent(self, event) def mouseReleaseEvent(self, event): if self.resizing and event.button() == Qt.LeftButton: self.resizing = None else: KPEditorItem.mouseReleaseEvent(self, event) def _itemMoved(self, oldX, oldY, newX, newY): obj = self._objRef() obj.position = (newX/24, newY/24) self._layerRef().updateCache() def remove(self, withItem=False): obj = self._objRef() layer = self._layerRef() layer.objects.remove(obj) layer.updateCache() if withItem: self.scene().removeItem(self)
mit
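The editor above works on a 24-pixel tile grid (SNAP_TO and the various *24 / /24 conversions); a standalone restatement of that coordinate convention:

TILE = 24  # grid pitch used by KPEditorObject.SNAP_TO and _updatePosition


def tile_to_pixel(x, y):
    # Object positions are stored in tile units and drawn at x*24, y*24.
    return x * TILE, y * TILE


def pixel_to_tile(x, y):
    # Mirrors the int(scenePos / 24) truncation used during resizing.
    return int(x / TILE), int(y / TILE)


assert pixel_to_tile(*tile_to_pixel(3, 5)) == (3, 5)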
eestay/edx-ora2
scripts/render_templates.py
7
3912
#!/usr/bin/env python """ Render Django templates. Useful for generating fixtures for the JavaScript unit test suite. Usage: python render_templates.py path/to/templates.json where "templates.json" is a JSON file of the form: [ { "template": "openassessmentblock/oa_base.html", "context": { "title": "Lorem", "question": "Ipsum?" }, "output": "oa_base.html" }, ... ] The rendered templates are saved to "output" relative to the templates.json file's directory. """ import sys import os.path import json import re import dateutil.parser import pytz # This is a bit of a hack to ensure that the root repo directory # is in the Python path, so Django can find the settings module. sys.path.append(os.path.dirname(os.path.dirname(__file__))) from django.template.context import Context from django.template.loader import get_template USAGE = u"{prog} TEMPLATE_DESC" DATETIME_REGEX = re.compile("^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}$") def parse_dates(context): """ Transform datetime strings into Python datetime objects. JSON does not provide a standard way to serialize datetime objects, but some of the templates expect that the context contains Python datetime objects. This (somewhat hacky) solution recursively searches the context for formatted datetime strings of the form "2014-01-02T12:34" and converts them to Python datetime objects with the timezone set to UTC. Args: context (JSON-serializable): The context (or part of the context) that will be passed to the template. Dictionaries and lists will be recursively searched and transformed. Returns: JSON-serializable of the same type as the `context` argument. """ if isinstance(context, dict): return { key: parse_dates(value) for key, value in context.iteritems() } elif isinstance(context, list): return [ parse_dates(item) for item in context ] elif isinstance(context, basestring): if DATETIME_REGEX.match(context) is not None: return dateutil.parser.parse(context).replace(tzinfo=pytz.utc) return context def render_templates(root_dir, template_json): """ Create rendered templates. Args: root_dir (str): The directory in which to write the rendered templates. template_json (dict): Description of which templates to render. Must be a list of dicts, each containing keys "template" (str), "context" (dict), and "output" (str). Returns: None """ for template_dict in template_json: template = get_template(template_dict['template']) context = parse_dates(template_dict['context']) rendered = template.render(Context(context)) output_path = os.path.join(root_dir, template_dict['output']) try: with open(output_path, 'w') as output_file: output_file.write(rendered.encode('utf-8')) except IOError: print "Could not write rendered template to file: {}".format(output_path) sys.exit(1) def main(): """ Main entry point for the script. """ if len(sys.argv) < 2: print USAGE.format(sys.argv[0]) sys.exit(1) try: with open(sys.argv[1]) as template_json: root_dir = os.path.dirname(sys.argv[1]) render_templates(root_dir, json.load(template_json)) except IOError as ex: print u"Could not open template description file: {}".format(sys.argv[1]) print(ex) sys.exit(1) except ValueError as ex: print u"Could not parse template description as JSON: {}".format(sys.argv[1]) print(ex) sys.exit(1) if __name__ == '__main__': main()
agpl-3.0
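An illustrative templates.json matching the schema in the docstring above; the template path, context keys, and output name are taken from the docstring's own example.

import json

EXAMPLE = json.loads('''
[
    {
        "template": "openassessmentblock/oa_base.html",
        "context": {"title": "Lorem", "question": "Ipsum?"},
        "output": "oa_base.html"
    }
]
''')
assert EXAMPLE[0]["output"] == "oa_base.html"
# Rendered with: python render_templates.py path/to/templates.json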
scalient/ebsmount
cmd_manual.py
2
2801
#!/usr/bin/python # Copyright (c) 2010 Alon Swartz <alon@turnkeylinux.org> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """EBS Mount - manually mount EBS device (simulates udev add trigger) Arguments: device EBS device to mount (e.g., /dev/xvdf, /dev/vda) Options: --format=FS Format device prior to mount (e.g., --format=ext3) """ import re import os import sys import getopt import ebsmount import executil from utils import config, is_mounted def usage(e=None): if e: print >> sys.stderr, "error: " + str(e) print >> sys.stderr, "Syntax: %s [-opts] <device>" % sys.argv[0] print >> sys.stderr, __doc__.strip() sys.exit(1) def fatal(s): print >> sys.stderr, "error: " + str(s) sys.exit(1) def _expected_devpath(devname, devpaths): """ugly hack to test expected structure of devpath""" raw_output = executil.getoutput('udevadm info -a -n %s' % devname) for line in raw_output.splitlines(): line = line.strip() m = re.match("^looking at parent device '(.*)':", line) if m: devpath = m.group(1) for pattern in devpaths: if re.search(pattern, devpath): return True return False def main(): try: opts, args = getopt.gnu_getopt(sys.argv[1:], 'h', ['format=']) except getopt.GetoptError, e: usage(e) filesystem = None for opt, val in opts: if opt == '-h': usage() if opt == '--format': filesystem = val if not len(args) == 1: usage() devname = args[0] if not os.path.exists(devname): fatal("%s does not exist" % devname) if not _expected_devpath(devname, config.devpaths.split()): fatal("devpath not of expected structure, or failed lookup") if filesystem: if is_mounted(devname): fatal("%s is mounted" % devname) if not filesystem in config.filesystems.split(): fatal("%s is not supported in %s" % (filesystem, config.CONF_FILE)) executil.system("mkfs." + filesystem, "-q", devname) ebsmount.ebsmount_add(devname, config.mountdir) if __name__=="__main__": main()
gpl-2.0
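The devpath check is the least obvious piece of the script above; a self-contained sketch of the same parsing, with a made-up udevadm line:

import re

SAMPLE_UDEVADM = """
  looking at parent device '/devices/xen/vbd-51792':
"""  # illustrative only; not captured from a real system


def expected_devpath(raw_output, devpaths):
    # Same logic as _expected_devpath above, minus the executil call:
    # accept the device if any parent devpath matches a configured pattern.
    for line in raw_output.splitlines():
        m = re.match(r"^looking at parent device '(.*)':", line.strip())
        if m and any(re.search(pattern, m.group(1)) for pattern in devpaths):
            return True
    return False


assert expected_devpath(SAMPLE_UDEVADM, [r'/xen/vbd-'])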
vicky2135/lucious
lucious/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py
916
3023
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division from collections import deque from datetime import timedelta from math import ceil from sys import stderr from time import time __version__ = '1.2' class Infinite(object): file = stderr sma_window = 10 def __init__(self, *args, **kwargs): self.index = 0 self.start_ts = time() self._ts = self.start_ts self._dt = deque(maxlen=self.sma_window) for key, val in kwargs.items(): setattr(self, key, val) def __getitem__(self, key): if key.startswith('_'): return None return getattr(self, key, None) @property def avg(self): return sum(self._dt) / len(self._dt) if self._dt else 0 @property def elapsed(self): return int(time() - self.start_ts) @property def elapsed_td(self): return timedelta(seconds=self.elapsed) def update(self): pass def start(self): pass def finish(self): pass def next(self, n=1): if n > 0: now = time() dt = (now - self._ts) / n self._dt.append(dt) self._ts = now self.index = self.index + n self.update() def iter(self, it): for x in it: yield x self.next() self.finish() class Progress(Infinite): def __init__(self, *args, **kwargs): super(Progress, self).__init__(*args, **kwargs) self.max = kwargs.get('max', 100) @property def eta(self): return int(ceil(self.avg * self.remaining)) @property def eta_td(self): return timedelta(seconds=self.eta) @property def percent(self): return self.progress * 100 @property def progress(self): return min(1, self.index / self.max) @property def remaining(self): return max(self.max - self.index, 0) def start(self): self.update() def goto(self, index): incr = index - self.index self.next(incr) def iter(self, it): try: self.max = len(it) except TypeError: pass for x in it: yield x self.next() self.finish()
bsd-3-clause
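Typical use of the classes above, assuming they are in scope from this module. Rendering lives in subclasses such as Bar that are outside this excerpt, so the base classes here only track timing and progress state.

from time import sleep

bar = Progress(max=20)          # Progress tracks index against max for percent/eta
for _ in range(20):
    sleep(0.01)                 # stand-in for real work
    bar.next()                  # records per-item timing into the SMA window
bar.finish()

# Or wrap any iterable; max is inferred from len() when the iterable has one:
for item in Progress().iter(range(20)):
    pass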
johndpope/tensorflow
tensorflow/tensorboard/backend/application.py
24
26886
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorBoard WSGI Application Logic. TensorBoardApplication constructs TensorBoard as a WSGI application. It handles serving static assets, and implements TensorBoard data APIs. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import imghdr import mimetypes import os import re import threading import time import six from six import StringIO from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin from six.moves.urllib import parse as urlparse from werkzeug import wrappers from tensorflow.python.platform import resource_loader from tensorflow.python.platform import tf_logging as logging from tensorflow.tensorboard.backend import http_util from tensorflow.tensorboard.backend import process_graph from tensorflow.tensorboard.backend.event_processing import event_accumulator from tensorflow.tensorboard.backend.event_processing import event_multiplexer DEFAULT_SIZE_GUIDANCE = { event_accumulator.COMPRESSED_HISTOGRAMS: 500, event_accumulator.IMAGES: 10, event_accumulator.AUDIO: 10, event_accumulator.SCALARS: 1000, event_accumulator.HEALTH_PILLS: 100, event_accumulator.HISTOGRAMS: 50, } DATA_PREFIX = '/data' LOGDIR_ROUTE = '/logdir' RUNS_ROUTE = '/runs' PLUGIN_PREFIX = '/plugin' PLUGINS_LISTING_ROUTE = '/plugins_listing' SCALARS_ROUTE = '/' + event_accumulator.SCALARS IMAGES_ROUTE = '/' + event_accumulator.IMAGES AUDIO_ROUTE = '/' + event_accumulator.AUDIO HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS INDIVIDUAL_IMAGE_ROUTE = '/individualImage' INDIVIDUAL_AUDIO_ROUTE = '/individualAudio' GRAPH_ROUTE = '/' + event_accumulator.GRAPH RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms'] _IMGHDR_TO_MIMETYPE = { 'bmp': 'image/bmp', 'gif': 'image/gif', 'jpeg': 'image/jpeg', 'png': 'image/png' } _DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream' def _content_type_for_image(encoded_image_string): image_type = imghdr.what(None, encoded_image_string) return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE) class _OutputFormat(object): """An enum used to list the valid output formats for API calls. Not all API calls support all formats (for example, only scalars and compressed histograms support CSV). """ JSON = 'json' CSV = 'csv' def standard_tensorboard_wsgi( logdir, purge_orphaned_data, reload_interval, plugins): """Construct a TensorBoardWSGIApp with standard plugins and multiplexer. Args: logdir: The path to the directory containing events files. purge_orphaned_data: Whether to purge orphaned data. reload_interval: The interval at which the backend reloads more data in seconds. plugins: A list of plugins for TensorBoard to initialize. 
Returns: The new TensorBoard WSGI application. """ multiplexer = event_multiplexer.EventMultiplexer( size_guidance=DEFAULT_SIZE_GUIDANCE, purge_orphaned_data=purge_orphaned_data) return TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval) class TensorBoardWSGIApp(object): """The TensorBoard application, conforming to WSGI spec.""" # How many samples to include in sampling API calls by default. DEFAULT_SAMPLE_COUNT = 10 # NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all # responses using send_header. protocol_version = 'HTTP/1.1' def __init__(self, logdir, plugins, multiplexer, reload_interval): """Constructs the TensorBoard application. Args: logdir: the logdir spec that describes where data will be loaded. may be a directory, or comma,separated list of directories, or colons can be used to provide named directories plugins: List of plugins that extend tensorboard.plugins.BasePlugin multiplexer: The EventMultiplexer with TensorBoard data to serve reload_interval: How often (in seconds) to reload the Multiplexer Returns: A WSGI application that implements the TensorBoard backend. Raises: ValueError: If some plugin has no plugin_name ValueError: If two plugins have the same plugin_name """ self._logdir = logdir self._plugins = plugins self._multiplexer = multiplexer self.tag = get_tensorboard_tag() path_to_run = parse_event_files_spec(self._logdir) if reload_interval: start_reloading_multiplexer(self._multiplexer, path_to_run, reload_interval) else: reload_multiplexer(self._multiplexer, path_to_run) self.data_applications = { '/app.js': self._serve_js, DATA_PREFIX + AUDIO_ROUTE: self._serve_audio, DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE: self._serve_compressed_histograms, DATA_PREFIX + GRAPH_ROUTE: self._serve_graph, DATA_PREFIX + HISTOGRAMS_ROUTE: self._serve_histograms, DATA_PREFIX + IMAGES_ROUTE: self._serve_images, DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE: self._serve_individual_audio, DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE: self._serve_image, DATA_PREFIX + LOGDIR_ROUTE: self._serve_logdir, # TODO(chizeng): Delete this RPC once we have skylark rules that obviate # the need for the frontend to determine which plugins are active. DATA_PREFIX + PLUGINS_LISTING_ROUTE: self._serve_plugins_listing, DATA_PREFIX + RUN_METADATA_ROUTE: self._serve_run_metadata, DATA_PREFIX + RUNS_ROUTE: self._serve_runs, DATA_PREFIX + SCALARS_ROUTE: self._serve_scalars, } # Serve the routes from the registered plugins using their name as the route # prefix. For example if plugin z has two routes /a and /b, they will be # served as /data/plugin/z/a and /data/plugin/z/b. plugin_names_encountered = set() for plugin in self._plugins: if plugin.plugin_name is None: raise ValueError('Plugin %s has no plugin_name' % plugin) if plugin.plugin_name in plugin_names_encountered: raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name) plugin_names_encountered.add(plugin.plugin_name) try: plugin_apps = plugin.get_plugin_apps(self._multiplexer, self._logdir) except Exception as e: # pylint: disable=broad-except logging.warning('Plugin %s failed. Exception: %s', plugin.plugin_name, str(e)) continue for route, app in plugin_apps.items(): path = DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route self.data_applications[path] = app # We use underscore_names for consistency with inherited methods. def _image_response_for_run(self, run_images, run, tag): """Builds a JSON-serializable object with information about run_images. 
Args: run_images: A list of event_accumulator.ImageValueEvent objects. run: The name of the run. tag: The name of the tag the images all belong to. Returns: A list of dictionaries containing the wall time, step, URL, width, and height for each image. """ response = [] for index, run_image in enumerate(run_images): response.append({ 'wall_time': run_image.wall_time, 'step': run_image.step, # We include the size so that the frontend can add that to the <img> # tag so that the page layout doesn't change when the image loads. 'width': run_image.width, 'height': run_image.height, 'query': self._query_for_individual_image(run, tag, index) }) return response def _audio_response_for_run(self, run_audio, run, tag): """Builds a JSON-serializable object with information about run_audio. Args: run_audio: A list of event_accumulator.AudioValueEvent objects. run: The name of the run. tag: The name of the tag the images all belong to. Returns: A list of dictionaries containing the wall time, step, URL, and content_type for each audio clip. """ response = [] for index, run_audio_clip in enumerate(run_audio): response.append({ 'wall_time': run_audio_clip.wall_time, 'step': run_audio_clip.step, 'content_type': run_audio_clip.content_type, 'query': self._query_for_individual_audio(run, tag, index) }) return response def _path_is_safe(self, path): """Check path is safe (stays within current directory). This is for preventing directory-traversal attacks. Args: path: The path to check for safety. Returns: True if the given path stays within the current directory, and false if it would escape to a higher directory. E.g. _path_is_safe('index.html') returns true, but _path_is_safe('../../../etc/password') returns false. """ base = os.path.abspath(os.curdir) absolute_path = os.path.abspath(path) prefix = os.path.commonprefix([base, absolute_path]) return prefix == base @wrappers.Request.application def _serve_logdir(self, request): """Respond with a JSON object containing this TensorBoard's logdir.""" return http_util.Respond( request, {'logdir': self._logdir}, 'application/json') @wrappers.Request.application def _serve_scalars(self, request): """Given a tag and single run, return array of ScalarEvents.""" # TODO(cassandrax): return HTTP status code for malformed requests tag = request.args.get('tag') run = request.args.get('run') values = self._multiplexer.Scalars(run, tag) if request.args.get('format') == _OutputFormat.CSV: string_io = StringIO() writer = csv.writer(string_io) writer.writerow(['Wall time', 'Step', 'Value']) writer.writerows(values) return http_util.Respond(request, string_io.getvalue(), 'text/csv') else: return http_util.Respond(request, values, 'application/json') @wrappers.Request.application def _serve_graph(self, request): """Given a single run, return the graph definition in json format.""" run = request.args.get('run', None) if run is None: return http_util.Respond( request, 'query parameter "run" is required', 'text/plain', 400) try: graph = self._multiplexer.Graph(run) except ValueError: return http_util.Respond( request, '404 Not Found', 'text/plain; charset=UTF-8', code=404) limit_attr_size = request.args.get('limit_attr_size', None) if limit_attr_size is not None: try: limit_attr_size = int(limit_attr_size) except ValueError: return http_util.Respond( request, 'query parameter `limit_attr_size` must be integer', 'text/plain', 400) large_attrs_key = request.args.get('large_attrs_key', None) try: process_graph.prepare_graph_for_ui(graph, limit_attr_size, large_attrs_key) except 
ValueError as e: return http_util.Respond(request, e.message, 'text/plain', 400) return http_util.Respond(request, str(graph), 'text/x-protobuf') # pbtxt @wrappers.Request.application def _serve_run_metadata(self, request): """Given a tag and a TensorFlow run, return the session.run() metadata.""" tag = request.args.get('tag', None) run = request.args.get('run', None) if tag is None: return http_util.Respond( request, 'query parameter "tag" is required', 'text/plain', 400) if run is None: return http_util.Respond( request, 'query parameter "run" is required', 'text/plain', 400) try: run_metadata = self._multiplexer.RunMetadata(run, tag) except ValueError: return http_util.Respond( request, '404 Not Found', 'text/plain; charset=UTF-8', code=404) return http_util.Respond( request, str(run_metadata), 'text/x-protobuf') # pbtxt @wrappers.Request.application def _serve_histograms(self, request): """Given a tag and single run, return an array of histogram values.""" tag = request.args.get('tag') run = request.args.get('run') values = self._multiplexer.Histograms(run, tag) return http_util.Respond(request, values, 'application/json') @wrappers.Request.application def _serve_compressed_histograms(self, request): """Given a tag and single run, return an array of compressed histograms.""" tag = request.args.get('tag') run = request.args.get('run') compressed_histograms = self._multiplexer.CompressedHistograms(run, tag) if request.args.get('format') == _OutputFormat.CSV: string_io = StringIO() writer = csv.writer(string_io) # Build the headers; we have two columns for timing and two columns for # each compressed histogram bucket. headers = ['Wall time', 'Step'] if compressed_histograms: bucket_count = len(compressed_histograms[0].compressed_histogram_values) for i in xrange(bucket_count): headers += ['Edge %d basis points' % i, 'Edge %d value' % i] writer.writerow(headers) for compressed_histogram in compressed_histograms: row = [compressed_histogram.wall_time, compressed_histogram.step] for value in compressed_histogram.compressed_histogram_values: row += [value.rank_in_bps, value.value] writer.writerow(row) return http_util.Respond(request, string_io.getvalue(), 'text/csv') else: return http_util.Respond( request, compressed_histograms, 'application/json') @wrappers.Request.application def _serve_images(self, request): """Given a tag and list of runs, serve a list of images. Note that the images themselves are not sent; instead, we respond with URLs to the images. The frontend should treat these URLs as opaque and should not try to parse information about them or generate them itself, as the format may change. Args: request: A werkzeug.wrappers.Request object. Returns: A werkzeug.Response application. """ tag = request.args.get('tag') run = request.args.get('run') images = self._multiplexer.Images(run, tag) response = self._image_response_for_run(images, run, tag) return http_util.Respond(request, response, 'application/json') @wrappers.Request.application def _serve_image(self, request): """Serves an individual image.""" tag = request.args.get('tag') run = request.args.get('run') index = int(request.args.get('index')) image = self._multiplexer.Images(run, tag)[index] encoded_image_string = image.encoded_image_string content_type = _content_type_for_image(encoded_image_string) return http_util.Respond(request, encoded_image_string, content_type) def _query_for_individual_image(self, run, tag, index): """Builds a URL for accessing the specified image. This should be kept in sync with _serve_image. 
Note that the URL is *not* guaranteed to always return the same image, since images may be unloaded from the reservoir as new images come in. Args: run: The name of the run. tag: The tag. index: The index of the image. Negative values are OK. Returns: A string representation of a URL that will load the index-th sampled image in the given run with the given tag. """ query_string = urllib.parse.urlencode({ 'run': run, 'tag': tag, 'index': index }) return query_string @wrappers.Request.application def _serve_audio(self, request): """Given a tag and list of runs, serve a list of audio. Note that the audio clips themselves are not sent; instead, we respond with URLs to the audio. The frontend should treat these URLs as opaque and should not try to parse information about them or generate them itself, as the format may change. Args: request: A werkzeug.wrappers.Request object. Returns: A werkzeug.Response application. """ tag = request.args.get('tag') run = request.args.get('run') audio_list = self._multiplexer.Audio(run, tag) response = self._audio_response_for_run(audio_list, run, tag) return http_util.Respond(request, response, 'application/json') @wrappers.Request.application def _serve_individual_audio(self, request): """Serves an individual audio clip.""" tag = request.args.get('tag') run = request.args.get('run') index = int(request.args.get('index')) audio = self._multiplexer.Audio(run, tag)[index] return http_util.Respond( request, audio.encoded_audio_string, audio.content_type) def _query_for_individual_audio(self, run, tag, index): """Builds a URL for accessing the specified audio. This should be kept in sync with _serve_individual_audio. Note that the URL is *not* guaranteed to always return the same audio, since audio may be unloaded from the reservoir as new audio comes in. Args: run: The name of the run. tag: The tag. index: The index of the audio. Negative values are OK. Returns: A string representation of a URL that will load the index-th sampled audio in the given run with the given tag. """ query_string = urllib.parse.urlencode({ 'run': run, 'tag': tag, 'index': index }) return query_string @wrappers.Request.application def _serve_plugins_listing(self, request): """Serves an object mapping plugin name to whether it is enabled. Args: request: The werkzeug.Request object. Returns: A werkzeug.Response object. """ return http_util.Respond( request, {plugin.plugin_name: plugin.is_active() for plugin in self._plugins}, 'application/json') @wrappers.Request.application def _serve_runs(self, request): """WSGI app serving a JSON object about runs and tags. Returns a mapping from runs to tagType to list of tags for that run. 
Args: request: A werkzeug request Returns: A werkzeug Response with the following content: {runName: {images: [tag1, tag2, tag3], audio: [tag4, tag5, tag6], scalars: [tagA, tagB, tagC], histograms: [tagX, tagY, tagZ], firstEventTimestamp: 123456.789}} """ runs = self._multiplexer.Runs() for run_name, run_data in runs.items(): try: run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp( run_name) except ValueError: logging.warning('Unable to get first event timestamp for run %s', run_name) run_data['firstEventTimestamp'] = None return http_util.Respond(request, runs, 'application/json') @wrappers.Request.application def _serve_index(self, request): """Serves the index page (i.e., the tensorboard app itself).""" return self._serve_static_file(request, '/dist/index.html') @wrappers.Request.application def _serve_js(self, request): """Serves the JavaScript for the index page.""" return self._serve_static_file(request, '/dist/app.js') def _serve_static_file(self, request, path): """Serves the static file located at the given path. Args: request: A werkzeug Request path: The path of the static file, relative to the tensorboard/ directory. Returns: A werkzeug.Response application. """ # Strip off the leading forward slash. orig_path = path.lstrip('/') if not self._path_is_safe(orig_path): logging.warning('path not safe: %s', orig_path) return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400) # Resource loader wants a path relative to //WORKSPACE/tensorflow. path = os.path.join('tensorboard', orig_path) # Open the file and read it. try: contents = resource_loader.load_resource(path) except IOError: # For compatibility with latest version of Bazel, we renamed bower # packages to use '_' rather than '-' in their package name. # This means that the directory structure is changed too. # So that all our recursive imports work, we need to modify incoming # requests to map onto the new directory structure. path = orig_path components = path.split('/') components[0] = components[0].replace('-', '_') path = ('/').join(components) # Bazel keeps all the external dependencies in //WORKSPACE/external. # and resource loader wants a path relative to //WORKSPACE/tensorflow/. path = os.path.join('../external', path) try: contents = resource_loader.load_resource(path) except IOError: logging.warning('path %s not found, sending 404', path) return http_util.Respond(request, 'Not found', 'text/plain', code=404) mimetype, content_encoding = mimetypes.guess_type(path) mimetype = mimetype or 'application/octet-stream' return http_util.Respond( request, contents, mimetype, expires=3600, content_encoding=content_encoding) def __call__(self, environ, start_response): # pylint: disable=invalid-name """Central entry point for the TensorBoard application. This method handles routing to sub-applications. It does simple routing using regular expression matching. This __call__ method conforms to the WSGI spec, so that instances of this class are WSGI applications. Args: environ: See WSGI spec. start_response: See WSGI spec. Returns: A werkzeug Response. """ request = wrappers.Request(environ) parsed_url = urlparse.urlparse(request.path) # Remove a trailing slash, if present. 
clean_path = parsed_url.path if clean_path.endswith('/'): clean_path = clean_path[:-1] # pylint: disable=too-many-function-args if clean_path in self.data_applications: return self.data_applications[clean_path](environ, start_response) elif clean_path in TAB_ROUTES: return self._serve_index(environ, start_response) else: return self._serve_static_file(request, clean_path)(environ, start_response) # pylint: enable=too-many-function-args def parse_event_files_spec(logdir): """Parses `logdir` into a map from paths to run group names. The events files flag format is a comma-separated list of path specifications. A path specification either looks like 'group_name:/path/to/directory' or '/path/to/directory'; in the latter case, the group is unnamed. Group names cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec with no name and path '/foo:bar/baz'. Globs are not supported. Args: logdir: A comma-separated list of run specifications. Returns: A dict mapping directory paths to names like {'/path/to/directory': 'name'}. Groups without an explicit name are named after their path. If logdir is None, returns an empty dict, which is helpful for testing things that don't require any valid runs. """ files = {} if logdir is None: return files # Make sure keeping consistent with ParseURI in core/lib/io/path.cc uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*') for specification in logdir.split(','): # Check if the spec contains group. A spec start with xyz:// is regarded as # URI path spec instead of group spec. If the spec looks like /foo:bar/baz, # then we assume it's a path with a colon. if (uri_pattern.match(specification) is None and ':' in specification and specification[0] != '/'): # We split at most once so run_name:/path:with/a/colon will work. run_name, _, path = specification.partition(':') else: run_name = None path = specification if uri_pattern.match(path) is None: path = os.path.realpath(path) files[path] = run_name return files def reload_multiplexer(multiplexer, path_to_run): """Loads all runs into the multiplexer. Args: multiplexer: The `EventMultiplexer` to add runs to and reload. path_to_run: A dict mapping from paths to run names, where `None` as the run name is interpreted as a run name equal to the path. """ start = time.time() logging.info('TensorBoard reload process beginning') for (path, name) in six.iteritems(path_to_run): multiplexer.AddRunsFromDirectory(path, name) logging.info('TensorBoard reload process: Reload the whole Multiplexer') multiplexer.Reload() duration = time.time() - start logging.info('TensorBoard done reloading. Load took %0.3f secs', duration) def start_reloading_multiplexer(multiplexer, path_to_run, load_interval): """Starts a thread to automatically reload the given multiplexer. The thread will reload the multiplexer by calling `ReloadMultiplexer` every `load_interval` seconds, starting immediately. Args: multiplexer: The `EventMultiplexer` to add runs to and reload. path_to_run: A dict mapping from paths to run names, where `None` as the run name is interpreted as a run name equal to the path. load_interval: How many seconds to wait after one load before starting the next load. Returns: A started `threading.Thread` that reloads the multiplexer. """ # We don't call multiplexer.Reload() here because that would make # AddRunsFromDirectory block until the runs have all loaded. 
def _reload_forever(): while True: reload_multiplexer(multiplexer, path_to_run) time.sleep(load_interval) thread = threading.Thread(target=_reload_forever) thread.daemon = True thread.start() return thread def get_tensorboard_tag(): """Read the TensorBoard TAG number, and return it or an empty string.""" tag = resource_loader.load_resource('tensorboard/TAG').strip() return tag
apache-2.0
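Behavior of parse_event_files_spec, restated from its docstring and body (paths are illustrative; local paths are normalized through os.path.realpath):

#   parse_event_files_spec('/tmp/runs')          -> {'/tmp/runs': None}
#   parse_event_files_spec('name:/tmp/runs')     -> {'/tmp/runs': 'name'}
#   parse_event_files_spec('/foo:bar/baz')       -> path containing a colon, no group name
#   parse_event_files_spec('gs://bucket/logs')   -> URI spec, kept verbatim, no group name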
slandis/InkCutter
inkcutter/app/bin/device.py
1
3171
#!/usr/bin/env python
# InkCutter, Plot HPGL directly from Inkscape.
# device.py
#
# Copyright 2010 Jairus Martin <frmdstryr@gmail.com>
# Copyright 2013 Shaun Landis <slandis@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.

import serial
from lxml import etree
import os

if os.name != 'nt':
    import cups


class Device:
    def __init__(self, config={}):
        #self.xml = etree.parse(filename).getroot()
        conf = {'width': 0, 'length': 0, 'name': '', 'interface': 'serial',
                'serial': {'port': '/dev/ttyUSB0', 'baud': 9600}}
        conf.update(config)
        self.width = conf['width']
        self.length = conf['length']
        self.name = conf['name']
        self.interface = conf['interface']
        self.serial = conf['serial']

    def getPrinters(self):
        con = cups.Connection()
        printers = con.getPrinters()
        self.printers = printers

    def save(self, id, attribs):
        # save settings to xml
        dev = self.xml.find('device[@id="%s"]' % id)
        err = []
        # delete if exists?
        if len(dev):
            del dev[0]
        else:
            dev = etree.SubElement(self.xml, 'device')
            dev.set('id', id)
        # was: etree.SubElement(d, "interface") -- `d` is undefined here and
        # raised a NameError; the new element must hang off `dev`.
        iface = etree.SubElement(dev, "interface")
        for key, value in attribs.iteritems():
            iface.set(key, value)

    def plot(self, filename):
        def toSerial(data, settings):
            assert type(data) == str, "input data must be a str type"
            import serial
            # set default settings
            set = {'baud': 9600}
            set.update(settings)
            # create serial and set settings
            ser = serial.Serial()
            ser.baudrate = set['baud']
            ser.port = set['port']
            ser.open()
            if ser.isOpen():
                # send data & return bits sent
                bits = ser.write(data)
                ser.close()
                return True
            else:
                return False

        def toPrinter(data, printer):
            assert type(data) == str, "input data must be a str type"
            assert type(printer) == str, "printer name must be a string"
            printer = os.popen('lpr -P %s' % (printer), 'w')
            printer.write(data)
            printer.close()
            return True

        def toUSBPrinter(data, printer):
            assert type(data) == str, "input data must be a str type"
            assert type(printer) == str, "printer name must be a string"
            p = open(printer, 'w+')
            p.write(data)
            p.close()
            return True

        f = open(filename, 'r')
        if self.interface == 'printer':
            toPrinter(f.read(), self.name)
        elif self.interface == 'usb printer':
            toUSBPrinter(f.read(), self.name)
        elif self.interface == 'serial':
            toSerial(f.read(), self.serial)
        else:
            raise AssertionError('Invalid interface type, only printers and serial connections are supported.')
gpl-3.0
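A hypothetical wiring of the Device class above; the device name, port, baud, and file name are placeholders. With interface 'serial', plot() reads the file and writes the raw HPGL bytes over pyserial.

dev = Device({
    'name': 'MyCutter',                                 # placeholder device name
    'interface': 'serial',
    'serial': {'port': '/dev/ttyUSB0', 'baud': 9600},   # placeholder port settings
})
dev.plot('output.hpgl')  # sends the file contents to the configured serial port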
ecederstrand/django
tests/generic_relations_regress/tests.py
300
11453
from django.db.models import Q, Sum from django.db.models.deletion import ProtectedError from django.db.utils import IntegrityError from django.forms.models import modelform_factory from django.test import TestCase, skipIfDBFeature from .models import ( A, B, C, D, Address, Board, CharLink, Company, Contact, Content, Developer, Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2, Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink, ) class GenericRelationTests(TestCase): def test_inherited_models_content_type(self): """ Test that GenericRelations on inherited classes use the correct content type. """ p = Place.objects.create(name="South Park") r = Restaurant.objects.create(name="Chubby's") l1 = Link.objects.create(content_object=p) l2 = Link.objects.create(content_object=r) self.assertEqual(list(p.links.all()), [l1]) self.assertEqual(list(r.links.all()), [l2]) def test_reverse_relation_pk(self): """ Test that the correct column name is used for the primary key on the originating model of a query. See #12664. """ p = Person.objects.create(account=23, name='Chef') Address.objects.create(street='123 Anywhere Place', city='Conifer', state='CO', zipcode='80433', content_object=p) qs = Person.objects.filter(addresses__zipcode='80433') self.assertEqual(1, qs.count()) self.assertEqual('Chef', qs[0].name) def test_charlink_delete(self): oddrel = OddRelation1.objects.create(name='clink') CharLink.objects.create(content_object=oddrel) oddrel.delete() def test_textlink_delete(self): oddrel = OddRelation2.objects.create(name='tlink') TextLink.objects.create(content_object=oddrel) oddrel.delete() def test_q_object_or(self): """ Tests that SQL query parameters for generic relations are properly grouped when OR is used. Test for bug http://code.djangoproject.com/ticket/11535 In this bug the first query (below) works while the second, with the query parameters the same but in reverse order, does not. The issue is that the generic relation conditions do not get properly grouped in parentheses. """ note_contact = Contact.objects.create() org_contact = Contact.objects.create() Note.objects.create(note='note', content_object=note_contact) org = Organization.objects.create(name='org name') org.contacts.add(org_contact) # search with a non-matching note and a matching org name qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') | Q(organizations__name__icontains=r'org name')) self.assertIn(org_contact, qs) # search again, with the same query parameters, in reverse order qs = Contact.objects.filter( Q(organizations__name__icontains=r'org name') | Q(notes__note__icontains=r'other note')) self.assertIn(org_contact, qs) def test_join_reuse(self): qs = Person.objects.filter( addresses__street='foo' ).filter( addresses__street='bar' ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_generic_relation_ordering(self): """ Test that ordering over a generic relation does not include extraneous duplicate results, nor excludes rows not participating in the relation. 
""" p1 = Place.objects.create(name="South Park") p2 = Place.objects.create(name="The City") c = Company.objects.create(name="Chubby's Intl.") Link.objects.create(content_object=p1) Link.objects.create(content_object=c) places = list(Place.objects.order_by('links__id')) def count_places(place): return len([p for p in places if p.id == place.id]) self.assertEqual(len(places), 2) self.assertEqual(count_places(p1), 1) self.assertEqual(count_places(p2), 1) def test_target_model_is_unsaved(self): """Test related to #13085""" # Fails with another, ORM-level error dev1 = Developer(name='Joe') note = Note(note='Deserves promotion', content_object=dev1) self.assertRaises(IntegrityError, note.save) def test_target_model_len_zero(self): """Test for #13085 -- __len__() returns 0""" team1 = Team.objects.create(name='Backend devs') try: note = Note(note='Deserve a bonus', content_object=team1) except Exception as e: if (issubclass(type(e), Exception) and str(e) == 'Impossible arguments to GFK.get_content_type!'): self.fail("Saving model with GenericForeignKey to model instance whose " "__len__ method returns 0 shouldn't fail.") raise e note.save() def test_target_model_nonzero_false(self): """Test related to #13085""" # __nonzero__() returns False -- This actually doesn't currently fail. # This test validates that g1 = Guild.objects.create(name='First guild') note = Note(note='Note for guild', content_object=g1) note.save() @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_gfk_to_model_with_empty_pk(self): """Test related to #13085""" # Saving model with GenericForeignKey to model instance with an # empty CharField PK b1 = Board.objects.create(name='') tag = Tag(label='VP', content_object=b1) tag.save() def test_ticket_20378(self): # Create a couple of extra HasLinkThing so that the autopk value # isn't the same for Link and HasLinkThing. 
hs1 = HasLinkThing.objects.create() hs2 = HasLinkThing.objects.create() hs3 = HasLinkThing.objects.create() hs4 = HasLinkThing.objects.create() l1 = Link.objects.create(content_object=hs3) l2 = Link.objects.create(content_object=hs4) self.assertQuerysetEqual( HasLinkThing.objects.filter(links=l1), [hs3], lambda x: x) self.assertQuerysetEqual( HasLinkThing.objects.filter(links=l2), [hs4], lambda x: x) self.assertQuerysetEqual( HasLinkThing.objects.exclude(links=l2), [hs1, hs2, hs3], lambda x: x, ordered=False) self.assertQuerysetEqual( HasLinkThing.objects.exclude(links=l1), [hs1, hs2, hs4], lambda x: x, ordered=False) def test_ticket_20564(self): b1 = B.objects.create() b2 = B.objects.create() b3 = B.objects.create() c1 = C.objects.create(b=b1) c2 = C.objects.create(b=b2) c3 = C.objects.create(b=b3) A.objects.create(flag=None, content_object=b1) A.objects.create(flag=True, content_object=b2) self.assertQuerysetEqual( C.objects.filter(b__a__flag=None), [c1, c3], lambda x: x ) self.assertQuerysetEqual( C.objects.exclude(b__a__flag=None), [c2], lambda x: x ) def test_ticket_20564_nullable_fk(self): b1 = B.objects.create() b2 = B.objects.create() b3 = B.objects.create() d1 = D.objects.create(b=b1) d2 = D.objects.create(b=b2) d3 = D.objects.create(b=b3) d4 = D.objects.create() A.objects.create(flag=None, content_object=b1) A.objects.create(flag=True, content_object=b1) A.objects.create(flag=True, content_object=b2) self.assertQuerysetEqual( D.objects.exclude(b__a__flag=None), [d2], lambda x: x ) self.assertQuerysetEqual( D.objects.filter(b__a__flag=None), [d1, d3, d4], lambda x: x ) self.assertQuerysetEqual( B.objects.filter(a__flag=None), [b1, b3], lambda x: x ) self.assertQuerysetEqual( B.objects.exclude(a__flag=None), [b2], lambda x: x ) def test_extra_join_condition(self): # A crude check that content_type_id is taken in account in the # join/subquery condition. self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower()) # No need for any joins - the join from inner query can be trimmed in # this case (but not in the above case as no a objects at all for given # B would then fail). self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower()) self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower()) def test_annotate(self): hs1 = HasLinkThing.objects.create() hs2 = HasLinkThing.objects.create() HasLinkThing.objects.create() b = Board.objects.create(name=str(hs1.pk)) Link.objects.create(content_object=hs2) l = Link.objects.create(content_object=hs1) Link.objects.create(content_object=b) qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk) # If content_type restriction isn't in the query's join condition, # then wrong results are produced here as the link to b will also match # (b and hs1 have equal pks). self.assertEqual(qs.count(), 1) self.assertEqual(qs[0].links__sum, l.id) l.delete() # Now if we don't have proper left join, we will not produce any # results at all here. # clear cached results qs = qs.all() self.assertEqual(qs.count(), 1) # Note - 0 here would be a nicer result... self.assertIs(qs[0].links__sum, None) # Finally test that filtering works. 
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1) self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0) def test_filter_targets_related_pk(self): HasLinkThing.objects.create() hs2 = HasLinkThing.objects.create() l = Link.objects.create(content_object=hs2) self.assertNotEqual(l.object_id, l.pk) self.assertQuerysetEqual( HasLinkThing.objects.filter(links=l.pk), [hs2], lambda x: x) def test_editable_generic_rel(self): GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__') form = GenericRelationForm() self.assertIn('links', form.fields) form = GenericRelationForm({'links': None}) self.assertTrue(form.is_valid()) form.save() links = HasLinkThing._meta.get_field('links') self.assertEqual(links.save_form_data_calls, 1) def test_ticket_22998(self): related = Related.objects.create() content = Content.objects.create(related_obj=related) Node.objects.create(content=content) # deleting the Related cascades to the Content cascades to the Node, # where the pre_delete signal should fire and prevent deletion. with self.assertRaises(ProtectedError): related.delete() def test_ticket_22982(self): place = Place.objects.create(name='My Place') self.assertIn('GenericRelatedObjectManager', str(place.links))
bsd-3-clause
HackerTool/vivisect
vstruct/defs/pcap.py
2
16024
import struct

import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *

PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101

PCAPNG_BOM = 0x1A2B3C4D

OPT_ENDOFOPT = 0
OPT_COMMENT = 1

# PCAPNG_BLOCKTYPE_SECTION_HEADER options
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4

# PCAPNG_INTERFACE_DESCRIPTION_BLOCK options
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14

# options for PCAPNG_ENHANCED_PACKET_BLOCK
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4

# values used in the blocktype field
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a

def pad4bytes(size):
    if (size % 4) == 0:
        return size
    return size + (4 - (size % 4))

class PCAP_FILE_HEADER(vstruct.VStruct):
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.magic = v_uint32()
        self.vers_maj = v_uint16()
        self.vers_min = v_uint16()
        self.thiszone = v_uint32()
        self.sigfigs = v_uint32()
        self.snaplen = v_uint32()
        self.linktype = v_uint32()

class PCAP_PACKET_HEADER(vstruct.VStruct):
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.tvsec = v_uint32()
        self.tvusec = v_uint32()
        self.caplen = v_uint32()
        self.len = v_uint32()

class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
    '''
    Used to read the block type & size when parsing the file
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)

class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
    '''
    Used to inherit the weird parsing style where there's a variable-length
    options list at the end, followed by a duplicate of the block total length
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        # non-vstruct field, set while checking the BOM
        self.bigend = False

    def vsParse(self, bytez, offset=0):
        startoff = offset
        roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
        # (blocksize-4): because we still need the trailing blocksize2.
        # Apparently blocks can completely omit the options list and not
        # even have the OPT_ENDOFOPT entry.
        while (roff < len(bytez)) and ((roff - startoff) < (self.blocksize - 4)):
            opt = PCAPNG_OPTION(bigend=self.bigend)
            roff = opt.vsParse(bytez, roff)
            if opt.code == OPT_ENDOFOPT:
                break
            self.options.vsAddElement(opt)
        # append trailing blocksize2
        bs2 = v_uint32(bigend=self.bigend)
        self.vsAddField('blocksize2', bs2)
        roff = bs2.vsParse(bytez, roff)
        # pad, plus we skip
        return pad4bytes(roff)

class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.bom = v_uint32(bigend=bigend)
        self.vers_maj = v_uint16(bigend=bigend)
        self.vers_min = v_uint16(bigend=bigend)
        self.sectionsize = v_uint64(bigend=bigend)
        self.options = vstruct.VArray([])
        # blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)

    def pcb_bom(self):
        bom = self.vsGetField('bom')
        if self.bom == PCAPNG_BOM:
            # if it matches, then the endianness of bom is correct
            self.bigend = bom._vs_bigend
        else:
            self.bigend = not bom._vs_bigend

class PCAPNG_OPTION(vstruct.VStruct):
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.code = v_uint16(bigend=bigend)
        self.optsize = v_uint16(bigend=bigend)
        self.bytes = v_bytes(0)

    def pcb_optsize(self):
        # option payloads are padded out to 4-byte boundaries
        size = pad4bytes(self.optsize)
        self.vsGetField('bytes').vsSetLength(size)
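
# A hedged usage sketch (not part of the original module): hand-build a
# minimal little-endian Section Header Block with no options and parse it.
# On-disk layout assumed from the class above: blocktype, blocksize, BOM,
# vers_maj, vers_min, sectionsize, then the trailing duplicate blocksize2,
# 28 bytes total.
def _example_parse_shb():
    shb_bytes = struct.pack('<IIIHHQI', PCAPNG_BLOCKTYPE_SECTION_HEADER, 28,
                            PCAPNG_BOM, 1, 0, 0xffffffffffffffff, 28)
    shb = PCAPNG_SECTION_HEADER_BLOCK()
    shb.vsParse(shb_bytes)
    # the BOM matched in pcb_bom(), so parsing stayed little-endian
    # and shb.vers_maj == 1
    return shb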
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.linktype = v_uint16(bigend=bigend)
        self.reserved = v_uint16(bigend=bigend)
        self.snaplen = v_uint32(bigend=bigend)
        self.options = vstruct.VArray([])
        # blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)

    def vsParse(self, bytez, offset=0):
        '''
        We need the tsresol value to adjust timestamp values, so pull it
        out here
        '''
        # pass the caller's offset through (the original hardcoded offset=0)
        ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=offset)
        self.tsresol = None
        # default timestamp offset is 0
        self.tsoffset = 0
        for i, opt in self.options:
            if opt.code == OPT_IF_TSRESOL:
                self.tsresol = ord(opt.bytes[0])
            elif opt.code == OPT_IF_TSOFFSET:
                fmt = '<Q'
                if self.bigend:
                    fmt = '>Q'
                self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
        return ret

class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.interfaceid = v_uint32(bigend=bigend)
        self.tstamphi = v_uint32(bigend=bigend)
        self.tstamplow = v_uint32(bigend=bigend)
        self.caplen = v_uint32(bigend=bigend)
        self.packetlen = v_uint32(bigend=bigend)
        self.data = v_bytes(0)
        self.options = vstruct.VArray([])
        # blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)

    def pcb_caplen(self):
        size = pad4bytes(self.caplen)
        self.vsGetField('data').vsSetLength(size)

    def setPcapTimestamp(self, idb):
        '''
        Adds libpcap-compatible tvsec and tvusec fields, based on the
        pcapng timestamp
        '''
        self.snaplen = idb.snaplen
        tstamp = (self.tstamphi << 32) | self.tstamplow
        scale = 1000000
        if idb.tsresol is None:
            # if not set, capture assumes microsecond (10**-6) resolution
            pass
        elif (0x80 & idb.tsresol) == 0:
            # remaining bits are the resolution, as a negative power of 10
            scale = 10 ** (idb.tsresol & 0x7f)
        else:
            # remaining bits are the resolution, as a negative power of 2
            scale = 1 << (idb.tsresol & 0x7f)
        self.tvsec = (tstamp / scale) + idb.tsoffset
        self.tvusec = tstamp % scale
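
# Hedged worked example of the tsresol handling in setPcapTimestamp() above:
# bit 7 clear means ticks per second are a negative power of ten, bit 7 set
# means a negative power of two. This helper is illustrative, not part of
# the original module.
def _tsresol_scale(tsresol):
    if tsresol is None:
        return 1000000                   # default: microsecond ticks
    if tsresol & 0x80:
        return 1 << (tsresol & 0x7f)     # e.g. 0x89 -> 2**9 = 512 ticks/sec
    return 10 ** (tsresol & 0x7f)        # e.g. 6 -> 10**6 ticks/sec

assert _tsresol_scale(6) == 1000000
assert _tsresol_scale(0x89) == 512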
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
    '''
    Note: no variable-length options field, so inheriting from vstruct
    directly
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.packetlen = v_uint32(bigend=bigend)
        self.data = v_bytes(0)
        self.blocksize2 = v_uint32(bigend=bigend)

    def pcb_blocksize(self):
        # blocksize covers the 16 bytes of fixed fields plus the padded data
        self.caplen = pad4bytes(self.blocksize - 16)
        self.vsGetField('data').vsSetLength(self.caplen)

    def setPcapTimestamp(self, idb):
        # no timestamp in this type of block :(
        self.tvsec = idb.tsoffset
        self.tvusec = 0

def iterPcapFileName(filename, reuse=False):
    fd = open(filename, 'rb')
    for x in iterPcapFile(fd, reuse=reuse):
        yield x

def iterPcapFile(fd, reuse=False):
    '''
    Figure out if it's tcpdump (classic pcap) format, or pcapng
    '''
    h = PCAP_FILE_HEADER()
    b = fd.read(len(h))
    h.vsParse(b, fast=True)
    fd.seek(0)
    if h.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
        return _iterPcapNgFile(fd, reuse)
    return _iterPcapFile(fd, reuse)

def _iterPcapFile(fd, reuse=False):
    h = PCAP_FILE_HEADER()
    b = fd.read(len(h))
    h.vsParse(b, fast=True)

    linktype = h.linktype
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)

    pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()

    pktsize = len(pkt)
    eIIsize = len(eII)

    ipv4 = vs_inet.IPv4()
    ipv4size = 20

    tcp_hdr = vs_inet.TCP()
    udp_hdr = vs_inet.UDP()
    icmp_hdr = vs_inet.ICMP()

    go = True
    while go:
        hdr = fd.read(pktsize)
        if len(hdr) != pktsize:
            break

        pkt.vsParse(hdr, fast=True)
        b = fd.read(pkt.caplen)
        offset = 0

        if linktype == PCAP_LINKTYPE_ETHER:
            if len(b) < eIIsize:
                continue
            eII.vsParse(b, 0, fast=True)
            # No support for non-ip protocols yet...
            if eII.etype not in (vs_inet.ETH_P_IP, vs_inet.ETH_P_VLAN):
                continue
            offset += eIIsize
            if eII.etype == vs_inet.ETH_P_VLAN:
                offset += 4
        elif linktype == PCAP_LINKTYPE_RAW:
            pass

        if not reuse:
            ipv4 = vs_inet.IPv4()

        if (len(b) - offset) < ipv4size:
            continue

        ipv4.vsParse(b, offset, fast=True)

        # Make b *only* the IP datagram bytes...
        b = b[offset:offset + ipv4.totlen]
        offset = 0
        offset += len(ipv4)
        tsize = len(b) - offset

        if ipv4.proto == vs_inet.IPPROTO_TCP:
            if tsize < 20:
                continue
            if not reuse:
                tcp_hdr = vs_inet.TCP()
            tcp_hdr.vsParse(b, offset, fast=True)
            offset += len(tcp_hdr)
            pdata = b[offset:]
            yield pkt, ipv4, tcp_hdr, pdata
        elif ipv4.proto == vs_inet.IPPROTO_UDP:
            if tsize < 8:
                continue
            if not reuse:
                udp_hdr = vs_inet.UDP()
            udp_hdr.vsParse(b, offset, fast=True)
            offset += len(udp_hdr)
            pdata = b[offset:]
            yield pkt, ipv4, udp_hdr, pdata
        elif ipv4.proto == vs_inet.IPPROTO_ICMP:
            if tsize < 4:
                continue
            if not reuse:
                icmp_hdr = vs_inet.ICMP()
            icmp_hdr.vsParse(b, offset, fast=True)
            offset += len(icmp_hdr)
            pdata = b[offset:]
            yield pkt, ipv4, icmp_hdr, pdata
        else:
            # unhandled IP protocol
            pass

def _iterPcapNgFile(fd, reuse=False):
    header = PCAPNG_GENERIC_BLOCK_HEADER()
    ifaceidx = 0
    ifacedict = {}
    roff = 0
    bigend = False

    # peek at the next block header without consuming it
    curroff = fd.tell()
    b0 = fd.read(len(header))
    fd.seek(curroff)

    while len(b0) == len(header):
        header.vsParse(b0, fast=True)
        body = fd.read(header.blocksize)

        if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
            shb = PCAPNG_SECTION_HEADER_BLOCK()
            roff = shb.vsParse(body)
            bigend = shb.bigend
            # reset interface state since we're in a new section
            ifaceidx = 0
            ifacedict = {}
        elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
            idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
            roff = idb.vsParse(body)
            # save off the interface for later reference
            ifacedict[ifaceidx] = idb
            ifaceidx += 1
        elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
            spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
            roff = spb.vsParse(body)
            # Simple Packet Blocks implicitly belong to the first interface
            # in the section (the original code referenced an unbound
            # "iface" name here).
            iface = ifacedict.get(0)
            spb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, spb)
            if tup is not None:
                # if it is None, just fall through & read the next block
                yield tup
        elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
            epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
            roff = epb.vsParse(body)
            iface = ifacedict.get(epb.interfaceid)
            epb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, epb)
            if tup is not None:
                # if tup is None, just fall through & read the next block
                yield tup
        # TODO: other blocks needed?
        # PCAPNG_BLOCKTYPE_PACKET (obsolete)
        # PCAPNG_BLOCKTYPE_NAME_RESOLUTION
        # PCAPNG_BLOCKTYPE_INTERFACE_STATS
        else:
            # unknown block type: skip it
            pass

        curroff = fd.tell()
        b0 = fd.read(len(header))
        fd.seek(curroff)

def _parsePcapngPacketBytes(linktype, pkt):
    '''
    pkt is either a parsed PCAPNG_SIMPLE_PACKET_BLOCK or
    PCAPNG_ENHANCED_PACKET_BLOCK

    On success returns the tuple (pcapng_pkt, ipv4_vstruct,
    transport_vstruct, pdata).
    Returns None if the packet can't be parsed
    '''
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)

    eII = vs_inet.ETHERII()
    eIIsize = len(eII)

    offset = 0
    if linktype == PCAP_LINKTYPE_ETHER:
        if len(pkt.data) < eIIsize:
            return None
        eII.vsParse(pkt.data, 0, fast=True)
        # No support for non-ip protocols yet...
        if eII.etype not in (vs_inet.ETH_P_IP, vs_inet.ETH_P_VLAN):
            return None
        offset += eIIsize
        if eII.etype == vs_inet.ETH_P_VLAN:
            offset += 4
    elif linktype == PCAP_LINKTYPE_RAW:
        pass

    ipv4 = vs_inet.IPv4()
    if (len(pkt.data) - offset) < len(ipv4):
        return None
    ipv4.vsParse(pkt.data, offset, fast=True)

    # Make b *only* the IP datagram bytes...
    b = pkt.data[offset:offset + ipv4.totlen]
    offset = 0
    offset += len(ipv4)
    tsize = len(b) - offset

    if ipv4.proto == vs_inet.IPPROTO_TCP:
        if tsize < 20:
            return None
        tcp_hdr = vs_inet.TCP()
        tcp_hdr.vsParse(b, offset, fast=True)
        offset += len(tcp_hdr)
        pdata = b[offset:]
        return pkt, ipv4, tcp_hdr, pdata
    elif ipv4.proto == vs_inet.IPPROTO_UDP:
        if tsize < 8:
            return None
        udp_hdr = vs_inet.UDP()
        udp_hdr.vsParse(b, offset, fast=True)
        offset += len(udp_hdr)
        pdata = b[offset:]
        return pkt, ipv4, udp_hdr, pdata
    elif ipv4.proto == vs_inet.IPPROTO_ICMP:
        if tsize < 4:
            return None
        icmp_hdr = vs_inet.ICMP()
        icmp_hdr.vsParse(b, offset, fast=True)
        offset += len(icmp_hdr)
        pdata = b[offset:]
        return pkt, ipv4, icmp_hdr, pdata

    # unhandled IP protocol
    return None
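
# A hedged usage sketch (not part of the original module): stream TCP
# payload sizes out of a capture file. "traffic.pcap" is a hypothetical
# path, and the srcaddr/dstaddr/srcport/dstport field names are assumed
# from vstruct.defs.inet. The same loop handles both classic pcap and
# pcapng, since iterPcapFile dispatches on the file's magic value.
def _example_dump_tcp(filename='traffic.pcap'):
    for pkt, ipv4, transport, pdata in iterPcapFileName(filename):
        if ipv4.proto != vs_inet.IPPROTO_TCP:
            continue
        print('%s:%d -> %s:%d  %d payload bytes' % (
            ipv4.srcaddr, transport.srcport,
            ipv4.dstaddr, transport.dstport, len(pdata)))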
apache-2.0