"""
Created on Wed Apr 22 16:02:53 2015
Basic integrate-and-fire neuron
R Rao 2007
translated to Python by rkp 2015
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys
# input current
I = 1 # nA
# capacitance and leak resistance
C = 1 # nF
R = 40 # M ohms
# I & F implementation: dV/dt = -V/(RC) + I/C
# Using h = 1 ms step size, Euler method
V = 0
tstop = 200000
abs_ref = 5 # absolute refractory period
ref = 0 # absolute refractory period counter
V_trace = [] # voltage trace for plotting
V_th = 10 # spike threshold
spiketimes = [] # list of spike times
# add Gaussian noise to the input current
noiseamp = float(sys.argv[2]) # noise amplitude (nA), read from the command line
I += noiseamp*np.random.normal(0, 1, (tstop,)) # nA; Gaussian noise
inter_spike_time = []
last_spike = 0
for t in range(tstop):
if not ref:
V = V - (V/(R*C)) + (I[t]/C)
else:
ref -= 1
V = 0.2 * V_th # reset voltage
if V > V_th:
V = 50 # emit spike
ref = abs_ref # set refractory counter
spiketimes.append(t) # record the spike time
inter_spike_time.append(t - last_spike) # inter-spike interval
last_spike = t
V_trace += [V]
#plt.plot(V_trace)
plt.hist(inter_spike_time, bins=100)
plt.savefig(sys.argv[1], dpi=200)
plt.show()
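# Example invocation (hypothetical file name and arguments; argv[1] is the
# output figure path, argv[2] the noise amplitude in nA):
#   python if_neuron.py isi_histogram.png 0.5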
|
# -*- coding: utf-8 -*-
import sys, os, time, random
import re
import json
import argparse
import logging
import psutil
from multiprocessing import Process, Queue, Pipe
from lib.graphic.baiduGraphic import BaiduGraphic
from lib.voice.baiduVoice import BaiduVoice
from lib.voice.baseVoice import AbstractVoiceEngine
from plugin.bootstrap import Bootstrap
import lib.appPath
import lib.util
import plugin.volume.pulseAudio
from plugin.fm.doubanFM import DoubanFM
from lib.mail import SMTPMail
from plugin.monitor.people import PeopleMonitor
from plugin.feeds.jiqizhixin import JiqizhixinFeed
import plugin.feeds.jiqizhixin
def doubanFM(logger,args):
speaker = BaiduVoice.get_instance()
douban_fm = DoubanFM.get_instance()
douban_fm.set_speaker(speaker)
for i in range(0,2):
song = douban_fm.playRandomLikeSong()
def pulseAudio(logger,args):
baidu_voice = BaiduVoice.get_instance()
out_to_fp, in_to_fp = Pipe(True)
out_from_fp, in_from_fp = Pipe(True)
son_p = Process(target=Bootstrap.son_process,
args=(baidu_voice,
(out_to_fp, in_to_fp),
(out_from_fp, in_from_fp),
plugin.volume.pulseAudio.son_process_handle,False))
son_p.start()
# After to_pipe has been forked into the child, close the main process's output end; one end of the created Pipe connects to the main process's input, the other end to the child process's output.
out_to_fp.close()
# After from_pipe has been forked into the child, close the main process's input end; one end of the created Pipe connects to the child process's input, the other end to the parent process's output.
in_from_fp.close()
words = [
u"打开声音",
u"声音小一点",
u"声音小点",
u"声音再小一点",
u"声音大点",
u"声音再大一点",
u"静音",
u"打开声音",
u"安静",
u"打开声音",
#u"声音放到最大",
]
for text in words:
is_valid = plugin.volume.pulseAudio.isValid(text)
if is_valid is True:
logger.debug("word %s is valid" % text)
plugin.volume.pulseAudio.process_handle(text,in_to_fp,out_from_fp,son_p,baidu_voice)
time.sleep(3)
else:
logger.debug("word %s is not valid" % text)
in_to_fp.close()
out_from_fp.close()
son_p.join()
logger.debug("debug pulseAudio is over")
def mail(logger,args):
smtpMail = SMTPMail.get_instance()
with open('./mind-idea.jpg', 'rb') as f:
smtpMail.sendImageEmail(f.read())
logger.debug("debug mail is over")
def peopleMonitor(logger,args):
speaker = BaiduVoice.get_instance()
people_monitor = PeopleMonitor.get_instance()
people_monitor.set_speaker(speaker)
def get_text_callback():
index = random.choice([0,1])
test_words = [
u'打开人体监控',
u'结束人体监控',
]
logger.debug("index %d, text:%s",index,test_words[index])
time.sleep(5)
return test_words[index]
people_monitor.start(get_text_callback)
logger.debug("debug peopleMonitor is over")
def baiduGraphic(logger,args):
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
baidu_graphic = BaiduGraphic.get_instance()
for detect_type in ["plant","dish","car","logo","animal","object","face"]:
file = os.path.join(lib.appPath.APP_PATH, '.'.join([detect_type,'jpg']))
img = get_file_content(file)
res = baidu_graphic.detectImage(img,detect_type)
logger.debug("%s: %s",detect_type,json.dumps(res,encoding="UTF-8",ensure_ascii=False))
logger.debug("debug baiduGraphic is over")
def jiqizhixinFeed(logger,args):
speaker = BaiduVoice.get_instance()
out_to_fp, in_to_fp = Pipe(True)
out_from_fp, in_from_fp = Pipe(True)
son_p = Process(target=Bootstrap.son_process,
args=(speaker,
(out_to_fp, in_to_fp),
(out_from_fp, in_from_fp),
plugin.feeds.jiqizhixin.son_process_handle,False))
son_p.start()
# After to_pipe has been forked into the child, close the main process's output end; one end of the created Pipe connects to the main process's input, the other end to the child process's output.
out_to_fp.close()
# After from_pipe has been forked into the child, close the main process's input end; one end of the created Pipe connects to the child process's input, the other end to the parent process's output.
in_from_fp.close()
debug_words = [
u"阅读机器之心新闻",
u"阅读下一条",
u"下一条",
u"下一条",
u"结束阅读",
]
for text in debug_words:
is_valid = plugin.feeds.jiqizhixin.isValid(text)
if is_valid is True:
if any(word in text for word in [u'结束阅读',u'阅读机器之心']):
time.sleep(60)
plugin.feeds.jiqizhixin.process_handle(text,in_to_fp,out_from_fp,son_p,speaker)
if any(word in text for word in [u'结束阅读']): break
time.sleep(7)
else:
print("word %s is not valid" % text)
in_to_fp.close()
out_from_fp.close()
son_p.join()
'''
instance = JiqizhixinFeed.get_instance()
instance.set_speaker(speaker)
instance.update_feeds()
ct = instance.get_feeds_count()
for i in range(0,ct):
instance.get_next_feed()
'''
logger.debug("debug jiqizhixinFeed is over")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='debug')
parser.add_argument('--debug', action='store_true',
help='Show debug messages')
parser.add_argument('--pulseAudio', action='store_true',
help='Show debug pulse audio plugin messages')
parser.add_argument('--doubanFM', action='store_true',
help='Show debug douban fm plugin messages')
parser.add_argument('--mail', action='store_true',
help='Show debug mail lib messages')
parser.add_argument('--peopleMonitor', action='store_true',
help='Show debug people monitor plugin messages')
parser.add_argument('--baiduGraphic', action='store_true',
help='Show debug baidu graphic lib messages')
parser.add_argument('--jiqizhixinFeed', action='store_true',
help='Show debug jiqizhixinFeed plugin messages')
args = parser.parse_args()
logging.basicConfig(stream=sys.stdout)
logger = logging.getLogger("")
if args.debug:
logger.setLevel(logging.DEBUG)
if args.pulseAudio:
pulseAudio(logger,args)
exit(0)
if args.doubanFM:
doubanFM(logger,args)
exit(0)
if args.mail:
mail(logger,args)
exit(0)
if args.peopleMonitor:
peopleMonitor(logger,args)
exit(0)
if args.baiduGraphic:
baiduGraphic(logger,args)
exit(0)
if args.jiqizhixinFeed:
jiqizhixinFeed(logger,args)
exit(0)
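# Example invocations (hypothetical script name; each flag exercises one plugin):
#   python debug.py --debug --pulseAudio
#   python debug.py --debug --jiqizhixinFeed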
|
# coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from elements_sdk.configuration import Configuration
class TapeLibraryEndpointResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'drives': 'list[TapeLibrarySlot]',
'mailbox': 'list[TapeLibrarySlot]',
'slots': 'list[TapeLibrarySlot]'
}
attribute_map = {
'drives': 'drives',
'mailbox': 'mailbox',
'slots': 'slots'
}
def __init__(self, drives=None, mailbox=None, slots=None, local_vars_configuration=None): # noqa: E501
"""TapeLibraryEndpointResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._drives = None
self._mailbox = None
self._slots = None
self.discriminator = None
self.drives = drives
self.mailbox = mailbox
self.slots = slots
@property
def drives(self):
"""Gets the drives of this TapeLibraryEndpointResponse. # noqa: E501
:return: The drives of this TapeLibraryEndpointResponse. # noqa: E501
:rtype: list[TapeLibrarySlot]
"""
return self._drives
@drives.setter
def drives(self, drives):
"""Sets the drives of this TapeLibraryEndpointResponse.
:param drives: The drives of this TapeLibraryEndpointResponse. # noqa: E501
:type: list[TapeLibrarySlot]
"""
if self.local_vars_configuration.client_side_validation and drives is None: # noqa: E501
raise ValueError("Invalid value for `drives`, must not be `None`") # noqa: E501
self._drives = drives
@property
def mailbox(self):
"""Gets the mailbox of this TapeLibraryEndpointResponse. # noqa: E501
:return: The mailbox of this TapeLibraryEndpointResponse. # noqa: E501
:rtype: list[TapeLibrarySlot]
"""
return self._mailbox
@mailbox.setter
def mailbox(self, mailbox):
"""Sets the mailbox of this TapeLibraryEndpointResponse.
:param mailbox: The mailbox of this TapeLibraryEndpointResponse. # noqa: E501
:type: list[TapeLibrarySlot]
"""
if self.local_vars_configuration.client_side_validation and mailbox is None: # noqa: E501
raise ValueError("Invalid value for `mailbox`, must not be `None`") # noqa: E501
self._mailbox = mailbox
@property
def slots(self):
"""Gets the slots of this TapeLibraryEndpointResponse. # noqa: E501
:return: The slots of this TapeLibraryEndpointResponse. # noqa: E501
:rtype: list[TapeLibrarySlot]
"""
return self._slots
@slots.setter
def slots(self, slots):
"""Sets the slots of this TapeLibraryEndpointResponse.
:param slots: The slots of this TapeLibraryEndpointResponse. # noqa: E501
:type: list[TapeLibrarySlot]
"""
if self.local_vars_configuration.client_side_validation and slots is None: # noqa: E501
raise ValueError("Invalid value for `slots`, must not be `None`") # noqa: E501
self._slots = slots
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TapeLibraryEndpointResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TapeLibraryEndpointResponse):
return True
return self.to_dict() != other.to_dict()
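# Usage sketch (illustrative, not part of the generated model): construct a
# response and serialize it; empty lists stand in for list[TapeLibrarySlot].
if __name__ == "__main__":
    example = TapeLibraryEndpointResponse(drives=[], mailbox=[], slots=[])
    print(example.to_dict())
    print(example == TapeLibraryEndpointResponse(drives=[], mailbox=[], slots=[]))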
|
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import datetime as dt
def init_browser():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
# return Browser("chrome", executable_path, headless=False)
return Browser("chrome", **executable_path, headless=True)
mars_info = {}
def scrape_mars_news():
browser = init_browser()
url="https://mars.nasa.gov/news"
browser.visit(url)
browser.is_element_present_by_css("ul.item_list li.slide", wait_time=0.5)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
title = soup.find('div', class_='content_title').find('a').text
paragraph = soup.find('div', class_='article_teaser_body').text
return title, paragraph
def scrape_mars_image():
browser = init_browser()
url="https://www.jpl.nasa.gov/spaceimages/?search=&categ"
browser.visit(url)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
featured_url = soup.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1]
main_url='https://www.jpl.nasa.gov'
featured_image_url=main_url + featured_url
return featured_image_url
def scrape_mars_weather():
browser = init_browser()
url="https://twitter.com/marswxreport?lang=en"
browser.visit(url)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
mars_weather = soup.find('div', class_='js-tweet-text-container').find('p').text
return mars_weather
def scrape_mars_facts():
url="https://space-facts.com/mars"
tables = pd.read_html(url)
df_mars_facts = tables[1]
df_mars_facts.columns = ["Facts","Value"]
df_mars_facts.set_index('Facts', inplace=True)
html_table_mars = df_mars_facts.to_html(classes="table table-striped")
return html_table_mars
def scrape_mars_hemispheres():
browser = init_browser()
url="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
html=browser.html
soup=BeautifulSoup(html, "html.parser")
hemispheres=soup.find_all('div', class_='item')
hemisphere_urls=[]
main_url='https://astrogeology.usgs.gov'
for h in hemispheres:
title=h.find('h3').text
img_url = h.find('a', class_='itemLink product-item')['href']
browser.visit(main_url+ img_url)
img_html=browser.html
soup=BeautifulSoup(img_html, 'html.parser')
img_url_2=main_url+ soup.find('img', class_='wide-image')['src']
hemisphere_urls.append({"title": title, "img_url": img_url_2})
return hemisphere_urls
def scrape_all():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
title, paragraph = scrape_mars_news()
image_url = scrape_mars_image()
mars_weather = scrape_mars_weather()
facts = scrape_mars_facts()
hemisphere_image_urls = scrape_mars_hemispheres()
data = {
"title": title,
"paragraph": paragraph,
"featured_image": image_url,
"weather": mars_weather,
"facts": facts,
"hemispheres": hemisphere_image_urls,
}
browser.quit()
return data
if __name__ == "__main__":
print(scrape_all())
|
import random
from hashlib import md5
from faker import Faker
from six import with_metaclass
from pganonymizer.exceptions import InvalidProvider, InvalidProviderArgument
PROVIDERS = []
fake_data = Faker()
def get_provider(provider_config):
"""
Return a provider instance, according to the schema definition of a field.
:param dict provider_config: A provider configuration for a single field, e.g.:
{'name': 'set', 'value': 'Foo'}
:return: A provider instance
:rtype: Provider
"""
def get_provider_class(cid):
for klass in PROVIDERS:
if klass.matches(cid):
return klass
name = provider_config['name']
cls = get_provider_class(name)
if cls is None:
raise InvalidProvider('Could not find provider with id %s' % name)
return cls(**provider_config)
class ProviderMeta(type):
"""Metaclass to register all provider classes."""
def __new__(cls, clsname, bases, attrs):
newclass = super(ProviderMeta, cls).__new__(cls, clsname, bases, attrs)
if clsname != 'Provider':
PROVIDERS.append(newclass)
return newclass
class Provider(object):
"""Base class for all providers."""
id = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@classmethod
def matches(cls, name):
return cls.id.lower() == name.lower()
def alter_value(self, value):
raise NotImplementedError
class ChoiceProvider(with_metaclass(ProviderMeta, Provider)):
"""Provider that returns a random value from a list of choices."""
id = 'choice'
def alter_value(self, value):
return random.choice(self.kwargs.get('values'))
class ClearProvider(with_metaclass(ProviderMeta, Provider)):
"""Provider to set a field value to None."""
id = 'clear'
def alter_value(self, value):
return None
class FakeProvider(with_metaclass(ProviderMeta, Provider)):
"""Provider to generate fake data."""
id = 'fake'
@classmethod
def matches(cls, name):
return cls.id.lower() == name.split('.')[0].lower()
def alter_value(self, value):
func_name = self.kwargs['name'].split('.')[1]
try:
func = getattr(fake_data, func_name)
except AttributeError as exc:
raise InvalidProviderArgument(exc)
return func()
class MaskProvider(with_metaclass(ProviderMeta, Provider)):
"""Provider that masks the original value."""
id = 'mask'
default_sign = 'X'
def alter_value(self, value):
sign = self.kwargs.get('sign', self.default_sign) or self.default_sign
return sign * len(value)
class MD5Provider(with_metaclass(ProviderMeta, Provider)):
"""Provider to hash a value with the md5 algorithm."""
id = 'md5'
def alter_value(self, value):
return md5(value.encode('utf-8')).hexdigest()
class SetProvider(with_metaclass(ProviderMeta, Provider)):
"""Provider to set a static value."""
id = 'set'
def alter_value(self, value):
return self.kwargs.get('value')
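# Usage sketch: resolve providers from schema-style configs and anonymize values.
if __name__ == '__main__':
    hashed = get_provider({'name': 'md5'})
    print(hashed.alter_value('john.doe@example.com'))  # md5 hexdigest
    masked = get_provider({'name': 'mask', 'sign': '*'})
    print(masked.alter_value('secret'))  # '******'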
|
from nothing.main import Nothing
Nothing = Nothing()
|
import re
import itertools
from lxml import html
from scrapy.http.request.form import _get_inputs
class GenericForm:
def __init__(self, **kwargs):
self.kwargs = kwargs
def _pick_node(self, doc, selector):
nodes = doc.xpath(selector['xpath'])
if nodes:
return nodes[0]
def _filter_by_regex(self, lines, regex):
search_regex = re.compile(regex).search
return [l for l in lines if search_regex(l)]
def _get_field_values(self, form, field_descriptor):
if 'name' in field_descriptor:
field_name = field_descriptor['name']
else:
select_field = self._pick_node(form, field_descriptor)
field_name = select_field.name
field_type = field_descriptor['type']
if field_type == 'constants':
return [[field_name, option] for option in self.get_value(field_descriptor)]
elif field_type == 'iterate':
select_field = self._pick_node(form, field_descriptor)
values = self._filter_by_regex(select_field.value_options,
self.get_value(field_descriptor))
return [[select_field.name, option] for option in values]
elif field_type == 'inurl':
return [[field_name, option] for option in field_descriptor['file_values']]
def get_value(self, field_descriptor):
values = field_descriptor.get('value', '')
if isinstance(values, list):
return [val.format(**self.kwargs) for val in values]
else:
return values.format(**self.kwargs)
def set_values_url_field(self, field_descriptor, body):
field_descriptor['file_values'] = body.split('\n')
def get_url_field(self, form_descriptor):
for i, field_descriptor in enumerate(form_descriptor['fields']):
if (field_descriptor['type'] == 'inurl'
and ('file_values' not in field_descriptor or
not field_descriptor['file_values'])):
yield i, field_descriptor
def fill_generic_form(self, url, body, form_descriptor):
doc = html.document_fromstring(body, base_url=url)
form = self._pick_node(doc, form_descriptor)
if form is None:
raise Exception('Generic form not found')
# Get all the possible inputs for each field
values = [self._get_field_values(form, field)
for field in form_descriptor['fields']]
for params in itertools.product(*values):
form_values = dict(_get_inputs(form, None, False, None, None))
for name, option in params:
form_values[name] = option
yield form_values.items(), form.action or form.base_url, form.method
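# Illustrative (hypothetical) descriptor for fill_generic_form; the field types
# mirror the branches handled in _get_field_values above.
EXAMPLE_FORM_DESCRIPTOR = {
    'xpath': '//form[@id="search"]',
    'fields': [
        {'type': 'constants', 'name': 'category', 'value': ['books', 'music']},
        {'type': 'iterate', 'xpath': '//select[@name="region"]', 'value': '.*'},
    ],
}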
|
from unittest import mock
import pytest
from django import forms
from django.template import Context, Template, TemplateSyntaxError
from tapeforms.mixins import TapeformMixin
class DummyForm(TapeformMixin, forms.Form):
my_field1 = forms.CharField()
class TestFormTag:
@mock.patch('tapeforms.templatetags.tapeforms.render_to_string')
def test_render(self, render_mock):
render_mock.return_value = 'render-mock-called'
form = DummyForm()
template = Template('{% load tapeforms %}{% form form %}')
assert template.render(Context({'form': form})) == 'render-mock-called'
assert render_mock.call_count == 1
assert render_mock.call_args[0][0] == 'tapeforms/layouts/default.html'
assert sorted(render_mock.call_args[0][1]) == [
'errors',
'form',
'hidden_fields',
'visible_fields',
]
assert render_mock.call_args[0][1]['form'] == form
@mock.patch('tapeforms.templatetags.tapeforms.render_to_string')
def test_render_using_template(self, render_mock):
render_mock.return_value = 'render-using-mock-called'
form = DummyForm()
template = Template('{% load tapeforms %}{% form form using="foo.html" %}')
assert template.render(Context({'form': form})) == 'render-using-mock-called'
assert render_mock.call_count == 1
assert render_mock.call_args[0][0] == 'foo.html'
def test_render_invalid_form(self):
template = Template('{% load tapeforms %}{% form form %}')
with pytest.raises(TemplateSyntaxError) as exc:
template.render(Context({'form': object()}))
assert str(exc.value) == (
'Provided form should be a `Form` instance, actual type: object'
)
class TestFormfieldTag:
@mock.patch('tapeforms.templatetags.tapeforms.render_to_string')
def test_render(self, render_mock):
render_mock.return_value = 'render-mock-called'
form = DummyForm()
template = Template('{% load tapeforms %}{% formfield form.my_field1 %}')
assert template.render(Context({'form': form})) == 'render-mock-called'
assert render_mock.call_count == 1
assert render_mock.call_args[0][0] == 'tapeforms/fields/default.html'
assert sorted(render_mock.call_args[0][1]) == [
'container_css_class',
'errors',
'field',
'field_id',
'field_name',
'form',
'help_text',
'label',
'label_css_class',
'required',
'widget_class_name',
'widget_input_type',
]
assert render_mock.call_args[0][1]['form'] == form
assert render_mock.call_args[0][1]['field'] == form['my_field1']
@mock.patch('tapeforms.templatetags.tapeforms.render_to_string')
def test_render_using_template(self, render_mock):
render_mock.return_value = 'render-using-mock-called'
form = DummyForm()
template = Template(
'{% load tapeforms %}{% formfield form.my_field1 using="foo.html" %}'
)
assert template.render(Context({'form': form})) == 'render-using-mock-called'
assert render_mock.call_count == 1
assert render_mock.call_args[0][0] == 'foo.html'
def test_render_invalid_field(self):
template = Template('{% load tapeforms %}{% formfield field %}')
with pytest.raises(TemplateSyntaxError) as exc:
template.render(Context({'field': object()}))
assert str(exc.value) == (
'Provided field should be a `BoundField` instance, actual type: object'
)
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
# @param node: the node in the list should be deleted
# @return: nothing
def deleteNode(self, node):
# write your code here
node.val = node.next.val
node.next = node.next.next
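# Usage sketch (assumes the ListNode class from the docstring above):
#   head = ListNode(1, ListNode(2, ListNode(3)))
#   Solution().deleteNode(head.next)  # list becomes 1 -> 3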
|
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from f5.sdk_exception import F5SDKError
class IappParser(object):
template_sections = [
'presentation',
'implementation',
'html-help',
'role-acl'
]
tcl_list_for_attr_re = r'{(\s*(\w+)?\s*)+}'
tcl_list_for_section_re = r'(\s*\w+\s*)+'
section_map = {
'html-help': 'htmlHelp',
'role-acl': 'roleAcl'
}
attr_map = {
'requires-modules': 'requiresModules',
'ignore-verification': 'ignoreVerification',
'tmpl-signature': 'tmplSignature',
'requires-bigip-version-min': 'requiresBigipVersionMin',
'requires-bigip-version-max': 'requiresBigipVersionMax',
'total-signing-status': 'totalSigningStatus',
'prerequisite-errors': 'prerequisiteErrors',
'verification-status': 'verificationStatus',
'signing-key': 'signingKey',
'tmpl-checksum': 'tmplChecksum'
}
sections_not_required = ['html-help', 'role-acl', 'macro']
tcl_list_patterns = {
'requires-modules': tcl_list_for_attr_re,
'role-acl': tcl_list_for_section_re
}
template_attrs = [
'description',
'partition',
'requires-modules',
'ignore-verification',
'requires-bigip-version-max',
'requires-bigip-version-min',
'signing-key',
'tmpl-checksum',
'tmpl-signature',
'total-signing-status',
'prerequisite-errors',
'verification-status',
]
def __init__(self, template_str):
'''Initialize class.
:param template_str: string of iapp template file
:raises: EmptyTemplateException
'''
if template_str:
self.template_str = str(template_str)
else:
raise EmptyTemplateException('Template empty or None value.')
def _get_section_end_index(self, section, section_start):
'''Get end of section's content.
In the loop to match braces, we must not count curly braces that are
within a doubly quoted string.
:param section: string name of section
:param section_start: integer index of section's beginning
:return: integer index of section's end
:raises: CurlyBraceMismatchException
'''
brace_count = 0
in_quote = False
in_escape = False
for index, char in enumerate(self.template_str[section_start:]):
# This check is to look for items inside of an escape sequence.
#
# For example, in the iApp team's iApps, there is a proc called
# "iapp_get_items" which has a line that looks like this.
#
# set val [string map {\" ""} $val]
#
# This will cause this parser to fail because of the unbalanced
# quotes. Therefore, this conditional takes this into consideration
#
if char == '\\' and not in_escape:
in_escape = True
elif char == '\\' and in_escape:
in_escape = False
if not in_escape:
if char == '"' and not in_quote:
in_quote = True
elif char == '"' and in_quote:
in_quote = False
if char == '{' and not in_quote:
brace_count += 1
elif char == '}' and not in_quote:
brace_count -= 1
if brace_count == 0:
return index + section_start
if brace_count != 0:
raise CurlyBraceMismatchException(
'Curly braces mismatch in section %s.' % section
)
def _get_section_start_index(self, section):
'''Get start of a section's content.
:param section: string name of section
:return: integer index of section's beginning
:raises: NonextantSectionException
'''
sec_start_re = r'%s\s*\{' % section
found = re.search(sec_start_re, self.template_str)
if found:
return found.end() - 1
raise NonextantSectionException(
'Section %s not found in template' % section
)
def _get_template_name(self):
'''Find template name.
:returns: string of template name
:raises: NonextantTemplateNameException
'''
start_pattern = r"sys application template\s+" \
r"(\/[\w\.\-]+\/)?" \
r"(?P<name>[\w\.\-]+)\s*\{"
template_start = re.search(start_pattern, self.template_str)
if template_start:
return template_start.group('name')
raise NonextantTemplateNameException('Template name not found.')
def _get_template_attr(self, attr):
'''Find the attribute value for a specific attribute.
:param attr: string of attribute name
:returns: string of attribute value
'''
attr_re = r'{0}\s+.*'.format(attr)
attr_found = re.search(attr_re, self.template_str)
if attr_found:
attr_value = attr_found.group(0).replace(attr, '', 1)
return attr_value.strip()
def _add_sections(self):
'''Add the found and required sections to the templ_dict.'''
for section in self.template_sections:
try:
sec_start = self._get_section_start_index(section)
except NonextantSectionException:
if section in self.sections_not_required:
continue
raise
sec_end = self._get_section_end_index(section, sec_start)
section_value = self.template_str[sec_start+1:sec_end].strip()
section, section_value = self._transform_key_value(
section,
section_value,
self.section_map
)
self.templ_dict['actions']['definition'][section] = section_value
self.template_str = self.template_str[:sec_start+1] + \
self.template_str[sec_end:]
def _add_cli_scripts(self):
'''Add the found external sections to the templ_dict.'''
pattern = r"cli script\s+" \
r"(\/[\w\.\-]+\/)?" \
r"(?P<name>[\w\.\-]+)\s*\{"
sections = re.finditer(pattern, self.template_str)
for section in sections:
if 'scripts' not in self.templ_dict:
self.templ_dict['scripts'] = []
try:
sec_start = self._get_section_start_index(
section.group('name')
)
except NonextantSectionException:
continue
sec_end = self._get_section_end_index(
section.group('name'), sec_start
)
section_value = self.template_str[sec_start+1:sec_end].strip()
self.templ_dict['scripts'].append(dict(
name=section.group('name'),
script=section_value
))
self.template_str = self.template_str[:sec_start+1] + \
self.template_str[sec_end:]
def _add_attrs(self):
'''Add the found and required attrs to the templ_dict.'''
for attr in self.template_attrs:
attr_value = self._get_template_attr(attr)
if not attr_value:
continue
attr, attr_value = self._transform_key_value(
attr,
attr_value,
self.attr_map
)
self.templ_dict[attr] = attr_value
def _parse_tcl_list(self, attr, list_str):
'''Turns a string representation of a TCL list into a Python list.
:param attr: string name of attribute
:param list_str: string representation of a list
:returns: Python list
'''
list_str = list_str.strip()
if not list_str:
return []
if list_str[0] != '{' and list_str[-1] != '}':
if list_str.find('none') >= 0:
return list_str
if not re.search(self.tcl_list_patterns[attr], list_str):
raise MalformedTCLListException(
'TCL list for "%s" is malformed. ' % attr
)
list_str = list_str.strip('{').strip('}')
list_str = list_str.strip()
return list_str.split()
def _transform_key_value(self, key, value, map_dict):
'''Massage keys and values for iapp dict to look like JSON.
:param key: string dictionary key
:param value: string dictionary value
:param map_dict: dictionary to map key names
'''
if key in self.tcl_list_patterns:
value = self._parse_tcl_list(key, value)
if key in map_dict:
key = map_dict[key]
return key, value
def parse_template(self):
'''Parse the template string into a dict.
Find the (large) inner sections first, save them, and remove them from
a modified string. Then find the template attributes in the modified
string.
:returns: dictionary of parsed template
'''
self.templ_dict = {'actions': {'definition': {}}}
self.templ_dict['name'] = self._get_template_name()
self._add_cli_scripts()
self._add_sections()
self._add_attrs()
return self.templ_dict
class EmptyTemplateException(F5SDKError):
pass
class CurlyBraceMismatchException(F5SDKError):
pass
class NonextantSectionException(F5SDKError):
pass
class NonextantTemplateNameException(F5SDKError):
pass
class MalformedTCLListException(F5SDKError):
pass
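# Usage sketch: parse a minimal, illustrative iApp template string into a dict;
# a real template would also carry the attributes and sections listed above.
if __name__ == '__main__':
    sample = """sys application template sample_template {
 implementation { set x 1 }
 presentation { section general { } }
}"""
    print(IappParser(sample).parse_template())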
|
from django.db import models
from utils.models import CampRelatedModel
from django.core.exceptions import ValidationError
import reversion
class InfoCategory(CampRelatedModel):
class Meta:
ordering = ["weight", "headline"]
verbose_name_plural = "Info Categories"
headline = models.CharField(
max_length=100, help_text="The headline of this info category"
)
anchor = models.SlugField(
help_text="The HTML anchor to use for this info category."
)
weight = models.PositiveIntegerField(
help_text="Determines sorting/ordering. Heavier categories sink to the bottom. Categories with the same weight are ordered alphabetically. Defaults to 100.",
default=100,
)
team = models.ForeignKey(
"teams.Team",
help_text="The team responsible for this info category.",
on_delete=models.PROTECT,
related_name="info_categories",
)
def clean(self):
if InfoItem.objects.filter(
category__team__camp=self.camp, anchor=self.anchor
).exists():
# this anchor is already in use on an item, so it cannot be used (must be unique on the page)
raise ValidationError(
{"anchor": "Anchor is already in use on an info item for this camp"}
)
@property
def camp(self):
return self.team.camp
camp_filter = "team__camp"
def __str__(self):
return "%s (%s)" % (self.headline, self.camp)
# We want to have info items under version control
@reversion.register()
class InfoItem(CampRelatedModel):
class Meta:
ordering = ["weight", "headline"]
unique_together = (("anchor", "category"), ("headline", "category"))
category = models.ForeignKey(
"info.InfoCategory", related_name="infoitems", on_delete=models.PROTECT
)
headline = models.CharField(max_length=100, help_text="Headline of this info item.")
anchor = models.SlugField(help_text="The HTML anchor to use for this info item.")
body = models.TextField(help_text="Body of this info item. Markdown is supported.")
weight = models.PositiveIntegerField(
help_text="Determines sorting/ordering. Heavier items sink to the bottom. Items with the same weight are ordered alphabetically. Defaults to 100.",
default=100,
)
@property
def camp(self):
return self.category.camp
camp_filter = "category__team__camp"
def clean(self):
if (
hasattr(self, "category")
and InfoCategory.objects.filter(
team__camp=self.category.team.camp, anchor=self.anchor
).exists()
):
# this anchor is already in use on a category, so it cannot be used here (they must be unique on the entire page)
raise ValidationError(
{"anchor": "Anchor is already in use on an info category for this camp"}
)
def __str__(self):
return "%s (%s)" % (self.headline, self.category)
|
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
gpus = '1'
import numpy
import tensorflow as tf
import logging
from tensorflow import logging as log
from collections import OrderedDict
from data_iterator import TextIterator
from tensorflow.contrib import rnn
import warnings
import pickle as pkl
import sys
import pprint
import pdb
import os
import copy
import time
logger = logging.getLogger(__name__)
def _s(pp, name): # prepend a prefix to a parameter name
return '{}_{}'.format(pp, name)
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
warnings.warn('{} is not in the archive'.format(kk))
continue
params[kk] = pp[kk]
return params
def ortho_weight(ndim): # used by norm_weight below
"""
Random orthogonal weights
Used by norm_weight (below), in which case we
are ensuring that the rows are orthogonal
(i.e. W = U Sigma V^T, where U has the same number
of rows and V the same number of columns as W)
"""
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
# W = numpy.random.uniform(-0.5,0.5,size=(nin,nout))
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
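# Illustrative behaviour of the two initializers above:
#   norm_weight(4)      -> 4x4 orthogonal matrix (falls back to ortho_weight)
#   norm_weight(4, 6)   -> 4x6 Gaussian matrix scaled by `scale`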
def prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100):
# length = [len(s) for s in sequence]
length, length_d1, length_d2 = [], [], []
for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2):
dd1, dd2 = list(), list()
length.append(len(i))
for day in d1:
dd1.append(len(day))
length_d1.append(dd1)
for day in d2:
dd2.append(len(day))
length_d2.append(dd2)
if maxlen is not None: # max length is the sentence level
new_sequence = []
new_lengths = []
new_sequence_d1 = []
new_lengths_d1 = []
new_sequence_d2 = []
new_lengths_d2 = []
for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2):
dd1, lld1, dd2, lld2 = list(), list(), list(), list()
if l < maxlen:
new_sequence.append(s)
new_lengths.append(l)
for i, j in zip(ld1, sd1):
if i < maxlen:
dd1.append(j)
lld1.append(i)
new_sequence_d1.append(dd1)
new_lengths_d1.append(lld1)
for i, j in zip(ld2, sd2):
if i < maxlen:
dd2.append(j)
lld2.append(i)
new_sequence_d2.append(dd2)
new_lengths_d2.append(lld2)
length = new_lengths # keep only samples whose length is below maxlen;
sequence = new_sequence # `length` is the number of news items per sample and
# `sequence` holds the word ids of each news item
length_d1 = new_lengths_d1
sequence_d1 = new_sequence_d1
length_d2 = new_lengths_d2
sequence_d2 = new_sequence_d2
day1 = len(sequence_d1[0])
day2 = len(sequence_d2[0])
##TODO need to be careful, set the max length bigger to avoid bug
if len(length) < 1:
return None, None, None, None, None, None, None
maxlen_x = numpy.max(length) # max time step
maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1])
maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2])
n_samples = len(sequence) # number of samples== batch
max_sequence = max(len(j) for i in sequence for j in i) # find the sequence max length
max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z)
max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z)
max_sequence = max_word if max_sequence > max_word else max_sequence # shrink the data size
max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 # shrink the data size
max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 # shrink the data size
##TODO for x
x = numpy.zeros((maxlen_x, n_samples, max_sequence)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
##TODO for x_d1
x_d1 = numpy.zeros((day1, maxlen_xd1, n_samples, max_sequence_d1)).astype('int64')
x_d1_mask = numpy.zeros((day1,maxlen_xd1, n_samples)).astype('float32')
##TODO for x_d2
x_d2 = numpy.zeros((day2, maxlen_xd2, n_samples, max_sequence_d2)).astype('int64')
x_d2_mask = numpy.zeros((day2,maxlen_xd2, n_samples)).astype('float32')
# l = numpy.array(labels).astype('int64')
##TODO for label
l = numpy.zeros((n_samples,)).astype('int64')
for index, (i, j, k, ll) in enumerate(zip(sequence, sequence_d1, sequence_d2, labels)): # batch size
l[index] = ll
for idx, ss in enumerate(i): # time step
# x[idx, index, :sequence_length[idx]] = ss
if len(ss) < max_sequence:
x[idx, index, :len(ss)] = ss
else:
x[idx, index, :max_sequence] = ss[:max_sequence]
x_mask[idx, index] = 1.
for jj, day in enumerate(j):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d1:
x_d1[jj, idx, index, :len(ss)] = ss
else:
x_d1[jj, idx, index, :max_sequence_d1] = ss[:max_sequence_d1]
x_d1_mask[jj, idx, index] = 1.
for jj, day in enumerate(k):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d2:
x_d2[jj, idx, index, :len(ss)] = ss
else:
x_d2[jj, idx, index, :max_sequence_d2] = ss[:max_sequence_d2]
x_d2_mask[jj, idx, index] = 1.
return x, x_mask, x_d1, x_d1_mask, x_d2, x_d2_mask, l
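# Shapes returned above (n_samples = batch size):
#   x:         (maxlen_x, n_samples, max_sequence)              int64
#   x_mask:    (maxlen_x, n_samples)                            float32
#   x_d1:      (day1, maxlen_xd1, n_samples, max_sequence_d1)   int64
#   x_d1_mask: (day1, maxlen_xd1, n_samples)                    float32
#   x_d2 / x_d2_mask follow the same pattern for the second delay window;
#   l holds the labels with shape (n_samples,).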
def old_sequence_lstm(input, sequence_mask, keep_prob, is_training, options):
# input time_step,batch,sequence_step,embedding, 40*32*13*100
# sequence_mask shape is time_step,batch,sequence_step, 40*32*13
def fn(inp):
out = bilstm_filter(tf.transpose(inp[0], [1, 0, 2]), tf.transpose(inp[1], [1, 0]), keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: sequence_step,batch,2*lstm_unit(concate) 13*32*600
return tf.transpose(tf.concat(out, axis=2), perm=[1, 0, 2])
outputs = tf.map_fn(fn, (input, sequence_mask), dtype=tf.float32)
print(tf.shape(outputs)) # outputs shape 40*32*13*600
outputs = outputs * tf.expand_dims(sequence_mask, -1) # mask the output
with tf.variable_scope('words_attention'):
hidden = tf.layers.dense(outputs, units=300, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), reuse=tf.AUTO_REUSE)
hidden = tf.nn.dropout(hidden, keep_prob)
# hidden 40*32*13*1200 #attention 40*32*13*1
attention = tf.layers.dense(hidden, units=1, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), activation=None)
padding = tf.fill(tf.shape(attention), float(-1e8)) # float('-inf')
attention = tf.where(tf.equal(tf.expand_dims(sequence_mask, -1), 0.), padding,
attention) # fill 0 with -1e8 for softmax
attention = tf.transpose(tf.nn.softmax(tf.transpose(attention, perm=[0, 1, 3, 2])),
perm=[0, 1, 3, 2]) # attention 40*32*13*r
attention = attention * tf.expand_dims(sequence_mask, -1) # mask the attention
outputs = tf.reduce_sum(outputs * attention, axis=2)
print(tf.shape(outputs))
return outputs
def sequence_lstm(input, sequence_mask, keep_prob, is_training, options):
# input time_step,batch,sequence_step,embedding, 40*32*13*100
time_step = tf.shape(input)[0]
# time_step = input.get_shape().as_list()[0]
output_list = tf.TensorArray(dtype=tf.float32, size=time_step)
# sequence_mask shape is time_step,batch,sequence_step, 40*32*13
t = tf.constant(0, dtype=tf.int32)
def cond(i, *args):
return i < time_step
def body(i, x, mask, out_):
out = bilstm_filter(tf.transpose(x[i], [1, 0, 2]), tf.transpose(mask[i], [1, 0]), keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: sequence_step,batch,2*lstm_unit(concate) 13*32*600
'''out = bilstm_filter(tf.concat(out, 2) * tf.expand_dims(tf.transpose(mask[i], [1, 0]), 2), tf.transpose(mask[i], [1, 0]), keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training)
'''
out = tf.concat(out, 2) * tf.expand_dims(tf.transpose(mask[i], [1, 0]), -1) # mask the output 13*32*600
att = attention_v1(tf.transpose(out, [1, 0, 2]), mask[i],
name='attention_1', keep=keep_prob) # attention shape 32*600
out_ = out_.write(i, att)
return i + 1, x, mask, out_
_, _, _, result = tf.while_loop(cond, body, [t, input, sequence_mask, output_list])
result = result.stack() # result shape is time_step,batch,hidden units 40*32*600
return result
def attention_v1(input, masks, name='attention', nin=600, keep=1.0):
# input is batch,time_step,hidden_state 32*40*600 mask 32*40
# hidden layer is:batch,hidden_shape,attention_hidden_size 32*40*1200 or 32*40*600
# attention shape after squeeze is 32*40, # batch,time_step,attention_size 32*40*1
hidden = tf.layers.dense(input, nin // 2, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02),
name=_s(name, 'hidden'), reuse=tf.AUTO_REUSE)
hidden = tf.nn.dropout(hidden, keep)
attention = tf.layers.dense(hidden, 1 , activation=None, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), name=_s(name, 'out'),
reuse=tf.AUTO_REUSE)
padding = tf.fill(tf.shape(attention), -1e8) # acts as -inf for the softmax
attention = tf.where(tf.equal(tf.expand_dims(masks, -1), 0.), padding, attention) # fill masked positions with a very negative number before softmax
attention = tf.nn.softmax(attention, 1) * tf.expand_dims(masks, -1) # 32*40*1; masking again after softmax is not strictly necessary
outputs = tf.reduce_sum(input * attention, axis=1)#32*600
#outputs = tf.squeeze(tf.matmul(tf.transpose(attention, [0, 2, 1]), input)) # transpose to batch,hidden,time_step
return outputs
def attention_v2(input, masks, name='attention', nin=600, keep=1.0, r=4, beta=1.):
# input is batch,time_step,hidden_state 32*40*600 mask 32*40
# hidden layer is:batch,hidden_shape,attention_hidden_size 32*40*1200 or 32*40*600
# attention shape after squeeze is 32*40, # batch,time_step,attention_size 32*40*1
masks = tf.stack([masks] * r, -1) # copy r time for filling 32*40*r
iden = tf.eye(tf.shape(input)[1], batch_shape=[tf.shape(input)[0]]) # an identity matrix 32*40*40
hidden = tf.layers.dense(input, nin // 2, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02),
name=_s(name, 'hidden'), reuse=tf.AUTO_REUSE)
hidden = tf.nn.dropout(hidden, keep)
attention = tf.layers.dense(hidden, r, activation=None, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), name=_s(name, 'out'),
reuse=tf.AUTO_REUSE)
padding = tf.fill(tf.shape(attention), -1e8) # acts as -inf for the softmax
attention = tf.where(tf.equal(masks, 0.), padding, attention) # fill masked positions with a very negative number before softmax
attention = tf.nn.softmax(attention, 1) * masks # 32*40*r; masking again after softmax is not strictly necessary
penalty = tf.norm((tf.matmul(attention, tf.transpose(attention, [0, 2, 1])) - iden), ord='fro',
axis=(-2, -1)) # the Frobenius norm penalty 32 dimension
attention = attention + beta * tf.expand_dims(tf.expand_dims(penalty, -1), -1) # expand twice
# outputs = tf.reduce_sum(input * attention, axis=1)#32*600
outputs = tf.matmul(tf.transpose(attention, [0, 2, 1]), input) # transpose to batch,hidden,time_step
outputs = tf.reshape(outputs, [tf.shape(outputs)[0], -1])
if name == 'attention_2':
outputs.set_shape([None, nin * (r ** 2)])
else:
outputs.set_shape([None, nin * r])
return outputs # result shape is batch, hidden_unit 32*600
def fflayer_2D(options, input, name='feed_forward', activation_function=None, nin=None, nout=None):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
W = tf.get_variable(
_s(name, 'W'),
shape=[nin, nout],
# initializer=tf.random_uniform_initializer(-0.1, 0.1),
initializer=tf.random_normal_initializer(stddev=0.02),
dtype=tf.float32
)
bias = tf.get_variable(
_s(name, 'bias'),
shape=[nout],
initializer=tf.constant_initializer(0.),
dtype=tf.float32
)
# result = tf.nn.bias_add(tf.matmul(input, W), bias)
result = tf.nn.bias_add(tf.tensordot(input, W, [[-1], [0]]), bias)
if activation_function is None:
outputs = result
else:
outputs = activation_function(result)
return outputs
def bilstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):
with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):
sequence = tf.cast(tf.reduce_sum(mask, 0), tf.int32)
lstm_fw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)
# back directions
lstm_bw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)
keep_rate = tf.cond(tf.logical_and(is_training, keep_prob < 1), lambda: 0.8, lambda: 1.0) # apply dropout only while training
cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)
cell_dp_bw = rnn.DropoutWrapper(cell=lstm_bw_cell, output_keep_prob=keep_rate)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_dp_fw, cell_dp_bw, input, sequence_length=sequence,
dtype=tf.float32,
time_major=True)
return outputs
def init_params(options, worddicts):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
# read embedding from GloVe
if options['embedding']:
with open(options['embedding'], 'r') as f:
for line in f:
tmp = line.split()
word = tmp[0]
vector = tmp[1:]
if word in worddicts and worddicts[word] < options['n_words']:
try:
params['Wemb'][worddicts[word], :] = vector
# encoder: bidirectional RNN
except ValueError as e:
print(str(e))
return params
def word_embedding(options, params):
embeddings = tf.get_variable("embeddings", shape=[options['n_words'], options['dim_word']],
initializer=tf.constant_initializer(numpy.array(params['Wemb'])))
return embeddings
def build_model(embedding, options):
""" Builds the entire computational graph used for training
"""
# description string: #words x #samples
with tf.device('/gpu:1'):
with tf.variable_scope('input'):
x = tf.placeholder(tf.int64, shape=[None, None, None],
name='x') # 3D vector timestep, batch and sequence(before embedding)40*32*13
x_mask = tf.placeholder(tf.float32, shape=[None, None], name='x_mask') # mask time step, batch
y = tf.placeholder(tf.int64, shape=[None], name='y')
##TODO important
keep_prob = tf.placeholder(tf.float32, [], name='keep_prob')
is_training = tf.placeholder(tf.bool, name='is_training')
##TODO important
# n_timesteps = x.get_shape().as_list()[0] # time steps
# n_samples = x.get_shape().as_list()[1] # n samples
sequence_mask = tf.cast(tf.abs(tf.sign(x)), tf.float32) # 3D
n_timesteps = tf.shape(x)[0] # time steps
n_samples = tf.shape(x)[1] # n samples
# # word embedding
##TODO word embedding
emb = tf.nn.embedding_lookup(embedding, x)
with tf.device('/gpu:1'):
# emb = tf.reduce_mean(emb, -2) # average embedding
# fed into the input of BILSTM from the official document
'''if options['use_dropout']:
emb = tf.nn.dropout(emb, keep_prob)'''
emb = sequence_lstm(emb, sequence_mask, keep_prob, is_training, options)
emb = emb * tf.expand_dims(x_mask, -1) # mask before attention
# TODO bilstm layers
# Change the time step and batch
att = attention_v1(tf.transpose(emb, [1, 0, 2]), tf.transpose(x_mask, [1, 0]),
name='attention_2', keep=keep_prob) # already masked after attention
# maxpolling and sum pooling from batch
if options['use_dropout']:
att = tf.nn.dropout(att, keep_prob)
'''conv1 = tf.layers.conv2d(inputs=tf.expand_dims(tf.transpose(emb,[1,0,2])),filters=32,kernel_size=[3, 2400],padding="same",activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)'''
logit = fflayer_2D(options, att, name='ff', activation_function=tf.nn.tanh, nin=2 * options['dim'],
nout=300) # 2 * options['dim']'''
if options['use_dropout']:
logit = tf.nn.dropout(logit, keep_prob)
pred = fflayer_2D(options, logit, name='fout', activation_function=None, nin=300, nout=2)
# with tf.device('/cpu:0'):
logger.info('Building f_cost...')
# todo not same
labels = tf.one_hot(y, depth=2, axis=1)
# labels = y
preds = tf.nn.softmax(pred, 1)
# preds = tf.nn.sigmoid(pred)
# pred=tf.reshape(pred,[-1])
cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels)
# cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=pred),1)
# cost = -tf.reduce_sum((tf.cast(labels, tf.float32) * tf.log(preds + 1e-8)),axis=1)
# cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y)
logger.info('Done')
'''
logit1 = tf.reduce_sum(ctx1 * tf.expand_dims(x_mask, 2), 0) / tf.expand_dims(tf.reduce_sum(x_mask, 0), 1)
logit2 = tf.reduce_max(ctx1 * tf.expand_dims(x_mask, 2), 0)
logit = tf.concat([logit1, logit2], 1)
'''
with tf.variable_scope('logging'):
tf.summary.scalar('current_cost', tf.reduce_mean(cost))
tf.summary.histogram('predicted_value', preds)
summary = tf.summary.merge_all()
return is_training, cost, x, x_mask, y, n_timesteps, preds, summary
def predict_pro_acc(sess, cost, prepare_data, model_options, iterator, maxlen, correct_pred, pred, summary, eidx,
is_training, writer=None):
# fo = open(_s(prefix,'pre.txt'), "w")
num = 0
valid_acc = 0
total_cost = 0
loss = 0
result = 0
for x_sent, x_d1_sent, x_d2_sent, y_sent in iterator:
num += len(x_sent)
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y = prepare_data(x_sent, x_d1_sent, x_d2_sent, y_sent, model_options, maxlen=maxlen)
loss, result, preds = sess.run([cost, correct_pred, pred],
feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/y:0': data_y, 'input/keep_prob:0': 1.,
'input/is_training:0': is_training})
valid_acc += result.sum()
total_cost += loss.sum()
final_acc = 1.0 * valid_acc / num
final_loss = 1.0 * total_cost / num
# if writer is not None:
# writer.add_summary(test_summary, eidx)
# print result,preds,loss,result_
print(preds, result, num)
return final_acc, final_loss
def train(
dim_word=100, # word vector dimensionality
dim=100, # the number of LSTM units
encoder='lstm', # encoder model
decoder='lstm', # decoder model
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
decay_c=0., # L2 regularization penalty
clip_c=-1., # gradient clipping threshold
lrate=0.0004, # learning rate
n_words=100000, # vocabulary size
n_words_lemma=100000,
maxlen=100, # maximum length of the description
optimizer='adam',
batch_size=32,
valid_batch_size=32,
save_model='../../models/',
saveto='model.npz',
dispFreq=100,
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
use_dropout=False,
reload_=False,
verbose=False, # print verbose information for debug but slow speed
delay1=3,
delay2=7,
types='title',
cut_word=False,
cut_sentence=False,
datasets=[],
valid_datasets=[],
test_datasets=[],
dictionary=[],
kb_dicts=[],
embedding='', # pretrain embedding file, such as word2vec, GLOVE
dim_kb=5,
RUN_NAME="histogram_visualization",
wait_N=10
):
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s",
filename='./log_result.txt')
# Model options
model_options = locals().copy()
# tf.set_random_seed(2345)
with open(dictionary, 'rb') as f:
worddicts = pkl.load(f)
logger.info("Loading knowledge base ...")
# reload options
if reload_ and os.path.exists(saveto):
logger.info("Reload options")
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
logger.debug(pprint.pformat(model_options))
logger.info("Loading data")
train = TextIterator(datasets[0], datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=True)
train_valid = TextIterator(datasets[0], datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=False)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=False)
test = TextIterator(test_datasets[0], test_datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=False)
# Initialize (or reload) the parameters using 'model_options'
# then build the tensorflow graph
logger.info("init_word_embedding")
params = init_params(model_options, worddicts)
embedding = word_embedding(model_options, params)
is_training, cost, x, x_mask, y, n_timesteps, pred, summary = build_model(embedding, model_options)
lr = tf.Variable(0.0, trainable=False)
def assign_lr(session, lr_value):
session.run(tf.assign(lr, lr_value))
logger.info('Building optimizers...')
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
logger.info('Done')
# print all variables
tvars = tf.trainable_variables()
for var in tvars:
print(var.name, var.shape)
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in tvars if 'embeddings' not in v.name and 'bias' not in v.name]) * 0.0001 # L2 on weights only (embeddings and biases excluded)
cost = cost + lossL2
# regularization_cost = 0.0003 * tf.reduce_sum([tf.nn.l2_loss(v) for v in tvars])
# cost = cost + regularization_cost
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), model_options['clip_c'])
train_op = optimizer.apply_gradients(zip(grads, tvars))
# train_op = optimizer.minimize(cost)
op_loss = tf.reduce_mean(cost)
logger.info("correct_pred")
correct_pred = tf.equal(tf.argmax(input=pred, axis=1), y) # make prediction
logger.info("Done")
temp_accuracy = tf.cast(correct_pred, tf.float32) # change to float32
logger.info("init variables")
init = tf.global_variables_initializer()
logger.info("Done")
# saver
saver = tf.train.Saver(max_to_keep=15)
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.gpu_options.allow_growth = True
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
training_writer = tf.summary.FileWriter("./logs/{}/training".format(RUN_NAME), sess.graph)
validate_writer = tf.summary.FileWriter("./logs/{}/validate".format(RUN_NAME), sess.graph)
testing_writer = tf.summary.FileWriter("./logs/{}/testing".format(RUN_NAME), sess.graph)
sess.run(init)
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
logger.info("Reload history error")
history_errs = list(numpy.load(saveto)['history_errs'])
bad_counter = 0
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
uidx = 0
estop = False
valid_acc_record = []
test_acc_record = []
best_num = -1
best_epoch_num = 0
lr_change_list = []
wait_counter = 0
wait_N = model_options['wait_N']
learning_rate = model_options['lrate']
assign_lr(sess, learning_rate)
for eidx in range(max_epochs):
n_samples = 0
for x, x_d1, x_d2, y in train:
n_samples += len(x)
uidx += 1
keep_prob = 0.5
is_training = True
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y = prepare_data(x,
x_d1,
x_d2,
y,
model_options,
maxlen=maxlen)
if data_x is None:
logger.debug('Minibatch with zero sample under length {0}'.format(maxlen))
uidx -= 1
continue
print(data_x.shape, data_x_mask.shape, data_y.shape)
assert data_y.shape[0] == data_x.shape[1], 'Size does not match'
ud_start = time.time()
_, loss = sess.run([train_op, op_loss],
feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask, 'input/y:0': data_y,
'input/keep_prob:0': keep_prob, 'input/is_training:0': is_training})
ud = time.time() - ud_start
'''train_summary = sess.run(summary, feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/y:0': data_y,'input/keep_prob:0':keep_prob,'input/is_training:0':is_training})
training_writer.add_summary(train_summary, eidx)'''
if numpy.mod(uidx, dispFreq) == 0:
logger.debug('Epoch {0} Update {1} Cost {2} TIME {3}'.format(eidx, uidx, loss, ud))
# validate model on validation set and early stop if necessary
if numpy.mod(uidx, validFreq) == 0:
keep_prob = 1
is_training = False
valid_acc, valid_loss = predict_pro_acc(sess, cost, prepare_data, model_options, valid, maxlen,
correct_pred, pred, summary, eidx, is_training,
validate_writer)
test_acc, test_loss = predict_pro_acc(sess, cost, prepare_data, model_options, test, maxlen,
correct_pred, pred, summary, eidx, is_training,
testing_writer)
valid_err = 1.0 - valid_acc
# valid_err = valid_loss
history_errs.append(valid_err)
logger.debug('Epoch {0}'.format(eidx))
logger.debug('Valid cost {0}'.format(valid_loss))
logger.debug('Valid accuracy {0}'.format(valid_acc))
logger.debug('Test cost {0}'.format(test_loss))
logger.debug('Test accuracy {0}'.format(test_acc))
logger.debug('learning_rate: {0}'.format(learning_rate))
valid_acc_record.append(valid_acc)
test_acc_record.append(test_acc)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_num = best_num + 1
best_epoch_num = eidx
wait_counter = 0
logger.info("Saving...")
saver.save(sess, _s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
logger.info(_s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('{}.pkl'.format(saveto), 'wb'))
logger.info("Done")
if valid_err > numpy.array(history_errs).min():
wait_counter += 1
# wait_counter +=1 if valid_err>numpy.array(history_errs).min() else 0
if wait_counter >= wait_N:
logger.info("wait_counter max, need to half the lr")
# print 'wait_counter max, need to half the lr'
bad_counter += 1
wait_counter = 0
logger.debug('bad_counter: {0}'.format(bad_counter))
                        # TODO: change the learning rate
learning_rate = learning_rate * 0.5
# learning_rate = learning_rate
assign_lr(sess, learning_rate)
lr_change_list.append(eidx)
logger.debug('lrate change to: {0}'.format(learning_rate))
# print 'lrate change to: ' + str(lrate)
if bad_counter > patience:
logger.info("Early Stop!")
estop = True
break
if numpy.isnan(valid_err):
pdb.set_trace()
# finish after this many updates
if uidx >= finish_after:
logger.debug('Finishing after iterations! {0}'.format(uidx))
# print 'Finishing after %d iterations!' % uidx
estop = True
break
logger.debug('Seen samples: {0}'.format(n_samples))
# print 'Seen %d samples' % n_samples
if estop:
break
with tf.Session() as sess:
# Restore variables from disk.
saver.restore(sess, _s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
keep_prob = 1
is_training = False
logger.info('=' * 80)
logger.info('Final Result')
logger.info('=' * 80)
logger.debug('best epoch {0}'.format(best_epoch_num))
valid_acc, valid_cost = predict_pro_acc(sess, cost, prepare_data, model_options, valid,
maxlen, correct_pred, pred, summary, eidx, is_training, None)
logger.debug('Valid cost {0}'.format(valid_cost))
logger.debug('Valid accuracy {0}'.format(valid_acc))
# print 'Valid cost', valid_cost
# print 'Valid accuracy', valid_acc
test_acc, test_cost = predict_pro_acc(sess, cost, prepare_data, model_options, test,
maxlen, correct_pred, pred, summary, eidx, is_training, None)
logger.debug('Test cost {0}'.format(test_cost))
logger.debug('Test accuracy {0}'.format(test_acc))
# print 'best epoch ', best_epoch_num
train_acc, train_cost = predict_pro_acc(sess, cost, prepare_data, model_options, train_valid,
maxlen, correct_pred, pred, summary, eidx, is_training, None)
logger.debug('Train cost {0}'.format(train_cost))
logger.debug('Train accuracy {0}'.format(train_acc))
# print 'Train cost', train_cost
# print 'Train accuracy', train_acc
# print 'Test cost ', test_cost
# print 'Test accuracy ', test_acc
return None
if __name__ == '__main__':
pass
|
try:
from . import generic as g
except BaseException:
import generic as g
class GraphTest(g.unittest.TestCase):
def setUp(self):
self.engines = ['scipy', 'networkx']
if g.trimesh.graph._has_gt:
self.engines.append('graphtool')
else:
g.log.warning('No graph-tool to test!')
def test_soup(self):
# a soup of random triangles, with no adjacent pairs
soup = g.get_mesh('soup.stl')
assert len(soup.face_adjacency) == 0
assert len(soup.face_adjacency_radius) == 0
assert len(soup.face_adjacency_edges) == 0
assert len(soup.face_adjacency_convex) == 0
assert len(soup.face_adjacency_unshared) == 0
assert len(soup.face_adjacency_angles) == 0
assert len(soup.facets) == 0
def test_components(self):
# a soup of random triangles, with no adjacent pairs
soup = g.get_mesh('soup.stl')
# a mesh with multiple watertight bodies
mult = g.get_mesh('cycloidal.ply')
# a mesh with a single watertight body
sing = g.get_mesh('featuretype.STL')
for engine in self.engines:
# without requiring watertight the split should be into every face
split = soup.split(only_watertight=False, engine=engine)
self.assertTrue(len(split) == len(soup.faces))
# with watertight there should be an empty list
split = soup.split(only_watertight=True, engine=engine)
self.assertTrue(len(split) == 0)
split = mult.split(only_watertight=False, engine=engine)
self.assertTrue(len(split) >= 119)
split = mult.split(only_watertight=True, engine=engine)
self.assertTrue(len(split) >= 117)
# random triangles should have no facets
facets = g.trimesh.graph.facets(mesh=soup, engine=engine)
self.assertTrue(len(facets) == 0)
facets = g.trimesh.graph.facets(mesh=mult, engine=engine)
self.assertTrue(all(len(i) >= 2 for i in facets))
self.assertTrue(len(facets) >= 8654)
split = sing.split(only_watertight=False, engine=engine)
self.assertTrue(len(split) == 1)
self.assertTrue(split[0].is_watertight)
self.assertTrue(split[0].is_winding_consistent)
split = sing.split(only_watertight=True, engine=engine)
self.assertTrue(len(split) == 1)
self.assertTrue(split[0].is_watertight)
self.assertTrue(split[0].is_winding_consistent)
def test_vertex_adjacency_graph(self):
f = g.trimesh.graph.vertex_adjacency_graph
# a mesh with a single watertight body
sing = g.get_mesh('featuretype.STL')
vert_adj_g = f(sing)
self.assertTrue(len(sing.vertices) == len(vert_adj_g))
def test_engine_time(self):
for mesh in g.get_meshes():
tic = [g.time.time()]
for engine in self.engines:
split = mesh.split(engine=engine, only_watertight=False)
facets = g.trimesh.graph.facets(mesh=mesh, engine=engine)
tic.append(g.time.time())
tic_diff = g.np.diff(tic)
tic_min = tic_diff.min()
tic_diff /= tic_min
g.log.info('graph engine on %s (scale %f sec):\n%s',
mesh.metadata['file_name'],
tic_min,
str(g.np.column_stack((self.engines,
tic_diff))))
def test_smoothed(self):
mesh = g.get_mesh('ADIS16480.STL')
assert len(mesh.faces) == len(mesh.smoothed().faces)
def test_engines(self):
edges = g.np.arange(10).reshape((-1, 2))
for i in range(0, 20):
check_engines(nodes=g.np.arange(i),
edges=edges)
edges = g.np.column_stack((g.np.arange(1, 11),
g.np.arange(0, 10)))
for i in range(0, 20):
check_engines(nodes=g.np.arange(i),
edges=edges)
def test_watertight(self):
m = g.get_mesh('shared.STL')
#assert m.is_watertight
#assert m.is_winding_consistent
#assert m.is_volume
def test_traversals(self):
"""
Test traversals (BFS+DFS)
"""
# generate some simple test data
simple_nodes = g.np.arange(20)
simple_edges = g.np.column_stack((simple_nodes[:-1],
simple_nodes[1:]))
simple_edges = g.np.vstack((
simple_edges,
[[19, 0],
[10, 1000],
[500, 501]])).astype(g.np.int64)
all_edges = g.data['edges']
all_edges.append(simple_edges)
for edges in all_edges:
edges = g.np.array(edges, dtype=g.np.int64)
assert g.trimesh.util.is_shape(edges, (-1, 2))
# collect the new nodes
nodes = g.np.unique(edges)
# the basic BFS/DFS traversal
dfs_basic = g.trimesh.graph.traversals(edges, 'dfs')
bfs_basic = g.trimesh.graph.traversals(edges, 'bfs')
# check return types
assert all(i.dtype == g.np.int64 for i in dfs_basic)
assert all(i.dtype == g.np.int64 for i in bfs_basic)
# check to make sure traversals visited every node
dfs_set = set(g.np.hstack(dfs_basic))
bfs_set = set(g.np.hstack(bfs_basic))
nodes_set = set(nodes)
assert dfs_set == nodes_set
assert bfs_set == nodes_set
# check traversal filling
# fill_traversals should always include every edge
# regardless of the path so test on bfs/dfs/empty
for traversal in [dfs_basic, bfs_basic, []]:
# disconnect consecutive nodes that are not edges
# and add edges that were left off by jumps
dfs = g.trimesh.graph.fill_traversals(traversal, edges)
# edges that are included in the new separated traversal
inc = g.trimesh.util.vstack_empty(
[g.np.column_stack((i[:-1], i[1:]))
for i in dfs])
# make a set from edges included in the traversal
inc_set = set(g.trimesh.grouping.hashable_rows(
g.np.sort(inc, axis=1)))
# make a set of the source edges we were supposed to include
edge_set = set(g.trimesh.grouping.hashable_rows(
g.np.sort(edges, axis=1)))
# we should have exactly the same edges
# after the filled traversal as we started with
assert len(inc) == len(edges)
# every edge should occur exactly once
assert len(inc_set) == len(inc)
# unique edges should be the same
assert inc_set == edge_set
# check all return dtypes
assert all(i.dtype == g.np.int64 for i in dfs)
def check_engines(edges, nodes):
"""
Make sure connected component graph engines are
returning the exact same values
"""
results = []
engines = [None, 'scipy', 'networkx']
for engine in engines:
c = g.trimesh.graph.connected_components(edges,
nodes=nodes,
engine=engine)
if len(c) > 0:
# check to see if every resulting component was in the
# set of nodes
diff = g.np.setdiff1d(g.np.hstack(c), nodes)
assert len(diff) == 0
results.append(
sorted(
g.trimesh.util.md5_object(
g.np.sort(i)) for i in c))
assert all(i == results[0] for i in results)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
import argparse
import logging
from aeromancer.db.models import Project
LOG = logging.getLogger(__name__)
class ProjectFilter(object):
"""Manage the arguments for filtering queries by project.
"""
@staticmethod
def add_arguments(parser):
"""Given an argparse.ArgumentParser add arguments.
"""
grp = parser.add_argument_group('Project Filter')
grp.add_argument(
'--project',
action='append',
default=[],
dest='projects',
help=('projects to limit search, '
'by exact name or glob-style patterns'),
)
@classmethod
def from_parsed_args(cls, parsed_args):
return cls(projects=parsed_args.projects)
def __init__(self, projects):
self.exact = []
self.patterns = []
for p in projects:
if '*' in p:
self.patterns.append(p.replace('*', '%'))
else:
self.exact.append(p)
self.projects = projects
def update_query(self, query):
the_filter = ()
if self.exact:
LOG.debug('filtering on projects in %s', self.exact)
the_filter += (Project.name.in_(self.exact),)
if self.patterns:
LOG.debug('filtering on projects matching %s', self.patterns)
the_filter += tuple(Project.name.ilike(p)
for p in self.patterns)
if the_filter:
query = query.filter(*the_filter)
return query
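

# A minimal usage sketch (illustrative, not part of the original module): wire
# the filter into an argparse parser and apply it to a SQLAlchemy query. The
# `session` argument is hypothetical and would come from aeromancer's own
# database setup.
def _example_usage(session):
    parser = argparse.ArgumentParser(description='filter aeromancer projects')
    ProjectFilter.add_arguments(parser)
    parsed_args = parser.parse_args(['--project', 'nova', '--project', 'oslo.*'])
    project_filter = ProjectFilter.from_parsed_args(parsed_args)
    query = project_filter.update_query(session.query(Project))
    return query.all()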
|
from django.core.management.base import BaseCommand
from django.db.models import Sum
from django.utils.translation import gettext as _
from mspray.apps.main.models.target_area import TargetArea
from mspray.apps.main.models.district import District
class Command(BaseCommand):
help = _('Load districts from TargetArea Model')
def handle(self, *args, **options):
qs = TargetArea.objects.filter(targeted=TargetArea.TARGETED_VALUE)\
.values('district_name').distinct()\
.annotate(num_houses=Sum('houses'))
for d in qs:
geom = TargetArea.objects\
.filter(district_name=d['district_name'],
targeted=TargetArea.TARGETED_VALUE)\
.collect()
District.objects.create(
district_name=d['district_name'],
houses=d['num_houses'],
geom=geom
)
print(d['district_name'], d['num_houses'], geom.num_points)
|
import pytube
url = 'https://www.youtube.com/watch?v=vxB0amY8BWs'
youtube = pytube.YouTube(url)
video = youtube.streams.first()  # pick the first available stream
video.download('../video')  # download it to ../video
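# Possible alternative (assuming a recent pytube release): pick the highest
# resolution progressive stream instead of just the first one.
# video = youtube.streams.get_highest_resolution()
# video.download('../video')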
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BC Builder."""
from typing import Iterator, List, Optional
import acme
from acme import adders
from acme import core
from acme import specs
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.jax import networks as networks_lib
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
from jrl.agents.bc import config as bc_config
from jrl.agents.bc import learning
from jrl.agents.bc import networks as bc_networks
class BCBuilder(builders.ActorLearnerBuilder):
"""BC Builder."""
def __init__(
self,
config,
# make_demonstrations: Callable[[int], Iterator[types.Transition]],
make_demonstrations,
):
self._config = config
self._make_demonstrations = make_demonstrations
def make_learner(
self,
random_key,
networks,
dataset,
logger,
replay_client = None,
counter = None,
checkpoint = False,
):
del dataset # Offline RL
data_iter = self._make_demonstrations()
return learning.BCLearner(
networks=networks,
rng=random_key,
iterator=data_iter,
policy_lr=self._config.policy_lr,
loss_type=self._config.loss_type,
regularize_entropy=self._config.regularize_entropy,
entropy_regularization_weight=(
self._config.entropy_regularization_weight),
use_img_encoder=self._config.use_img_encoder,
img_encoder_params_ckpt_path=self._config.img_encoder_params_ckpt_path,
counter=counter,
logger=logger,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,)
def make_actor(
self,
random_key,
policy_network,
adder = None,
variable_source = None):
assert variable_source is not None
if self._config.use_img_encoder:
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(
variable_source, ['policy', 'img_encoder'], device='cpu')
return actors.GenericActor(
actor=policy_network,
random_key=random_key,
variable_client=variable_client,
adder=adder,
)
else:
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(
variable_source, ['policy',], device='cpu')
return actors.GenericActor(
actor=policy_network,
random_key=random_key,
variable_client=variable_client,
adder=adder,
)
def make_replay_tables(
self,
environment_spec,
policy
):
"""Create tables to insert data into."""
return []
def make_dataset_iterator(
self,
replay_client):
"""Create a dataset iterator to use for learning/updating the agent."""
return None
def make_adder(self,
replay_client):
"""Create an adder which records data generated by the actor/environment."""
return None
|
from typing import List, Tuple, Any
from typed_ast import ast3 as ast
from ..utils.snippet import snippet, let
from .base import BaseNodeTransformer
@snippet
def return_from_generator(return_value):
let(exc)
exc = StopIteration()
exc.value = return_value
raise exc
class ReturnFromGeneratorTransformer(BaseNodeTransformer):
"""Compiles return in generators like:
def fn():
yield 1
return 5
To:
def fn():
yield 1
exc = StopIteration()
exc.value = 5
raise exc
"""
target = (3, 2)
def _find_generator_returns(self, node: ast.FunctionDef) \
-> List[Tuple[ast.stmt, ast.Return]]:
"""Using bfs find all `return` statements in function."""
to_check = [(node, x) for x in node.body] # type: ignore
returns = []
has_yield = False
while to_check:
parent, current = to_check.pop()
if isinstance(current, ast.FunctionDef):
continue
elif hasattr(current, 'value'):
to_check.append((current, current.value)) # type: ignore
elif hasattr(current, 'body') and isinstance(current.body, list): # type: ignore
to_check.extend([(parent, x) for x in current.body]) # type: ignore
if isinstance(current, ast.Yield) or isinstance(current, ast.YieldFrom):
has_yield = True
if isinstance(current, ast.Return) and current.value is not None:
returns.append((parent, current))
if has_yield:
return returns # type: ignore
else:
return []
def _replace_return(self, parent: Any, return_: ast.Return) -> None:
"""Replace return with exception raising."""
index = parent.body.index(return_)
parent.body.pop(index)
for line in return_from_generator.get_body(return_value=return_.value)[::-1]:
parent.body.insert(index, line)
def visit_FunctionDef(self, node: ast.FunctionDef) -> ast.FunctionDef:
generator_returns = self._find_generator_returns(node)
if generator_returns:
self._tree_changed = True
for parent, return_ in generator_returns:
self._replace_return(parent, return_)
return self.generic_visit(node) # type: ignore
|
import tkinter as tk
infinity = 1000000000
class BinaryNode:
# Class-level drawing parameters.
node_radius = 10
x_spacing = 20
y_spacing = 20
def __init__(self, value):
self.value = value
self.left_child = None
self.right_child = None
# Drawing parameters.
self.center = (0, 0)
self.subtree_bounds = (
self.center[0] - BinaryNode.node_radius,
self.center[1] - BinaryNode.node_radius,
self.center[0] + BinaryNode.node_radius,
self.center[1] + BinaryNode.node_radius)
def position_subtree(self, xmin, ymin):
""" Position the node."""
# Set ymax to the bottom of this node.
ymax = ymin + 2 * BinaryNode.node_radius
xmax = xmin
# See if the node has any children.
if (self.left_child == None) and (self.right_child == None):
# There are no children. Put the node here.
xmax += 2 * BinaryNode.node_radius
self.subtree_bounds = (xmin, ymin, xmax, ymax)
else:
ymax += BinaryNode.y_spacing
# Position the left subtree.
subtree_bottom = ymax
if self.left_child != None:
self.left_child.position_subtree(xmax, ymax)
# Update xmax to allow room for the left subtree.
xmax = self.left_child.subtree_bounds[2]
# Update the subtree bottom.
subtree_bottom = self.left_child.subtree_bounds[3]
xmax += BinaryNode.x_spacing
# Position the right subtree.
if self.right_child != None:
self.right_child.position_subtree(xmax, ymax)
# Update xmax.
xmax = self.right_child.subtree_bounds[2]
# Update the subtree bottom.
if self.right_child.subtree_bounds[3] > subtree_bottom:
subtree_bottom = self.right_child.subtree_bounds[3]
# Position this node centered over the subtrees.
ymax = subtree_bottom
self.subtree_bounds = (xmin, ymin, xmax, ymax)
# Position the node.
cx = (self.subtree_bounds[0] + self.subtree_bounds[2]) / 2
cy = ymin + BinaryNode.node_radius
self.center = (cx, cy)
def draw_subtree_links(self, canvas, color):
""" Draw the subtree's links."""
if self.left_child != None:
self.left_child.draw_subtree_links(canvas, color)
canvas.create_line(self.center[0], self.center[1], self.left_child.center[0], self.left_child.center[1])
if self.right_child != None:
self.right_child.draw_subtree_links(canvas, color)
canvas.create_line(self.center[0], self.center[1], self.right_child.center[0], self.right_child.center[1])
# Outline the subtree for debugging.
#canvas.create_rectangle(self.subtree_bounds, fill="", outline="red")
def draw_subtree_nodes(self, canvas, bg_color, fg_color):
""" Draw the subtree's nodes."""
# Draw the node.
x0 = self.center[0] - BinaryNode.node_radius
y0 = self.center[1] - BinaryNode.node_radius
x1 = self.center[0] + BinaryNode.node_radius
y1 = self.center[1] + BinaryNode.node_radius
canvas.create_oval(x0, y0, x1, y1, fill=bg_color, outline=fg_color)
canvas.create_text(self.center, text=self.value)
# Draw the descendants' nodes.
if self.left_child != None:
self.left_child.draw_subtree_nodes(canvas, bg_color, fg_color)
if self.right_child != None:
self.right_child.draw_subtree_nodes(canvas, bg_color, fg_color)
def add_node(self, value):
""" Add a node to this node's sorted subtree."""
# See if this value is smaller than ours.
if value < self.value:
# The new value is smaller. Add it to the left subtree.
if self.left_child == None:
self.left_child = BinaryNode(value)
else:
self.left_child.add_node(value)
else:
# The new value is not smaller. Add it to the right subtree.
if self.right_child == None:
self.right_child = BinaryNode(value)
else:
self.right_child.add_node(value)
class App:
def kill_callback(self):
self.window.destroy()
def __init__(self):
self.window = tk.Tk()
self.window.title("sorted_tree")
self.window.protocol("WM_DELETE_WINDOW", self.kill_callback)
self.window.geometry("250x240")
frame = tk.Frame(self.window)
frame.pack(padx=5, pady=2)
label = tk.Label(frame, text="Value:")
label.pack(side=tk.LEFT, padx=5, pady=2)
self.value_entry = tk.Entry(frame, width=8, justify=tk.RIGHT)
self.value_entry.pack(side=tk.LEFT, padx=5, pady=2)
self.value_entry.insert(tk.END, "10")
add_button = tk.Button(frame, width=8, text="Add", command=self.add)
add_button.pack(side=tk.LEFT, padx=5, pady=2)
self.canvas = tk.Canvas(self.window, bg="white", borderwidth=2, relief=tk.SUNKEN)
self.canvas.pack(padx=10, pady=10, fill=tk.BOTH, expand=True)
# Build the tree's root.
self.root = BinaryNode(-infinity)
# Bind some keys.
self.window.bind('<Return>', (lambda e, button=add_button: add_button.invoke()))
# Force focus so Alt+F4 closes this window and not the Python shell.
self.value_entry.focus_force()
self.window.mainloop()
def add(self):
""" Add the value to the tree."""
value = int(self.value_entry.get())
self.value_entry.delete(0, tk.END)
self.value_entry.focus_force()
self.root.add_node(value)
self.draw_tree()
def draw_tree(self):
""" Draw the tree."""
self.canvas.delete(tk.ALL)
# Position the tree.
if self.root.right_child != None:
self.root.right_child.position_subtree(10, 10)
# Draw the links.
self.root.right_child.draw_subtree_links(self.canvas, "blue")
# Draw the nodes.
self.root.right_child.draw_subtree_nodes(self.canvas, "lightblue", "blue")
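
# A small non-GUI sketch (illustrative only): build the sorted tree the same
# way App does, using a -infinity sentinel root so every real value lands in
# the right subtree, then lay it out without drawing anything.
def build_sample_tree(values):
    root = BinaryNode(-infinity)
    for value in values:
        root.add_node(value)
    tree = root.right_child
    if tree is not None:
        tree.position_subtree(10, 10)
    return tree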
if __name__ == '__main__':
app = App()
# app.root.destroy()
|
#!D:\Python Learning\DjangoCrudApplication\DjangoCrudApplication\env\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
from enum import Enum, unique
@unique
class ItemTag(Enum):
BLESS = "BLESS"
GLOW = "GLOW"
HUM = "HUM"
INVIS = "INVIS"
INSURED = "INSURED"
LIMITED = "LIMITED"
MAG = "MAG"
NO_DONATE = "!DONATE"
NO_DROP = "!DROP"
NO_LOCATE = "!LOCATE"
UNIQUE = "UNIQUE"
NO_JUNK = "!JUNK"
ASSM = "ASSM"
NO_RENT = "!RENT"
NO_PURGE = "!PURGE"
NOBITS = "NOBITS"
NO_MORT = "!MORT"
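

# Example helper (illustrative, not part of the original enum): map raw flag
# strings back to ItemTag members, ignoring flags this enum does not define.
def parse_item_tags(raw_flags):
    tags = []
    for flag in raw_flags:
        try:
            tags.append(ItemTag(flag))
        except ValueError:
            pass  # unknown flag string; skip it
    return tags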
|
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = [
'ContainerResourceMetricSourceArgs',
'ContainerResourceMetricStatusArgs',
'CrossVersionObjectReferenceArgs',
'ExternalMetricSourceArgs',
'ExternalMetricStatusArgs',
'HorizontalPodAutoscalerConditionArgs',
'HorizontalPodAutoscalerSpecArgs',
'HorizontalPodAutoscalerStatusArgs',
'HorizontalPodAutoscalerArgs',
'MetricSpecArgs',
'MetricStatusArgs',
'ObjectMetricSourceArgs',
'ObjectMetricStatusArgs',
'PodsMetricSourceArgs',
'PodsMetricStatusArgs',
'ResourceMetricSourceArgs',
'ResourceMetricStatusArgs',
]
@pulumi.input_type
class ContainerResourceMetricSourceArgs:
def __init__(__self__, *,
container: pulumi.Input[str],
name: pulumi.Input[str],
target_average_utilization: Optional[pulumi.Input[int]] = None,
target_average_value: Optional[pulumi.Input[str]] = None):
"""
ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.
:param pulumi.Input[str] container: container is the name of the container in the pods of the scaling target
:param pulumi.Input[str] name: name is the name of the resource in question.
:param pulumi.Input[int] target_average_utilization: targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
:param pulumi.Input[str] target_average_value: targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
"""
pulumi.set(__self__, "container", container)
pulumi.set(__self__, "name", name)
if target_average_utilization is not None:
pulumi.set(__self__, "target_average_utilization", target_average_utilization)
if target_average_value is not None:
pulumi.set(__self__, "target_average_value", target_average_value)
@property
@pulumi.getter
def container(self) -> pulumi.Input[str]:
"""
container is the name of the container in the pods of the scaling target
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: pulumi.Input[str]):
pulumi.set(self, "container", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name is the name of the resource in question.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="targetAverageUtilization")
def target_average_utilization(self) -> Optional[pulumi.Input[int]]:
"""
targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
"""
return pulumi.get(self, "target_average_utilization")
@target_average_utilization.setter
def target_average_utilization(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_average_utilization", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> Optional[pulumi.Input[str]]:
"""
targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_average_value", value)
@pulumi.input_type
class ContainerResourceMetricStatusArgs:
def __init__(__self__, *,
container: pulumi.Input[str],
current_average_value: pulumi.Input[str],
name: pulumi.Input[str],
current_average_utilization: Optional[pulumi.Input[int]] = None):
"""
ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
:param pulumi.Input[str] container: container is the name of the container in the pods of the scaling target
:param pulumi.Input[str] current_average_value: currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type. It will always be set, regardless of the corresponding metric specification.
:param pulumi.Input[str] name: name is the name of the resource in question.
:param pulumi.Input[int] current_average_utilization: currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.
"""
pulumi.set(__self__, "container", container)
pulumi.set(__self__, "current_average_value", current_average_value)
pulumi.set(__self__, "name", name)
if current_average_utilization is not None:
pulumi.set(__self__, "current_average_utilization", current_average_utilization)
@property
@pulumi.getter
def container(self) -> pulumi.Input[str]:
"""
container is the name of the container in the pods of the scaling target
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: pulumi.Input[str]):
pulumi.set(self, "container", value)
@property
@pulumi.getter(name="currentAverageValue")
def current_average_value(self) -> pulumi.Input[str]:
"""
currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type. It will always be set, regardless of the corresponding metric specification.
"""
return pulumi.get(self, "current_average_value")
@current_average_value.setter
def current_average_value(self, value: pulumi.Input[str]):
pulumi.set(self, "current_average_value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name is the name of the resource in question.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="currentAverageUtilization")
def current_average_utilization(self) -> Optional[pulumi.Input[int]]:
"""
currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.
"""
return pulumi.get(self, "current_average_utilization")
@current_average_utilization.setter
def current_average_utilization(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "current_average_utilization", value)
@pulumi.input_type
class CrossVersionObjectReferenceArgs:
def __init__(__self__, *,
kind: pulumi.Input[str],
name: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
CrossVersionObjectReference contains enough information to let you identify the referred resource.
:param pulumi.Input[str] kind: Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
:param pulumi.Input[str] name: Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param pulumi.Input[str] api_version: API version of the referent
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "name", name)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class ExternalMetricSourceArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
metric_selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None,
target_average_value: Optional[pulumi.Input[str]] = None,
target_value: Optional[pulumi.Input[str]] = None):
"""
ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one "target" type should be set.
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question.
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] metric_selector: metricSelector is used to identify a specific time series within a given metric.
:param pulumi.Input[str] target_average_value: targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.
:param pulumi.Input[str] target_value: targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.
"""
pulumi.set(__self__, "metric_name", metric_name)
if metric_selector is not None:
pulumi.set(__self__, "metric_selector", metric_selector)
if target_average_value is not None:
pulumi.set(__self__, "target_average_value", target_average_value)
if target_value is not None:
pulumi.set(__self__, "target_value", target_value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="metricSelector")
def metric_selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
metricSelector is used to identify a specific time series within a given metric.
"""
return pulumi.get(self, "metric_selector")
@metric_selector.setter
def metric_selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "metric_selector", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> Optional[pulumi.Input[str]]:
"""
targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_average_value", value)
@property
@pulumi.getter(name="targetValue")
def target_value(self) -> Optional[pulumi.Input[str]]:
"""
targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.
"""
return pulumi.get(self, "target_value")
@target_value.setter
def target_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_value", value)
@pulumi.input_type
class ExternalMetricStatusArgs:
def __init__(__self__, *,
current_value: pulumi.Input[str],
metric_name: pulumi.Input[str],
current_average_value: Optional[pulumi.Input[str]] = None,
metric_selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
"""
ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.
:param pulumi.Input[str] current_value: currentValue is the current value of the metric (as a quantity)
:param pulumi.Input[str] metric_name: metricName is the name of a metric used for autoscaling in metric system.
:param pulumi.Input[str] current_average_value: currentAverageValue is the current value of metric averaged over autoscaled pods.
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] metric_selector: metricSelector is used to identify a specific time series within a given metric.
"""
pulumi.set(__self__, "current_value", current_value)
pulumi.set(__self__, "metric_name", metric_name)
if current_average_value is not None:
pulumi.set(__self__, "current_average_value", current_average_value)
if metric_selector is not None:
pulumi.set(__self__, "metric_selector", metric_selector)
@property
@pulumi.getter(name="currentValue")
def current_value(self) -> pulumi.Input[str]:
"""
currentValue is the current value of the metric (as a quantity)
"""
return pulumi.get(self, "current_value")
@current_value.setter
def current_value(self, value: pulumi.Input[str]):
pulumi.set(self, "current_value", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of a metric used for autoscaling in metric system.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="currentAverageValue")
def current_average_value(self) -> Optional[pulumi.Input[str]]:
"""
currentAverageValue is the current value of metric averaged over autoscaled pods.
"""
return pulumi.get(self, "current_average_value")
@current_average_value.setter
def current_average_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "current_average_value", value)
@property
@pulumi.getter(name="metricSelector")
def metric_selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
metricSelector is used to identify a specific time series within a given metric.
"""
return pulumi.get(self, "metric_selector")
@metric_selector.setter
def metric_selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "metric_selector", value)
@pulumi.input_type
class HorizontalPodAutoscalerConditionArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.
:param pulumi.Input[str] status: status is the status of the condition (True, False, Unknown)
:param pulumi.Input[str] type: type describes the current condition
:param pulumi.Input[str] last_transition_time: lastTransitionTime is the last time the condition transitioned from one status to another
:param pulumi.Input[str] message: message is a human-readable explanation containing details about the transition
:param pulumi.Input[str] reason: reason is the reason for the condition's last transition.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
"""
status is the status of the condition (True, False, Unknown)
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
type describes the current condition
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
"""
lastTransitionTime is the last time the condition transitioned from one status to another
"""
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
message is a human-readable explanation containing details about the transition
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
reason is the reason for the condition's last transition.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class HorizontalPodAutoscalerSpecArgs:
def __init__(__self__, *,
max_replicas: pulumi.Input[int],
scale_target_ref: pulumi.Input['CrossVersionObjectReferenceArgs'],
metrics: Optional[pulumi.Input[Sequence[pulumi.Input['MetricSpecArgs']]]] = None,
min_replicas: Optional[pulumi.Input[int]] = None):
"""
HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
:param pulumi.Input[int] max_replicas: maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.
:param pulumi.Input['CrossVersionObjectReferenceArgs'] scale_target_ref: scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.
:param pulumi.Input[Sequence[pulumi.Input['MetricSpecArgs']]] metrics: metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond.
:param pulumi.Input[int] min_replicas: minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
"""
pulumi.set(__self__, "max_replicas", max_replicas)
pulumi.set(__self__, "scale_target_ref", scale_target_ref)
if metrics is not None:
pulumi.set(__self__, "metrics", metrics)
if min_replicas is not None:
pulumi.set(__self__, "min_replicas", min_replicas)
@property
@pulumi.getter(name="maxReplicas")
def max_replicas(self) -> pulumi.Input[int]:
"""
maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.
"""
return pulumi.get(self, "max_replicas")
@max_replicas.setter
def max_replicas(self, value: pulumi.Input[int]):
pulumi.set(self, "max_replicas", value)
@property
@pulumi.getter(name="scaleTargetRef")
def scale_target_ref(self) -> pulumi.Input['CrossVersionObjectReferenceArgs']:
"""
scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.
"""
return pulumi.get(self, "scale_target_ref")
@scale_target_ref.setter
def scale_target_ref(self, value: pulumi.Input['CrossVersionObjectReferenceArgs']):
pulumi.set(self, "scale_target_ref", value)
@property
@pulumi.getter
def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricSpecArgs']]]]:
"""
metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond.
"""
return pulumi.get(self, "metrics")
@metrics.setter
def metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetricSpecArgs']]]]):
pulumi.set(self, "metrics", value)
@property
@pulumi.getter(name="minReplicas")
def min_replicas(self) -> Optional[pulumi.Input[int]]:
"""
minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
"""
return pulumi.get(self, "min_replicas")
@min_replicas.setter
def min_replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_replicas", value)
@pulumi.input_type
class HorizontalPodAutoscalerStatusArgs:
def __init__(__self__, *,
conditions: pulumi.Input[Sequence[pulumi.Input['HorizontalPodAutoscalerConditionArgs']]],
current_replicas: pulumi.Input[int],
desired_replicas: pulumi.Input[int],
current_metrics: Optional[pulumi.Input[Sequence[pulumi.Input['MetricStatusArgs']]]] = None,
last_scale_time: Optional[pulumi.Input[str]] = None,
observed_generation: Optional[pulumi.Input[int]] = None):
"""
HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
:param pulumi.Input[Sequence[pulumi.Input['HorizontalPodAutoscalerConditionArgs']]] conditions: conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.
:param pulumi.Input[int] current_replicas: currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.
:param pulumi.Input[int] desired_replicas: desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.
:param pulumi.Input[Sequence[pulumi.Input['MetricStatusArgs']]] current_metrics: currentMetrics is the last read state of the metrics used by this autoscaler.
:param pulumi.Input[str] last_scale_time: lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.
:param pulumi.Input[int] observed_generation: observedGeneration is the most recent generation observed by this autoscaler.
"""
pulumi.set(__self__, "conditions", conditions)
pulumi.set(__self__, "current_replicas", current_replicas)
pulumi.set(__self__, "desired_replicas", desired_replicas)
if current_metrics is not None:
pulumi.set(__self__, "current_metrics", current_metrics)
if last_scale_time is not None:
pulumi.set(__self__, "last_scale_time", last_scale_time)
if observed_generation is not None:
pulumi.set(__self__, "observed_generation", observed_generation)
@property
@pulumi.getter
def conditions(self) -> pulumi.Input[Sequence[pulumi.Input['HorizontalPodAutoscalerConditionArgs']]]:
"""
conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: pulumi.Input[Sequence[pulumi.Input['HorizontalPodAutoscalerConditionArgs']]]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter(name="currentReplicas")
def current_replicas(self) -> pulumi.Input[int]:
"""
currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.
"""
return pulumi.get(self, "current_replicas")
@current_replicas.setter
def current_replicas(self, value: pulumi.Input[int]):
pulumi.set(self, "current_replicas", value)
@property
@pulumi.getter(name="desiredReplicas")
def desired_replicas(self) -> pulumi.Input[int]:
"""
desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.
"""
return pulumi.get(self, "desired_replicas")
@desired_replicas.setter
def desired_replicas(self, value: pulumi.Input[int]):
pulumi.set(self, "desired_replicas", value)
@property
@pulumi.getter(name="currentMetrics")
def current_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricStatusArgs']]]]:
"""
currentMetrics is the last read state of the metrics used by this autoscaler.
"""
return pulumi.get(self, "current_metrics")
@current_metrics.setter
def current_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetricStatusArgs']]]]):
pulumi.set(self, "current_metrics", value)
@property
@pulumi.getter(name="lastScaleTime")
def last_scale_time(self) -> Optional[pulumi.Input[str]]:
"""
lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.
"""
return pulumi.get(self, "last_scale_time")
@last_scale_time.setter
def last_scale_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_scale_time", value)
@property
@pulumi.getter(name="observedGeneration")
def observed_generation(self) -> Optional[pulumi.Input[int]]:
"""
observedGeneration is the most recent generation observed by this autoscaler.
"""
return pulumi.get(self, "observed_generation")
@observed_generation.setter
def observed_generation(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "observed_generation", value)
@pulumi.input_type
class HorizontalPodAutoscalerArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
spec: Optional[pulumi.Input['HorizontalPodAutoscalerSpecArgs']] = None,
status: Optional[pulumi.Input['HorizontalPodAutoscalerStatusArgs']] = None):
"""
HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['HorizontalPodAutoscalerSpecArgs'] spec: spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
:param pulumi.Input['HorizontalPodAutoscalerStatusArgs'] status: status is the current information about the autoscaler.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'autoscaling/v2beta1')
if kind is not None:
pulumi.set(__self__, "kind", 'HorizontalPodAutoscaler')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['HorizontalPodAutoscalerSpecArgs']]:
"""
spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['HorizontalPodAutoscalerSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['HorizontalPodAutoscalerStatusArgs']]:
"""
status is the current information about the autoscaler.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['HorizontalPodAutoscalerStatusArgs']]):
pulumi.set(self, "status", value)
@pulumi.input_type
class MetricSpecArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
container_resource: Optional[pulumi.Input['ContainerResourceMetricSourceArgs']] = None,
external: Optional[pulumi.Input['ExternalMetricSourceArgs']] = None,
object: Optional[pulumi.Input['ObjectMetricSourceArgs']] = None,
pods: Optional[pulumi.Input['PodsMetricSourceArgs']] = None,
resource: Optional[pulumi.Input['ResourceMetricSourceArgs']] = None):
"""
MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
:param pulumi.Input[str] type: type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled
:param pulumi.Input['ContainerResourceMetricSourceArgs'] container_resource: container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
:param pulumi.Input['ExternalMetricSourceArgs'] external: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
:param pulumi.Input['ObjectMetricSourceArgs'] object: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
:param pulumi.Input['PodsMetricSourceArgs'] pods: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:param pulumi.Input['ResourceMetricSourceArgs'] resource: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
pulumi.set(__self__, "type", type)
if container_resource is not None:
pulumi.set(__self__, "container_resource", container_resource)
if external is not None:
pulumi.set(__self__, "external", external)
if object is not None:
pulumi.set(__self__, "object", object)
if pods is not None:
pulumi.set(__self__, "pods", pods)
if resource is not None:
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="containerResource")
def container_resource(self) -> Optional[pulumi.Input['ContainerResourceMetricSourceArgs']]:
"""
container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
"""
return pulumi.get(self, "container_resource")
@container_resource.setter
def container_resource(self, value: Optional[pulumi.Input['ContainerResourceMetricSourceArgs']]):
pulumi.set(self, "container_resource", value)
@property
@pulumi.getter
def external(self) -> Optional[pulumi.Input['ExternalMetricSourceArgs']]:
"""
external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
"""
return pulumi.get(self, "external")
@external.setter
def external(self, value: Optional[pulumi.Input['ExternalMetricSourceArgs']]):
pulumi.set(self, "external", value)
@property
@pulumi.getter
def object(self) -> Optional[pulumi.Input['ObjectMetricSourceArgs']]:
"""
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
"""
return pulumi.get(self, "object")
@object.setter
def object(self, value: Optional[pulumi.Input['ObjectMetricSourceArgs']]):
pulumi.set(self, "object", value)
@property
@pulumi.getter
def pods(self) -> Optional[pulumi.Input['PodsMetricSourceArgs']]:
"""
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
"""
return pulumi.get(self, "pods")
@pods.setter
def pods(self, value: Optional[pulumi.Input['PodsMetricSourceArgs']]):
pulumi.set(self, "pods", value)
@property
@pulumi.getter
def resource(self) -> Optional[pulumi.Input['ResourceMetricSourceArgs']]:
"""
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: Optional[pulumi.Input['ResourceMetricSourceArgs']]):
pulumi.set(self, "resource", value)
@pulumi.input_type
class MetricStatusArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
container_resource: Optional[pulumi.Input['ContainerResourceMetricStatusArgs']] = None,
external: Optional[pulumi.Input['ExternalMetricStatusArgs']] = None,
object: Optional[pulumi.Input['ObjectMetricStatusArgs']] = None,
pods: Optional[pulumi.Input['PodsMetricStatusArgs']] = None,
resource: Optional[pulumi.Input['ResourceMetricStatusArgs']] = None):
"""
MetricStatus describes the last-read state of a single metric.
:param pulumi.Input[str] type: type is the type of metric source. It will be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each corresponding to a matching field in the object. Note: "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled.
:param pulumi.Input['ContainerResourceMetricStatusArgs'] container_resource: container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
:param pulumi.Input['ExternalMetricStatusArgs'] external: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
:param pulumi.Input['ObjectMetricStatusArgs'] object: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
:param pulumi.Input['PodsMetricStatusArgs'] pods: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:param pulumi.Input['ResourceMetricStatusArgs'] resource: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
pulumi.set(__self__, "type", type)
if container_resource is not None:
pulumi.set(__self__, "container_resource", container_resource)
if external is not None:
pulumi.set(__self__, "external", external)
if object is not None:
pulumi.set(__self__, "object", object)
if pods is not None:
pulumi.set(__self__, "pods", pods)
if resource is not None:
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
type is the type of metric source. It will be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each corresponding to a matching field in the object. Note: "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="containerResource")
def container_resource(self) -> Optional[pulumi.Input['ContainerResourceMetricStatusArgs']]:
"""
container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
return pulumi.get(self, "container_resource")
@container_resource.setter
def container_resource(self, value: Optional[pulumi.Input['ContainerResourceMetricStatusArgs']]):
pulumi.set(self, "container_resource", value)
@property
@pulumi.getter
def external(self) -> Optional[pulumi.Input['ExternalMetricStatusArgs']]:
"""
external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
"""
return pulumi.get(self, "external")
@external.setter
def external(self, value: Optional[pulumi.Input['ExternalMetricStatusArgs']]):
pulumi.set(self, "external", value)
@property
@pulumi.getter
def object(self) -> Optional[pulumi.Input['ObjectMetricStatusArgs']]:
"""
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
"""
return pulumi.get(self, "object")
@object.setter
def object(self, value: Optional[pulumi.Input['ObjectMetricStatusArgs']]):
pulumi.set(self, "object", value)
@property
@pulumi.getter
def pods(self) -> Optional[pulumi.Input['PodsMetricStatusArgs']]:
"""
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
"""
return pulumi.get(self, "pods")
@pods.setter
def pods(self, value: Optional[pulumi.Input['PodsMetricStatusArgs']]):
pulumi.set(self, "pods", value)
@property
@pulumi.getter
def resource(self) -> Optional[pulumi.Input['ResourceMetricStatusArgs']]:
"""
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: Optional[pulumi.Input['ResourceMetricStatusArgs']]):
pulumi.set(self, "resource", value)
@pulumi.input_type
class ObjectMetricSourceArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
target: pulumi.Input['CrossVersionObjectReferenceArgs'],
target_value: pulumi.Input[str],
average_value: Optional[pulumi.Input[str]] = None,
selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
"""
ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question.
:param pulumi.Input['CrossVersionObjectReferenceArgs'] target: target is the described Kubernetes object.
:param pulumi.Input[str] target_value: targetValue is the target value of the metric (as a quantity).
:param pulumi.Input[str] average_value: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "target", target)
pulumi.set(__self__, "target_value", target_value)
if average_value is not None:
pulumi.set(__self__, "average_value", average_value)
if selector is not None:
pulumi.set(__self__, "selector", selector)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def target(self) -> pulumi.Input['CrossVersionObjectReferenceArgs']:
"""
target is the described Kubernetes object.
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: pulumi.Input['CrossVersionObjectReferenceArgs']):
pulumi.set(self, "target", value)
@property
@pulumi.getter(name="targetValue")
def target_value(self) -> pulumi.Input[str]:
"""
targetValue is the target value of the metric (as a quantity).
"""
return pulumi.get(self, "target_value")
@target_value.setter
def target_value(self, value: pulumi.Input[str]):
pulumi.set(self, "target_value", value)
@property
@pulumi.getter(name="averageValue")
def average_value(self) -> Optional[pulumi.Input[str]]:
"""
averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
"""
return pulumi.get(self, "average_value")
@average_value.setter
def average_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "average_value", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "selector", value)
@pulumi.input_type
class ObjectMetricStatusArgs:
def __init__(__self__, *,
current_value: pulumi.Input[str],
metric_name: pulumi.Input[str],
target: pulumi.Input['CrossVersionObjectReferenceArgs'],
average_value: Optional[pulumi.Input[str]] = None,
selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
"""
ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).
:param pulumi.Input[str] current_value: currentValue is the current value of the metric (as a quantity).
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question.
:param pulumi.Input['CrossVersionObjectReferenceArgs'] target: target is the described Kubernetes object.
:param pulumi.Input[str] average_value: averageValue is the current value of the average of the metric across all relevant pods (as a quantity)
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
pulumi.set(__self__, "current_value", current_value)
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "target", target)
if average_value is not None:
pulumi.set(__self__, "average_value", average_value)
if selector is not None:
pulumi.set(__self__, "selector", selector)
@property
@pulumi.getter(name="currentValue")
def current_value(self) -> pulumi.Input[str]:
"""
currentValue is the current value of the metric (as a quantity).
"""
return pulumi.get(self, "current_value")
@current_value.setter
def current_value(self, value: pulumi.Input[str]):
pulumi.set(self, "current_value", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def target(self) -> pulumi.Input['CrossVersionObjectReferenceArgs']:
"""
target is the described Kubernetes object.
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: pulumi.Input['CrossVersionObjectReferenceArgs']):
pulumi.set(self, "target", value)
@property
@pulumi.getter(name="averageValue")
def average_value(self) -> Optional[pulumi.Input[str]]:
"""
averageValue is the current value of the average of the metric across all relevant pods (as a quantity)
"""
return pulumi.get(self, "average_value")
@average_value.setter
def average_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "average_value", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "selector", value)
@pulumi.input_type
class PodsMetricSourceArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
target_average_value: pulumi.Input[str],
selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
"""
PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question
:param pulumi.Input[str] target_average_value: targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "target_average_value", target_average_value)
if selector is not None:
pulumi.set(__self__, "selector", selector)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> pulumi.Input[str]:
"""
targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: pulumi.Input[str]):
pulumi.set(self, "target_average_value", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "selector", value)
@pulumi.input_type
class PodsMetricStatusArgs:
def __init__(__self__, *,
current_average_value: pulumi.Input[str],
metric_name: pulumi.Input[str],
selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
"""
PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).
:param pulumi.Input[str] current_average_value: currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
pulumi.set(__self__, "current_average_value", current_average_value)
pulumi.set(__self__, "metric_name", metric_name)
if selector is not None:
pulumi.set(__self__, "selector", selector)
@property
@pulumi.getter(name="currentAverageValue")
def current_average_value(self) -> pulumi.Input[str]:
"""
currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)
"""
return pulumi.get(self, "current_average_value")
@current_average_value.setter
def current_average_value(self, value: pulumi.Input[str]):
pulumi.set(self, "current_average_value", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "selector", value)
@pulumi.input_type
class ResourceMetricSourceArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
target_average_utilization: Optional[pulumi.Input[int]] = None,
target_average_value: Optional[pulumi.Input[str]] = None):
"""
ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.
:param pulumi.Input[str] name: name is the name of the resource in question.
:param pulumi.Input[int] target_average_utilization: targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
:param pulumi.Input[str] target_average_value: targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
"""
pulumi.set(__self__, "name", name)
if target_average_utilization is not None:
pulumi.set(__self__, "target_average_utilization", target_average_utilization)
if target_average_value is not None:
pulumi.set(__self__, "target_average_value", target_average_value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name is the name of the resource in question.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="targetAverageUtilization")
def target_average_utilization(self) -> Optional[pulumi.Input[int]]:
"""
targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
"""
return pulumi.get(self, "target_average_utilization")
@target_average_utilization.setter
def target_average_utilization(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_average_utilization", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> Optional[pulumi.Input[str]]:
"""
targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_average_value", value)
@pulumi.input_type
class ResourceMetricStatusArgs:
def __init__(__self__, *,
current_average_value: pulumi.Input[str],
name: pulumi.Input[str],
current_average_utilization: Optional[pulumi.Input[int]] = None):
"""
ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
:param pulumi.Input[str] current_average_value: currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type. It will always be set, regardless of the corresponding metric specification.
:param pulumi.Input[str] name: name is the name of the resource in question.
:param pulumi.Input[int] current_average_utilization: currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.
"""
pulumi.set(__self__, "current_average_value", current_average_value)
pulumi.set(__self__, "name", name)
if current_average_utilization is not None:
pulumi.set(__self__, "current_average_utilization", current_average_utilization)
@property
@pulumi.getter(name="currentAverageValue")
def current_average_value(self) -> pulumi.Input[str]:
"""
currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type. It will always be set, regardless of the corresponding metric specification.
"""
return pulumi.get(self, "current_average_value")
@current_average_value.setter
def current_average_value(self, value: pulumi.Input[str]):
pulumi.set(self, "current_average_value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name is the name of the resource in question.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="currentAverageUtilization")
def current_average_utilization(self) -> Optional[pulumi.Input[int]]:
"""
currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.
"""
return pulumi.get(self, "current_average_utilization")
@current_average_utilization.setter
def current_average_utilization(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "current_average_utilization", value)
|
# Generated by Django 3.1.5 on 2021-06-20 00:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('compras', '0006_auto_20210616_2025'),
]
operations = [
migrations.AddField(
model_name='facturadet',
name='detalle_cargado_mes',
field=models.CharField(blank=True, default='N', max_length=2, null=True),
),
migrations.AlterField(
model_name='facturacompra',
name='fecha_alta',
field=models.CharField(default='19/06/2021', max_length=500, null=True),
),
migrations.AlterField(
model_name='pedido',
name='fecha_alta',
field=models.CharField(default='19/06/2021 20:23:10 hs', editable=False, max_length=200),
),
migrations.AlterField(
model_name='pedidocabecera',
name='fecha_alta',
field=models.CharField(default='19/06/2021', editable=False, max_length=200),
),
]
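# Hedged usage note (an assumption, not generated by Django): with this file in
# compras/migrations/, the schema changes above would normally be applied with
# "python manage.py migrate compras", or programmatically:
#
#     from django.core.management import call_command
#     call_command("migrate", "compras")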
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGridgraphics(RPackage):
"""Redraw Base Graphics Using 'grid' Graphics.
Functions to convert a page of plots drawn with the 'graphics' package into
identical output drawn with the 'grid' package. The result looks like the
original 'graphics'-based plot, but consists of 'grid' grobs and viewports
that can then be manipulated with 'grid' functions (e.g., edit grobs and
revisit viewports)."""
cran = "gridGraphics"
version('0.5-1', sha256='29086e94e63891884c933b186b35511aac2a2f9c56967a72e4050e2980e7da8b')
version('0.4-1', sha256='b770127b71664bbf67f8853a2666c071f2b9920743eddc9f3a58ecb948b923cf')
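# Hedged note (illustrative, not part of this recipe): a Spack package for another
# R library that needed gridGraphics at build and run time would declare the
# dependency with the standard directive, e.g.
#
#     depends_on('r-gridgraphics', type=('build', 'run'))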
|
# -*- coding: utf-8 -*-
r"""
Families of graphs derived from classical geometries over finite fields
These include graphs of polar spaces, affine polar graphs, graphs
related to Hermitean unitals, graphs on nonisotropic points, etc.
The methods defined here appear in :mod:`sage.graphs.graph_generators`.
"""
###########################################################################
#
# Copyright (C) 2015 Sagemath project
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
###########################################################################
from __future__ import absolute_import, division
from six.moves import range
from sage.graphs.graph import Graph
from sage.arith.all import is_prime_power
from sage.rings.finite_rings.finite_field_constructor import FiniteField
def SymplecticPolarGraph(d, q, algorithm=None):
r"""
Returns the Symplectic Polar Graph `Sp(d,q)`.
The Symplectic Polar Graph `Sp(d,q)` is built from a projective space of dimension
`d-1` over a field `F_q`, and a symplectic form `f`. Two vertices `u,v` are
made adjacent if `f(u,v)=0`.
See the page `on symplectic graphs on Andries Brouwer's website
<https://www.win.tue.nl/~aeb/graphs/Sp.html>`_.
INPUT:
- ``d,q`` (integers) -- note that only even values of `d` are accepted by
the function.
- ``algorithm`` -- if set to 'gap' then the computation is carried via GAP
library interface, computing totally singular subspaces, which is faster for `q>3`.
Otherwise it is done directly.
EXAMPLES:
Computation of the spectrum of `Sp(6,2)`::
sage: g = graphs.SymplecticPolarGraph(6,2)
sage: g.is_strongly_regular(parameters=True)
(63, 30, 13, 15)
sage: set(g.spectrum()) == {-5, 3, 30}
True
The parameters of `Sp(4,q)` are the same as of `O(5,q)`, but they are
not isomorphic if `q` is odd::
sage: G = graphs.SymplecticPolarGraph(4,3)
sage: G.is_strongly_regular(parameters=True)
(40, 12, 2, 4)
sage: O=graphs.OrthogonalPolarGraph(5,3)
sage: O.is_strongly_regular(parameters=True)
(40, 12, 2, 4)
sage: O.is_isomorphic(G)
False
sage: graphs.SymplecticPolarGraph(6,4,algorithm="gap").is_strongly_regular(parameters=True) # not tested (long time)
(1365, 340, 83, 85)
TESTS::
sage: graphs.SymplecticPolarGraph(4,4,algorithm="gap").is_strongly_regular(parameters=True)
(85, 20, 3, 5)
sage: graphs.SymplecticPolarGraph(4,4).is_strongly_regular(parameters=True)
(85, 20, 3, 5)
sage: graphs.SymplecticPolarGraph(4,4,algorithm="blah")
Traceback (most recent call last):
...
ValueError: unknown algorithm!
"""
if d < 1 or d % 2:
raise ValueError("d must be even and greater than 2")
if algorithm == "gap": # faster for larger (q>3) fields
from sage.libs.gap.libgap import libgap
G = _polar_graph(d, q, libgap.SymplecticGroup(d, q))
elif algorithm is None: # faster for small (q<4) fields
from sage.modules.free_module import VectorSpace
from sage.schemes.projective.projective_space import ProjectiveSpace
from sage.matrix.constructor import identity_matrix, block_matrix, zero_matrix
F = FiniteField(q,"x")
M = block_matrix(F, 2, 2,
[zero_matrix(F,d/2),
identity_matrix(F,d/2),
-identity_matrix(F,d/2),
zero_matrix(F,d/2)])
V = VectorSpace(F,d)
PV = list(ProjectiveSpace(d-1,F))
G = Graph([[tuple(_) for _ in PV], lambda x,y:V(x)*(M*V(y)) == 0], loops = False)
else:
raise ValueError("unknown algorithm!")
G.name("Symplectic Polar Graph Sp("+str(d)+","+str(q)+")")
G.relabel()
return G
def AffineOrthogonalPolarGraph(d,q,sign="+"):
r"""
Returns the affine polar graph `VO^+(d,q),VO^-(d,q)` or `VO(d,q)`.
Affine Polar graphs are built from a `d`-dimensional vector space over
`F_q`, and a quadratic form which is hyperbolic, elliptic or parabolic
according to the value of ``sign``.
Note that `VO^+(d,q),VO^-(d,q)` are strongly regular graphs, while `VO(d,q)`
is not.
For more information on Affine Polar graphs, see `Affine Polar
Graphs page of Andries Brouwer's website
<https://www.win.tue.nl/~aeb/graphs/VO.html>`_.
INPUT:
- ``d`` (integer) -- ``d`` must be even if ``sign is not None``, and odd
otherwise.
- ``q`` (integer) -- a power of a prime number, as `F_q` must exist.
- ``sign`` -- must be equal to ``"+"``, ``"-"``, or ``None`` to compute
(respectively) `VO^+(d,q),VO^-(d,q)` or `VO(d,q)`. By default
``sign="+"``.
.. NOTE::
The graph `VO^\epsilon(d,q)` is the graph induced by the
non-neighbors of a vertex in an :meth:`Orthogonal Polar Graph
<OrthogonalPolarGraph>` `O^\epsilon(d+2,q)`.
EXAMPLES:
The :meth:`Brouwer-Haemers graph <BrouwerHaemersGraph>` is isomorphic to
`VO^-(4,3)`::
sage: g = graphs.AffineOrthogonalPolarGraph(4,3,"-")
sage: g.is_isomorphic(graphs.BrouwerHaemersGraph())
True
Some examples from `Brouwer's table of strongly regular graphs
<https://www.win.tue.nl/~aeb/graphs/srg/srgtab.html>`_::
sage: g = graphs.AffineOrthogonalPolarGraph(6,2,"-"); g
Affine Polar Graph VO^-(6,2): Graph on 64 vertices
sage: g.is_strongly_regular(parameters=True)
(64, 27, 10, 12)
sage: g = graphs.AffineOrthogonalPolarGraph(6,2,"+"); g
Affine Polar Graph VO^+(6,2): Graph on 64 vertices
sage: g.is_strongly_regular(parameters=True)
(64, 35, 18, 20)
When ``sign is None``::
sage: g = graphs.AffineOrthogonalPolarGraph(5,2,None); g
Affine Polar Graph VO^-(5,2): Graph on 32 vertices
sage: g.is_strongly_regular(parameters=True)
False
sage: g.is_regular()
True
sage: g.is_vertex_transitive()
True
"""
if sign in ["+", "-"]:
s = 1 if sign == "+" else -1
if d % 2:
raise ValueError("d must be even when sign is not None")
else:
if d % 2 == 0:
raise ValueError("d must be odd when sign is None")
s = 0
from sage.modules.free_module import VectorSpace
from sage.matrix.constructor import Matrix
from sage.libs.gap.libgap import libgap
from itertools import combinations
M = Matrix(libgap.InvariantQuadraticForm(libgap.GeneralOrthogonalGroup(s,d,q))['matrix'])
F = libgap.GF(q).sage()
V = list(VectorSpace(F,d))
G = Graph()
G.add_vertices([tuple(_) for _ in V])
for x,y in combinations(V,2):
if not (x-y)*M*(x-y):
G.add_edge(tuple(x),tuple(y))
G.name("Affine Polar Graph VO^"+str('+' if s == 1 else '-')+"("+str(d)+","+str(q)+")")
G.relabel()
return G
def _orthogonal_polar_graph(m, q, sign="+", point_type=[0]):
r"""
A helper function to build ``OrthogonalPolarGraph`` and ``NO2,3,5`` graphs.
See the `page of
Andries Brouwer's website <https://www.win.tue.nl/~aeb/graphs/srghub.html>`_.
INPUT:
- ``m,q`` (integers) -- `q` must be a prime power.
- ``sign`` -- ``"+"`` or ``"-"`` if `m` is even, ``"+"`` (default)
otherwise.
- ``point_type`` -- a list of elements from `F_q`
EXAMPLES:
Petersen graph::
sage: from sage.graphs.generators.classical_geometries import _orthogonal_polar_graph
sage: g=_orthogonal_polar_graph(3,5,point_type=[2,3])
sage: g.is_strongly_regular(parameters=True)
(10, 3, 0, 1)
A locally Petersen graph (a.k.a. Doro graph, a.k.a. Hall graph)::
sage: g=_orthogonal_polar_graph(4,5,'-',point_type=[2,3])
sage: g.is_distance_regular(parameters=True)
([10, 6, 4, None], [None, 1, 2, 5])
Various big and slow to build graphs:
`NO^+(7,3)`::
sage: g=_orthogonal_polar_graph(7,3,point_type=[1]) # not tested (long time)
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(378, 117, 36, 36)
`NO^-(7,3)`::
sage: g=_orthogonal_polar_graph(7,3,point_type=[-1]) # not tested (long time)
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(351, 126, 45, 45)
`NO^+(6,3)`::
sage: g=_orthogonal_polar_graph(6,3,point_type=[1])
sage: g.is_strongly_regular(parameters=True)
(117, 36, 15, 9)
`NO^-(6,3)`::
sage: g=_orthogonal_polar_graph(6,3,'-',point_type=[1])
sage: g.is_strongly_regular(parameters=True)
(126, 45, 12, 18)
`NO^{-,\perp}(5,5)`::
sage: g=_orthogonal_polar_graph(5,5,point_type=[2,3]) # long time
sage: g.is_strongly_regular(parameters=True) # long time
(300, 65, 10, 15)
`NO^{+,\perp}(5,5)`::
sage: g=_orthogonal_polar_graph(5,5,point_type=[1,-1]) # not tested (long time)
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(325, 60, 15, 10)
TESTS::
sage: g=_orthogonal_polar_graph(5,3,point_type=[-1])
sage: g.is_strongly_regular(parameters=True)
(45, 12, 3, 3)
sage: g=_orthogonal_polar_graph(5,3,point_type=[1])
sage: g.is_strongly_regular(parameters=True)
(36, 15, 6, 6)
"""
from sage.schemes.projective.projective_space import ProjectiveSpace
from sage.modules.free_module_element import free_module_element as vector
from sage.matrix.constructor import Matrix
from sage.libs.gap.libgap import libgap
if m % 2 == 0:
if sign != "+" and sign != "-":
raise ValueError("sign must be equal to either '-' or '+' when "
"m is even")
else:
if sign != "" and sign != "+":
raise ValueError("sign must be equal to either '' or '+' when "
"m is odd")
sign = ""
e = {'+': 1,
'-': -1,
'' : 0}[sign]
M = Matrix(libgap.InvariantQuadraticForm(libgap.GeneralOrthogonalGroup(e,m,q))['matrix'])
Fq = libgap.GF(q).sage()
PG = [vector(s) for s in ProjectiveSpace(m - 1, Fq)]
for v in PG:
v.set_immutable()
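# F(x) evaluates the invariant quadratic form at x; vertices are the projective
# points whose form value lies in point_type, and adjacency is P(x, y) == 0,
# where P is the polarized form x*M*y + y*M*x in odd characteristic and
# F(x - y) in characteristic 2.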
def F(x):
return x*M*x
if q % 2 == 0:
def P(x,y):
return F(x-y)
else:
def P(x,y):
return x*M*y+y*M*x
V = [x for x in PG if F(x) in point_type]
G = Graph([V,lambda x,y:P(x,y)==0],loops=False)
G.relabel()
return G
def OrthogonalPolarGraph(m, q, sign="+"):
r"""
Returns the Orthogonal Polar Graph `O^{\epsilon}(m,q)`.
For more information on Orthogonal Polar graphs, see the `page of
Andries Brouwer's website <https://www.win.tue.nl/~aeb/graphs/srghub.html>`_.
INPUT:
- ``m,q`` (integers) -- `q` must be a prime power.
- ``sign`` -- ``"+"`` or ``"-"`` if `m` is even, ``"+"`` (default)
otherwise.
EXAMPLES::
sage: G = graphs.OrthogonalPolarGraph(6,3,"+"); G
Orthogonal Polar Graph O^+(6, 3): Graph on 130 vertices
sage: G.is_strongly_regular(parameters=True)
(130, 48, 20, 16)
sage: G = graphs.OrthogonalPolarGraph(6,3,"-"); G
Orthogonal Polar Graph O^-(6, 3): Graph on 112 vertices
sage: G.is_strongly_regular(parameters=True)
(112, 30, 2, 10)
sage: G = graphs.OrthogonalPolarGraph(5,3); G
Orthogonal Polar Graph O(5, 3): Graph on 40 vertices
sage: G.is_strongly_regular(parameters=True)
(40, 12, 2, 4)
sage: G = graphs.OrthogonalPolarGraph(8,2,"+"); G
Orthogonal Polar Graph O^+(8, 2): Graph on 135 vertices
sage: G.is_strongly_regular(parameters=True)
(135, 70, 37, 35)
sage: G = graphs.OrthogonalPolarGraph(8,2,"-"); G
Orthogonal Polar Graph O^-(8, 2): Graph on 119 vertices
sage: G.is_strongly_regular(parameters=True)
(119, 54, 21, 27)
TESTS::
sage: G = graphs.OrthogonalPolarGraph(4,3,"")
Traceback (most recent call last):
...
ValueError: sign must be equal to either '-' or '+' when m is even
sage: G = graphs.OrthogonalPolarGraph(5,3,"-")
Traceback (most recent call last):
...
ValueError: sign must be equal to either '' or '+' when m is odd
"""
G = _orthogonal_polar_graph(m, q, sign=sign)
if m % 2:
sign = ""
G.name("Orthogonal Polar Graph O" + ("^" + sign if sign else "") + str((m, q)))
return G
def NonisotropicOrthogonalPolarGraph(m, q, sign="+", perp=None):
r"""
Returns the Graph `NO^{\epsilon,\perp}_{m}(q)`
Let the vectorspace of dimension `m` over `F_q` be
endowed with a nondegenerate quadratic form `F`, of type ``sign`` for `m` even.
* `m` even: assume further that `q=2` or `3`. Returns the graph of the
points (in the underlying projective space) `x` satisfying `F(x)=1`, with adjacency
given by orthogonality w.r.t. `F`. Parameter ``perp`` is ignored.
* `m` odd: if ``perp`` is not ``None``, then we assume that `q=5` and
return the graph of the points `x` satisfying `F(x)=\pm 1` if ``sign="+"``,
respectively `F(x) \in \{2,3\}` if ``sign="-"``, with adjacency
given by orthogonality w.r.t. `F` (cf. Sect 7.D of [BL1984]_).
Otherwise return the graph
of nondegenerate hyperplanes of type ``sign``, adjacent whenever the intersection
is degenerate (cf. Sect. 7.C of [BL1984]_).
Note that for `q=2` one will get a complete graph.
For more information, see Sect. 9.9 of [BH12]_ and [BL1984]_. Note that
the `page of Andries Brouwer's website
<https://www.win.tue.nl/~aeb/graphs/srghub.html>`_ uses different notation.
INPUT:
- ``m`` - integer, the dimension of the underlying vectorspace
- ``q`` - a power of a prime number, the size of the underlying field
- ``sign`` -- ``"+"`` (default) or ``"-"``.
EXAMPLES:
`NO^-(4,2)` is isomorphic to Petersen graph::
sage: g=graphs.NonisotropicOrthogonalPolarGraph(4,2,'-'); g
NO^-(4, 2): Graph on 10 vertices
sage: g.is_strongly_regular(parameters=True)
(10, 3, 0, 1)
`NO^-(6,2)` and `NO^+(6,2)`::
sage: g=graphs.NonisotropicOrthogonalPolarGraph(6,2,'-')
sage: g.is_strongly_regular(parameters=True)
(36, 15, 6, 6)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(6,2,'+'); g
NO^+(6, 2): Graph on 28 vertices
sage: g.is_strongly_regular(parameters=True)
(28, 15, 6, 10)
`NO^+(8,2)`::
sage: g=graphs.NonisotropicOrthogonalPolarGraph(8,2,'+')
sage: g.is_strongly_regular(parameters=True)
(120, 63, 30, 36)
Wilbrink's graphs for `q=5`::
sage: graphs.NonisotropicOrthogonalPolarGraph(5,5,perp=1).is_strongly_regular(parameters=True) # long time
(325, 60, 15, 10)
sage: graphs.NonisotropicOrthogonalPolarGraph(5,5,'-',perp=1).is_strongly_regular(parameters=True) # long time
(300, 65, 10, 15)
Wilbrink's graphs::
sage: g=graphs.NonisotropicOrthogonalPolarGraph(5,4,'+')
sage: g.is_strongly_regular(parameters=True)
(136, 75, 42, 40)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(5,4,'-')
sage: g.is_strongly_regular(parameters=True)
(120, 51, 18, 24)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(7,4,'+'); g # not tested (long time)
NO^+(7, 4): Graph on 2080 vertices
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(2080, 1071, 558, 544)
TESTS::
sage: g=graphs.NonisotropicOrthogonalPolarGraph(4,2); g
NO^+(4, 2): Graph on 6 vertices
sage: graphs.NonisotropicOrthogonalPolarGraph(4,3,'-').is_strongly_regular(parameters=True)
(15, 6, 1, 3)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(3,5,'-',perp=1); g
NO^-,perp(3, 5): Graph on 10 vertices
sage: g.is_strongly_regular(parameters=True)
(10, 3, 0, 1)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(6,3,'+') # long time
sage: g.is_strongly_regular(parameters=True) # long time
(117, 36, 15, 9)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(6,3,'-'); g # long time
NO^-(6, 3): Graph on 126 vertices
sage: g.is_strongly_regular(parameters=True) # long time
(126, 45, 12, 18)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(5,5,'-') # long time
sage: g.is_strongly_regular(parameters=True) # long time
(300, 104, 28, 40)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(5,5,'+') # long time
sage: g.is_strongly_regular(parameters=True) # long time
(325, 144, 68, 60)
sage: g=graphs.NonisotropicOrthogonalPolarGraph(6,4,'+')
Traceback (most recent call last):
...
ValueError: for m even q must be 2 or 3
"""
p, k = is_prime_power(q, get_data=True)
if k == 0:
raise ValueError('q must be a prime power')
dec = ''
if m % 2 == 0:
if q in [2, 3]:
G = _orthogonal_polar_graph(m, q, sign=sign, point_type=[1])
else:
raise ValueError("for m even q must be 2 or 3")
elif not perp is None:
if q == 5:
G = _orthogonal_polar_graph(m, q, point_type=\
[-1,1] if sign=='+' else [2,3] if sign=='-' else [])
dec = ",perp"
else:
raise ValueError("for perp not None q must be 5")
else:
if not sign in ['+','-']:
raise ValueError("sign must be '+' or '-'")
from sage.libs.gap.libgap import libgap
g0 = libgap.GeneralOrthogonalGroup(m,q)
g = libgap.Group(libgap.List(g0.GeneratorsOfGroup(),libgap.TransposedMat))
F=libgap.GF(q) # F_q
W=libgap.FullRowSpace(F, m) # F_q^m
e = 1 if sign=='+' else -1
n = (m-1)/2
# we build (q^n(q^n+e)/2, (q^n-e)(q^(n-1)+e), 2(q^(2n-2)-1)+eq^(n-1)(q-1),
# 2q^(n-1)(q^(n-1)+e))-srg
# **use** v and k to select appropriate orbit and orbital
nvert = (q**n)*(q**n+e)/2 # v
deg = (q**n-e)*(q**(n-1)+e) # k
S = [libgap.Elements(libgap.Basis(x))[0]
for x in libgap.Elements(libgap.Subspaces(W, 1))]
(V,) = [x for x in libgap.Orbits(g, S, libgap.OnLines)
if len(x) == nvert]
gp = libgap.Action(g, V, libgap.OnLines) # make a permutation group
h = libgap.Stabilizer(gp, 1)
(Vh,) = [x for x in libgap.Orbits(h, libgap.Orbit(gp, 1))
if len(x) == deg]
Vh = Vh[0]
L = libgap.Orbit(gp, [1, Vh], libgap.OnSets)
G = Graph()
G.add_edges(L)
G.name("NO^" + sign + dec + str((m, q)))
return G
def _polar_graph(m, q, g, intersection_size=None):
r"""
The helper function to build graphs `(D)U(m,q)` and `(D)Sp(m,q)`
Building a graph on an orbit of a group `g` of `m\times m` matrices over `GF(q)` on
the points (or subspaces of dimension ``m//2``) isotropic w.r.t. the form `F`
left invariant by the group `g`.
The only constraint is that the first ``m//2`` elements of the standard
basis must generate a totally isotropic w.r.t. `F` subspace; this is the case with
these groups coming from GAP; namely, `F` has the anti-diagonal all-1 matrix.
INPUT:
- ``m`` -- the dimension of the underlying vector space
- ``q`` -- the size of the field
- ``g`` -- the group acting
- ``intersection_size`` -- if ``None``, build the graph on the isotropic points, with
adjacency being orthogonality w.r.t. `F`. Otherwise, build the graph on the maximal
totally isotropic subspaces, with adjacency specified by ``intersection_size`` being
as given.
TESTS::
sage: from sage.graphs.generators.classical_geometries import _polar_graph
sage: _polar_graph(4, 4, libgap.GeneralUnitaryGroup(4, 2))
Graph on 45 vertices
sage: _polar_graph(4, 4, libgap.GeneralUnitaryGroup(4, 2), intersection_size=1)
Graph on 27 vertices
"""
from sage.libs.gap.libgap import libgap
from itertools import combinations
W=libgap.FullRowSpace(libgap.GF(q), m) # F_q^m
B=libgap.Elements(libgap.Basis(W)) # the standard basis of W
V = libgap.Orbit(g,B[0],libgap.OnLines) # orbit on isotropic points
gp = libgap.Action(g,V,libgap.OnLines) # make a permutation group
s = libgap.Subspace(W,[B[i] for i in range(m//2)]) # a totally isotropic subspace
# and the points there
sp = [libgap.Elements(libgap.Basis(x))[0] for x in libgap.Elements(s.Subspaces(1))]
h = libgap.Set([libgap.Position(V, x) for x in sp]) # indices of the points in s
L = libgap.Orbit(gp, h, libgap.OnSets) # orbit on these subspaces
if intersection_size is None:
G = Graph()
for x in L: # every pair of points in the subspace is adjacent to each other in G
G.add_edges(combinations(x, 2))
return G
else:
return Graph([L, lambda i,j: libgap.Size(libgap.Intersection(i,j))==intersection_size],
loops=False)
def UnitaryPolarGraph(m, q, algorithm="gap"):
r"""
Returns the Unitary Polar Graph `U(m,q)`.
For more information on Unitary Polar graphs, see the `page of
Andries Brouwer's website <https://www.win.tue.nl/~aeb/graphs/srghub.html>`_.
INPUT:
- ``m,q`` (integers) -- `q` must be a prime power.
- ``algorithm`` -- if set to 'gap' then the computation is carried via GAP
library interface, computing totally singular subspaces, which is faster for
large examples (especially with `q>2`). Otherwise it is done directly.
EXAMPLES::
sage: G = graphs.UnitaryPolarGraph(4,2); G
Unitary Polar Graph U(4, 2); GQ(4, 2): Graph on 45 vertices
sage: G.is_strongly_regular(parameters=True)
(45, 12, 3, 3)
sage: graphs.UnitaryPolarGraph(5,2).is_strongly_regular(parameters=True)
(165, 36, 3, 9)
sage: graphs.UnitaryPolarGraph(6,2) # not tested (long time)
Unitary Polar Graph U(6, 2): Graph on 693 vertices
TESTS::
sage: graphs.UnitaryPolarGraph(4,3, algorithm="gap").is_strongly_regular(parameters=True)
(280, 36, 8, 4)
sage: graphs.UnitaryPolarGraph(4,3).is_strongly_regular(parameters=True)
(280, 36, 8, 4)
sage: graphs.UnitaryPolarGraph(4,3, algorithm="foo")
Traceback (most recent call last):
...
ValueError: unknown algorithm!
"""
if algorithm == "gap":
from sage.libs.gap.libgap import libgap
G = _polar_graph(m, q**2, libgap.GeneralUnitaryGroup(m, q))
elif algorithm is None: # slow on large examples
from sage.schemes.projective.projective_space import ProjectiveSpace
from sage.modules.free_module_element import free_module_element as vector
Fq = FiniteField(q**2, 'a')
PG = [vector(s) for s in ProjectiveSpace(m - 1, Fq)]
for v in PG:
v.set_immutable()
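# P(x, y) expresses orthogonality with respect to the Hermitean form defined by
# the anti-diagonal matrix, i.e. sum_j x_j * y_{m-1-j}^q == 0; the vertices are
# the isotropic points (those with P(x, x)) and edges join orthogonal pairs.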
def P(x, y):
return sum(x[j] * y[m - 1 - j] ** q for j in range(m)) == 0
V = [x for x in PG if P(x,x)]
G = Graph([V,lambda x,y: # bottleneck is here, of course:
P(x,y)], loops=False)
else:
raise ValueError("unknown algorithm!")
G.relabel()
G.name("Unitary Polar Graph U" + str((m, q)))
if m==4:
G.name(G.name()+'; GQ'+str((q**2,q)))
if m==5:
G.name(G.name()+'; GQ'+str((q**2,q**3)))
return G
def NonisotropicUnitaryPolarGraph(m, q):
r"""
Returns the Graph `NU(m,q)`.
Returns the graph on the points of the `(m-1)`-dimensional projective space over `F_q`
that are nonisotropic with respect to a nondegenerate Hermitean form, with two points
adjacent whenever they lie on a line tangent to the set of isotropic points.
For more information, see Sect. 9.9 of [BH12]_ and series C14 in [Hub1975]_.
INPUT:
- ``m,q`` (integers) -- `q` must be a prime power.
EXAMPLES::
sage: g=graphs.NonisotropicUnitaryPolarGraph(5,2); g
NU(5, 2): Graph on 176 vertices
sage: g.is_strongly_regular(parameters=True)
(176, 135, 102, 108)
TESTS::
sage: graphs.NonisotropicUnitaryPolarGraph(4,2).is_strongly_regular(parameters=True)
(40, 27, 18, 18)
sage: graphs.NonisotropicUnitaryPolarGraph(4,3).is_strongly_regular(parameters=True) # long time
(540, 224, 88, 96)
sage: graphs.NonisotropicUnitaryPolarGraph(6,6)
Traceback (most recent call last):
...
ValueError: q must be a prime power
"""
p, k = is_prime_power(q,get_data=True)
if k==0:
raise ValueError('q must be a prime power')
from sage.libs.gap.libgap import libgap
from itertools import combinations
F=libgap.GF(q**2) # F_{q^2}
W=libgap.FullRowSpace(F, m) # F_{q^2}^m
B=libgap.Elements(libgap.Basis(W)) # the standard basis of W
if m % 2:
point = B[(m-1)/2]
else:
if p==2:
point = B[m/2] + F.PrimitiveRoot()*B[(m-2)/2]
else:
point = B[(m-2)/2] + B[m/2]
g = libgap.GeneralUnitaryGroup(m,q)
V = libgap.Orbit(g,point,libgap.OnLines) # orbit on nonisotropic points
gp = libgap.Action(g,V,libgap.OnLines) # make a permutation group
s = libgap.Subspace(W,[point, point+B[0]]) # a tangent line on point
# and the points there
sp = [libgap.Elements(libgap.Basis(x))[0] for x in libgap.Elements(s.Subspaces(1))]
h = libgap.Set([libgap.Position(V, x)
for x in libgap.Intersection(V, sp)]) # indices
L = libgap.Orbit(gp, h, libgap.OnSets) # orbit on the tangent lines
G = Graph()
for x in L: # every pair of points in the subspace is adjacent to each other in G
G.add_edges(combinations(x, 2))
G.relabel()
G.name("NU" + str((m, q)))
return G
def UnitaryDualPolarGraph(m, q):
r"""
Returns the Dual Unitary Polar Graph `U(m,q)`.
For more information on Unitary Dual Polar graphs, see [BCN1989]_ and
Sect. 2.3.1 of [Coh1981]_.
INPUT:
- ``m,q`` (integers) -- `q` must be a prime power.
EXAMPLES:
The point graph of a generalized quadrangle (see
:wikipedia:`Generalized_quadrangle`, [PT2009]_) of order (8,4)::
sage: G = graphs.UnitaryDualPolarGraph(5,2); G # long time
Unitary Dual Polar Graph DU(5, 2); GQ(8, 4): Graph on 297 vertices
sage: G.is_strongly_regular(parameters=True) # long time
(297, 40, 7, 5)
Another way to get the generalized quadrangle of order (2,4)::
sage: G = graphs.UnitaryDualPolarGraph(4,2); G
Unitary Dual Polar Graph DU(4, 2); GQ(2, 4): Graph on 27 vertices
sage: G.is_isomorphic(graphs.OrthogonalPolarGraph(6,2,'-'))
True
A bigger graph::
sage: G = graphs.UnitaryDualPolarGraph(6,2); G # not tested (long time)
Unitary Dual Polar Graph DU(6, 2): Graph on 891 vertices
sage: G.is_distance_regular(parameters=True) # not tested (long time)
([42, 40, 32, None], [None, 1, 5, 21])
TESTS::
sage: graphs.UnitaryDualPolarGraph(6,6)
Traceback (most recent call last):
...
GAPError: Error, <subfield> must be a prime or a finite field
"""
from sage.libs.gap.libgap import libgap
G = _polar_graph(m, q**2, libgap.GeneralUnitaryGroup(m, q),
intersection_size=(q**(2*(m//2-1))-1)/(q**2-1))
G.relabel()
G.name("Unitary Dual Polar Graph DU" + str((m, q)))
if m==4:
G.name(G.name()+'; GQ'+str((q,q**2)))
if m==5:
G.name(G.name()+'; GQ'+str((q**3,q**2)))
return G
def SymplecticDualPolarGraph(m, q):
r"""
Returns the Symplectic Dual Polar Graph `DSp(m,q)`.
For more information on Symplectic Dual Polar graphs, see [BCN1989]_ and
Sect. 2.3.1 of [Coh1981]_.
INPUT:
- ``m,q`` (integers) -- `q` must be a prime power, and `m` must be even.
EXAMPLES::
sage: G = graphs.SymplecticDualPolarGraph(6,3); G # not tested (long time)
Symplectic Dual Polar Graph DSp(6, 3): Graph on 1120 vertices
sage: G.is_distance_regular(parameters=True) # not tested (long time)
([39, 36, 27, None], [None, 1, 4, 13])
TESTS::
sage: G = graphs.SymplecticDualPolarGraph(6,2); G
Symplectic Dual Polar Graph DSp(6, 2): Graph on 135 vertices
sage: G.is_distance_regular(parameters=True)
([14, 12, 8, None], [None, 1, 3, 7])
sage: graphs.SymplecticDualPolarGraph(6,6)
Traceback (most recent call last):
...
GAPError: Error, <subfield> must be a prime or a finite field
"""
from sage.libs.gap.libgap import libgap
G = _polar_graph(m, q, libgap.SymplecticGroup(m, q),
intersection_size=(q**(m/2-1)-1)/(q-1))
G.relabel()
G.name("Symplectic Dual Polar Graph DSp" + str((m, q)))
if m==4:
G.name(G.name()+'; GQ'+str((q,q)))
return G
def TaylorTwographDescendantSRG(q, clique_partition=None):
r"""
constructing the descendant graph of Taylor's two-graph for `U_3(q)`, `q` odd
This is a strongly regular graph with parameters
`(v,k,\lambda,\mu)=(q^3, (q^2+1)(q-1)/2, (q-1)^3/4-1, (q^2+1)(q-1)/4)`
obtained as a two-graph descendant of the
:func:`Taylor's two-graph <sage.combinat.designs.twographs.taylor_twograph>` `T`.
This graph admits a partition into cliques of size `q`, which are useful in
:func:`~sage.graphs.graph_generators.GraphGenerators.TaylorTwographSRG`,
a strongly regular graph on `q^3+1` vertices in the
Seidel switching class of `T`, for which we need `(q^2+1)/2` cliques.
The cliques are the `q^2` lines on `v_0` of the projective plane containing the unital
for `U_3(q)`, and intersecting the unital (i.e. the vertices of the graph and the point
we remove) in `q+1` points. This is all taken from §7E of [BL1984]_.
INPUT:
- ``q`` -- a power of an odd prime number
- ``clique_partition`` -- if ``True``, also return `q^2-1` pairwise disjoint cliques
of size `q` (removing all of them leaves a clique, too), together with the point
removed from the unital.
EXAMPLES::
sage: g=graphs.TaylorTwographDescendantSRG(3); g
Taylor two-graph descendant SRG: Graph on 27 vertices
sage: g.is_strongly_regular(parameters=True)
(27, 10, 1, 5)
sage: from sage.combinat.designs.twographs import taylor_twograph
sage: T = taylor_twograph(3) # long time
sage: g.is_isomorphic(T.descendant(T.ground_set()[1])) # long time
True
sage: g=graphs.TaylorTwographDescendantSRG(5) # not tested (long time)
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(125, 52, 15, 26)
TESTS::
sage: g,l,_=graphs.TaylorTwographDescendantSRG(3,clique_partition=True)
sage: all(g.is_clique(x) for x in l)
True
sage: graphs.TaylorTwographDescendantSRG(4)
Traceback (most recent call last):
...
ValueError: q must be an odd prime power
sage: graphs.TaylorTwographDescendantSRG(6)
Traceback (most recent call last):
...
ValueError: q must be an odd prime power
"""
p, k = is_prime_power(q,get_data=True)
if k==0 or p==2:
raise ValueError('q must be an odd prime power')
from sage.schemes.projective.projective_space import ProjectiveSpace
from sage.rings.finite_rings.integer_mod import mod
from six.moves.builtins import sum
Fq = FiniteField(q**2, 'a')
PG = map(tuple,ProjectiveSpace(2, Fq))
def S(x, y):
return sum(x[j] * y[2 - j] ** q for j in range(3))
V = [x for x in PG if S(x,x) == 0] # the points of the unital
v0 = V[0]
V.remove(v0)
if mod(q,4)==1:
G = Graph([V,lambda y,z: not (S(v0,y)*S(y,z)*S(z,v0)).is_square()], loops=False)
else:
G = Graph([V,lambda y,z: (S(v0,y)*S(y,z)*S(z,v0)).is_square()], loops=False)
G.name("Taylor two-graph descendant SRG")
if clique_partition:
lines = [[t for t in V if t[0] + z * t[1] == 0]
for z in Fq if z]
return (G, lines, v0)
else:
return G
def TaylorTwographSRG(q):
r"""
constructing a strongly regular graph from Taylor's two-graph for `U_3(q)`, `q` odd
This is a strongly regular graph with parameters
`(v,k,\lambda,\mu)=(q^3+1, q(q^2+1)/2, (q^2+3)(q-1)/4, (q^2+1)(q+1)/4)`
in the Seidel switching class of
:func:`Taylor two-graph <sage.combinat.designs.twographs.taylor_twograph>`.
Details are in §7E of [BL1984]_.
INPUT:
- ``q`` -- a power of an odd prime number
.. SEEALSO::
* :meth:`~sage.graphs.graph_generators.GraphGenerators.TaylorTwographDescendantSRG`
EXAMPLES::
sage: t=graphs.TaylorTwographSRG(3); t
Taylor two-graph SRG: Graph on 28 vertices
sage: t.is_strongly_regular(parameters=True)
(28, 15, 6, 10)
"""
G, l, v0 = TaylorTwographDescendantSRG(q, clique_partition=True)
G.add_vertex(v0)
G.seidel_switching(sum(l[:(q**2+1)/2],[]))
G.name("Taylor two-graph SRG")
return G
def AhrensSzekeresGeneralizedQuadrangleGraph(q, dual=False):
r"""
Return the collinearity graph of the generalized quadrangle `AS(q)`, or of its dual
Let `q` be an odd prime power. `AS(q)` is a generalized quadrangle
(:wikipedia:`Generalized_quadrangle`) of
order `(q-1,q+1)`, see 3.1.5 in [PT2009]_. Its points are elements
of `F_q^3`, and lines are sets of size `q` of the form
* `\{ (\sigma, a, b) \mid \sigma\in F_q \}`
* `\{ (a, \sigma, b) \mid \sigma\in F_q \}`
* `\{ (c \sigma^2 - b \sigma + a, -2 c \sigma + b, \sigma) \mid \sigma\in F_q \}`,
where `a`, `b`, `c` are arbitrary elements of `F_q`.
INPUT:
- ``q`` -- a power of an odd prime number
- ``dual`` -- if ``False`` (default), return the collinearity graph of `AS(q)`.
Otherwise return the collinearity graph of the dual `AS(q)`.
EXAMPLES::
sage: g=graphs.AhrensSzekeresGeneralizedQuadrangleGraph(5); g
AS(5); GQ(4, 6): Graph on 125 vertices
sage: g.is_strongly_regular(parameters=True)
(125, 28, 3, 7)
sage: g=graphs.AhrensSzekeresGeneralizedQuadrangleGraph(5,dual=True); g
AS(5)*; GQ(6, 4): Graph on 175 vertices
sage: g.is_strongly_regular(parameters=True)
(175, 30, 5, 5)
"""
from sage.combinat.designs.incidence_structures import IncidenceStructure
p, k = is_prime_power(q,get_data=True)
if k==0 or p==2:
raise ValueError('q must be an odd prime power')
F = FiniteField(q, 'a')
L = []
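# enumerate the three families of q-point lines of AS(q) listed in the docstring;
# the collinearity graph is then derived from the incidence structure on them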
for a in F:
for b in F:
L.append(tuple((s, a, b) for s in F))
L.append(tuple((a, s, b) for s in F))
for c in F:
L.append(tuple((c*s**2 - b*s + a, -2*c*s + b, s) for s in F))
if dual:
G = IncidenceStructure(L).intersection_graph()
G.name('AS('+str(q)+')*; GQ'+str((q+1,q-1)))
else:
G = IncidenceStructure(L).dual().intersection_graph()
G.name('AS('+str(q)+'); GQ'+str((q-1,q+1)))
return G
def T2starGeneralizedQuadrangleGraph(q, dual=False, hyperoval=None, field=None, check_hyperoval=True):
r"""
Return the collinearity graph of the generalized quadrangle `T_2^*(q)`, or of its dual
Let `q=2^k` and `\Theta=PG(3,q)`. `T_2^*(q)` is a generalized quadrangle
(:wikipedia:`Generalized_quadrangle`)
of order `(q-1,q+1)`, see 3.1.3 in [PT2009]_. Fix a plane `\Pi \subset
\Theta` and a
`hyperoval <http://en.wikipedia.org/wiki/Oval_(projective_plane)#Even_q>`__
`O \subset \Pi`. The points of `T_2^*(q):=T_2^*(O)` are the points of `\Theta`
outside `\Pi`, and the lines are the lines of `\Theta` outside `\Pi`
that meet `\Pi` in a point of `O`.
INPUT:
- ``q`` -- a power of two
- ``dual`` -- if ``False`` (default), return the graph of `T_2^*(O)`.
Otherwise return the graph of the dual `T_2^*(O)`.
- ``hyperoval`` -- a hyperoval (i.e. a complete 2-arc; a set of points in the plane
meeting every line in 0 or 2 points) in the plane of points with 0th coordinate
0 in `PG(3,q)` over the field ``field``. Each point of ``hyperoval`` must be a length 4
vector over ``field`` with 1st non-0 coordinate equal to 1. By default, ``hyperoval`` and
``field`` are not specified, and are constructed on the fly. In particular, the ``hyperoval``
we build is the classical one, i.e. a conic together with the point of intersection of its
tangent lines.
- ``field`` -- an instance of a finite field of order `q`, must be provided
if ``hyperoval`` is provided.
- ``check_hyperoval`` -- (default: ``True``) if ``True``,
check ``hyperoval`` for correctness.
EXAMPLES:
using the built-in construction::
sage: g=graphs.T2starGeneralizedQuadrangleGraph(4); g
T2*(O,4); GQ(3, 5): Graph on 64 vertices
sage: g.is_strongly_regular(parameters=True)
(64, 18, 2, 6)
sage: g=graphs.T2starGeneralizedQuadrangleGraph(4,dual=True); g
T2*(O,4)*; GQ(5, 3): Graph on 96 vertices
sage: g.is_strongly_regular(parameters=True)
(96, 20, 4, 4)
supplying your own hyperoval::
sage: F=GF(4,'b')
sage: O=[vector(F,(0,0,0,1)),vector(F,(0,0,1,0))]+[vector(F, (0,1,x^2,x)) for x in F]
sage: g=graphs.T2starGeneralizedQuadrangleGraph(4, hyperoval=O, field=F); g
T2*(O,4); GQ(3, 5): Graph on 64 vertices
sage: g.is_strongly_regular(parameters=True)
(64, 18, 2, 6)
TESTS::
sage: F=GF(4,'b') # repeating a point...
sage: O=[vector(F,(0,1,0,0)),vector(F,(0,0,1,0))]+[vector(F, (0,1,x^2,x)) for x in F]
sage: graphs.T2starGeneralizedQuadrangleGraph(4, hyperoval=O, field=F)
Traceback (most recent call last):
...
RuntimeError: incorrect hyperoval size
sage: O=[vector(F,(0,1,1,0)),vector(F,(0,0,1,0))]+[vector(F, (0,1,x^2,x)) for x in F]
sage: graphs.T2starGeneralizedQuadrangleGraph(4, hyperoval=O, field=F)
Traceback (most recent call last):
...
RuntimeError: incorrect hyperoval
"""
from sage.combinat.designs.incidence_structures import IncidenceStructure
from sage.combinat.designs.block_design import ProjectiveGeometryDesign as PG
p, k = is_prime_power(q,get_data=True)
if k==0 or p!=2:
raise ValueError('q must be a power of 2')
if field is None:
F = FiniteField(q, 'a')
else:
F = field
Theta = PG(3, 1, F, point_coordinates=1)
Pi = set(x for x in Theta.ground_set() if x[0] == F.zero())
if hyperoval is None:
O = set(x for x in Pi
if (x[1] + x[2] * x[3] == 0) or
(x[1] == 1 and x[2] == x[3] == 0))
else:
for v in hyperoval:
v.set_immutable()
O = set(hyperoval)
if check_hyperoval:
if len(O) != q+2:
raise RuntimeError("incorrect hyperoval size")
for L in Theta.blocks():
if set(L).issubset(Pi):
if not len(O.intersection(L)) in [0,2]:
raise RuntimeError("incorrect hyperoval")
L = [[y for y in z if y not in O]
for z in [x for x in Theta.blocks() if len(O.intersection(x)) == 1]]
if dual:
G = IncidenceStructure(L).intersection_graph()
G.name('T2*(O,'+str(q)+')*; GQ'+str((q+1,q-1)))
else:
G = IncidenceStructure(L).dual().intersection_graph()
G.name('T2*(O,'+str(q)+'); GQ'+str((q-1,q+1)))
return G
def HaemersGraph(q, hyperoval=None, hyperoval_matching=None, field=None, check_hyperoval=True):
r"""
Return the Haemers graph obtained from `T_2^*(q)^*`
Let `q` be a power of 2. In Sect. 8.A of [BL1984]_ one finds a construction
of a strongly regular graph with parameters `(q^2(q+2),q^2+q-1,q-2,q)` from
the graph of `T_2^*(q)^*`, constructed by
:func:`~sage.graphs.graph_generators.GraphGenerators.T2starGeneralizedQuadrangleGraph`,
by redefining adjacencies in the way specified by an arbitrary ``hyperoval_matching``
of the points (i.e. partitioning into size two parts) of ``hyperoval`` defining
`T_2^*(q)^*`.
While [BL1984]_ gives the construction in geometric terms, it can be
formulated, and is implemented here, in purely graph-theoretic terms, as a re-adjustment of the
edges. Namely, `G=T_2^*(q)^*` has a partition
into `q+2` independent sets `I_k` of size `q^2` each. Each vertex in `I_j` is
adjacent to `q` vertices from `I_k`. Each `I_k` is paired to some `I_{k'}`,
according to ``hyperoval_matching``. One adds edges `(s,t)` for `s,t \in I_k` whenever
`s` and `t` are adjacent to some `u \in I_{k'}`, and removes all the edges
between `I_k` and `I_{k'}`.
INPUT:
- ``q`` -- a power of two
- ``hyperoval_matching`` -- if ``None`` (default), pair each `i`-th point of
``hyperoval`` with `(i+1)`-th. Otherwise, specifies the pairing
in the format `((i_1,i'_1),(i_2,i'_2),...)`.
- ``hyperoval`` -- a hyperoval defining `T_2^*(q)^*`. If ``None`` (default),
the classical hyperoval obtained from a conic is used. See the
documentation of
:func:`~sage.graphs.graph_generators.GraphGenerators.T2starGeneralizedQuadrangleGraph`,
for more information.
- ``field`` -- an instance of a finite field of order `q`, must be provided
if ``hyperoval`` is provided.
- ``check_hyperoval`` -- (default: ``True``) if ``True``, check
``hyperoval`` for correctness.
EXAMPLES:
using the built-in constructions::
sage: g=graphs.HaemersGraph(4); g
Haemers(4): Graph on 96 vertices
sage: g.is_strongly_regular(parameters=True)
(96, 19, 2, 4)
supplying your own hyperoval_matching::
sage: g=graphs.HaemersGraph(4,hyperoval_matching=((0,5),(1,4),(2,3))); g
Haemers(4): Graph on 96 vertices
sage: g.is_strongly_regular(parameters=True)
(96, 19, 2, 4)
TESTS::
sage: F=GF(4,'b') # repeating a point...
sage: O=[vector(F,(0,1,0,0)),vector(F,(0,0,1,0))]+[vector(F, (0,1,x^2,x)) for x in F]
sage: graphs.HaemersGraph(4, hyperoval=O, field=F)
Traceback (most recent call last):
...
RuntimeError: incorrect hyperoval size
sage: O=[vector(F,(0,1,1,0)),vector(F,(0,0,1,0))]+[vector(F, (0,1,x^2,x)) for x in F]
sage: graphs.HaemersGraph(4, hyperoval=O, field=F)
Traceback (most recent call last):
...
RuntimeError: incorrect hyperoval
sage: g=graphs.HaemersGraph(8); g # not tested (long time)
Haemers(8): Graph on 640 vertices
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(640, 71, 6, 8)
"""
from sage.modules.free_module_element import free_module_element as vector
from sage.rings.finite_rings.finite_field_constructor import GF
from itertools import combinations
p, k = is_prime_power(q, get_data=True)
if k == 0 or p != 2:
raise ValueError('q must be a power of 2')
if hyperoval_matching is None:
hyperoval_matching = [(2 * K + 1, 2 * K) for K in range(1 + q // 2)]
if field is None:
F = GF(q, 'a')
else:
F = field
# for q=8, 95% of CPU time taken by this function is spent in the following call
G = T2starGeneralizedQuadrangleGraph(q, field=F, dual=True, hyperoval=hyperoval, check_hyperoval=check_hyperoval)
def normalize(v): # make sure the 1st non-0 coordinate is 1.
d = next(x for x in v if x != F.zero())
return vector([x / d for x in v])
# build the partition into independent sets
P = [tuple(normalize(v[0] - v[1])) for v in G.vertices()]
O = list(set(P))
I_ks = {x:[] for x in range(q+2)} # the partition into I_k's
for i, Pi in enumerate(P):
I_ks[O.index(tuple(Pi))].append(i)
# perform the adjustment of the edges, as described.
G.relabel(range(G.order()))
cliques = []
for i,j in hyperoval_matching:
Pij = set(I_ks[i]+I_ks[j])
for v in Pij:
cliques.append(Pij.intersection(G.neighbors(v)))
G.delete_edges(G.edge_boundary(I_ks[i],I_ks[j])) # edges on (I_i,I_j)
G.add_edges(e for c in cliques for e in combinations(c,2))
G.name('Haemers('+str(q)+')')
return G
def CossidentePenttilaGraph(q):
r"""
Cossidente-Penttila `((q^3+1)(q+1)/2,(q^2+1)(q-1)/2,(q-3)/2,(q-1)^2/2)`-strongly regular graph
For each odd prime power `q`, one can partition the points of the `O_6^-(q)`-generalized
quadrangle `GQ(q,q^2)` into two parts, so that on any of them the induced subgraph of
the point graph of the GQ has parameters as above [CP2005]_.
Directly following the construction in [CP2005]_ is not efficient,
as one then needs to construct the dual `GQ(q^2,q)`. Thus we
describe here a more efficient approach that we came up with, following a suggestion by
T.Penttila. Namely, this partition is invariant
under the subgroup `H=\Omega_3(q^2)<O_6^-(q)`. We build the appropriate `H`, which
leaves the form `B(X,Y,Z)=XY+Z^2` invariant, and
pick up two orbits of `H` on the `F_q`-points. One of them is `B`-isotropic, and we
take the representative `(1:0:0)`. The other one corresponds to the points of
`PG(2,q^2)` that have all the lines on them either missing the conic specified by `B`, or
intersecting the conic in two points. We take `(1:1:e)` as the representative. It suffices
to pick `e` so that `e^2+1` is not a square in `F_{q^2}`. Indeed,
the conic can be viewed as the union of `\{(0:1:0)\}` and `\{(1:-t^2:t) | t \in F_{q^2}\}`.
The coefficients of a generic line on `(1:1:e)` are `[1:-1-eb:b]`, for `-1\neq eb`.
Thus, to make sure the intersection with the conic is always even, we need that the
discriminant of `1+(1+eb)t^2+tb=0` never vanishes, and this happens if and only if
`e^2+1` is not a square. Further, we need to adjust `B`, by multiplying it by appropriately
chosen `\nu`, so that `(1:1:e)` becomes isotropic under the relative trace norm
`\nu B(X,Y,Z)+(\nu B(X,Y,Z))^q`. The latter is used then to define the graph.
INPUT:
- ``q`` -- an odd prime power.
EXAMPLES:
For `q=3` one gets the Sims-Gewirtz graph. ::
sage: G=graphs.CossidentePenttilaGraph(3) # optional - gap_packages (grape)
sage: G.is_strongly_regular(parameters=True) # optional - gap_packages (grape)
(56, 10, 0, 2)
For `q>3` one gets new graphs. ::
sage: G=graphs.CossidentePenttilaGraph(5) # optional - gap_packages (grape)
sage: G.is_strongly_regular(parameters=True) # optional - gap_packages (grape)
(378, 52, 1, 8)
TESTS::
sage: G=graphs.CossidentePenttilaGraph(7) # optional - gap_packages (grape) # long time
sage: G.is_strongly_regular(parameters=True) # optional - gap_packages (grape) # long time
(1376, 150, 2, 18)
sage: graphs.CossidentePenttilaGraph(2)
Traceback (most recent call last):
...
ValueError: q(=2) must be an odd prime power
"""
p, k = is_prime_power(q,get_data=True)
if k==0 or p==2:
raise ValueError('q(={}) must be an odd prime power'.format(q))
from sage.features.gap import GapPackage
GapPackage("grape", spkg="gap_packages").require()
from sage.libs.gap.libgap import libgap
adj_list=libgap.function_factory("""function(q)
local z, e, sqo, so, G, nu, G1, G0, B, T, s, O1, O2, x;
LoadPackage("grape");
G0:=SO(3,q^2);
so:=GeneratorsOfGroup(G0);
G1:=Group(Comm(so[1],so[2]),Comm(so[1],so[3]),Comm(so[2],so[3]));
B:=InvariantBilinearForm(G0).matrix;
z:=Z(q^2); e:=z; sqo:=(q^2-1)/2;
if IsInt(sqo/Order(e^2+z^0)) then
e:=z^First([2..q^2-2], x-> not IsInt(sqo/Order(z^(2*x)+z^0)));
fi;
nu:=z^First([0..q^2-2], x->z^x*(e^2+z^0)+(z^x*(e^2+z^0))^q=0*z);
T:=function(x)
local r;
r:=nu*x*B*x;
return r+r^q;
end;
s:=Group([Z(q)*IdentityMat(3,GF(q))]);
O1:=Orbit(G1, Set(Orbit(s,z^0*[1,0,0])), OnSets);
O2:=Orbit(G1, Set(Orbit(s,z^0*[1,1,e])), OnSets);
G:=Graph(G1,Concatenation(O1,O2),OnSets,
function(x,y) return x<>y and 0*z=T(x[1]+y[1]); end);
return List([1..OrderGraph(G)],x->Adjacency(G,x));
end;""")
adj = adj_list(q) # for each vertex, we get the list of vertices it is adjacent to
G = Graph(((i,int(j-1))
for i,ni in enumerate(adj) for j in ni),
format='list_of_edges', multiedges=False)
G.name('CossidentePenttila('+str(q)+')')
return G
def Nowhere0WordsTwoWeightCodeGraph(q, hyperoval=None, field=None, check_hyperoval=True):
r"""
Return the subgraph of nowhere 0 words from two-weight code of projective plane hyperoval.
Let `q=2^k` and `\Pi=PG(2,q)`. Fix a
`hyperoval <http://en.wikipedia.org/wiki/Oval_(projective_plane)#Even_q>`__
`O \subset \Pi`. Let `V=F_q^3` and `C` the two-weight 3-dimensional linear code
over `F_q` with words `c(v)` obtained from `v\in V` by computing
.. MATH::
c(v)=(\langle v,o_1 \rangle,...,\langle v,o_{q+2} \rangle), o_j \in O.
`C` contains `q(q-1)^2/2` words without 0 entries. The subgraph of the strongly
regular graph of `C` induced on the latter words is also strongly regular,
assuming `q>4`. This is a construction due to A.E.Brouwer [Bro2016]_, and
leads to graphs with parameters also given by a construction in [HHL2009]_.
According to [Bro2016]_, these two constructions are likely to produce
isomorphic graphs.
INPUT:
- ``q`` -- a power of two
- ``hyperoval`` -- a hyperoval (i.e. a complete 2-arc; a set of points in the plane
meeting every line in 0 or 2 points) in `PG(2,q)` over the field ``field``.
Each point of ``hyperoval`` must be a length 3
vector over ``field`` with 1st non-0 coordinate equal to 1. By default, ``hyperoval`` and
``field`` are not specified, and are constructed on the fly. In particular, the ``hyperoval``
we build is the classical one, i.e. a conic together with the point of intersection of its
tangent lines.
- ``field`` -- an instance of a finite field of order `q`, must be provided
if ``hyperoval`` is provided.
- ``check_hyperoval`` -- (default: ``True``) if ``True``,
check ``hyperoval`` for correctness.
.. SEEALSO::
- :func:`~sage.graphs.strongly_regular_db.is_nowhere0_twoweight`
EXAMPLES:
using the built-in construction::
sage: g=graphs.Nowhere0WordsTwoWeightCodeGraph(8); g
Nowhere0WordsTwoWeightCodeGraph(8): Graph on 196 vertices
sage: g.is_strongly_regular(parameters=True)
(196, 60, 14, 20)
sage: g=graphs.Nowhere0WordsTwoWeightCodeGraph(16) # not tested (long time)
sage: g.is_strongly_regular(parameters=True) # not tested (long time)
(1800, 728, 268, 312)
supplying your own hyperoval::
sage: F=GF(8)
sage: O=[vector(F,(0,0,1)),vector(F,(0,1,0))]+[vector(F, (1,x^2,x)) for x in F]
sage: g=graphs.Nowhere0WordsTwoWeightCodeGraph(8,hyperoval=O,field=F); g
Nowhere0WordsTwoWeightCodeGraph(8): Graph on 196 vertices
sage: g.is_strongly_regular(parameters=True)
(196, 60, 14, 20)
TESTS::
sage: F=GF(8) # repeating a point...
sage: O=[vector(F,(1,0,0)),vector(F,(0,1,0))]+[vector(F, (1,x^2,x)) for x in F]
sage: graphs.Nowhere0WordsTwoWeightCodeGraph(8,hyperoval=O,field=F)
Traceback (most recent call last):
...
RuntimeError: incorrect hyperoval size
sage: O=[vector(F,(1,1,0)),vector(F,(0,1,0))]+[vector(F, (1,x^2,x)) for x in F]
sage: graphs.Nowhere0WordsTwoWeightCodeGraph(8,hyperoval=O,field=F)
Traceback (most recent call last):
...
RuntimeError: incorrect hyperoval
"""
from sage.combinat.designs.block_design import ProjectiveGeometryDesign as PG
from sage.matrix.constructor import matrix
p, k = is_prime_power(q,get_data=True)
if k==0 or p!=2:
raise ValueError('q must be a power of 2')
if k<3:
raise ValueError('q must be at least 8')
if field is None:
F = FiniteField(q, 'a')
else:
F = field
Theta = PG(2, 1, F, point_coordinates=1)
Pi = Theta.ground_set()
if hyperoval is None:
hyperoval = [x for x in Pi
if (x[0] + x[1] * x[2] == 0) or
(x[0] == 1 and x[1] == x[2] == 0)]
O = set(hyperoval)
else:
for v in hyperoval:
v.set_immutable()
O = set(hyperoval)
if check_hyperoval:
if len(O) != q+2:
raise RuntimeError("incorrect hyperoval size")
for L in Theta.blocks():
if set(L).issubset(Pi):
if not len(O.intersection(L)) in [0,2]:
raise RuntimeError("incorrect hyperoval")
M = matrix(hyperoval)
F_0 = F.zero()
C = [p for p in [M*x for x in F**3] if F_0 not in p]
for x in C:
x.set_immutable()
G = Graph([C, lambda x,y: not F.zero() in x+y])
G.name('Nowhere0WordsTwoWeightCodeGraph('+str(q)+')')
G.relabel()
return G
|
'''
TACO: Multi-sample transcriptome assembly from RNA-Seq
'''
import os
import numpy as np
import h5py
import matplotlib.pyplot as plt
from taco.lib.base import Strand, Exon
from taco.lib.dtypes import FLOAT_DTYPE
from taco.lib.splice_graph import SpliceGraph
from taco.lib.path_graph import PathGraphFactory, reconstruct_path
from taco.lib.cpathfinder import find_paths
from taco.lib.cchangepoint import mse as mse_cython
from taco.lib.changepoint import mse as mse_python, smooth, run_changepoint
from taco.lib.transfrag import Transfrag
from taco.test.base import read_single_locus
def test_mse():
a = np.zeros(100, dtype=FLOAT_DTYPE)
a[:50] = 10
a[50:] = 0
a += np.random.random(100)
mse_min_c, mse_i_c = mse_cython(a)
mse_min_py, mse_i_py = mse_python(a)
assert mse_i_c == 50
assert mse_i_c == mse_i_py
assert np.allclose(mse_min_c, mse_min_py, atol=0.01)
return
def test_trim_transfrags():
def make_ramp(strand, sign=1):
transfrags = []
chrom = 'chr1'
start = 1000
end = 1220
change_expr = 0.0
base_expr = 0.0
# "flat" part of expression landscape
expr = 1.0
for i in range(0, 50):
t = Transfrag(chrom=chrom, strand=strand,
_id='T1.%d' % i, sample_id='S%d' % i,
expr=expr, is_ref=False,
exons=[Exon(start, end)])
transfrags.append(t)
change_expr += expr
base_expr += expr
# "changing" area
i = 0
expr = 10.0
for pos in range(1100, 1120):
left, right = (start, pos) if sign < 0 else (pos, end)
t = Transfrag(chrom=chrom, strand=strand,
_id='T2.%d' % i, sample_id='S%d' % i,
expr=expr, is_ref=False,
exons=[Exon(left, right)])
transfrags.append(t)
change_expr += expr
i += 1
return chrom, start, end, strand, change_expr, base_expr, transfrags
# positive strand
tup = make_ramp(Strand.POS, sign=-1)
chrom, start, end, strand, change_expr, base_expr, transfrags = tup
sgraph = SpliceGraph.create(transfrags)
cps = run_changepoint(sgraph.expr_data, smooth_window_len=11)
assert len(cps) == 1
cp = cps[0]
assert cp.pos == 110
assert cp.foldchange < 0.5
assert cp.sign == -1
cp = cp._replace(pos=start + cp.pos,
start=start + cp.start,
end=start + cp.end)
# trim transfrags
sgraph._trim_change_point(cp)
expr_data_after = sgraph._compute_expression()
assert expr_data_after[0] == 250
assert expr_data_after[-1] == 50
assert expr_data_after[cp.index - 1] == 150
assert expr_data_after[cp.index] == base_expr
# now try SpliceGraph interface
tup = make_ramp(Strand.POS, sign=-1)
chrom, start, end, strand, change_expr, base_expr, transfrags = tup
sgraph = SpliceGraph.create(transfrags)
cps = sgraph.detect_change_points(smooth_window_len=11)
for cp in cps:
sgraph.apply_change_point(cp)
sgraph.recreate()
assert sgraph.expr_data[cp.index - 1] == 150
assert sgraph.expr_data[cp.index] == base_expr
assert cp.pos in sgraph.stop_sites
# negative strand should not affect change point
tup = make_ramp(Strand.NEG, sign=-1)
chrom, start, end, strand, left_expr, base_expr, transfrags = tup
sgraph = SpliceGraph.create(transfrags)
cps = sgraph.detect_change_points(smooth_window_len=11)
for cp in cps:
sgraph.apply_change_point(cp)
sgraph.recreate()
assert sgraph.expr_data[cp.index - 1] == 150
assert sgraph.expr_data[cp.index] == base_expr
assert cp.pos in sgraph.start_sites
# neg strand change in opposite direction
tup = make_ramp(Strand.NEG, sign=1)
chrom, start, end, strand, left_expr, base_expr, transfrags = tup
sgraph = SpliceGraph.create(transfrags)
cps = run_changepoint(sgraph.expr_data, smooth_window_len=11)
cp = cps[0]
assert cp.index == 110
assert cp.foldchange < 0.5
assert cp.sign == 1.0
cps = sgraph.detect_change_points(smooth_window_len=11)
cp = cps[0]
for cp in cps:
sgraph.apply_change_point(cp)
sgraph.recreate()
assert sgraph.expr_data[0] == 50
assert sgraph.expr_data[-1] == 250
assert sgraph.expr_data[cp.index - 1] == base_expr
assert sgraph.expr_data[cp.index] == 160
assert cp.pos in sgraph.stop_sites
# pos strand change in opposite direction
tup = make_ramp(Strand.POS, sign=1)
chrom, start, end, strand, left_expr, base_expr, transfrags = tup
sgraph = SpliceGraph.create(transfrags)
cps = run_changepoint(sgraph.expr_data, smooth_window_len=11)
cp = cps[0]
assert cp.index == 110
assert cp.foldchange < 0.5
assert cp.sign == 1.0
cps = sgraph.detect_change_points(smooth_window_len=11)
for cp in cps:
sgraph.apply_change_point(cp)
sgraph.recreate()
assert sgraph.expr_data[0] == 50
assert sgraph.expr_data[-1] == 250
assert sgraph.expr_data[cp.index - 1] == base_expr
assert sgraph.expr_data[cp.index] == 160
assert cp.pos in sgraph.start_sites
return
def test_trimming_to_zero_bug():
t_dict, locus = read_single_locus('change_point_bug.gtf')
transfrags_un = locus.get_transfrags(Strand.NA)
sgraph = SpliceGraph.create(transfrags_un)
cps = sgraph.detect_change_points(pval=0.1)
for cp in cps:
sgraph.apply_change_point(cp)
sgraph.recreate()
# get start/stop nodes
start_nodes, stop_nodes = sgraph.get_start_stop_nodes()
# convert to node intervals
start_nodes = set(sgraph.get_node_interval(n_id) for n_id in start_nodes)
stop_nodes = set(sgraph.get_node_interval(n_id) for n_id in stop_nodes)
assert Exon(173433532, 173435169) in stop_nodes
assert Exon(173433532, 173435169) in start_nodes
def test_ccle55_cuff_noc2l():
'''Locus containing from 55 CCLE samples assembled with Cufflinks'''
# pull SpliceGraph out of GTF
t_dict, locus = read_single_locus('noc2l_locus.gtf')
found_sgraph = False
for sgraph in locus.create_splice_graphs():
if (sgraph.chrom == 'chr1' and sgraph.start == 934942 and
sgraph.end == 976702 and sgraph.strand == Strand.NEG):
found_sgraph = True
break
assert found_sgraph
# examine specific change points
trim = False
pval = 0.1
fc_cutoff = 0.8
n1 = Exon(934942, 944589)
n1_id = sgraph.get_node_id(n1)
assert sgraph.G.is_stop[n1_id]
cps = sgraph.detect_change_points(pval=pval, fc_cutoff=fc_cutoff)
for cp in cps:
sgraph.apply_change_point(cp, trim=trim)
true_starts = set([964528, 957434, 959316])
true_stops = set([944278])
assert true_starts.issubset(sgraph.start_sites)
assert true_stops.issubset(sgraph.stop_sites)
# rebuild graph and examine start/stop nodes
sgraph.recreate()
# get start/stop nodes
start_nodes, stop_nodes = sgraph.get_start_stop_nodes()
# convert to node intervals
start_nodes = set(sgraph.get_node_interval(n_id) for n_id in start_nodes)
stop_nodes = set(sgraph.get_node_interval(n_id) for n_id in stop_nodes)
assert Exon(959214, 959316) in start_nodes
assert Exon(959316, 964528) in start_nodes
assert Exon(957273, 957434) in start_nodes
assert Exon(944278, 944321) in stop_nodes
# ensure best path uses change points
pgf = PathGraphFactory(sgraph)
pgraph, k = pgf.create_optimal()
paths = find_paths(pgraph, max_paths=1)
assert len(paths) == 1
path, expr = paths[0]
path = reconstruct_path(path, pgraph, sgraph)
assert path[0] == Exon(944321, 944800)
assert path[-1] == Exon(959214, 959316)
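# Illustrative aside (not part of the TACO test suite): the two-segment MSE
# changepoint idea exercised by test_mse(), written in plain numpy so the
# mechanics are visible. For each candidate split we add up the squared
# deviations of both halves from their own means; the split minimizing that
# total sits at the step in the signal. The helper name below is made up.
def _naive_mse_changepoint(a):
    best_i, best_mse = None, np.inf
    for i in range(1, len(a)):
        left, right = a[:i], a[i:]
        mse = ((left - left.mean()) ** 2).sum() + ((right - right.mean()) ** 2).sum()
        if mse < best_mse:
            best_i, best_mse = i, mse
    return best_mse, best_i
def test_naive_mse_changepoint_sketch():
    a = np.zeros(100, dtype=FLOAT_DTYPE)
    a[:50] = 10
    a += np.random.random(100)
    _, i = _naive_mse_changepoint(a)
    assert abs(i - 50) <= 1
    return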
|
"""Assignment 01: Project box to xy-plane
"""
from compas.geometry import Box
from compas.geometry import Frame
from compas.geometry import Projection
from compas.artists import Artist
from compas.datastructures import Mesh
from compas.geometry import Plane
# Define a Frame that is not at the origin and is slightly tilted relative to the world frame
frame = Frame([0.0, 3.0, 15], [2.0, 1.0, 1.0],[-1.0, 2.0, 1.0]) # random Frame
# Create a Box with that frame
box = Box(frame, 10, 10, 10)
# Create a Projection (can be orthogonal, parallel or perspective)
P = Projection.from_plane(Plane([0,0,0], [0,0,1])) # projection from top view
# Create a Mesh from the Box
mesh = Mesh.from_shape(box)
# Apply the Projection onto the mesh
mesh_projected = mesh.transformed(P)
# Create artists
artist1 = Artist(box)
artist2 = Artist(mesh_projected)
# Draw
artist1.draw()
artist2.draw_edges(color=[242,242,48]) # change the color of the edges, doesn't work
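# Hedged aside (illustration only; plain numpy, independent of the compas API):
# projecting orthogonally onto the plane z = 0 amounts to multiplying
# homogeneous coordinates by diag(1, 1, 0, 1) -- x and y are kept, z is zeroed.
# The sample point below is made up purely to show the effect of the Projection
# created above.
import numpy as np
P_xy = np.diag([1.0, 1.0, 0.0, 1.0])
sample_point = np.array([2.0, -3.0, 7.5, 1.0])  # homogeneous coordinates
print(P_xy @ sample_point)  # -> [ 2. -3.  0.  1.], the point flattened onto z = 0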
|
from rest_framework import serializers
from .models import gemsMeta
class AttributeSerializer(serializers.ModelSerializer):
class Meta:
model = gemsMeta
fields = ['name', 'description', 'image']
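# Minimal sketch (not part of the original module) of how this serializer could
# back a read-only endpoint. The viewset class and the router registration in
# the comment are hypothetical; only gemsMeta and AttributeSerializer come from
# the code above.
from rest_framework import viewsets
class AttributeViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = gemsMeta.objects.all()
    serializer_class = AttributeSerializer
# In urls.py one would typically register it with a router, e.g.
# router = routers.DefaultRouter(); router.register(r'attributes', AttributeViewSet)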
|
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)
torch.nn.LocalResponseNorm(size, alpha=0.0001, beta=0.75, k=1)
"""
from __future__ import absolute_import
import torch.nn as nn
from pytorch_benchmarks.models.model import Model
class AlexNet(Model):
implements = 'alexnet'
def __init__(self, params):
Model.check_parameters(
params,
{'name': 'AlexNet', 'input_shape':(3, 227, 227), 'num_classes': 1000,
'phase': 'training',
'dtype': 'float32'}
)
Model.__init__(self, params)
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=11, stride=4),
nn.ReLU(inplace=True),
nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(96, 256, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(256, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, self.num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
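# Hedged sanity check (illustration only; it bypasses the benchmark Model base
# class, whose full parameter contract is not shown here): the arithmetic behind
# the 256 * 6 * 6 flatten size. Starting from a 227x227 input, conv1 (k=11, s=4)
# yields 55, each 3x3/stride-2 max pool floors to 27 -> 13 -> 6, and the padded
# 3x3 convs keep the spatial size. The LRN layers are omitted below because they
# do not change shapes.
if __name__ == '__main__':
    import torch
    _features = nn.Sequential(
        nn.Conv2d(3, 96, kernel_size=11, stride=4), nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    )
    out = _features(torch.randn(1, 3, 227, 227))
    assert out.shape == (1, 256, 6, 6), out.shape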
|
#!/usr/bin/env python3
import tensorflow as tf
from flows.squeeze import Squeeze, Squeeze2DWithMask
class Squeeze2DTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.squeeze = Squeeze2DWithMask()
def testSqueezeWithoutAnything(self):
x = tf.random.normal([32, 16, 8])
y, mask = self.squeeze(x, inverse=False)
rev_x, mask = self.squeeze(y, inverse=True)
self.assertAllEqual(x, rev_x)
zaux = tf.random.normal([32, 16, 16])
y, mask, new_zaux = self.squeeze(x, zaux=zaux, inverse=False)
rev_x, mask, rev_zaux = self.squeeze(y, zaux=new_zaux, inverse=True)
self.assertAllEqual(x, rev_x)
self.assertAllEqual(zaux, rev_zaux)
class SqueezeTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.squeeze = Squeeze(with_zaux=True)
def testSqueezeWithoutAnything(self):
x = tf.random.normal([32, 16, 16, 8])
y = self.squeeze(x, inverse=False)
rev_x = self.squeeze(y, inverse=True)
self.assertAllEqual(x, rev_x)
zaux = tf.random.normal([32, 16, 16, 12])
y, new_zaux = self.squeeze(x, zaux=zaux, inverse=False)
rev_x, rev_zaux = self.squeeze(y, zaux=new_zaux, inverse=True)
self.assertAllEqual(x, rev_x)
self.assertAllEqual(zaux, rev_zaux)
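# Hedged aside (illustration only): the squeeze bijection used in flow models is
# the same kind of invertible space-to-depth rearrangement TensorFlow ships as
# tf.nn.space_to_depth / tf.nn.depth_to_space. The channel ordering inside
# flows.squeeze may differ; this only demonstrates the round-trip property the
# tests above assert.
class SpaceToDepthRoundTripTest(tf.test.TestCase):
    def testRoundTrip(self):
        x = tf.random.normal([32, 16, 16, 8])
        y = tf.nn.space_to_depth(x, block_size=2)      # [32, 8, 8, 32]
        rev_x = tf.nn.depth_to_space(y, block_size=2)  # back to [32, 16, 16, 8]
        self.assertAllEqual(x, rev_x)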
|
# -*- coding: utf-8 -*-
'''
Control virtual machines via Salt
'''
# Import python libs
from __future__ import absolute_import, print_function
import os.path
import logging
# Import Salt libs
import salt.client
import salt.utils.virt
import salt.utils.cloud
import salt.key
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
def _determine_host(data, omit=''):
'''
Determine what the most resource free host is based on the given data
'''
# This is just checking for the host with the most free RAM; this needs
# to be much more complicated.
host = ''
bestmem = 0
for hv_, comps in six.iteritems(data):
if hv_ == omit:
continue
if not isinstance(comps, dict):
continue
if comps.get('freemem', 0) > bestmem:
bestmem = comps['freemem']
host = hv_
return host
def _find_vm(name, data, quiet=False):
'''
Scan the query data for the named VM
'''
for hv_ in data:
# Check if data is a dict, and not '"virt.full_info" is not available.'
if not isinstance(data[hv_], dict):
continue
if name in data[hv_].get('vm_info', {}):
ret = {hv_: {name: data[hv_]['vm_info'][name]}}
if not quiet:
__jid_event__.fire_event({'data': ret, 'outputter': 'nested'}, 'progress')
return ret
return {}
def query(host=None, quiet=False, hyper=None):
'''
Query the virtual machines. When called without options all hosts
are detected and a full query is returned. A single host can be
passed in to specify an individual host to query.
'''
if hyper is not None:
salt.utils.warn_until(
'Carbon',
'Please use "host" instead of "hyper". The "hyper" argument will '
'be removed in the Carbon release of Salt'
)
host = hyper
if quiet:
log.warn('\'quiet\' is deprecated. Please migrate to --quiet')
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
try:
for info in client.cmd_iter('virtual:physical',
'virt.full_info', expr_form='grain'):
if not info:
continue
if not isinstance(info, dict):
continue
chunk = {}
id_ = next(six.iterkeys(info))
if host:
if host != id_:
continue
if not isinstance(info[id_], dict):
continue
if 'ret' not in info[id_]:
continue
if not isinstance(info[id_]['ret'], dict):
continue
chunk[id_] = info[id_]['ret']
ret.update(chunk)
if not quiet:
__jid_event__.fire_event({'data': chunk, 'outputter': 'virt_query'}, 'progress')
except SaltClientError as client_error:
print(client_error)
return ret
def list(host=None, quiet=False, hyper=None): # pylint: disable=redefined-builtin
'''
List the virtual machines on each host, this is a simplified query,
showing only the virtual machine names belonging to each host.
A single host can be passed in to specify an individual host
to list.
'''
if hyper is not None:
salt.utils.warn_until(
'Carbon',
'Please use "host" instead of "hyper". The "hyper" argument will '
'be removed in the Carbon release of Salt'
)
host = hyper
if quiet:
log.warn('\'quiet\' is deprecated. Please migrate to --quiet')
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
for info in client.cmd_iter('virtual:physical',
'virt.vm_info', expr_form='grain'):
if not info:
continue
if not isinstance(info, dict):
continue
chunk = {}
id_ = next(six.iterkeys(info))
if host:
if host != id_:
continue
if not isinstance(info[id_], dict):
continue
if 'ret' not in info[id_]:
continue
if not isinstance(info[id_]['ret'], dict):
continue
data = {}
for key, val in six.iteritems(info[id_]['ret']):
if val['state'] in data:
data[val['state']].append(key)
else:
data[val['state']] = [key]
chunk[id_] = data
ret.update(chunk)
if not quiet:
__jid_event__.fire_event({'data': chunk, 'outputter': 'nested'}, 'progress')
return ret
def next_host():
'''
Return the host to use for the next autodeployed VM. This queries
the available hosts and executes some math to determine the most
"available" next host.
'''
host = _determine_host(query(quiet=True))
print(host)
return host
def next_hyper():
'''
Return the host to use for the next autodeployed VM. This queries
the available hosts and executes some math to determine the most
"available" next host.
'''
salt.utils.warn_until(
'Carbon',
'Please use "host" instead of "hyper". The "hyper" argument will '
'be removed in the Carbon release of Salt'
)
return next_host()
def host_info(host=None):
'''
Return information about the host connected to this master
'''
data = query(host, quiet=True)
for id_ in data:
if 'vm_info' in data[id_]:
data[id_].pop('vm_info')
__jid_event__.fire_event({'data': data, 'outputter': 'nested'}, 'progress')
return data
def hyper_info(hyper=None):
'''
Return information about the host connected to this master
'''
salt.utils.warn_until(
'Carbon',
'Please use "host" instead of "hyper". The "hyper" argument will '
'be removed in the Carbon release of Salt'
)
return host_info(hyper)
def init(
name,
cpu,
mem,
image,
hyper=None,
hypervisor='kvm',
host=None,
seed=True,
nic='default',
install=True,
start=True,
disk='default',
saltenv='base',
enable_vnc=False):
'''
This routine is used to create a new virtual machine. It takes
a number of options to determine what the newly created virtual machine
will look like.
name
The mandatory name of the new virtual machine. The name option is
also the minion id, all minions must have an id.
cpu
The number of cpus to allocate to this new virtual machine.
mem
The amount of memory to allocate to this virtual machine. The number
is interpreted in megabytes.
image
The network location of the virtual machine image, commonly a location
on the salt fileserver, but http, https and ftp can also be used.
hypervisor
The hypervisor to use for the new virtual machine. Default is 'kvm'.
host
The host to use for the new virtual machine, if this is omitted
Salt will automatically detect what host to use.
seed
Set to False to prevent Salt from seeding the new virtual machine.
nic
The nic profile to use, defaults to the "default" nic profile which
assumes a single network interface per VM associated with the "br0"
bridge on the master.
install
Set to False to prevent Salt from installing a minion on the new VM
before it spins up.
disk
The disk profile to use
saltenv
The Salt environment to use
'''
if hyper is not None:
salt.utils.warn_until(
'Carbon',
'Please use "host" instead of "hyper". The "hyper" argument will '
'be removed in the Carbon release of Salt'
)
host = hyper
__jid_event__.fire_event({'message': 'Searching for hosts'}, 'progress')
data = query(host, quiet=True)
# Check if the name is already deployed
for node in data:
if 'vm_info' in data[node]:
if name in data[node]['vm_info']:
__jid_event__.fire_event(
{'message': 'Virtual machine {0} is already deployed'.format(name)},
'progress'
)
return 'fail'
if host is None:
host = _determine_host(data)
if host not in data or not host:
__jid_event__.fire_event(
{'message': 'Host {0} was not found'.format(host)},
'progress'
)
return 'fail'
pub_key = None
priv_key = None
if seed:
__jid_event__.fire_event({'message': 'Minion will be preseeded'}, 'progress')
priv_key, pub_key = salt.utils.cloud.gen_keys()
accepted_key = os.path.join(__opts__['pki_dir'], 'minions', name)
with salt.utils.fopen(accepted_key, 'w') as fp_:
fp_.write(pub_key)
client = salt.client.get_local_client(__opts__['conf_file'])
__jid_event__.fire_event(
{'message': 'Creating VM {0} on host {1}'.format(name, host)},
'progress'
)
try:
cmd_ret = client.cmd_iter(
host,
'virt.init',
[
name,
cpu,
mem,
image,
nic,
hypervisor,
start,
disk,
saltenv,
seed,
install,
pub_key,
priv_key,
enable_vnc,
],
timeout=600)
except SaltClientError as client_error:
# Fall through to ret error handling below
print(client_error)
ret = next(cmd_ret)
if not ret:
__jid_event__.fire_event({'message': 'VM {0} was not initialized.'.format(name)}, 'progress')
return 'fail'
for minion_id in ret:
if ret[minion_id]['ret'] is False:
print('VM {0} initialization failed. Returned error: {1}'.format(name, ret[minion_id]['ret']))
return 'fail'
__jid_event__.fire_event({'message': 'VM {0} initialized on host {1}'.format(name, host)}, 'progress')
return 'good'
def vm_info(name, quiet=False):
'''
Return the information on the named VM
'''
data = query(quiet=True)
return _find_vm(name, data, quiet)
def reset(name):
'''
Force power down and restart an existing VM
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
__jid_event__.fire_event({'message': 'Failed to find VM {0} to reset'.format(name)}, 'progress')
return 'fail'
host = next(six.iterkeys(data))
try:
cmd_ret = client.cmd_iter(
host,
'virt.reset',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Reset VM {0}'.format(name)}, 'progress')
except SaltClientError as client_error:
print(client_error)
return ret
def start(name):
'''
Start a named virtual machine
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
__jid_event__.fire_event({'message': 'Failed to find VM {0} to start'.format(name)}, 'progress')
return 'fail'
host = next(six.iterkeys(data))
if data[host][name]['state'] == 'running':
print('VM {0} is already running'.format(name))
return 'bad state'
try:
cmd_ret = client.cmd_iter(
host,
'virt.start',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} not started: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Started VM {0}'.format(name)}, 'progress')
return 'good'
def force_off(name):
'''
Force power down the named virtual machine
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
print('Failed to find VM {0} to destroy'.format(name))
return 'fail'
host = next(six.iterkeys(data))
if data[host][name]['state'] == 'shutdown':
print('VM {0} is already shutdown'.format(name))
return 'bad state'
try:
cmd_ret = client.cmd_iter(
host,
'virt.destroy',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be forced off: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Powered off VM {0}'.format(name)}, 'progress')
return 'good'
def purge(name, delete_key=True):
'''
Destroy the named VM
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
__jid_event__.fire_event({'error': 'Failed to find VM {0} to purge'.format(name)}, 'progress')
return 'fail'
host = next(six.iterkeys(data))
try:
cmd_ret = client.cmd_iter(
host,
'virt.purge',
[name, True],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be purged: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
if delete_key:
log.debug('Deleting key {0}'.format(name))
skey = salt.key.Key(__opts__)
skey.delete_key(name)
__jid_event__.fire_event({'message': 'Purged VM {0}'.format(name)}, 'progress')
return 'good'
def pause(name):
'''
Pause the named VM
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
__jid_event__.fire_event({'error': 'Failed to find VM {0} to pause'.format(name)}, 'progress')
return 'fail'
host = next(six.iterkeys(data))
if data[host][name]['state'] == 'paused':
__jid_event__.fire_event({'error': 'VM {0} is already paused'.format(name)}, 'progress')
return 'bad state'
try:
cmd_ret = client.cmd_iter(
host,
'virt.pause',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be paused: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Paused VM {0}'.format(name)}, 'progress')
return 'good'
def resume(name):
'''
Resume a paused VM
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
__jid_event__.fire_event({'error': 'Failed to find VM {0} to resume'.format(name)}, 'progress')
return 'not found'
host = next(six.iterkeys(data))
if data[host][name]['state'] != 'paused':
__jid_event__.fire_event({'error': 'VM {0} is not paused'.format(name)}, 'progress')
return 'bad state'
try:
cmd_ret = client.cmd_iter(
host,
'virt.resume',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be resumed: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Resumed VM {0}'.format(name)}, 'progress')
return 'good'
def migrate(name, target=''):
'''
Migrate a VM from one host to another. This routine will just start
the migration and display information on how to look up the progress.
'''
client = salt.client.get_local_client(__opts__['conf_file'])
data = query(quiet=True)
origin_data = _find_vm(name, data, quiet=True)
try:
origin_host = list(origin_data.keys())[0]
except IndexError:
__jid_event__.fire_event({'error': 'Named VM {0} was not found to migrate'.format(name)}, 'progress')
return ''
disks = origin_data[origin_host][name]['disks']
if not origin_data:
__jid_event__.fire_event({'error': 'Named VM {0} was not found to migrate'.format(name)}, 'progress')
return ''
if not target:
target = _determine_host(data, origin_host)
if target not in data:
__jid_event__.fire_event({'error': 'Target host {0} not found'.format(target)}, 'progress')
return ''
try:
client.cmd(target, 'virt.seed_non_shared_migrate', [disks, True])
jid = client.cmd_async(origin_host,
'virt.migrate_non_shared',
[name, target])
except SaltClientError as client_error:
return 'Virtual machine {0} could not be migrated: {1}'.format(name, client_error)
msg = ('The migration of virtual machine {0} to host {1} has begun, '
'and can be tracked via jid {2}. The ``salt-run virt.query`` '
'runner can also be used, the target VM will be shown as paused '
'until the migration is complete.').format(name, target, jid)
__jid_event__.fire_event({'message': msg}, 'progress')
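def _score_host(comps, mem_weight=1.0, cpu_weight=512.0):
    '''
    Hypothetical helper, not used above: _determine_host only compares free RAM,
    and its comment notes the logic "needs to be much more complicated". One
    richer option is a weighted score over free memory and idle CPUs, sketched
    here. The weights and the 'freecpu' key are assumptions for illustration;
    check the actual virt.full_info payload before relying on them.
    '''
    if not isinstance(comps, dict):
        return 0
    return mem_weight * comps.get('freemem', 0) + cpu_weight * comps.get('freecpu', 0)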
|
"""
WSGI config for fitlog_32927 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fitlog_32927.settings')
application = get_wsgi_application()
|
# coding: utf-8
from youtube_video_url.youtube import get_youtube_urls
|
from typing import Any, Callable, Tuple
from dace.dtypes import paramdec
class Replacements(object):
""" A management singleton for functions that replace existing function calls with either an SDFG or a node.
Used in the Python frontend to replace functions such as `numpy.ndarray` and operators such
as `Array.__add__`. """
_rep = {}
_oprep = {}
@staticmethod
def get(name, implementation='sdfg'):
""" Returns an implementation of a function. """
if (name, implementation) not in Replacements._rep:
return None
return Replacements._rep[(name, implementation)]
@staticmethod
def getop(classname: str,
optype: str,
implementation='sdfg',
otherclass: str = None):
""" Returns an implementation of an operator. """
if otherclass is None:
otherclass = classname
if (classname, otherclass, optype,
implementation) not in Replacements._oprep:
return None
return Replacements._oprep[(classname, otherclass, optype,
implementation)]
@paramdec
def replaces(func: Callable[..., Tuple[str]], name: str,
implementation='sdfg'):
""" Registers a replacement sub-SDFG generator for a function.
@param func: A function that receives an SDFG, SDFGState, and the original function
arguments, returning a tuple of array names to connect to the outputs.
@param name: Full name (pydoc-compliant, including package) of function to replace.
@param implementation: The default implementation to replace the SDFG with.
"""
Replacements._rep[(name, implementation)] = func
return func
@paramdec
def replaces_operator(func: Callable[[Any, Any, str, str], Tuple[str]],
classname: str,
optype: str,
implementation='sdfg',
otherclass: str = None):
""" Registers a replacement sub-SDFG generator for an operator.
@param func: A function that receives an SDFG, SDFGState, and the two operand array names,
returning a tuple of array names to connect to the outputs.
@param classname: The name of the class to implement the operator for (extends dace.Data).
@param optype: The type (as string) of the operator to replace (extends ast.operator).
@param implementation: The default implementation to replace the SDFG with.
@param otherclass: Optional argument defining operators for a second class that
differs from the first.
"""
if otherclass is None:
otherclass = classname
Replacements._oprep[(classname, otherclass, optype, implementation)] = func
return func
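# Minimal usage sketch (not part of dace itself): registering and looking up a
# replacement with the decorator defined above. The function name, the
# 'mylib.zeros' key and the trivial body are made up for illustration; a real
# replacement would build containers/tasklets in the given SDFG and return the
# names of the arrays holding its outputs.
@replaces('mylib.zeros')
def _replace_mylib_zeros(sdfg, state, shape):
    # ... construct the sub-SDFG here and return its output array names ...
    return ('__return',)
assert Replacements.get('mylib.zeros') is _replace_mylib_zeros
assert Replacements.get('mylib.zeros', implementation='pure') is None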
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn import metrics
data = pd.read_csv('E:\projectKNN/heart.csv')
data.head()
data.target.value_counts()
sns.countplot(x="target", data=data, palette="bwr")
plt.show()
sns.countplot(x='sex', data=data, palette="mako_r")
plt.xlabel("Sex (0 = female, 1= male)")
plt.show()
plt.scatter(x=data.age[data.target==1], y=data.thalach[(data.target==1)], c="green")
plt.scatter(x=data.age[data.target==0], y=data.thalach[(data.target==0)], c = 'black')
plt.legend(["Disease", "Not Disease"])
plt.xlabel("Age")
plt.ylabel("Maximum Heart Rate")
plt.show()
X = data.iloc[:,:-1].values
y = data.iloc[:,13].values
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.25, random_state= 0)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier = classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)
#check accuracy
accuracy = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: {:.2f}'.format(accuracy))
cm = confusion_matrix(y_test, y_pred)
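# Hedged follow-up sketch (not part of the original script): instead of fixing
# n_neighbors at 5, scan a range of k values on the same train/test split and
# keep the best-scoring one. The range 1..20 is an arbitrary choice for
# demonstration.
k_values = range(1, 21)
k_scores = []
for k in k_values:
    knn = KNeighborsClassifier(n_neighbors=k, metric='minkowski', p=2)
    knn.fit(X_train, y_train)
    k_scores.append(metrics.accuracy_score(y_test, knn.predict(X_test)))
best_k = k_values[int(np.argmax(k_scores))]
print('Best k: {} (accuracy {:.2f})'.format(best_k, max(k_scores)))
plt.plot(list(k_values), k_scores)
plt.xlabel('n_neighbors (k)')
plt.ylabel('Test accuracy')
plt.show()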
|
from .pyoptic import *
|
#header to convert outputs of model into boxes, scores, classes, valid
import tensorflow as tf
import numpy as np
def YoloV4Header(num_classes, anchorlist, mask, strides,
max_outputs, iou_threshold, score_threshold,inputs):
boxes, objects, classes = [], [], []
dtype = inputs[0].dtype
for i, logits in enumerate(inputs):
print(i,mask[i])
stride = strides[i]
anchors = anchorlist[mask[i]]
x_shape = tf.shape(logits)
logits = tf.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5))
box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
obj = tf.sigmoid(obj)
cls = tf.sigmoid(cls)
anchors = anchors.astype(np.float32)
grid_shape = x_shape[1:3]
# print(grid_shape)
grid_h, grid_w = grid_shape[0], grid_shape[1]
# print(grid_h,tf.range(grid_h))
grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, dtype)) * stride
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2.
box_x2y2 = box_xy + box_wh / 2.
box = tf.concat([box_x1y1, box_x2y2], axis=-1)
boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))
boxes = tf.concat(boxes, axis=1)
objects = tf.concat(objects, axis=1)
classes = tf.concat(classes, axis=1)
scores = objects * classes
boxes, scores, classes, valid = tf.image.combined_non_max_suppression(
boxes=boxes,
scores=scores,
max_output_size_per_class=max_outputs,
max_total_size=max_outputs,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
clip_boxes=False
)
return boxes, scores, classes, valid
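# Hedged usage sketch (illustration only): feeding the header dummy logits with
# the shapes a YOLOv4-style backbone would emit for a 416x416 input. The anchor
# sizes, masks, strides and thresholds are the commonly quoted YOLOv4 defaults,
# used here purely as example values.
if __name__ == '__main__':
    num_classes = 80
    anchorlist = np.array([[12, 16], [19, 36], [40, 28],
                           [36, 75], [76, 55], [72, 146],
                           [142, 110], [192, 243], [459, 401]], dtype=np.float32)
    mask = [np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8])]
    strides = [8, 16, 32]
    dummy_logits = [tf.random.normal([1, g, g, 3 * (num_classes + 5)])
                    for g in (52, 26, 13)]
    boxes, scores, classes, valid = YoloV4Header(
        num_classes, anchorlist, mask, strides,
        max_outputs=100, iou_threshold=0.45, score_threshold=0.25,
        inputs=dummy_logits)
    print(boxes.shape, scores.shape, classes.shape, valid.shape)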
|
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Propose a natural language on top of cp_model_pb2 python proto.
This file implements a easy-to-use API on top of the cp_model_pb2 protobuf
defined in ../ .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import time
from six import iteritems
from ortools.sat import cp_model_pb2
from ortools.sat import sat_parameters_pb2
from ortools.sat.python import cp_model_helper
from ortools.sat import pywrapsat
# The classes below allow linear expressions to be expressed naturally with the
# usual arithmetic operators +-*/ and with constant numbers, which makes the
# python API very intuitive. See ../samples/*.py for examples.
INT_MIN = -9223372036854775808 # hardcoded to be platform independent.
INT_MAX = 9223372036854775807
INT32_MAX = 2147483647
INT32_MIN = -2147483648
# Cp Solver status (exported to avoid importing cp_model_pb2).
UNKNOWN = cp_model_pb2.UNKNOWN
MODEL_INVALID = cp_model_pb2.MODEL_INVALID
FEASIBLE = cp_model_pb2.FEASIBLE
INFEASIBLE = cp_model_pb2.INFEASIBLE
OPTIMAL = cp_model_pb2.OPTIMAL
# Variable selection strategy
CHOOSE_FIRST = cp_model_pb2.DecisionStrategyProto.CHOOSE_FIRST
CHOOSE_LOWEST_MIN = cp_model_pb2.DecisionStrategyProto.CHOOSE_LOWEST_MIN
CHOOSE_HIGHEST_MAX = cp_model_pb2.DecisionStrategyProto.CHOOSE_HIGHEST_MAX
CHOOSE_MIN_DOMAIN_SIZE = (
cp_model_pb2.DecisionStrategyProto.CHOOSE_MIN_DOMAIN_SIZE)
CHOOSE_MAX_DOMAIN_SIZE = (
cp_model_pb2.DecisionStrategyProto.CHOOSE_MAX_DOMAIN_SIZE)
# Domain reduction strategy
SELECT_MIN_VALUE = cp_model_pb2.DecisionStrategyProto.SELECT_MIN_VALUE
SELECT_MAX_VALUE = cp_model_pb2.DecisionStrategyProto.SELECT_MAX_VALUE
SELECT_LOWER_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_LOWER_HALF
SELECT_UPPER_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_UPPER_HALF
# Search branching
AUTOMATIC_SEARCH = sat_parameters_pb2.SatParameters.AUTOMATIC_SEARCH
FIXED_SEARCH = sat_parameters_pb2.SatParameters.FIXED_SEARCH
PORTFOLIO_SEARCH = sat_parameters_pb2.SatParameters.PORTFOLIO_SEARCH
LP_SEARCH = sat_parameters_pb2.SatParameters.LP_SEARCH
def DisplayBounds(bounds):
"""Displays a flattened list of intervals."""
out = ''
for i in range(0, len(bounds), 2):
if i != 0:
out += ', '
if bounds[i] == bounds[i + 1]:
out += str(bounds[i])
else:
out += str(bounds[i]) + '..' + str(bounds[i + 1])
return out
def ShortName(model, i):
"""Returns a short name of an integer variable, or its negation."""
if i < 0:
return 'Not(%s)' % ShortName(model, -i - 1)
v = model.variables[i]
if v.name:
return v.name
elif len(v.domain) == 2 and v.domain[0] == v.domain[1]:
return str(v.domain[0])
else:
return '[%s]' % DisplayBounds(v.domain)
class LinearExpression(object):
"""Holds an integer linear expression.
A linear expression is built from integer constants and variables.
x + 2 * (y - z + 1) is one such linear expression, and can be written that
way directly in Python, provided x, y, and z are integer variables.
Linear expressions are used in two places in the cp_model.
When used with equality and inequality operators, they create linear
inequalities that can be added to the model as in:
model.Add(x + 2 * y <= 5)
model.Add(sum(array_of_vars) == 5)
Linear expressions can also be used to specify the objective of the model.
model.Minimize(x + 2 * y + z)
"""
def GetVarValueMap(self):
"""Scan the expression, and return a list of (var_coef_map, constant)."""
coeffs = collections.defaultdict(int)
constant = 0
to_process = [(self, 1)]
while to_process: # Flatten to avoid recursion.
expr, coef = to_process.pop()
if isinstance(expr, _ProductCst):
to_process.append((expr.Expression(),
coef * expr.Coefficient()))
elif isinstance(expr, _SumArray):
for e in expr.Array():
to_process.append((e, coef))
constant += expr.Constant() * coef
elif isinstance(expr, IntVar):
coeffs[expr] += coef
elif isinstance(expr, _NotBooleanVariable):
raise TypeError(
'Cannot interpret literals in a linear expression.')
else:
raise TypeError('Unrecognized linear expression: ' + str(expr))
return coeffs, constant
def __hash__(self):
return object.__hash__(self)
def __add__(self, expr):
return _SumArray([self, expr])
def __radd__(self, arg):
return _SumArray([self, arg])
def __sub__(self, expr):
return _SumArray([self, -expr])
def __rsub__(self, arg):
return _SumArray([-self, arg])
def __mul__(self, arg):
if isinstance(arg, numbers.Integral):
if arg == 1:
return self
cp_model_helper.AssertIsInt64(arg)
return _ProductCst(self, arg)
else:
raise TypeError('Not an integer linear expression: ' + str(arg))
def __rmul__(self, arg):
cp_model_helper.AssertIsInt64(arg)
if arg == 1:
return self
return _ProductCst(self, arg)
def __div__(self, _):
raise NotImplementedError('LinearExpression.__div__')
def __truediv__(self, _):
raise NotImplementedError('LinearExpression.__truediv__')
def __mod__(self, _):
raise NotImplementedError('LinearExpression.__mod__')
def __neg__(self):
return _ProductCst(self, -1)
def __eq__(self, arg):
if arg is None:
return False
if isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
return LinearInequality(self, [arg, arg])
else:
return LinearInequality(self - arg, [0, 0])
def __ge__(self, arg):
if isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
return LinearInequality(self, [arg, INT_MAX])
else:
return LinearInequality(self - arg, [0, INT_MAX])
def __le__(self, arg):
if isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
return LinearInequality(self, [INT_MIN, arg])
else:
return LinearInequality(self - arg, [INT_MIN, 0])
def __lt__(self, arg):
if isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
if arg == INT_MIN:
raise ArithmeticError('< INT_MIN is not supported')
return LinearInequality(
self, [INT_MIN, cp_model_helper.CapInt64(arg - 1)])
else:
return LinearInequality(self - arg, [INT_MIN, -1])
def __gt__(self, arg):
if isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
if arg == INT_MAX:
raise ArithmeticError('> INT_MAX is not supported')
return LinearInequality(
self, [cp_model_helper.CapInt64(arg + 1), INT_MAX])
else:
return LinearInequality(self - arg, [1, INT_MAX])
def __ne__(self, arg):
if arg is None:
return True
if isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
if arg == INT_MAX:
return LinearInequality(self, [INT_MIN, INT_MAX - 1])
elif arg == INT_MIN:
return LinearInequality(self, [INT_MIN + 1, INT_MAX])
else:
return LinearInequality(self, [
INT_MIN,
cp_model_helper.CapInt64(arg - 1),
cp_model_helper.CapInt64(arg + 1), INT_MAX
])
else:
return LinearInequality(self - arg, [INT_MIN, -1, 1, INT_MAX])
class _ProductCst(LinearExpression):
"""Represents the product of a LinearExpression by a constant."""
def __init__(self, expr, coef):
cp_model_helper.AssertIsInt64(coef)
if isinstance(expr, _ProductCst):
self.__expr = expr.Expression()
self.__coef = expr.Coefficient() * coef
else:
self.__expr = expr
self.__coef = coef
def __str__(self):
if self.__coef == -1:
return '-' + str(self.__expr)
else:
return '(' + str(self.__coef) + ' * ' + str(self.__expr) + ')'
def __repr__(self):
return 'ProductCst(' + repr(self.__expr) + ', ' + repr(
self.__coef) + ')'
def Coefficient(self):
return self.__coef
def Expression(self):
return self.__expr
class _SumArray(LinearExpression):
"""Represents the sum of a list of LinearExpression and a constant."""
def __init__(self, array):
self.__array = []
self.__constant = 0
for x in array:
if isinstance(x, numbers.Integral):
cp_model_helper.AssertIsInt64(x)
self.__constant += x
elif isinstance(x, LinearExpression):
self.__array.append(x)
else:
raise TypeError('Not a linear expression: ' + str(x))
def __str__(self):
if self.__constant == 0:
return '({})'.format(' + '.join(map(str, self.__array)))
else:
return '({} + {})'.format(' + '.join(map(str, self.__array)),
self.__constant)
def __repr__(self):
return 'SumArray({}, {})'.format(', '.join(map(repr, self.__array)),
self.__constant)
def Array(self):
return self.__array
def Constant(self):
return self.__constant
class IntVar(LinearExpression):
"""An integer variable.
An IntVar is an object that can take on any integer value within defined
ranges. Variables appear in constraints like:
x + y >= 5
AllDifferent([x, y, z])
Solving a model is equivalent to finding, for each variable, a single value
from the set of initial values (called the initial domain), such that the
model is feasible, or optimal if you provided an objective function.
"""
def __init__(self, model, bounds, name):
"""See CpModel.NewIntVar below."""
self.__model = model
self.__index = len(model.variables)
self.__var = model.variables.add()
self.__var.domain.extend(bounds)
self.__var.name = name
self.__negation = None
def Index(self):
return self.__index
def __str__(self):
return self.__var.name
def __repr__(self):
return '%s(%s)' % (self.__var.name, DisplayBounds(self.__var.domain))
def Name(self):
return self.__var.name
def Not(self):
"""Returns the negation of a Boolean variable.
This method implements the logical negation of a Boolean variable.
        It is only valid if the variable has a Boolean domain (0 or 1).
        Note that this method is an involution: x.Not().Not() == x.
"""
for bound in self.__var.domain:
if bound < 0 or bound > 1:
raise TypeError(
'Cannot call Not on a non boolean variable: %s' % self)
if not self.__negation:
self.__negation = _NotBooleanVariable(self)
return self.__negation
class _NotBooleanVariable(LinearExpression):
"""Negation of a boolean variable."""
def __init__(self, boolvar):
self.__boolvar = boolvar
def Index(self):
return -self.__boolvar.Index() - 1
def Not(self):
return self.__boolvar
def __str__(self):
return 'not(%s)' % str(self.__boolvar)
class LinearInequality(object):
"""Represents a linear constraint: lb <= expression <= ub.
The only use of this class is to be added to the CpModel through
CpModel.Add(expression), as in:
model.Add(x + 2 * y -1 >= z)
"""
def __init__(self, expr, bounds):
self.__expr = expr
self.__bounds = bounds
def __str__(self):
if len(self.__bounds) == 2:
lb = self.__bounds[0]
ub = self.__bounds[1]
if lb > INT_MIN and ub < INT_MAX:
if lb == ub:
return str(self.__expr) + ' == ' + str(lb)
else:
return str(lb) + ' <= ' + str(
self.__expr) + ' <= ' + str(ub)
elif lb > INT_MIN:
return str(self.__expr) + ' >= ' + str(lb)
elif ub < INT_MAX:
return str(self.__expr) + ' <= ' + str(ub)
else:
return 'True (unbounded expr ' + str(self.__expr) + ')'
else:
return str(self.__expr) + ' in [' + DisplayBounds(
self.__bounds) + ']'
def Expression(self):
return self.__expr
def Bounds(self):
return self.__bounds
class Constraint(object):
"""Base class for constraints.
Constraints are built by the CpModel through the Add<XXX> methods.
Once created by the CpModel class, they are automatically added to the model.
The purpose of this class is to allow specification of enforcement literals
for this constraint.
b = model.BoolVar('b')
x = model.IntVar(0, 10, 'x')
y = model.IntVar(0, 10, 'y')
model.Add(x + 2 * y == 5).OnlyEnforceIf(b.Not())
"""
def __init__(self, constraints):
self.__index = len(constraints)
self.__constraint = constraints.add()
def OnlyEnforceIf(self, boolvar):
"""Adds an enforcement literal to the constraint.
Args:
boolvar: A boolean literal or a list of boolean literals.
Returns:
self.
This method adds one or more literals (that is a boolean variable or its
negation) as enforcement literals. The conjunction of all these literals
decides whether the constraint is active or not. It acts as an
implication, so if the conjunction is true, it implies that the constraint
must be enforced. If it is false, then the constraint is ignored.
The following constraints support enforcement literals:
bool or, bool and, and any linear constraints support any number of
enforcement literals.
"""
if isinstance(boolvar, numbers.Integral) and boolvar == 1:
# Always true. Do nothing.
pass
elif isinstance(boolvar, list):
for b in boolvar:
if isinstance(b, numbers.Integral) and b == 1:
pass
else:
self.__constraint.enforcement_literal.append(b.Index())
else:
self.__constraint.enforcement_literal.append(boolvar.Index())
return self
def Index(self):
return self.__index
def ConstraintProto(self):
return self.__constraint
class IntervalVar(object):
"""Represents a Interval variable.
An interval variable is both a constraint and a variable. It is defined by
three integer variables: start, size, and end.
It is a constraint because, internally, it enforces that start + size == end.
It is also a variable as it can appear in specific scheduling constraints:
NoOverlap, NoOverlap2D, Cumulative.
Optionally, an enforcement literal can be added to this
constraint. This enforcement literal is understood by the same constraints.
These constraints ignore interval variables with enforcement literals assigned
to false. Conversely, these constraints will also set these enforcement
literals to false if they cannot fit these intervals into the schedule.
"""
def __init__(self, model, start_index, size_index, end_index,
is_present_index, name):
self.__model = model
self.__index = len(model.constraints)
self.__ct = self.__model.constraints.add()
self.__ct.interval.start = start_index
self.__ct.interval.size = size_index
self.__ct.interval.end = end_index
if is_present_index is not None:
self.__ct.enforcement_literal.append(is_present_index)
if name:
self.__ct.name = name
def Index(self):
return self.__index
def __str__(self):
return self.__ct.name
def __repr__(self):
interval = self.__ct.interval
if self.__ct.enforcement_literal:
return '%s(start = %s, size = %s, end = %s, is_present = %s)' % (
self.__ct.name, ShortName(self.__model, interval.start),
ShortName(self.__model, interval.size),
ShortName(self.__model, interval.end),
ShortName(self.__model, self.__ct.enforcement_literal[0]))
else:
return '%s(start = %s, size = %s, end = %s)' % (
self.__ct.name, ShortName(self.__model, interval.start),
ShortName(self.__model, interval.size),
ShortName(self.__model, interval.end))
def Name(self):
return self.__ct.name
class CpModel(object):
"""Wrapper class around the cp_model proto.
This class provides two types of methods:
- NewXXX to create integer, boolean, or interval variables.
- AddXXX to create new constraints and add them to the model.
"""
def __init__(self):
self.__model = cp_model_pb2.CpModelProto()
self.__constant_map = {}
self.__optional_constant_map = {}
# Integer variable.
def NewIntVar(self, lb, ub, name):
"""Creates an integer variable with domain [lb, ub]."""
return IntVar(self.__model, [lb, ub], name)
def NewEnumeratedIntVar(self, bounds, name):
"""Creates an integer variable with an enumerated domain.
Args:
bounds: A flattened list of disjoint intervals.
name: The name of the variable.
Returns:
a variable whose domain is union[bounds[2*i]..bounds[2*i + 1]].
To create a variable with domain [1, 2, 3, 5, 7, 8], pass in the
array [1, 3, 5, 5, 7, 8].
"""
return IntVar(self.__model, bounds, name)
def NewBoolVar(self, name):
"""Creates a 0-1 variable with the given name."""
return IntVar(self.__model, [0, 1], name)
# Integer constraints.
def AddLinearConstraint(self, terms, lb, ub):
"""Adds the constraints lb <= sum(terms) <= ub, where term = (var, coef)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
for t in terms:
if not isinstance(t[0], IntVar):
raise TypeError('Wrong argument' + str(t))
cp_model_helper.AssertIsInt64(t[1])
model_ct.linear.vars.append(t[0].Index())
model_ct.linear.coeffs.append(t[1])
model_ct.linear.domain.extend([lb, ub])
return ct
def AddSumConstraint(self, variables, lb, ub):
"""Adds the constraints lb <= sum(variables) <= ub."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
for v in variables:
model_ct.linear.vars.append(v.Index())
model_ct.linear.coeffs.append(1)
model_ct.linear.domain.extend([lb, ub])
return ct
def AddLinearConstraintWithBounds(self, terms, bounds):
"""Adds the constraints sum(terms) in bounds, where term = (var, coef)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
for t in terms:
if not isinstance(t[0], IntVar):
raise TypeError('Wrong argument' + str(t))
cp_model_helper.AssertIsInt64(t[1])
model_ct.linear.vars.append(t[0].Index())
model_ct.linear.coeffs.append(t[1])
model_ct.linear.domain.extend(bounds)
return ct
def Add(self, ct):
"""Adds a LinearInequality to the model."""
if isinstance(ct, LinearInequality):
coeffs_map, constant = ct.Expression().GetVarValueMap()
bounds = [cp_model_helper.CapSub(x, constant) for x in ct.Bounds()]
return self.AddLinearConstraintWithBounds(
iteritems(coeffs_map), bounds)
elif ct and isinstance(ct, bool):
pass # Nothing to do, was already evaluated to true.
elif not ct and isinstance(ct, bool):
return self.AddBoolOr([]) # Evaluate to false.
else:
raise TypeError('Not supported: CpModel.Add(' + str(ct) + ')')
def AddAllDifferent(self, variables):
"""Adds AllDifferent(variables).
This constraint forces all variables to have different values.
Args:
variables: a list of integer variables.
Returns:
An instance of the Constraint class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.all_diff.vars.extend(
[self.GetOrMakeIndex(x) for x in variables])
return ct
def AddElement(self, index, variables, target):
"""Adds the element constraint: variables[index] == target."""
if not variables:
raise ValueError('AddElement expects a non empty variables array')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.element.index = self.GetOrMakeIndex(index)
model_ct.element.vars.extend(
[self.GetOrMakeIndex(x) for x in variables])
model_ct.element.target = self.GetOrMakeIndex(target)
return ct
def AddCircuit(self, arcs):
"""Adds Circuit(arcs).
Adds a circuit constraint from a sparse list of arcs that encode the graph.
A circuit is a unique Hamiltonian path in a subgraph of the total
graph. In case a node 'i' is not in the path, then there must be a
loop arc 'i -> i' associated with a true literal. Otherwise
this constraint will fail.
Args:
arcs: a list of arcs. An arc is a tuple (source_node, destination_node,
literal). The arc is selected in the circuit if the literal is true.
Both source_node and destination_node must be integer value between 0
and the number of nodes - 1.
Returns:
An instance of the Constraint class.
Raises:
ValueError: If the list of arc is empty.
"""
if not arcs:
raise ValueError('AddCircuit expects a non empty array of arcs')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
for arc in arcs:
cp_model_helper.AssertIsInt32(arc[0])
cp_model_helper.AssertIsInt32(arc[1])
lit = self.GetOrMakeBooleanIndex(arc[2])
model_ct.circuit.tails.append(arc[0])
model_ct.circuit.heads.append(arc[1])
model_ct.circuit.literals.append(lit)
return ct
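    # Illustrative sketch (not part of the original API): a 3-node circuit in
    # which node 2 may be skipped via a self-loop literal. The literal names
    # below are hypothetical and would be created with NewBoolVar on the model.
    #
    #   arcs = [(0, 1, go_01), (1, 0, go_10), (1, 2, go_12),
    #           (2, 0, go_20), (2, 2, skip_2)]
    #   model.AddCircuit(arcs)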
def AddAllowedAssignments(self, variables, tuples_list):
"""Adds AllowedAssignments(variables, tuples_list).
An AllowedAssignments constraint is a constraint on an array of variables
that forces, when all variables are fixed to a single value, that the
corresponding list of values is equal to one of the tuple of the
tuple_list.
Args:
variables: A list of variables.
tuples_list: A list of admissible tuples. Each tuple must have the same
length as the variables, and the ith value of a tuple corresponds to the
ith variable.
Returns:
An instance of the Constraint class.
Raises:
TypeError: If a tuple does not have the same size as the list of
variables.
ValueError: If the array of variables is empty.
"""
if not variables:
raise ValueError(
'AddAllowedAssignments expects a non empty variables '
'array')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.table.vars.extend([self.GetOrMakeIndex(x) for x in variables])
arity = len(variables)
for t in tuples_list:
if len(t) != arity:
raise TypeError('Tuple ' + str(t) + ' has the wrong arity')
for v in t:
cp_model_helper.AssertIsInt64(v)
            model_ct.table.values.extend(t)
        return ct
def AddForbiddenAssignments(self, variables, tuples_list):
"""Adds AddForbiddenAssignments(variables, [tuples_list]).
A ForbiddenAssignments constraint is a constraint on an array of variables
where the list of impossible combinations is provided in the tuples list.
Args:
variables: A list of variables.
tuples_list: A list of forbidden tuples. Each tuple must have the same
length as the variables, and the ith value of a tuple corresponds to the
ith variable.
Returns:
An instance of the Constraint class.
Raises:
TypeError: If a tuple does not have the same size as the list of
variables.
ValueError: If the array of variables is empty.
"""
if not variables:
raise ValueError(
'AddForbiddenAssignments expects a non empty variables '
'array')
        index = len(self.__model.constraints)
        ct = self.AddAllowedAssignments(variables, tuples_list)
        self.__model.constraints[index].table.negated = True
        return ct
def AddAutomaton(self, transition_variables, starting_state, final_states,
transition_triples):
"""Adds an automaton constraint.
An automaton constraint takes a list of variables (of size n), an initial
state, a set of final states, and a set of transitions. A transition is a
triplet ('tail', 'transition', 'head'), where 'tail' and 'head' are states,
        and 'transition' is the label of an arc from 'tail' to 'head',
        corresponding to the value of one variable in the list of variables.
        This automaton will be unrolled into a flow with n + 1 phases. Each phase
        contains the possible states of the automaton. The first phase contains the
        initial state. The last phase contains the final states.
        Between two consecutive phases i and i + 1, the automaton creates a set of
        arcs. For each transition (tail, transition, head), it will add an arc from
        the state 'tail' of phase i to the state 'head' of phase i + 1. This arc is
        labeled by the value 'transition' of the variables 'variables[i]'. That is,
this arc can only be selected if 'variables[i]' is assigned the value
'transition'.
A feasible solution of this constraint is an assignment of variables such
that, starting from the initial state in phase 0, there is a path labeled by
the values of the variables that ends in one of the final states in the
final phase.
Args:
transition_variables: A non empty list of variables whose values
            correspond to the labels of the arcs traversed by the automaton.
          starting_state: The initial state of the automaton.
          final_states: A non empty list of admissible final states.
          transition_triples: A list of transitions for the automaton, in the
following format (current_state, variable_value, next_state).
Returns:
An instance of the Constraint class.
Raises:
ValueError: if transition_variables, final_states, or transition_triples
are empty.
"""
if not transition_variables:
raise ValueError(
                'AddAutomaton expects a non empty transition_variables '
                'array')
        if not final_states:
            raise ValueError('AddAutomaton expects some final states')
        if not transition_triples:
            raise ValueError('AddAutomaton expects some transition triples')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.automata.vars.extend(
[self.GetOrMakeIndex(x) for x in transition_variables])
cp_model_helper.AssertIsInt64(starting_state)
model_ct.automata.starting_state = starting_state
for v in final_states:
cp_model_helper.AssertIsInt64(v)
model_ct.automata.final_states.append(v)
for t in transition_triples:
if len(t) != 3:
raise TypeError('Tuple ' + str(t) +
' has the wrong arity (!= 3)')
cp_model_helper.AssertIsInt64(t[0])
cp_model_helper.AssertIsInt64(t[1])
cp_model_helper.AssertIsInt64(t[2])
model_ct.automata.transition_tail.append(t[0])
model_ct.automata.transition_label.append(t[1])
            model_ct.automata.transition_head.append(t[2])
        return ct
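    # Illustrative sketch (hypothetical variables, not part of the original
    # API): a two-state automaton over 0/1 variables whose values must
    # alternate, starting and finishing in state 0.
    #
    #   triples = [(0, 1, 1), (1, 0, 0)]
    #   model.AddAutomaton(bits, starting_state=0, final_states=[0],
    #                      transition_triples=triples)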
def AddInverse(self, variables, inverse_variables):
"""Adds Inverse(variables, inverse_variables).
An inverse constraint enforces that if 'variables[i]' is assigned a value
'j', then inverse_variables[j] is assigned a value 'i'. And vice versa.
Args:
variables: An array of integer variables.
inverse_variables: An array of integer variables.
Returns:
An instance of the Constraint class.
Raises:
TypeError: if variables and inverse_variables have different length, or
if they are empty.
"""
if not variables or not inverse_variables:
raise TypeError(
'The Inverse constraint does not accept empty arrays')
if len(variables) != len(inverse_variables):
raise TypeError(
'In the inverse constraint, the two array variables and'
' inverse_variables must have the same length.')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.inverse.f_direct.extend(
[self.GetOrMakeIndex(x) for x in variables])
model_ct.inverse.f_inverse.extend(
[self.GetOrMakeIndex(x) for x in inverse_variables])
return ct
def AddReservoirConstraint(self, times, demands, min_level, max_level):
"""Adds Reservoir(times, demands, min_level, max_level).
Maintains a reservoir level within bounds. The water level starts at 0, and
at any time >= 0, it must be between min_level and max_level. Furthermore,
        this constraint expects all time variables to be >= 0.
If the variable times[i] is assigned a value t, then the current level
changes by demands[i] (which is constant) at the time t.
Note that level min can be > 0, or level max can be < 0. It just forces
some demands to be executed at time 0 to make sure that we are within those
bounds with the executed demands. Therefore, at any time t >= 0:
sum(demands[i] if times[i] <= t) in [min_level, max_level]
Args:
          times: A list of positive integer variables which specify the time of
            the filling or emptying of the reservoir.
          demands: A list of integer values that specify the amount of the
            emptying or filling.
          min_level: At any time >= 0, the level of the reservoir must be
            greater than or equal to the min level.
          max_level: At any time >= 0, the level of the reservoir must be
            less than or equal to the max level.
Returns:
An instance of the Constraint class.
Raises:
ValueError: if max_level < min_level.
"""
        if max_level < min_level:
            raise ValueError(
                'Reservoir constraint must have a max_level >= min_level')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.reservoir.times.extend([self.GetOrMakeIndex(x) for x in times])
model_ct.reservoir.demands.extend(demands)
model_ct.reservoir.min_level = min_level
model_ct.reservoir.max_level = max_level
return ct
def AddReservoirConstraintWithActive(self, times, demands, actives,
min_level, max_level):
"""Adds Reservoir(times, demands, actives, min_level, max_level).
        Maintains a reservoir level within bounds. The water level starts at 0,
        and at any time >= 0, it must be between min_level and max_level.
        Furthermore, this constraint expects all time variables to be >= 0.
If actives[i] is true, and if times[i] is assigned a value t, then the
level of the reservoir changes by demands[i] (which is constant) at time t.
Note that level_min can be > 0, or level_max can be < 0. It just forces
some demands to be executed at time 0 to make sure that we are within those
bounds with the executed demands. Therefore, at any time t >= 0:
sum(demands[i] * actives[i] if times[i] <= t) in [min_level, max_level]
The array of boolean variables 'actives', if defined, indicates which
actions are actually performed.
Args:
          times: A list of positive integer variables which specify the time of
            the filling or emptying of the reservoir.
          demands: A list of integer values that specify the amount of the
            emptying or filling.
          actives: A list of boolean variables. They indicate whether the
            emptying/refilling events actually take place.
          min_level: At any time >= 0, the level of the reservoir must be
            greater than or equal to the min level.
          max_level: At any time >= 0, the level of the reservoir must be
            less than or equal to the max level.
Returns:
An instance of the Constraint class.
Raises:
ValueError: if max_level < min_level.
"""
        if max_level < min_level:
            raise ValueError(
                'Reservoir constraint must have a max_level >= min_level')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.reservoir.times.extend([self.GetOrMakeIndex(x) for x in times])
model_ct.reservoir.demands.extend(demands)
model_ct.reservoir.actives.extend(actives)
model_ct.reservoir.min_level = min_level
model_ct.reservoir.max_level = max_level
return ct
def AddMapDomain(self, var, bool_var_array, offset=0):
"""Adds var == i + offset <=> bool_var_array[i] == true for all i."""
for i, bool_var in enumerate(bool_var_array):
b_index = bool_var.Index()
var_index = var.Index()
model_ct = self.__model.constraints.add()
model_ct.linear.vars.append(var_index)
model_ct.linear.coeffs.append(1)
model_ct.linear.domain.extend([offset + i, offset + i])
model_ct.enforcement_literal.append(b_index)
model_ct = self.__model.constraints.add()
model_ct.linear.vars.append(var_index)
model_ct.linear.coeffs.append(1)
model_ct.enforcement_literal.append(-b_index - 1)
if offset + i - 1 >= INT_MIN:
model_ct.linear.domain.extend([INT_MIN, offset + i - 1])
if offset + i + 1 <= INT_MAX:
model_ct.linear.domain.extend([offset + i + 1, INT_MAX])
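    # Illustrative sketch (hypothetical variables, not part of the original
    # API): channel an integer variable into indicator booleans so that b[i]
    # is true exactly when var == 5 + i.
    #
    #   b = [model.NewBoolVar('b_%d' % i) for i in range(3)]
    #   model.AddMapDomain(var, b, offset=5)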
def AddImplication(self, a, b):
"""Adds a => b."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_or.literals.append(self.GetOrMakeBooleanIndex(b))
model_ct.enforcement_literal.append(self.GetOrMakeBooleanIndex(a))
return ct
def AddBoolOr(self, literals):
"""Adds Or(literals) == true."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_or.literals.extend(
[self.GetOrMakeBooleanIndex(x) for x in literals])
return ct
def AddBoolAnd(self, literals):
"""Adds And(literals) == true."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_and.literals.extend(
[self.GetOrMakeBooleanIndex(x) for x in literals])
return ct
def AddBoolXOr(self, literals):
"""Adds XOr(literals) == true."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_xor.literals.extend(
[self.GetOrMakeBooleanIndex(x) for x in literals])
return ct
def AddMinEquality(self, target, variables):
"""Adds target == Min(variables)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_min.vars.extend(
[self.GetOrMakeIndex(x) for x in variables])
model_ct.int_min.target = self.GetOrMakeIndex(target)
return ct
def AddMaxEquality(self, target, args):
"""Adds target == Max(variables)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_max.vars.extend([self.GetOrMakeIndex(x) for x in args])
model_ct.int_max.target = self.GetOrMakeIndex(target)
return ct
def AddDivisionEquality(self, target, num, denom):
"""Adds target == num // denom."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_div.vars.extend(
[self.GetOrMakeIndex(num),
self.GetOrMakeIndex(denom)])
model_ct.int_div.target = self.GetOrMakeIndex(target)
return ct
def AddAbsEquality(self, target, var):
"""Adds target == Abs(var)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
index = self.GetOrMakeIndex(var)
model_ct.int_max.vars.extend([index, -index - 1])
model_ct.int_max.target = self.GetOrMakeIndex(target)
return ct
def AddModuloEquality(self, target, var, mod):
"""Adds target = var % mod."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_mod.vars.extend(
[self.GetOrMakeIndex(var),
self.GetOrMakeIndex(mod)])
model_ct.int_mod.target = self.GetOrMakeIndex(target)
return ct
def AddProdEquality(self, target, args):
"""Adds target == PROD(args)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_prod.vars.extend([self.GetOrMakeIndex(x) for x in args])
model_ct.int_prod.target = self.GetOrMakeIndex(target)
return ct
# Scheduling support
def NewIntervalVar(self, start, size, end, name):
"""Creates an interval variable from start, size, and end.
An interval variable is a constraint, that is itself used in other
constraints like NoOverlap.
Internally, it ensures that start + size == end.
Args:
start: The start of the interval. It can be an integer value, or an
integer variable.
size: The size of the interval. It can be an integer value, or an integer
variable.
end: The end of the interval. It can be an integer value, or an integer
variable.
name: The name of the interval variable.
Returns:
An IntervalVar object.
"""
start_index = self.GetOrMakeIndex(start)
size_index = self.GetOrMakeIndex(size)
end_index = self.GetOrMakeIndex(end)
return IntervalVar(self.__model, start_index, size_index, end_index,
None, name)
def NewOptionalIntervalVar(self, start, size, end, is_present, name):
"""Creates an optional interval var from start, size, end and is_present.
An optional interval variable is a constraint, that is itself used in other
constraints like NoOverlap. This constraint is protected by an is_present
literal that indicates if it is active or not.
Internally, it ensures that is_present implies start + size == end.
Args:
start: The start of the interval. It can be an integer value, or an
integer variable.
size: The size of the interval. It can be an integer value, or an integer
variable.
end: The end of the interval. It can be an integer value, or an integer
variable.
          is_present: A literal that indicates if the interval is active or not.
            An inactive interval is simply ignored by all constraints.
name: The name of the interval variable.
Returns:
An IntervalVar object.
"""
is_present_index = self.GetOrMakeBooleanIndex(is_present)
start_index = self.GetOrMakeIndex(start)
size_index = self.GetOrMakeIndex(size)
end_index = self.GetOrMakeIndex(end)
return IntervalVar(self.__model, start_index, size_index, end_index,
is_present_index, name)
def AddNoOverlap(self, interval_vars):
"""Adds NoOverlap(interval_vars).
A NoOverlap constraint ensures that all present intervals do not overlap
in time.
Args:
interval_vars: The list of interval variables to constrain.
Returns:
An instance of the Constraint class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.no_overlap.intervals.extend(
[self.GetIntervalIndex(x) for x in interval_vars])
return ct
def AddNoOverlap2D(self, x_intervals, y_intervals):
"""Adds NoOverlap2D(x_intervals, y_intervals).
A NoOverlap2D constraint ensures that all present rectangles do not overlap
        in the plane. Each rectangle is aligned with the X and Y axes, and is
        defined by two intervals which represent its projection onto the X and
        Y axes.
Args:
x_intervals: The X coordinates of the rectangles.
y_intervals: The Y coordinates of the rectangles.
Returns:
An instance of the Constraint class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.no_overlap_2d.x_intervals.extend(
[self.GetIntervalIndex(x) for x in x_intervals])
model_ct.no_overlap_2d.y_intervals.extend(
[self.GetIntervalIndex(x) for x in y_intervals])
return ct
def AddCumulative(self, intervals, demands, capacity):
"""Adds Cumulative(intervals, demands, capacity).
This constraint enforces that:
for all t:
sum(demands[i]
if (start(intervals[t]) <= t < end(intervals[t])) and
(t is present)) <= capacity
Args:
intervals: The list of intervals.
demands: The list of demands for each interval. Each demand must be >= 0.
Each demand can be an integer value, or an integer variable.
capacity: The maximum capacity of the cumulative constraint. It must be a
positive integer value or variable.
Returns:
An instance of the Constraint class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.cumulative.intervals.extend(
[self.GetIntervalIndex(x) for x in intervals])
model_ct.cumulative.demands.extend(
[self.GetOrMakeIndex(x) for x in demands])
model_ct.cumulative.capacity = self.GetOrMakeIndex(capacity)
return ct
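    # Illustrative sketch (hypothetical variables, not part of the original
    # API): two unit-demand tasks sharing a machine of capacity 1, i.e. a
    # cumulative formulation of NoOverlap.
    #
    #   t1 = model.NewIntervalVar(s1, 3, e1, 't1')
    #   t2 = model.NewIntervalVar(s2, 2, e2, 't2')
    #   model.AddCumulative([t1, t2], demands=[1, 1], capacity=1)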
# Helpers.
def __str__(self):
return str(self.__model)
def ModelProto(self):
return self.__model
def Negated(self, index):
return -index - 1
def GetOrMakeIndex(self, arg):
"""Returns the index of a variables, its negation, or a number."""
if isinstance(arg, IntVar):
return arg.Index()
elif (isinstance(arg, _ProductCst) and
isinstance(arg.Expression(), IntVar) and arg.Coefficient() == -1):
return -arg.Expression().Index() - 1
elif isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsInt64(arg)
return self.GetOrMakeIndexFromConstant(arg)
else:
raise TypeError('NotSupported: model.GetOrMakeIndex(' + str(arg) +
')')
def GetOrMakeBooleanIndex(self, arg):
"""Returns an index from a boolean expression."""
if isinstance(arg, IntVar):
self.AssertIsBooleanVariable(arg)
return arg.Index()
elif isinstance(arg, _NotBooleanVariable):
self.AssertIsBooleanVariable(arg.Not())
return arg.Index()
elif isinstance(arg, numbers.Integral):
cp_model_helper.AssertIsBoolean(arg)
return self.GetOrMakeIndexFromConstant(arg)
else:
raise TypeError('NotSupported: model.GetOrMakeBooleanIndex(' +
str(arg) + ')')
def GetIntervalIndex(self, arg):
if not isinstance(arg, IntervalVar):
raise TypeError('NotSupported: model.GetIntervalIndex(%s)' % arg)
return arg.Index()
def GetOrMakeIndexFromConstant(self, value):
if value in self.__constant_map:
return self.__constant_map[value]
index = len(self.__model.variables)
var = self.__model.variables.add()
var.domain.extend([value, value])
self.__constant_map[value] = index
return index
def VarIndexToVarProto(self, var_index):
        if var_index >= 0:
return self.__model.variables[var_index]
else:
return self.__model.variables[-var_index - 1]
def _SetObjective(self, obj, minimize):
"""Sets the objective of the model."""
if isinstance(obj, IntVar):
self.__model.ClearField('objective')
self.__model.objective.coeffs.append(1)
self.__model.objective.offset = 0
if minimize:
self.__model.objective.vars.append(obj.Index())
self.__model.objective.scaling_factor = 1
else:
self.__model.objective.vars.append(self.Negated(obj.Index()))
self.__model.objective.scaling_factor = -1
elif isinstance(obj, LinearExpression):
coeffs_map, constant = obj.GetVarValueMap()
self.__model.ClearField('objective')
if minimize:
self.__model.objective.scaling_factor = 1
self.__model.objective.offset = constant
else:
self.__model.objective.scaling_factor = -1
self.__model.objective.offset = -constant
for v, c, in iteritems(coeffs_map):
self.__model.objective.coeffs.append(c)
if minimize:
self.__model.objective.vars.append(v.Index())
else:
self.__model.objective.vars.append(self.Negated(v.Index()))
elif isinstance(obj, numbers.Integral):
self.__model.objective.offset = obj
self.__model.objective.scaling_factor = 1
else:
raise TypeError('TypeError: ' + str(obj) +
' is not a valid objective')
def Minimize(self, obj):
"""Sets the objective of the model to minimize(obj)."""
self._SetObjective(obj, minimize=True)
def Maximize(self, obj):
"""Sets the objective of the model to maximize(obj)."""
self._SetObjective(obj, minimize=False)
def HasObjective(self):
return self.__model.HasField('objective')
def AddDecisionStrategy(self, variables, var_strategy, domain_strategy):
"""Adds a search strategy to the model.
Args:
variables: a list of variables this strategy will assign.
var_strategy: heuristic to choose the next variable to assign.
domain_strategy: heuristic to reduce the domain of the selected variable.
Currently, this is advanced code, the union of all strategies added to
the model must be complete, i.e. instantiates all variables. Otherwise,
Solve() will fail.
"""
strategy = self.__model.search_strategy.add()
for v in variables:
strategy.variables.append(v.Index())
strategy.variable_selection_strategy = var_strategy
strategy.domain_reduction_strategy = domain_strategy
def ModelStats(self):
"""Returns some statistics on the model as a string."""
return pywrapsat.SatHelper.ModelStats(self.__model)
def Validate(self):
"""Returns a string explaining the issue is the model is not valid."""
return pywrapsat.SatHelper.ValidateModel(self.__model)
def AssertIsBooleanVariable(self, x):
if isinstance(x, IntVar):
var = self.__model.variables[x.Index()]
if len(var.domain) != 2 or var.domain[0] < 0 or var.domain[1] > 1:
raise TypeError('TypeError: ' + str(x) +
' is not a boolean variable')
elif not isinstance(x, _NotBooleanVariable):
raise TypeError('TypeError: ' + str(x) +
' is not a boolean variable')
def EvaluateLinearExpression(expression, solution):
"""Evaluate an linear expression against a solution."""
if isinstance(expression, numbers.Integral):
return expression
value = 0
to_process = [(expression, 1)]
while to_process:
expr, coef = to_process.pop()
if isinstance(expr, _ProductCst):
to_process.append((expr.Expression(), coef * expr.Coefficient()))
elif isinstance(expr, _SumArray):
for e in expr.Array():
to_process.append((e, coef))
value += expr.Constant() * coef
elif isinstance(expr, IntVar):
value += coef * solution.solution[expr.Index()]
elif isinstance(expr, _NotBooleanVariable):
raise TypeError('Cannot interpret literals in a linear expression.')
return value
def EvaluateBooleanExpression(literal, solution):
"""Evaluate an boolean expression against a solution."""
if isinstance(literal, numbers.Integral):
return bool(literal)
elif isinstance(literal, IntVar) or isinstance(literal,
_NotBooleanVariable):
index = literal.Index()
if index >= 0:
return bool(solution.solution[index])
else:
return not solution.solution[-index - 1]
else:
raise TypeError(
'Cannot interpret %s as a boolean expression.' % literal)
class CpSolver(object):
"""Main solver class.
The purpose of this class is to search for a solution of a model given to the
Solve() method.
Once Solve() is called, this class allows inspecting the solution found
with the Value() and BooleanValue() methods, as well as general statistics
about the solve procedure.
"""
def __init__(self):
self.__model = None
self.__solution = None
self.parameters = sat_parameters_pb2.SatParameters()
def Solve(self, model):
"""Solves the given model and returns the solve status."""
self.__solution = pywrapsat.SatHelper.SolveWithParameters(
model.ModelProto(), self.parameters)
return self.__solution.status
def SolveWithSolutionCallback(self, model, callback):
"""Solves a problem and pass each solution found to the callback."""
self.__solution = (
pywrapsat.SatHelper.SolveWithParametersAndSolutionCallback(
model.ModelProto(), self.parameters, callback))
return self.__solution.status
def SearchForAllSolutions(self, model, callback):
"""Search for all solutions of a satisfiability problem.
This method searches for all feasible solution of a given model.
Then it feeds the solution to the callback.
Args:
model: The model to solve.
callback: The callback that will be called at each solution.
Returns:
The status of the solve (FEASIBLE, INFEASIBLE...).
"""
if model.HasObjective():
raise TypeError('Search for all solutions is only defined on '
'satisfiability problems')
# Store old values.
enumerate_all = self.parameters.enumerate_all_solutions
self.parameters.enumerate_all_solutions = True
self.__solution = (
pywrapsat.SatHelper.SolveWithParametersAndSolutionCallback(
model.ModelProto(), self.parameters, callback))
# Restore parameters.
self.parameters.enumerate_all_solutions = enumerate_all
return self.__solution.status
def Value(self, expression):
"""Returns the value of an linear expression after solve."""
if not self.__solution:
raise RuntimeError('Solve() has not be called.')
return EvaluateLinearExpression(expression, self.__solution)
def BooleanValue(self, literal):
"""Returns the boolean value of a literal after solve."""
if not self.__solution:
            raise RuntimeError('Solve() has not been called.')
return EvaluateBooleanExpression(literal, self.__solution)
def ObjectiveValue(self):
"""Returns the value of objective after solve."""
return self.__solution.objective_value
def BestObjectiveBound(self):
"""Returns the best lower (upper) bound found when min(max)imizing."""
return self.__solution.best_objective_bound
def StatusName(self, status):
"""Returns the name of the status returned by Solve()."""
return cp_model_pb2.CpSolverStatus.Name(status)
def NumBooleans(self):
"""Returns the number of boolean variables managed by the SAT solver."""
return self.__solution.num_booleans
def NumConflicts(self):
"""Returns the number of conflicts since the creation of the solver."""
return self.__solution.num_conflicts
def NumBranches(self):
"""Returns the number of search branches explored by the solver."""
return self.__solution.num_branches
def WallTime(self):
"""Returns the wall time in seconds since the creation of the solver."""
return self.__solution.wall_time
def UserTime(self):
"""Returns the user time in seconds since the creation of the solver."""
return self.__solution.user_time
def ResponseStats(self):
"""Returns some statistics on the solution found as a string."""
return pywrapsat.SatHelper.SolverResponseStats(self.__solution)
class CpSolverSolutionCallback(pywrapsat.SolutionCallback):
"""Solution callback.
This class implements a callback that will be called at each new solution
found during search.
The method OnSolutionCallback() will be called by the solver, and must be
implemented. The current solution can be queried using the BooleanValue()
and Value() methods.
"""
def OnSolutionCallback(self):
"""Proxy to the same method in snake case."""
self.on_solution_callback()
def BooleanValue(self, lit):
"""Returns the boolean value of a boolean literal.
Args:
lit: A boolean variable or its negation.
Returns:
The boolean value of the literal in the solution.
Raises:
RuntimeError: if 'lit' is not a boolean variable or its negation.
"""
if not self.Response().solution:
            raise RuntimeError('Solve() has not been called.')
if isinstance(lit, numbers.Integral):
return bool(lit)
elif isinstance(lit, IntVar) or isinstance(lit, _NotBooleanVariable):
index = lit.Index()
return self.SolutionBooleanValue(index)
else:
raise TypeError(
'Cannot interpret %s as a boolean expression.' % lit)
def Value(self, expression):
"""Evaluates an linear expression in the current solution.
Args:
expression: a linear expression of the model.
Returns:
An integer value equal to the evaluation of the linear expression
against the current solution.
Raises:
RuntimeError: if 'expression' is not a LinearExpression.
"""
if not self.Response().solution:
            raise RuntimeError('Solve() has not been called.')
if isinstance(expression, numbers.Integral):
return expression
value = 0
to_process = [(expression, 1)]
while to_process:
expr, coef = to_process.pop()
if isinstance(expr, _ProductCst):
to_process.append((expr.Expression(),
coef * expr.Coefficient()))
elif isinstance(expr, _SumArray):
for e in expr.Array():
to_process.append((e, coef))
value += expr.Constant() * coef
elif isinstance(expr, IntVar):
value += coef * self.SolutionIntegerValue(expr.Index())
elif isinstance(expr, _NotBooleanVariable):
raise TypeError(
'Cannot interpret literals in a linear expression.')
return value
class ObjectiveSolutionPrinter(CpSolverSolutionCallback):
"""Print intermediate solutions objective and time."""
def __init__(self):
CpSolverSolutionCallback.__init__(self)
self.__solution_count = 0
self.__start_time = time.time()
def on_solution_callback(self):
"""Called on each new solution."""
current_time = time.time()
objective = self.ObjectiveValue()
print('Solution %i, time = %f s, objective = [%i, %i]' %
(self.__solution_count, current_time - self.__start_time,
objective, self.BestObjectiveBound()))
self.__solution_count += 1
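# Illustrative usage sketch (added for clarity; not part of the original
# module). It only exercises the public API defined above and runs a tiny
# model when this file is executed directly.
if __name__ == '__main__':
    _demo_model = CpModel()
    _x = _demo_model.NewIntVar(0, 10, 'x')
    _y = _demo_model.NewIntVar(0, 10, 'y')
    _demo_model.Add(_x + _y == 10)
    _demo_model.Add(_x > _y)
    _demo_model.Maximize(_x)
    _demo_solver = CpSolver()
    _status = _demo_solver.Solve(_demo_model)
    print(_demo_solver.StatusName(_status),
          _demo_solver.Value(_x), _demo_solver.Value(_y))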
|
"""PyPi Version."""
__version__ = "0.3.2"
|
from django import forms
PROBLEM_REQ_FILENAME = 'filename'
PROBLEM_REQ_DATA = 'data'
class SaveProblemForm(forms.Form):
"""Test gRPC requests, as originating from the UI"""
filename = forms.CharField()
data = forms.CharField(widget=forms.Textarea,
initial='',
help_text="Enter data to save")
|
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
Modified and extended by Matthias Mueller - Intel Intelligent Systems Lab - 2020
The controls are event-based and not synchronized to the frames.
This script matches the control signals to frames.
Specifically, if there was no control signal event within some threshold (default: 1ms), the last control signal before the frame is used.
"""
import argparse
import sys
import os
import numpy
from . import utils
def read_file_list(filename):
"""
Reads a trajectory from a text file.
File format:
The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp.
Input:
filename -- File name
Output:
dict -- dictionary of (stamp,data) tuples
"""
f = open(filename)
header = f.readline() #discard header
data = f.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
data = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
data = [(int(l[0]),l[1:]) for l in data if len(l)>1]
return dict(data)
def associate(first_list, second_list, max_offset):
"""
Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
to find the closest match for every input tuple.
Input:
first_list -- first dictionary of (stamp,data) tuples
second_list -- second dictionary of (stamp,data) tuples
    max_offset -- maximum allowed offset (second stamp minus first stamp) for two entries to be considered a candidate match
Output:
matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
"""
first_keys = list(first_list)
second_keys = list(second_list)
potential_matches = [(b-a, a, b)
for a in first_keys
for b in second_keys
if (b-a) < max_offset] #Control before image or within max_offset
potential_matches.sort(reverse = True)
matches = []
for diff, a, b in potential_matches:
if a in first_keys and b in second_keys:
first_keys.remove(a) #Remove frame that was assigned
matches.append((a, b)) #Append tuple
matches.sort()
return matches
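# Illustrative sketch (assumed toy data, not part of the original script):
# with frame timestamps as the first dictionary and control timestamps as the
# second, each frame is paired with the closest control event at or before it
# (within max_offset):
#
#   frames = {100: ['f1.jpg'], 200: ['f2.jpg']}
#   ctrls = {95: ['10', '10'], 198: ['0', '0']}
#   associate(frames, ctrls, max_offset=1)   # -> [(100, 95), (200, 198)]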
def match_frame_ctrl_cmd(data_dir, datasets, max_offset, redo_matching=False, remove_zeros=True):
frames = []
for dataset in datasets:
for folder in utils.list_dirs(os.path.join(data_dir, dataset)):
session_dir = os.path.join(data_dir, dataset, folder)
frame_list = match_frame_session(session_dir, max_offset, redo_matching, remove_zeros)
for timestamp in list(frame_list):
frames.append(frame_list[timestamp][0])
return frames
def match_frame_session(session_dir, max_offset, redo_matching=False, remove_zeros=True):
sensor_path = os.path.join(session_dir, "sensor_data")
img_path = os.path.join(session_dir, "images")
print("Processing folder %s" %(session_dir))
if (not redo_matching and os.path.isfile(os.path.join(sensor_path,"matched_frame_ctrl.txt"))):
print(" Frames and controls already matched.")
else:
#Match frames with control signals
frame_list = read_file_list(os.path.join(sensor_path,"rgbFrames.txt"))
if len(frame_list) == 0:
raise Exception("Empty rgbFrames.txt")
ctrl_list = read_file_list(os.path.join(sensor_path,"ctrlLog.txt"))
if len(ctrl_list) == 0:
raise Exception("Empty ctrlLog.txt")
matches = associate(frame_list, ctrl_list, max_offset)
with open(os.path.join(sensor_path,"matched_frame_ctrl.txt"), 'w') as f:
f.write("timestamp (frame),time_offset (ctrl-frame),frame,left,right\n")
for a,b in matches:
f.write("%d %d %s %s \n"%(a,b-a," ".join(frame_list[a]), " ".join(ctrl_list[b])))
print(" Frames and controls matched.")
if (not redo_matching and os.path.isfile(os.path.join(sensor_path,"matched_frame_ctrl_cmd.txt"))):
print(" Frames and commands already matched.")
else:
#Match frames and controls with indicator commands
frame_list = read_file_list(os.path.join(sensor_path,"matched_frame_ctrl.txt"))
if len(frame_list) == 0:
raise Exception("Empty matched_frame_ctrl.txt")
cmd_list = read_file_list(os.path.join(sensor_path,"indicatorLog.txt"))
#Set indicator signal to 0 for initial frames
if len(cmd_list) == 0 or sorted(frame_list)[0]<sorted(cmd_list)[0]:
cmd_list[sorted(frame_list)[0]] = ['0']
matches = associate(frame_list, cmd_list, max_offset)
with open(os.path.join(sensor_path,"matched_frame_ctrl_cmd.txt"), 'w') as f:
f.write("timestamp (frame),time_offset (cmd-frame),time_offset (ctrl-frame),frame,left,right,cmd\n")
for a,b in matches:
f.write("%d %d %s %s \n"%(a,b-a," ".join(frame_list[a]), " ".join(cmd_list[b])))
print(" Frames and commands matched.")
if (not redo_matching and os.path.isfile(os.path.join(sensor_path,"matched_frame_ctrl_cmd_processed.txt"))):
print(" Preprocessing already completed.")
else:
#Cleanup: Add path and remove frames where vehicle was stationary
frame_list = read_file_list(os.path.join(sensor_path,"matched_frame_ctrl_cmd.txt"))
with open(os.path.join(sensor_path,"matched_frame_ctrl_cmd_processed.txt"), 'w') as f:
f.write("timestamp,frame,left,right,cmd\n")
for timestamp in list(frame_list):
if (len(frame_list[timestamp]) < 6):
continue
left = int(frame_list[timestamp][3])
right = int(frame_list[timestamp][4])
if (remove_zeros and left-right==0 and left+right==0):
print (" Removed timestamp:%s, left:%d, right:%d" %(timestamp,left,right))
del frame_list[timestamp]
else:
frame_name=(os.path.join(img_path,frame_list[timestamp][2]+"_crop.jpeg"))
cmd = int(frame_list[timestamp][5])
f.write("%s,%s,%d,%d,%d\n"%(timestamp,frame_name,left,right,cmd))
print(" Preprocessing completed.")
return read_file_list(os.path.join(sensor_path,"matched_frame_ctrl_cmd_processed.txt"))
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VpnSitesConfigurationOperations(object):
"""VpnSitesConfigurationOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-05-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-05-01"
self.config = config
def _download_initial(
self, resource_group_name, virtual_wan_name, output_blob_sas_url, vpn_sites=None, custom_headers=None, raw=False, **operation_config):
request = models.GetVpnSitesConfigurationRequest(vpn_sites=vpn_sites, output_blob_sas_url=output_blob_sas_url)
# Construct URL
url = self.download.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def download(
self, resource_group_name, virtual_wan_name, output_blob_sas_url, vpn_sites=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gives the sas-url to download the configurations for vpn-sites in a
resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which
configuration of all vpn-sites is needed.
:type virtual_wan_name: str
:param output_blob_sas_url: The sas-url to download the configurations
for vpn-sites.
:type output_blob_sas_url: str
:param vpn_sites: List of resource-ids of the vpn-sites for which
config is to be downloaded.
:type vpn_sites: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._download_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
output_blob_sas_url=output_blob_sas_url,
vpn_sites=vpn_sites,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'}
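# Illustrative usage sketch (assumptions: a configured azure-mgmt-network
# management client that exposes this operations group, e.g. as
# `client.vpn_sites_configuration`; all names and URLs below are placeholders):
#
#   poller = client.vpn_sites_configuration.download(
#       resource_group_name='example-rg',
#       virtual_wan_name='example-wan',
#       output_blob_sas_url='https://example.blob.core.windows.net/cfg?sv=...',
#       vpn_sites=['/subscriptions/.../vpnSites/site1'])
#   poller.wait()  # the site configuration is written to the SAS URL blob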
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <n.peditto@gmail.com>"
import abc
import six
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Device(object):
"""Base class for each s4t Lightning-rod device.
"""
def __init__(self, device_type):
self.device_type = device_type
def finalize(self):
pass
|
from django.db import models
# Create your models here.
class Image(models.Model):
image_name = models.CharField(max_length=30)
image_description = models.CharField(max_length=255)
image_location = models.ForeignKey('Location',on_delete=models.CASCADE)
image_category = models.ForeignKey('Category',on_delete=models.CASCADE)
image= models.ImageField(upload_to='images/',null=True,blank=True)
def __str__(self):
return self.image_name
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def get_image_by_id(cls,id):
image = cls.objects.get(id = id)
return image
@classmethod
def search_by_category(cls,search_term):
        images = cls.objects.filter(image_category__category__contains=search_term)
return images
@classmethod
def filter_by_location(cls,search_term):
        location = Location.objects.get(location=search_term)
        images = cls.objects.filter(image_location=location)
return images
class Location(models.Model):
location = models.CharField(max_length=30)
def __str__(self):
return self.location
def save_location(self):
self.save()
def delete_location(self):
self.delete()
def update_location(self):
self.update()
class Category(models.Model):
category = models.CharField(max_length=30)
def __str__(self):
return self.category
def save_category(self):
self.save()
def update_category(self):
self.update()
def delete_category(self):
self.delete()
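# Illustrative usage sketch (hypothetical sample data; assumes migrations for
# these models have been applied):
#
#   nairobi = Location(location='Nairobi')
#   nairobi.save_location()
#   travel = Category(category='Travel')
#   travel.save_category()
#   photo = Image(image_name='Sunset', image_description='Sunset over the city',
#                 image_location=nairobi, image_category=travel)
#   photo.save_image()
#   Image.filter_by_location('Nairobi')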
|
from kf_d3m_primitives.interpretability.shap_explainers.shap_values_pipeline import ShapPipeline
def _test_fit_produce(dataset):
pipeline = ShapPipeline()
pipeline.write_pipeline()
pipeline.fit_produce(dataset)
pipeline.delete_pipeline()
def test_fit_produce_dataset_baseball():
_test_fit_produce('185_baseball_MIN_METADATA')
def test_fit_produce_dataset_acled():
_test_fit_produce('LL0_acled_reduced_MIN_METADATA')
|
from typing import Optional, Tuple
from ..utils.events import EventedModel
from ._viewer_constants import CursorStyle
class Cursor(EventedModel):
"""Cursor object with position and properties of the cursor.
Attributes
----------
position : tuple or None
Position of the cursor in world coordinates. None if outside the
world.
scaled : bool
Flag to indicate whether cursor size should be scaled to zoom.
Only relevant for circle and square cursors which are drawn
with a particular size.
    size : int
        Size of the cursor in canvas pixels. Only relevant for circle
and square cursors which are drawn with a particular size.
style : str
Style of the cursor. Must be one of
* square: A square
* circle: A circle
* cross: A cross
* forbidden: A forbidden symbol
* pointing: A finger for pointing
* standard: The standard cursor
_view_direction : Optional[Tuple[float, ...]]
The vector describing the direction of the camera in the scene.
This is None when viewing in 2D.
"""
# fields
position: Tuple[float, ...] = (1, 1)
scaled: bool = True
size: int = 1
style: CursorStyle = CursorStyle.STANDARD
_view_direction: Optional[Tuple[float, ...]] = None
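# Illustrative sketch (assumes EventedModel provides pydantic-style keyword
# construction and per-field events, as elsewhere in this codebase):
#
#   cursor = Cursor(position=(10.0, 5.0), size=4, style=CursorStyle.CROSS)
#   cursor.style = CursorStyle.STANDARD  # assumed to emit a change event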
|
from typing import Dict, List, Optional, Union
class Activities:
def __init__(self, swarm):
self.swarm = swarm
def get(self,
*,
change: Optional[int] = None,
stream: Optional[str] = None,
category: Optional[str] = None,
after: Optional[int] = None,
limit: Optional[int] = None,
fields: Optional[List[str]] = None
) -> dict:
"""
Retrieve the activity list.
* change: ``int`` (optional)
Filter activity entries by associated changelist id. This only includes
records for which there is an activity entry in Swarm.
* stream: ``str`` (optional)
Filter activity stream to query for entries. This can include
user-initiated actions (``user-alice``), activity relating to a user’s
followed projects/users (``personal-alice``), review streams
(``review-1234``), and project streams (``project-exampleproject``).
* category: ``str`` (optional)
Type of activity, examples: ``change``, ``comment``, ``job``, ``review``.
* after: ``int`` (optional)
An activity ID to seek to. Activity entries up to and including the
specified ID are excluded from the results and do not count towards ``limit``.
Useful for pagination. Commonly set to the ``lastSeen`` property from a
previous query.
* limit: ``int`` (optional)
Maximum number of activity entries to return. This does not guarantee
that ``limit`` entries are returned. It does guarantee that the number
of entries returned won’t exceed ``limit``. Server-side filtering may
exclude some activity entries for permissions reasons. Default: 100
* fields: ``List[str]`` (optional)
List of fields to show. Omitting this parameter or passing an empty
value shows all fields.
:returns: ``dict``
:raises: ``SwarmError``
"""
params = dict() # type: Dict[str, Union[int, str]]
if change:
params['change'] = change
if stream:
params['stream'] = stream
if category:
params['type'] = category
if after:
params['after'] = after
if limit:
params['max'] = limit
if fields:
params['fields'] = ','.join(fields)
return self.swarm._request('GET', 'activity', params=params)
def create(self,
*,
category: str,
user: str,
action: str,
target: str,
topic: Optional[str] = None,
description: Optional[str] = None,
change: Optional[int] = None,
streams: Optional[List[str]] = None,
link: Optional[str] = None
) -> dict:
"""
        Create a new activity entry.
* category: ``str``
Type of activity, used for filtering activity streams.
          Values can include ``change``, ``comment``, ``job``, ``review``.
* user: ``str``
User who performed the action.
* action: ``str``
Action that was performed - past-tense, for example, ``created``,
``commented on``.
* target: ``str``
Target that the action was performed on, for example, ``issue 1234``.
* topic: ``str``
Topic for the activity entry. Topics are essentially comment thread IDs.
Examples: ``reviews/1234`` or ``jobs/job001234``.
* description: ``str``
Optional description of object or activity to provide context.
* change: ``int``
Optional changelist ID this activity is related to.
Used to filter activity related to restricted changes.
* streams: ``List[str]``
Optional array of streams to display on. This can include user-initiated
actions (``user-alice``), activity relating to a user’s followed
projects/users (``personal-alice``), review streams (``review-1234``)
and project streams (``project-exampleproject``).
* link: ``str``
Optional URL for ``target``.
:returns: ``dict``
:raises: ``SwarmError``
"""
data = dict() # type: Dict[str, Union[int, str, List[str]]]
if category:
data['type'] = category
if user:
data['user'] = user
if action:
data['action'] = action
if target:
data['target'] = target
if topic:
data['topic'] = topic
if description:
data['description'] = description
if change:
data['change'] = change
if streams:
data['streams'] = streams
if link:
data['link'] = link
return self.swarm._request('POST', 'activity', json=data)
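# --- Illustrative usage sketch (not part of the API wrapper above) ---
# A minimal example of how this helper could be driven, assuming `swarm` is an
# already-authenticated client object exposing the `_request` method used above.
# All filter values and identifiers below are hypothetical.
def _example_activity_calls(swarm):
    activities = Activities(swarm)
    # Fetch up to 20 review-related entries from a user's personal stream.
    recent = activities.get(stream='personal-alice', category='review', limit=20)
    # Record a custom activity entry.
    created = activities.create(
        category='job',
        user='alice',
        action='created',
        target='job001234',
        topic='jobs/job001234',
    )
    return recent, created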
|
"""
Application Errors
"""
class ApplicationError(Exception):
def __init__(self, message, code):
self.message = message
self.code = code
super(Exception, self).__init__(message)
class InvalidJSON(ApplicationError):
def __init__(self):
ApplicationError.__init__(self,
"No JSON object could be decoded.",
400
)
class AuthError(ApplicationError):
def __init__(self):
ApplicationError.__init__(self,
"User not authenticated",
401
)
class RouteNotFound(ApplicationError):
def __init__(self, action):
ApplicationError.__init__(self,
"%s route could not be found" % action,
404
)
class ServerError(ApplicationError):
def __init__(self):
ApplicationError.__init__(self,
"we screwed up and have some debugging to do",
500
)
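# --- Illustrative usage sketch (not part of the error hierarchy above) ---
# A minimal example of mapping these exceptions onto an HTTP-style response
# dictionary; the handler below is hypothetical.
def _example_error_handling(action):
    try:
        raise RouteNotFound(action)
    except ApplicationError as err:
        return {"status": err.code, "error": err.message}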
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['say_hello', 'HelloSayer']
# Cell
def say_hello(to):
"Say hello to somebody."
return f'Hello {to}!'
# Cell
class HelloSayer:
"Say hello to `to` using `say_hello`"
def __init__(self, to): self.to = to
def say(self):
"Do the saying"
return say_hello(self.to)
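# --- Illustrative usage sketch (not generated by nbdev) ---
# A minimal example of the two exported helpers above.
if __name__ == "__main__":
    print(say_hello("world"))           # Hello world!
    print(HelloSayer("nbdev").say())    # Hello nbdev!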
|
# nuScenes dev-kit.
# Code written by Holger Caesar, Varun Bankiti, and Alex Lang, 2019.
import json
import numpy as np
from matplotlib import pyplot as plt
from nuscenes import NuScenes
from nuscenes.eval.detection.constants import TP_METRICS, DETECTION_NAMES, DETECTION_COLORS, TP_METRICS_UNITS, \
PRETTY_DETECTION_NAMES, PRETTY_TP_METRICS
from nuscenes.eval.detection.data_classes import EvalBoxes
from nuscenes.eval.detection.data_classes import MetricDataList, DetectionMetrics
from nuscenes.eval.detection.utils import boxes_to_sensor
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.geometry_utils import view_points
def visualize_sample(nusc: NuScenes,
sample_token: str,
gt_boxes: EvalBoxes,
pred_boxes: EvalBoxes,
nsweeps: int = 1,
conf_th: float = 0.15,
eval_range: float = 50,
verbose: bool = True,
savepath: str = None) -> None:
"""
Visualizes a sample from BEV with annotations and detection results.
:param nusc: NuScenes object.
:param sample_token: The nuScenes sample token.
:param gt_boxes: Ground truth boxes grouped by sample.
:param pred_boxes: Prediction grouped by sample.
:param nsweeps: Number of sweeps used for lidar visualization.
:param conf_th: The confidence threshold used to filter negatives.
:param eval_range: Range in meters beyond which boxes are ignored.
:param verbose: Whether to print to stdout.
    :param savepath: If given, saves the rendering here instead of displaying.
"""
# Retrieve sensor & pose records.
sample_rec = nusc.get('sample', sample_token)
sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
# Get boxes.
boxes_gt_global = gt_boxes[sample_token]
boxes_est_global = pred_boxes[sample_token]
# Map GT boxes to lidar.
boxes_gt = boxes_to_sensor(boxes_gt_global, pose_record, cs_record)
# Map EST boxes to lidar.
boxes_est = boxes_to_sensor(boxes_est_global, pose_record, cs_record)
# Add scores to EST boxes.
for box_est, box_est_global in zip(boxes_est, boxes_est_global):
box_est.score = box_est_global.detection_score
# Get point cloud in lidar frame.
pc, _ = LidarPointCloud.from_file_multisweep(nusc, sample_rec, 'LIDAR_TOP', 'LIDAR_TOP', nsweeps=nsweeps)
# Init axes.
_, ax = plt.subplots(1, 1, figsize=(9, 9))
# Show point cloud.
points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
colors = np.minimum(1, dists / eval_range)
ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)
# Show ego vehicle.
ax.plot(0, 0, 'x', color='black')
# Show GT boxes.
for box in boxes_gt:
box.render(ax, view=np.eye(4), colors=('g', 'g', 'g'), linewidth=2)
# Show EST boxes.
for box in boxes_est:
# Show only predictions with a high score.
assert not np.isnan(box.score), 'Error: Box score cannot be NaN!'
if box.score >= conf_th:
box.render(ax, view=np.eye(4), colors=('b', 'b', 'b'), linewidth=1)
# Limit visible range.
axes_limit = eval_range + 3 # Slightly bigger to include boxes that extend beyond the range.
ax.set_xlim(-axes_limit, axes_limit)
ax.set_ylim(-axes_limit, axes_limit)
# Show / save plot.
if verbose:
print('Rendering sample token %s' % sample_token)
plt.title(sample_token)
if savepath is not None:
plt.savefig(savepath)
plt.close()
else:
plt.show()
def setup_axis(xlabel: str = None,
ylabel: str = None,
xlim: int = None,
ylim: int = None,
title: str = None,
min_precision: float = None,
min_recall: float = None,
ax = None):
"""
Helper method that sets up the axis for a plot.
:param xlabel: x label text.
:param ylabel: y label text.
:param xlim: Upper limit for x axis.
:param ylim: Upper limit for y axis.
:param title: Axis title.
:param min_precision: Visualize minimum precision as horizontal line.
:param min_recall: Visualize minimum recall as vertical line.
:param ax: (optional) an existing axis to be modified.
:return: The axes object.
"""
if ax is None:
ax = plt.subplot()
ax.get_xaxis().tick_bottom()
ax.tick_params(labelsize=16)
ax.get_yaxis().tick_left()
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
if title is not None:
ax.set_title(title, size=24)
if xlabel is not None:
ax.set_xlabel(xlabel, size=16)
if ylabel is not None:
ax.set_ylabel(ylabel, size=16)
if xlim is not None:
ax.set_xlim(0, xlim)
if ylim is not None:
ax.set_ylim(0, ylim)
if min_recall is not None:
ax.axvline(x=min_recall, linestyle='--', color=(0, 0, 0, 0.3))
if min_precision is not None:
ax.axhline(y=min_precision, linestyle='--', color=(0, 0, 0, 0.3))
return ax
def class_pr_curve(md_list: MetricDataList,
metrics: DetectionMetrics,
detection_name: str,
min_precision: float,
min_recall: float,
savepath: str = None,
ax=None):
if ax is None:
ax = setup_axis(title=PRETTY_DETECTION_NAMES[detection_name], xlabel='Recall', ylabel='Precision', xlim=1,
ylim=1, min_precision=min_precision, min_recall=min_recall)
# Get recall vs precision values of given class for each distance threshold.
data = md_list.get_class_data(detection_name)
# Plot the recall vs. precision curve for each distance threshold.
for md, dist_th in data:
ap = metrics.get_label_ap(detection_name, dist_th)
ax.plot(md.recall, md.precision, label='Dist. : {}, AP: {:.1f}'.format(dist_th, ap * 100))
ax.legend(loc='best')
if savepath is not None:
plt.savefig(savepath)
plt.close()
def class_tp_curve(md_list: MetricDataList,
metrics: DetectionMetrics,
detection_name: str,
min_recall: float,
dist_th_tp: float,
savepath: str = None,
ax=None):
# Get metric data for given detection class with tp distance threshold.
md = md_list[(detection_name, dist_th_tp)]
min_recall_ind = round(100 * min_recall)
if min_recall_ind <= md.max_recall_ind:
# For traffic_cone and barrier only a subset of the metrics are plotted.
rel_metrics = [m for m in TP_METRICS if not np.isnan(metrics.get_label_tp(detection_name, m))]
ylimit = max([max(getattr(md, metric)[min_recall_ind:md.max_recall_ind + 1]) for metric in rel_metrics]) * 1.1
else:
ylimit = 1.0
if ax is None:
ax = setup_axis(title=PRETTY_DETECTION_NAMES[detection_name], xlabel='Recall', ylabel='Error', xlim=1,
min_recall=min_recall)
ax.set_ylim(0, ylimit)
# Plot the recall vs. error curve for each tp metric.
for metric in TP_METRICS:
tp = metrics.get_label_tp(detection_name, metric)
# Plot only if we have valid data.
if tp is not np.nan and min_recall_ind <= md.max_recall_ind:
recall, error = md.recall[:md.max_recall_ind + 1], getattr(md, metric)[:md.max_recall_ind + 1]
else:
recall, error = [], []
# Change legend based on tp value
if tp is np.nan:
label = '{}: n/a'.format(PRETTY_TP_METRICS[metric])
elif min_recall_ind > md.max_recall_ind:
label = '{}: nan'.format(PRETTY_TP_METRICS[metric])
else:
label = '{}: {:.2f} ({})'.format(PRETTY_TP_METRICS[metric], tp, TP_METRICS_UNITS[metric])
ax.plot(recall, error, label=label)
ax.axvline(x=md.max_recall, linestyle='-.', color=(0, 0, 0, 0.3))
ax.legend(loc='best')
if savepath is not None:
plt.savefig(savepath)
plt.close()
def dist_pr_curve(md_list: MetricDataList,
metrics: DetectionMetrics,
dist_th: float,
min_precision: float,
min_recall: float,
savepath: str = None) -> None:
fig, (ax, lax) = plt.subplots(ncols=2, gridspec_kw={"width_ratios": [4, 1]},
figsize=(7.5, 5))
ax = setup_axis(xlabel='Recall', ylabel='Precision',
xlim=1, ylim=1, min_precision=min_precision, min_recall=min_recall, ax=ax)
# Plot the recall vs. precision curve for each detection class.
data = md_list.get_dist_data(dist_th)
for md, detection_name in data:
md = md_list[(detection_name, dist_th)]
ap = metrics.get_label_ap(detection_name, dist_th)
ax.plot(md.recall, md.precision, label='{}: {:.1f}%'.format(PRETTY_DETECTION_NAMES[detection_name], ap * 100),
color=DETECTION_COLORS[detection_name])
hx, lx = ax.get_legend_handles_labels()
lax.legend(hx, lx, borderaxespad=0)
lax.axis("off")
plt.tight_layout()
if savepath is not None:
plt.savefig(savepath)
plt.close()
def summary_plot(md_list: MetricDataList,
metrics: DetectionMetrics,
min_precision: float,
min_recall: float,
dist_th_tp: float,
savepath: str = None) -> None:
n_classes = len(DETECTION_NAMES)
_, axes = plt.subplots(nrows=n_classes, ncols=2, figsize=(15, 5 * n_classes))
for ind, detection_name in enumerate(DETECTION_NAMES):
title1, title2 = ('Recall vs Precision', 'Recall vs Error') if ind == 0 else (None, None)
ax1 = setup_axis(xlim=1, ylim=1, title=title1, min_precision=min_precision,
min_recall=min_recall, ax=axes[ind, 0])
ax1.set_ylabel('{} \n \n Precision'.format(PRETTY_DETECTION_NAMES[detection_name]), size=20)
ax2 = setup_axis(xlim=1, title=title2, min_recall=min_recall, ax=axes[ind, 1])
if ind == n_classes - 1:
ax1.set_xlabel('Recall', size=20)
ax2.set_xlabel('Recall', size=20)
class_pr_curve(md_list, metrics, detection_name, min_precision, min_recall, ax=ax1)
class_tp_curve(md_list, metrics, detection_name, min_recall, dist_th_tp=dist_th_tp, ax=ax2)
plt.tight_layout()
if savepath is not None:
plt.savefig(savepath)
plt.close()
def detailed_results_table_tex(metrics_path: str, output_path: str) -> None:
"""
Renders a detailed results table in tex.
:param metrics_path: path to a serialized DetectionMetrics file.
:param output_path: path to the output file.
"""
with open(metrics_path, 'r') as f:
metrics = json.load(f)
tex = ''
tex += '\\begin{table}[]\n'
tex += '\\small\n'
tex += '\\begin{tabular}{| c | c | c | c | c | c | c |} \\hline\n'
tex += '\\textbf{Class} & \\textbf{AP} & \\textbf{ATE} & \\textbf{ASE} & \\textbf{AOE} & ' \
'\\textbf{AVE} & ' \
'\\textbf{AAE} \\\\ \\hline ' \
'\\hline\n'
for name in DETECTION_NAMES:
        ap = np.mean(list(metrics['label_aps'][name].values()))
ate = metrics['label_tp_errors'][name]['trans_err']
ase = metrics['label_tp_errors'][name]['scale_err']
aoe = metrics['label_tp_errors'][name]['orient_err']
ave = metrics['label_tp_errors'][name]['vel_err']
aae = metrics['label_tp_errors'][name]['attr_err']
tex_name = PRETTY_DETECTION_NAMES[name]
if name == 'traffic_cone':
tex += '{} & {:.1f} & {:.2f} & {:.2f} & N/A & N/A & N/A \\\\ \\hline\n'.format(
tex_name, ap, ate, ase)
elif name == 'barrier':
tex += '{} & {:.1f} & {:.2f} & {:.2f} & {:.2f} & N/A & N/A \\\\ \\hline\n'.format(
tex_name, ap, ate, ase, aoe)
else:
tex += '{} & {:.1f} & {:.2f} & {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\ ' \
'\\hline\n'.format(tex_name, ap, ate, ase, aoe, ave, aae)
map_ = metrics['mean_ap']
mate = metrics['tp_errors']['trans_err']
mase = metrics['tp_errors']['scale_err']
maoe = metrics['tp_errors']['orient_err']
mave = metrics['tp_errors']['vel_err']
maae = metrics['tp_errors']['attr_err']
tex += '\\hline {} & {:.1f} & {:.2f} & {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\ ' \
'\\hline\n'.format('\\textbf{Mean}', map_, mate, mase, maoe, mave, maae)
tex += '\\end{tabular}\n'
# All one line
tex += '\\caption{Detailed detection performance. '
tex += 'AP: average precision, '
tex += 'ATE: average translation error (${}$), '.format(TP_METRICS_UNITS['trans_err'])
tex += 'ASE: average scale error (${}$), '.format(TP_METRICS_UNITS['scale_err'])
tex += 'AOE: average orientation error (${}$), '.format(TP_METRICS_UNITS['orient_err'])
tex += 'AVE: average velocity error (${}$), '.format(TP_METRICS_UNITS['vel_err'])
tex += 'AAE: average attribute error (${}$). '.format(TP_METRICS_UNITS['attr_err'])
tex += 'nuScenes Detection Score (NDS) = {:.1f} '.format(metrics['nd_score'] * 100)
tex += '}\n'
tex += '\\end{table}\n'
with open(output_path, 'w') as f:
f.write(tex)
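# --- Illustrative usage sketch ---
# A minimal, self-contained example of the axis helper above. The curve-plotting
# functions additionally need MetricDataList / DetectionMetrics objects produced
# by the nuScenes evaluation pipeline, which are not constructed here.
def _example_setup_axis():
    ax = setup_axis(xlabel='Recall', ylabel='Precision', xlim=1, ylim=1,
                    title='Example PR axes', min_precision=0.1, min_recall=0.1)
    return ax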
|
#!/bin/python
from functools import wraps
def trace(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
print('%s(%r, %r) -> %r' %
(func.__name__, args, kwargs, result))
return result
return wrapper
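# --- Illustrative usage sketch ---
# A minimal example of applying the decorator above.
@trace
def add(a, b):
    return a + b
if __name__ == '__main__':
    add(2, 3)  # prints: add((2, 3), {}) -> 5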
|
import sys
import os
import struct
import datetime
from panda3d.core import PandaSystem
class SystemAnalyzer():
""" Small tool to analyze the system and also check if the users panda
build is out of date. """
@classmethod
def analyze(self):
""" Analyzes the user system. This should help debugging when the user
shares his log. """
print "System analyzer:"
def stat(name, *args):
print " ", str(name).ljust(20, " "), "=", ''.join([str(i) for i in args])
stat("System", sys.platform, " / ", os.name)
stat("Bitness", 8 * struct.calcsize("P"))
stat("Panda3D-Build Date", PandaSystem.getBuildDate())
stat("Panda3D-Compiler", PandaSystem.getCompiler())
stat("Panda3D-Distributor", PandaSystem.getDistributor())
stat("Panda3D-Version", PandaSystem.getVersionString())
stat("Panda3D-Platform", PandaSystem.getPlatform())
stat("Panda3D-Official?", PandaSystem.isOfficialVersion())
@classmethod
def checkPandaVersionOutOfDate(self, minDay, minMonth, minYear):
""" Checks if the panda build is out of date, so users don't complain
about stuff not working, because they simply didn't update """
built = PandaSystem.getBuildDate()
formated = datetime.datetime.strptime(built, "%b %d %Y %H:%M:%S")
required = datetime.datetime(minYear, minMonth, minDay, 12, 00)
if formated < required:
print "ERROR: Your Panda3D Build is out of date. Update to the latest"
print "git build in order to use the pipeline: "
print "https://github.com/panda3d/panda3d"
sys.exit(0)
# Check version
versionMinMinor = 9
versionMinMajor = 1
versionMismatch = False
if PandaSystem.getMajorVersion() < versionMinMajor:
versionMismatch = True
elif PandaSystem.getMinorVersion() < versionMinMinor:
versionMismatch = True
if versionMismatch:
print "ERROR: Your current panda build (", PandaSystem.getVersionString(), ") is"
print "not supported! The minimum required build is", str(versionMinMajor) + "." + str(versionMinMinor) + ".0"
sys.exit(0)
if __name__ == "__main__":
SystemAnalyzer.analyze()
SystemAnalyzer.checkPandaVersionOutOfDate(7,5,2015)
|
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for XLM-RoBERTa model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from .tokenization_utils import PreTrainedTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-base-sentencepiece.bpe.model",
"xlm-roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-dutch-sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-spanish": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-spanish-sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll03-english": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-english-sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll03-german": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-german-sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
"""
    Adapted from :class:`~transformers.RobertaTokenizer` and :class:`~transformers.XLNetTokenizer`. Based on
`SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every
conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
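# --- Illustrative sketch of the special-token layouts documented above ---
# Not part of the tokenizer; it only mirrors what build_inputs_with_special_tokens
# and get_special_tokens_mask produce, using the fairseq ids <s>=0 and </s>=2 and
# hypothetical token ids for sequences A and B.
def _example_special_token_layout():
    cls_id, sep_id = 0, 2
    ids_a, ids_b = [101, 102], [201, 202, 203]
    single = [cls_id] + ids_a + [sep_id]                            # <s> A </s>
    pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]   # <s> A </s></s> B </s>
    pair_mask = [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]
    return single, pair, pair_mask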
|
import configuration.configuration_time_line as timeline
default_Line = \
{
"TYPE":2,
"F_BUS":1,
"T_BUS":2,
"BR_R":0.3,
"BR_X":0.4,
"BR_B":0,
"RATE_A":5000,
"RATE_B":1000,
"RATE_C":10000,
"TAP":1,
"SHIFT":0,
"STATUS":1,
"PF":100,
"QF":0,
"TIME_GENERATED": timeline.default_time["Base_time"],
"TIME_APPLIED": [timeline.default_time["Base_time"], timeline.default_time["Look_ahead_time_uc"]],
"TIME_COMMANDED": timeline.default_time["Base_time"],
"COMMAND_STATUS":[1],
"COMMAND_TAP":[1]
}
|
from datetime import datetime, timedelta, timezone
from unittest.mock import MagicMock
from airflow import DAG
from airflow.models.taskinstance import TaskInstance
import util.operator_util as op_util
class PickleMock(MagicMock):
def __reduce__(self):
return (MagicMock, ())
def test_get_runner_operator_creates_valid_string():
dag = DAG(
dag_id='test_dag',
start_date=datetime.strptime('2019-01-01', '%Y-%m-%d')
)
runner = op_util.get_runner_operator(
dag, 'test_source', '/test/script/location.py'
)
expected_command = 'python /test/script/location.py --mode default'
assert runner.bash_command == expected_command
def test_get_dated_main_runner_handles_zero_shift():
dag = DAG(
dag_id='test_dag',
start_date=datetime.strptime('2019-01-01', '%Y-%m-%d')
)
execution_date = datetime.strptime(
'2019-01-01',
'%Y-%m-%d'
).replace(tzinfo=timezone.utc)
main_func = PickleMock()
runner = op_util.get_dated_main_runner_operator(
dag,
main_func,
timedelta(minutes=1)
)
ti = TaskInstance(runner, execution_date)
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
main_func.assert_called_with('2019-01-01')
def test_get_dated_main_runner_handles_day_shift():
dag = DAG(
dag_id='test_dag',
start_date=datetime.strptime('2019-01-01', '%Y-%m-%d')
)
execution_date = datetime.strptime(
'2019-01-01',
'%Y-%m-%d'
).replace(tzinfo=timezone.utc)
main_func = PickleMock()
runner = op_util.get_dated_main_runner_operator(
dag,
main_func,
timedelta(minutes=1),
day_shift=1
)
ti = TaskInstance(runner, execution_date)
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
main_func.assert_called_with('2018-12-31')
|
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
me = __file__
path = os.path.realpath(me)
sys.path.append(os.path.dirname(os.path.dirname(path)))
os.environ['DJANGO_SETTINGS_MODULE'] = 'astrometry.net.settings'
import settings
import django
django.setup()
from astrometry.net.models import *
from astrometry.util.file import *
def clean_dfs():
for df in DiskFile.objects.all().order_by('file_hash'):
if os.path.exists(df.get_path()):
continue
print('Does not exist:', df)
ocoll = df.collection
        found = False
        for coll in ['cached', 'resized', 'uploaded', 'uploaded-gunzip', 'uploaded-untar']:
            df.collection = coll
            if os.path.exists(df.get_path()):
                print('--> found in', coll)
                df.save()
                found = True
                break
        if found:
            continue
        df.delete()
# print ' image_set:', df.image_set.all()
# for im in df.image_set.all():
# print ' uis:', im.userimage_set.all()
# print ' submissions:', df.submissions.all()
# print ' cached:', df.cachedfile_set.all()
def unlink_resized_fits():
uis = UserImage.objects.filter(image__disk_file__file_type='FITS image data')
print(uis.count(), 'UserImages are FITS')
for ui in uis:
im = ui.image
im.display_image = None
im.thumbnail = None
im.save()
print('Updated', len(uis), 'UserImages')
def delete_orphaned_images():
print('Checking for orphaned Images...')
ndel = 0
for im in Image.objects.all():
used = (im.userimage_set.count() +
im.image_thumbnail_set.count() +
im.image_display_set.count())
print('Image', im.id, 'used', used, 'times')
if used > 0:
continue
im.delete()
ndel += 1
print('Deleted', ndel, 'Images')
def delete_orphaned_diskfiles():
ndel = 0
for df in DiskFile.objects.all():
used = (df.image_set.count() +
df.submissions.count() +
df.cachedfile_set.count())
print('DiskFile', df.file_hash, 'used', used, 'times')
if used > 0:
continue
os.remove(df.get_path())
df.delete()
ndel += 1
print('Deleted', ndel, 'DiskFiles')
def clean_cache():
cfs = CachedFile.objects.all()
print(cfs.count(), 'CachedFiles')
cfs = cfs.filter(key__contains='galex')
print(cfs.count(), 'GALEX cached files')
#cfs = cfs.filter(key__contains='sdss_size')
#print(cfs.count(), 'SDSS cached files')
#cfs = cfs.filter(key__contains='jpg_image')
#print(cfs.count(), 'FITS->jpeg images')
#cfs = cfs.filter(key__contains='fits_table_')
#print(cfs.count(), 'FITS tables')
def do_delete(delcfs, deldfs, delfiles):
delcfs = list(delcfs)
deldfs = list(deldfs)
delfiles = list(delfiles)
print('Total of', len(delcfs), 'CachedFiles to delete')
print('Total of', len(delfiles), 'files to delete')
print('Total of', len(deldfs), 'DiskFiles to delete')
print('Deleting CachedFiles...')
for cf in delcfs:
cf.delete()
print('Deleting DiskFiles...')
for df in deldfs:
df.delete()
print('Deleting Files...')
for fn in delfiles:
os.unlink(fn)
delfiles = set()
delcfs = set()
deldfs = set()
for i,cf in enumerate(cfs):
if i % 1000 == 0:
do_delete(delcfs, deldfs, delfiles)
delfiles = set()
delcfs = set()
deldfs = set()
print()
print(cf.key)
try:
df = cf.disk_file
except:
print('DiskFile not found -- deleting CachedFile')
delcfs.add(cf)
continue
path = df.get_path()
print('->', path)
print('Other CachedFiles sharing this DiskFile:')
for ocf in df.cachedfile_set.all():
print(' ', ocf.key)
delcfs.add(ocf)
delcfs.add(cf)
deldfs.add(df)
delfiles.add(path)
do_delete(delcfs, deldfs, delfiles)
if __name__ == '__main__':
clean_cache()
sys.exit(0)
# Remove resized FITS image to retro-fix bug in an-fitstopnm
unlink_resized_fits()
# then remove orphaned Image objects
delete_orphaned_images()
# and orphaned DiskFiles
delete_orphaned_diskfiles()
# clean_dfs()
#
# cfs = CachedFile.objects.all()
# print 'Total of', cfs.count(), 'files cached'
# nbytes = 0
# for cf in cfs:
# df = cf.disk_file
# path = df.get_path()
# if not os.path.exists(path):
# print 'Path does not exist:', path
# print 'Other CachedFiles sharing this DiskFile:'
# df.cachedfile_set.all().delete()
# df.delete()
# #cf.delete()
# continue
# sz = file_size(path)
# print ' %-32s' % cf.key, '=>', path, ' (size: %i bytes)' % sz
# nbytes += sz
# print 'Total of', nbytes, 'bytes'
def clean_cache():
cfs = CachedFile.objects.all()
print(cfs.count(), 'CachedFiles')
cfs = cfs.filter(key__contains='galex')
print(cfs.count(), 'GALEX cached files')
delfiles = []
delcfs = []
deldfs = []
for cf in cfs:
print()
print(cf.key)
df = cf.disk_file
path = df.get_path()
print('->', path)
print('Other CachedFiles sharing this DiskFile:')
for ocf in df.cachedfile_set.all():
print(' ', ocf.key)
delcfs.append(ocf)
delcfs.append(cf)
deldfs.append(df)
delfiles.append(path)
delcfs = list(set(delcfs))
deldfs = list(set(deldfs))
delfiles = list(set(delfiles))
print('Total of', len(delcfs), 'CachedFiles to delete')
print('Total of', len(delfiles), 'files to delete')
print('Total of', len(deldfs), 'DiskFiles to delete')
for cf in delcfs:
cf.delete()
for df in deldfs:
df.delete()
for fn in delfiles:
os.unlink(fn)
|
import sounddevice as sd
from scipy.io.wavfile import write
from predictProcess import *
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, LSTM
from keras.utils import to_categorical
import wandb
from wandb.keras import WandbCallback
from numpy import argmax
import matplotlib.pyplot as plt
from keras.utils import plot_model
from numpy import loadtxt
from keras.models import load_model
from keras.models import model_from_json
import os
import wave
import contextlib
# fname = './data/bed/0a7c2a8d_nohash_0.wav'
# with contextlib.closing(wave.open(fname,'r')) as f:
# frames = f.getnframes()
# rate = f.getframerate()
# duration = frames / float(rate)
# print(duration)
# print(frames)
# print(rate)
num = 0
def testRecord():
print("Recording for one second")
fs = 16000 # Sample rate
seconds = 2 # Duration of recording
#Change Range in the line below to record multiple recordings
for num in range(0,1):
print("PRESS RETURN TO RECORD")
key = input("PRESS RETURN TO RECORD :")
if key == "":
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)
sd.wait() # Wait until recording is finished
name = "output{}.wav".format(num)
write(name, fs, myrecording)
# os.rename changes file location to the right folder however, it tends to corrupt the .WAV file
# os.rename('/Users/hamzaehsan/Desktop/titleTownTech/speechRecognition/output.wav', '/Users/hamzaehsan/Desktop/titleTownTech/speechRecognition/Predict/Prediction/output.wav')
print("output.wav has been saved to project directory")
testRecord()
|
from .matric_util import fast_hist, per_class_iu, fast_hist_crop
|
from .base import Serializer, SqlReader, NameCompare
from .engine import Engine
from opendp.smartnoise_t._ast.tokens import Literal
from opendp.smartnoise_t._ast.expressions.numeric import BareFunction
class SparkReader(SqlReader):
ENGINE = Engine.SPARK
def __init__(self, conn, **kwargs):
super().__init__(self.ENGINE)
self.api = conn
self.database = "Spark Session"
def execute(self, query, *ignore, accuracy:bool=False):
if not isinstance(query, str):
raise ValueError("Please pass strings to execute. To execute ASTs, use execute_typed.")
return self.api.sql(query)
    def _to_df(self, rows):
return rows
def db_name(self):
return self.database
class SparkSerializer(Serializer):
def serialize(self, query):
for r_e in [n for n in query.find_nodes(BareFunction) if n.name == "RANDOM"]:
r_e.name = "rand"
for b in [n for n in query.find_nodes(Literal) if isinstance(n.value, bool)]:
b.text = "'True'" if b.value else "'False'"
return str(query)
class SparkNameCompare(NameCompare):
def __init__(self, search_path=None):
self.search_path = search_path if search_path is not None else ["dbo"]
def identifier_match(self, query, meta):
return self.strip_escapes(query).lower() == self.strip_escapes(meta).lower()
|
import os
import pandas as pd
def get_index_queries():
    r''' Index-creation queries; run these before everything else to speed things up
'''
return ['CREATE INDEX patientId FOR (p:Patient) ON (p.patientId);',
'CREATE INDEX conceptId FOR (c:Concept) ON (c.conceptId);',
'CREATE INDEX documentId FOR (d:Document) ON (d.documentId);']
def create_neo_csv(data, columns, output_dir='/etc/lib/neo4j/import/',
base_name='patients'):
    r''' Creates a CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data
columns:
What data to use from the dataframe
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
base_name:
Name of the csv
'''
if isinstance(data, pd.DataFrame):
df = data
else:
df = pd.read_csv(data)
# Remove duplicates
df = df.drop_duplicates(subset=columns)
out_df = df[columns]
data_path = os.path.join(output_dir, f"{base_name}.csv")
out_df.to_csv(data_path, index=False)
def create_patients_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='patients'):
r''' Creates a patients CSV for neo4j load csv function
Args:
data:
A dataframe or path to a dataframe with the required data: patientId,
sex, ethnicity, dob
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible,
            but writing there may require admin privileges
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'CREATE (:Patient {patientId: toString(row.patientId), \n'
' sex: toString(row.sex), \n'
' ethnicity: toString(row.ethnicity), \n'
' dob: datetime(row.dob)}) \n'
)
create_neo_csv(data=data, columns=['patientId', 'sex', 'ethnicity', 'dob'],
output_dir=output_dir, base_name=base_name)
return query
def create_documents_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='documents'):
    r''' Creates a documents CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: documentId
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'CREATE (:Document {documentId: toString(row.documentId)}) \n'
)
create_neo_csv(data=data, columns=['documentId'],
output_dir=output_dir, base_name=base_name)
return query
def create_concepts_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='concepts'):
    r''' Creates a concepts CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: conceptId,
name and type
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'CREATE (:Concept {conceptId: toString(row.conceptId), \n'
' type: toString(row.type), \n'
' name: toString(row.name)}) \n'
)
create_neo_csv(data=data, columns=['conceptId', 'name', 'type'],
output_dir=output_dir, base_name=base_name)
return query
def create_document2patient_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='document2patient'):
    r''' Creates a document-to-patient relations CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: patientId and
documentId
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'MATCH (pt:Patient {patientId: toString(row.patientId)}) \n'
'MATCH (doc:Document {documentId: toString(row.documentId)}) \n'
'CREATE (pt)-[:HAS]->(doc); \n'
)
create_neo_csv(data=data, columns=['patientId', 'documentId'],
output_dir=output_dir, base_name=base_name)
return query
def create_concept_ontology_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='concept_ontology'):
    r''' Creates a concept-ontology (IS_A) relations CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: child, parent
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'MATCH (child:Concept {conceptId: toString(row.child)}) \n'
'MATCH (parent:Concept {conceptId: toString(row.parent)}) \n'
'CREATE (child)-[:IS_A]->(parent); \n'
)
create_neo_csv(data=data, columns=['child', 'parent'],
output_dir=output_dir, base_name=base_name)
return query
def create_document2concept_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='document2concepts'):
    r''' Creates a document-to-concept relations CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: 'conceptId',
'documentId', 'contextSimilarity', 'start', 'end', 'timestamp',
'metaSubject', 'metaPresence', 'metaTime'
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'MATCH (doc:Document{documentId: toString(row.documentId)}) \n'
'MATCH (concept:Concept {conceptId: toString(row.conceptId)}) \n'
'CREATE (doc)-[:HAS {start: toInteger(row.start), \n'
' end: toInteger(row.end), \n'
' timestamp: toInteger(row.timestamp), \n'
' contextSimilarity: toFloat(row.contextSimilarity), \n'
' metaSubject: toString(row.metaSubject), \n'
' metaPresence: toString(row.metaPresence), \n'
' metaTime: toString(row.metaTime) \n'
' }]->(concept); \n'
)
columns = ['conceptId', 'documentId', 'contextSimilarity', 'start',
'end', 'timestamp', 'metaSubject', 'metaPresence', 'metaTime']
create_neo_csv(data=data, columns=columns,
output_dir=output_dir, base_name=base_name)
return query
def get_data_from_docs(docs, doc2pt, doc2time=None):
data = [['conceptId', 'documentId', 'contextSimilarity',
'start', 'end', 'timestamp', 'metaSubject',
'metaPresence', 'metaTime']]
for doc_id, doc in docs.items():
row = []
for ent in doc['entities'].values():
#if ent['meta_anns']['Subject']['value'] == 'Patient' and \
# ent['meta_anns']['Presence']['value'] == 'True':
if doc2time is not None:
t = doc2time[doc_id]
else:
t = ent['document_timestamp']
row = [ent['cui'], doc_id,
ent['context_similarity'],
ent['start'], ent['end'],
t,
ent['meta_anns'].get('Subject', {}).get('value', None),
ent['meta_anns'].get('Presence', {}).get('value', None),
ent['meta_anns'].get('Time', {}).get('value', None)]
data.append(row)
row = []
return data
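# --- Illustrative usage sketch (not part of the helpers above) ---
# A minimal example of generating one CSV plus its LOAD CSV query from an
# in-memory dataframe. The directory and patient records are hypothetical; in a
# real deployment output_dir should point at neo4j's import folder.
def _example_build_patient_csv():
    patients = pd.DataFrame({
        'patientId': ['p1', 'p2'],
        'sex': ['F', 'M'],
        'ethnicity': ['unknown', 'unknown'],
        'dob': ['1980-01-01T00:00:00', '1990-06-15T00:00:00'],
    })
    query = create_patients_csv(patients, output_dir='/tmp', base_name='patients')
    return get_index_queries() + [query]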
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Text Classification Model using TF-IDF.
# First, import the MultinomialNB module and create a Multinomial Naive Bayes classifier object using the MultinomialNB() function.
from __future__ import print_function
import logging
import sys
from optparse import OptionParser
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.svm import LinearSVC
from sklearn.utils.extmath import density
if __name__ == '__main__':
# This will be the unit test
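    # NOTE: `text_counts` and `data` are assumed to be prepared elsewhere (for
    # example a CountVectorizer/TfidfVectorizer matrix and a dataframe with a
    # 'Sentiment' column); they are not defined anywhere in this script as-is.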
X_train, X_test, y_train, y_test = train_test_split(
text_counts, data['Sentiment'], test_size=0.3, random_state=1)
# Model Generation Using Multinomial Naive Bayes
clf = MultinomialNB().fit(X_train, y_train)
predicted= clf.predict(X_test)
print("MultinomialNB Accuracy:",metrics.accuracy_score(y_test, predicted))
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" % opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
print("done in %fs" % (time() - t0))
print()
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = np.asarray(vectorizer.get_feature_names())
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.f1_score(y_test, pred)
print("f1-score: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
class L1LinearSVC(LinearSVC):
def fit(self, X, y):
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
self.transformer_ = LinearSVC(penalty="l1",
dual=False, tol=1e-3)
X = self.transformer_.fit_transform(X, y)
return LinearSVC.fit(self, X, y)
def predict(self, X):
X = self.transformer_.transform(X)
return LinearSVC.predict(self, X)
print('=' * 80)
print("LinearSVC with L1-based feature selection")
results.append(benchmark(L1LinearSVC()))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
import factory
from sqlalchemy import or_
from backend.extensions import db
from backend.models import Hacknight, Participant, Team, User
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
class Meta:
model = User
username = factory.Faker("email", locale="pl_PL")
password = "pass123"
@classmethod
def _create(cls, model_class, *args, **kwargs):
session = cls._meta.sqlalchemy_session
with session.no_autoflush:
existing = (
session.query(model_class)
.filter_by(username=kwargs["username"])
.first()
)
if existing:
kwargs["username"] = cls.username.generate({})
obj = super(UserFactory, cls)._create(model_class, *args, **kwargs)
return obj
class ParticipantFactory(BaseFactory):
class Meta:
model = Participant
first_name = factory.Faker("first_name")
last_name = factory.Faker("last_name")
email = factory.LazyAttribute(
lambda obj: "{}{}@codeforpoznan.test".format(obj.first_name, obj.last_name)
)
github = factory.LazyAttribute(lambda obj: f"{obj.first_name}{obj.last_name}")
phone = factory.Faker("random_int", min=100000000, max=999999999)
@classmethod
def _create(cls, model_class, *args, **kwargs):
session = cls._meta.sqlalchemy_session
with session.no_autoflush:
existing = (
session.query(model_class)
.filter(
or_(
model_class.email == kwargs["email"],
model_class.github == kwargs["github"],
)
)
.first()
)
if existing:
kwargs["email"] = cls.email.generate({})
kwargs["github"] = cls.github.generate({})
obj = super(ParticipantFactory, cls)._create(model_class, *args, **kwargs)
return obj
class HacknightFactory(BaseFactory):
class Meta:
model = Hacknight
date = factory.Faker("date_between", start_date="-20y", end_date="now")
@classmethod
def _create(cls, model_class, *args, **kwargs):
session = cls._meta.sqlalchemy_session
with session.no_autoflush:
existing = session.query(model_class).filter_by(date=kwargs["date"]).first()
if existing:
kwargs["date"] = cls.date.generate({})
obj = super(HacknightFactory, cls)._create(model_class, *args, **kwargs)
return obj
@factory.post_generation
def participants(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for participant in extracted:
self.participants.append(participant)
class TeamFactory(BaseFactory):
class Meta:
model = Team
project_name = factory.Faker("word", locale="pl_PL")
description = factory.Faker("paragraph", locale="pl_PL")
project_url = factory.LazyAttribute(
lambda obj: f"https://{obj.project_name}.codeforpoznan.test"
)
@classmethod
def _create(cls, model_class, *args, **kwargs):
session = cls._meta.sqlalchemy_session
with session.no_autoflush:
existing = (
session.query(model_class)
.filter_by(project_name=kwargs["project_name"])
.first()
)
if existing:
project_name = cls.project_name.generate({})
kwargs["project_name"] = project_name
kwargs["project_url"] = f"https://{project_name}.codeforpoznan.test"
obj = super(TeamFactory, cls)._create(model_class, *args, **kwargs)
return obj
@factory.post_generation
def members(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for member in extracted:
self.members.append(member)
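# Illustrative usage sketch (not part of the original module): how these
# factories might be combined in a test, assuming the same db.session bound
# above is active (e.g. inside a Flask app/test context) and that the
# Hacknight model exposes the participants relationship used by the
# post_generation hook. The function name is only for illustration.
def _example_factory_usage():
    participants = ParticipantFactory.create_batch(3)
    hacknight = HacknightFactory(participants=participants)
    db.session.commit()
    assert len(hacknight.participants) == 3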
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" pytickersymbols
Copyright 2019 Slash Gordon
Use of this source code is governed by an MIT-style license that
can be found in the LICENSE file.
"""
from setuptools import setup, find_packages
EXCLUDE_FROM_PACKAGES = ['test', 'test.*', 'test*']
VERSION = '1.0.11'
def get_requirements(requirements):
with open(requirements) as requirement_file:
content = requirement_file.readlines()
content = [x.strip() for x in content]
return content
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="pytickersymbols",
version=VERSION,
author="Slash Gordon",
author_email="slash.gordon.dev@gmail.com",
py_modules=['pytickersymbols'],
package_dir={'': 'src'},
description="The lib provides ticker symbols for yahoo and google finance.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/portfolioplus/pytickersymbols",
packages=find_packages('src', exclude=EXCLUDE_FROM_PACKAGES),
package_data={'': ['data/*.yaml']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business :: Financial :: Investment',
],
)
|
'''
This simulation executes a trained neural network that approximates the
closed-form solution given by two-axis inverse kinematics
'''
from __future__ import division
import numpy as np
import contextlib
with contextlib.redirect_stdout(None):
import pygame
import pygame.locals
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from robot_arm.arm_part import ArmPart
import helpers
from neural_network import FullyConnectedNetwork
'''Global Variables'''
input_shape = 2
output_shape = 4
drop_rte = 0.1
hidden_neurons = [40, 40, 40, 40,output_shape]
model = FullyConnectedNetwork(input_shape, hidden_neurons, drop_rte)
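#The network's four outputs are interpreted further below as
#(sin theta_0, cos theta_0, sin theta_1, cos theta_1); the two joint angles
#are recovered with arctan2 before being converted into rotation steps.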
def load_model(model):
return model.load_state_dict(torch.load('./saved_models/deterministicmodel.pth'))
load_model(model)
if torch.cuda.is_available():
model.cuda()
print("Using GPU Acceleration")
else:
print("Not Using GPU Acceleration")
model.eval() #Model has previously been trained; set it to eval mode so all connections are used
red = (255, 0, 0)
white = (255, 255, 255)
pygame.init()
pygame.display.set_caption('Fully Connected Network Approximating Inverse Kinematics')
width = 1000
height = 1000
display = pygame.display.set_mode((width, height))
frame_clock = pygame.time.Clock()
origin = (width / 2.0, height / 2.0)
upperarm = ArmPart('./robot_arm/upperarm.png', scale=.8)
lowerarm = ArmPart('./robot_arm/lowerarm.png', scale=.9)
sprites = []
num_steps_0 = 0
num_steps_1 = 0
cur_radians_0 = 0
cur_radians_1 = 0
origin_1 = (0, 0)
rotate_rte_0 = 0
rotate_rte_1 = 0
'''Main Script Logic'''
while True:
display.fill(white)
#Check if mouse is pressed and add location to sprites list if it is pressed
mouse_state = pygame.mouse.get_pressed()
if mouse_state[0] == 1:
sprites.append(pygame.mouse.get_pos())
sprites = helpers.return_ordered(sprites)
    #If the sprites list has elements and both rotation step counters are zero
#Calculate Inv Kinematic solution for the most recent sprite in the sprites list
if len(sprites) > 0 and num_steps_0 == 0 and num_steps_1 == 0:
#prepare input for neural network
if torch.cuda.is_available():
input_to_model = Variable(torch.from_numpy(np.asarray([(sprites[0][0] - 500.0 + 420)/840, (sprites[0][1] - 500.0 + 420)/840])).float().cuda())
else:
input_to_model = Variable(torch.from_numpy(np.asarray([(sprites[0][0] - 500.0 + 420)/840, (sprites[0][1] - 500.0 + 420)/840])).float())
#Inference model and calculate rotation steps needed
with torch.no_grad():
theta_0_sin, theta_0_cos, theta_1_sin, theta_1_cos = model.forward(input_to_model)
theta_0 = np.arctan2(theta_0_sin.item(), theta_0_cos.item())
theta_1 = np.arctan2(theta_1_sin.item(), theta_1_cos.item())
theta_0, theta_1 = helpers.convert_normal_angle(theta_0, theta_1)
if (sprites[0][0] >=0):
theta_add = (theta_1 + theta_0)% (2 * np.pi)
else:
theta_add = (theta_1 - theta_0)% (2 * np.pi)
num_steps_0, rotate_rte_0 = helpers.calc_rot(cur_radians_0, theta_0)
num_steps_1, rotate_rte_1 = helpers.calc_rot(cur_radians_1, theta_add)
    #Rotate upper arm only
if num_steps_0 > 0 and num_steps_1 == 0:
ua_image, ua_rect = upperarm.rotate(rotate_rte_0)
fa_image, fa_rect = lowerarm.rotate(0.0)
num_steps_0 +=-1
#Rotate lower arm
elif num_steps_1 > 0 and num_steps_0 == 0:
fa_image, fa_rect = lowerarm.rotate(rotate_rte_1)
ua_image, ua_rect = upperarm.rotate(0.0)
num_steps_1 += -1
    #Rotate both upper and lower arm
elif num_steps_0 > 0 and num_steps_1 > 0:
fa_image, fa_rect = lowerarm.rotate(rotate_rte_1)
ua_image, ua_rect = upperarm.rotate(rotate_rte_0)
num_steps_0 += -1
num_steps_1 += -1
#Arm has reached end point, pop sprite from sprites list
if num_steps_1 == 0 and num_steps_0 == 0:
fa_image, fa_rect = lowerarm.rotate(0.000)
ua_image, ua_rect = upperarm.rotate(0.000)
if len(sprites) > 0:
sprites.pop(0)
joints_x = np.cumsum([0,
upperarm.scale * np.cos(upperarm.rot_angle),
lowerarm.scale * np.cos(lowerarm.rot_angle)]) + origin[0]
joints_y = np.cumsum([0,
upperarm.scale * np.sin(upperarm.rot_angle),
lowerarm.scale * np.sin(lowerarm.rot_angle)]) * -1 + origin[1]
#Update location of joints
joints = [(int(x), int(y)) for x,y in zip(joints_x, joints_y)]
#Draw sprites on screen
for sprite in sprites:
pygame.draw.circle(display, red, sprite, 4)
#Reposition upper arm and lower arm
helpers.transform(ua_rect, joints[0], upperarm)
helpers.transform(fa_rect, joints[1], lowerarm)
#Draw upper and lower arm
display.blit(ua_image, ua_rect)
display.blit(fa_image, fa_rect)
#Get current location of arm parts
cur_radians_0 = helpers.print_angle(ua_rect.center[0], ua_rect.center[1], (500, 500))
cur_radians_1 = helpers.print_angle(fa_rect.center[0], fa_rect.center[1], (joints[1][0], joints[1][1]))
#Check for quit
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
frame_clock.tick(30)
|
"""
SQLite backend for the sqlite3 module in the standard library.
"""
import datetime
import decimal
import functools
import hashlib
import math
import operator
import random
import re
import statistics
import warnings
from itertools import chain
from sqlite3 import dbapi2 as Database
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper, timezone_constructor
from django.utils import timezone
from django.utils.asyncio import async_unsafe
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from django.utils.regex_helper import _lazy_re_compile
from .client import DatabaseClient
from .creation import DatabaseCreation
from .features import DatabaseFeatures
from .introspection import DatabaseIntrospection
from .operations import DatabaseOperations
from .schema import DatabaseSchemaEditor
def decoder(conv_func):
"""
Convert bytestrings from Python's sqlite3 interface to a regular string.
"""
return lambda s: conv_func(s.decode())
def none_guard(func):
"""
Decorator that returns None if any of the arguments to the decorated
function are None. Many SQL functions return NULL if any of their arguments
are NULL. This decorator simplifies the implementation of this for the
custom functions registered below.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
return None if None in args else func(*args, **kwargs)
return wrapper
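# Illustrative example (not part of Django): none_guard(math.log)(None)
# returns None, while none_guard(math.log)(math.e) returns 1.0, mirroring how
# the SQL functions registered below propagate NULL arguments.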
def list_aggregate(function):
"""
Return an aggregate class that accumulates values in a list and applies
the provided function to the data.
"""
return type("ListAggregate", (list,), {"finalize": function, "step": list.append})
def check_sqlite_version():
if Database.sqlite_version_info < (3, 9, 0):
raise ImproperlyConfigured(
"SQLite 3.9.0 or later is required (found %s)." % Database.sqlite_version
)
check_sqlite_version()
Database.register_converter("bool", b"1".__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = "sqlite"
display_name = "SQLite"
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
"AutoField": "integer",
"BigAutoField": "integer",
"BinaryField": "BLOB",
"BooleanField": "bool",
"CharField": "varchar(%(max_length)s)",
"DateField": "date",
"DateTimeField": "datetime",
"DecimalField": "decimal",
"DurationField": "bigint",
"FileField": "varchar(%(max_length)s)",
"FilePathField": "varchar(%(max_length)s)",
"FloatField": "real",
"IntegerField": "integer",
"BigIntegerField": "bigint",
"IPAddressField": "char(15)",
"GenericIPAddressField": "char(39)",
"JSONField": "text",
"OneToOneField": "integer",
"PositiveBigIntegerField": "bigint unsigned",
"PositiveIntegerField": "integer unsigned",
"PositiveSmallIntegerField": "smallint unsigned",
"SlugField": "varchar(%(max_length)s)",
"SmallAutoField": "integer",
"SmallIntegerField": "smallint",
"TextField": "text",
"TimeField": "time",
"UUIDField": "char(32)",
}
data_type_check_constraints = {
"PositiveBigIntegerField": '"%(column)s" >= 0',
"JSONField": '(JSON_VALID("%(column)s") OR "%(column)s" IS NULL)',
"PositiveIntegerField": '"%(column)s" >= 0',
"PositiveSmallIntegerField": '"%(column)s" >= 0',
}
data_types_suffix = {
"AutoField": "AUTOINCREMENT",
"BigAutoField": "AUTOINCREMENT",
"SmallAutoField": "AUTOINCREMENT",
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See https://www.sqlite.org/lang_expr.html for an explanation.
operators = {
"exact": "= %s",
"iexact": "LIKE %s ESCAPE '\\'",
"contains": "LIKE %s ESCAPE '\\'",
"icontains": "LIKE %s ESCAPE '\\'",
"regex": "REGEXP %s",
"iregex": "REGEXP '(?i)' || %s",
"gt": "> %s",
"gte": ">= %s",
"lt": "< %s",
"lte": "<= %s",
"startswith": "LIKE %s ESCAPE '\\'",
"endswith": "LIKE %s ESCAPE '\\'",
"istartswith": "LIKE %s ESCAPE '\\'",
"iendswith": "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
"contains": r"LIKE '%%' || {} || '%%' ESCAPE '\'",
"icontains": r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
"startswith": r"LIKE {} || '%%' ESCAPE '\'",
"istartswith": r"LIKE UPPER({}) || '%%' ESCAPE '\'",
"endswith": r"LIKE '%%' || {} ESCAPE '\'",
"iendswith": r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict["NAME"]:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value."
)
kwargs = {
"database": settings_dict["NAME"],
"detect_types": Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
**settings_dict["OPTIONS"],
}
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if "check_same_thread" in kwargs and kwargs["check_same_thread"]:
warnings.warn(
"The `check_same_thread` option was provided and set to "
"True. It will be overridden with False. Use the "
"`DatabaseWrapper.allow_thread_sharing` property instead "
"for controlling thread shareability.",
RuntimeWarning,
)
kwargs.update({"check_same_thread": False, "uri": True})
return kwargs
@async_unsafe
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
create_deterministic_function = functools.partial(
conn.create_function,
deterministic=True,
)
create_deterministic_function(
"django_date_extract", 2, _sqlite_datetime_extract
)
create_deterministic_function("django_date_trunc", 4, _sqlite_date_trunc)
create_deterministic_function(
"django_datetime_cast_date", 3, _sqlite_datetime_cast_date
)
create_deterministic_function(
"django_datetime_cast_time", 3, _sqlite_datetime_cast_time
)
create_deterministic_function(
"django_datetime_extract", 4, _sqlite_datetime_extract
)
create_deterministic_function(
"django_datetime_trunc", 4, _sqlite_datetime_trunc
)
create_deterministic_function("django_time_extract", 2, _sqlite_time_extract)
create_deterministic_function("django_time_trunc", 4, _sqlite_time_trunc)
create_deterministic_function("django_time_diff", 2, _sqlite_time_diff)
create_deterministic_function(
"django_timestamp_diff", 2, _sqlite_timestamp_diff
)
create_deterministic_function(
"django_format_dtdelta", 3, _sqlite_format_dtdelta
)
create_deterministic_function("regexp", 2, _sqlite_regexp)
create_deterministic_function("ACOS", 1, none_guard(math.acos))
create_deterministic_function("ASIN", 1, none_guard(math.asin))
create_deterministic_function("ATAN", 1, none_guard(math.atan))
create_deterministic_function("ATAN2", 2, none_guard(math.atan2))
create_deterministic_function("BITXOR", 2, none_guard(operator.xor))
create_deterministic_function("CEILING", 1, none_guard(math.ceil))
create_deterministic_function("COS", 1, none_guard(math.cos))
create_deterministic_function("COT", 1, none_guard(lambda x: 1 / math.tan(x)))
create_deterministic_function("DEGREES", 1, none_guard(math.degrees))
create_deterministic_function("EXP", 1, none_guard(math.exp))
create_deterministic_function("FLOOR", 1, none_guard(math.floor))
create_deterministic_function("LN", 1, none_guard(math.log))
create_deterministic_function("LOG", 2, none_guard(lambda x, y: math.log(y, x)))
create_deterministic_function("LPAD", 3, _sqlite_lpad)
create_deterministic_function(
"MD5", 1, none_guard(lambda x: hashlib.md5(x.encode()).hexdigest())
)
create_deterministic_function("MOD", 2, none_guard(math.fmod))
create_deterministic_function("PI", 0, lambda: math.pi)
create_deterministic_function("POWER", 2, none_guard(operator.pow))
create_deterministic_function("RADIANS", 1, none_guard(math.radians))
create_deterministic_function("REPEAT", 2, none_guard(operator.mul))
create_deterministic_function("REVERSE", 1, none_guard(lambda x: x[::-1]))
create_deterministic_function("RPAD", 3, _sqlite_rpad)
create_deterministic_function(
"SHA1", 1, none_guard(lambda x: hashlib.sha1(x.encode()).hexdigest())
)
create_deterministic_function(
"SHA224", 1, none_guard(lambda x: hashlib.sha224(x.encode()).hexdigest())
)
create_deterministic_function(
"SHA256", 1, none_guard(lambda x: hashlib.sha256(x.encode()).hexdigest())
)
create_deterministic_function(
"SHA384", 1, none_guard(lambda x: hashlib.sha384(x.encode()).hexdigest())
)
create_deterministic_function(
"SHA512", 1, none_guard(lambda x: hashlib.sha512(x.encode()).hexdigest())
)
create_deterministic_function(
"SIGN", 1, none_guard(lambda x: (x > 0) - (x < 0))
)
create_deterministic_function("SIN", 1, none_guard(math.sin))
create_deterministic_function("SQRT", 1, none_guard(math.sqrt))
create_deterministic_function("TAN", 1, none_guard(math.tan))
# Don't use the built-in RANDOM() function because it returns a value
# in the range [-1 * 2^63, 2^63 - 1] instead of [0, 1).
conn.create_function("RAND", 0, random.random)
conn.create_aggregate("STDDEV_POP", 1, list_aggregate(statistics.pstdev))
conn.create_aggregate("STDDEV_SAMP", 1, list_aggregate(statistics.stdev))
conn.create_aggregate("VAR_POP", 1, list_aggregate(statistics.pvariance))
conn.create_aggregate("VAR_SAMP", 1, list_aggregate(statistics.variance))
conn.execute("PRAGMA foreign_keys = ON")
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
@async_unsafe
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ""
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
with self.cursor() as cursor:
cursor.execute("PRAGMA foreign_keys = OFF")
# Foreign key constraints cannot be turned off while in a multi-
# statement transaction. Fetch the current state of the pragma
# to determine if constraints are effectively disabled.
enabled = cursor.execute("PRAGMA foreign_keys").fetchone()[0]
return not bool(enabled)
def enable_constraint_checking(self):
with self.cursor() as cursor:
cursor.execute("PRAGMA foreign_keys = ON")
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
if self.features.supports_pragma_foreign_key_check:
with self.cursor() as cursor:
if table_names is None:
violations = cursor.execute("PRAGMA foreign_key_check").fetchall()
else:
violations = chain.from_iterable(
cursor.execute(
"PRAGMA foreign_key_check(%s)"
% self.ops.quote_name(table_name)
).fetchall()
for table_name in table_names
)
# See https://www.sqlite.org/pragma.html#pragma_foreign_key_check
for (
table_name,
rowid,
referenced_table_name,
foreign_key_index,
) in violations:
foreign_key = cursor.execute(
"PRAGMA foreign_key_list(%s)" % self.ops.quote_name(table_name)
).fetchall()[foreign_key_index]
column_name, referenced_column_name = foreign_key[3:5]
primary_key_column_name = self.introspection.get_primary_key_column(
cursor, table_name
)
primary_key_value, bad_value = cursor.execute(
"SELECT %s, %s FROM %s WHERE rowid = %%s"
% (
self.ops.quote_name(primary_key_column_name),
self.ops.quote_name(column_name),
self.ops.quote_name(table_name),
),
(rowid,),
).fetchone()
raise IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s."
% (
table_name,
primary_key_value,
table_name,
column_name,
bad_value,
referenced_table_name,
referenced_column_name,
)
)
else:
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(
cursor, table_name
)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for (
column_name,
referenced_table_name,
referenced_column_name,
) in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name,
column_name,
table_name,
referenced_table_name,
column_name,
referenced_column_name,
column_name,
referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s."
% (
table_name,
bad_row[0],
table_name,
column_name,
bad_row[1],
referenced_table_name,
referenced_column_name,
)
)
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict["NAME"])
FORMAT_QMARK_REGEX = _lazy_re_compile(r"(?<!%)%s")
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%")
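# Illustrative example (not part of Django): convert_query() rewrites
# "format"-style placeholders into the "qmark" style sqlite3 expects, while a
# doubled %% collapses back to a literal %:
#   "SELECT * FROM t WHERE a = %s AND b LIKE '100%%'"
#   -> "SELECT * FROM t WHERE a = ? AND b LIKE '100%'"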
def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (TypeError, ValueError):
return None
if conn_tzname:
dt = dt.replace(tzinfo=timezone_constructor(conn_tzname))
if tzname is not None and tzname != conn_tzname:
tzname, sign, offset = backend_utils.split_tzname_delta(tzname)
if offset:
hours, minutes = offset.split(":")
offset_delta = datetime.timedelta(hours=int(hours), minutes=int(minutes))
dt += offset_delta if sign == "+" else -offset_delta
dt = timezone.localtime(dt, timezone_constructor(tzname))
return dt
def _sqlite_date_trunc(lookup_type, dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "year":
return "%i-01-01" % dt.year
elif lookup_type == "quarter":
month_in_quarter = dt.month - (dt.month - 1) % 3
return "%i-%02i-01" % (dt.year, month_in_quarter)
elif lookup_type == "month":
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == "week":
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
elif lookup_type == "day":
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt, tzname, conn_tzname):
if dt is None:
return None
dt_parsed = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt_parsed is None:
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
else:
dt = dt_parsed
if lookup_type == "hour":
return "%02i:00:00" % dt.hour
elif lookup_type == "minute":
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == "second":
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_cast_date(dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "week_day":
return (dt.isoweekday() % 7) + 1
elif lookup_type == "iso_week_day":
return dt.isoweekday()
elif lookup_type == "week":
return dt.isocalendar()[1]
elif lookup_type == "quarter":
return math.ceil(dt.month / 3)
elif lookup_type == "iso_year":
return dt.isocalendar()[0]
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "year":
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == "quarter":
month_in_quarter = dt.month - (dt.month - 1) % 3
return "%i-%02i-01 00:00:00" % (dt.year, month_in_quarter)
elif lookup_type == "month":
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == "week":
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == "day":
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == "hour":
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == "minute":
return "%i-%02i-%02i %02i:%02i:00" % (
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
)
elif lookup_type == "second":
return "%i-%02i-%02i %02i:%02i:%02i" % (
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_prepare_dtdelta_param(conn, param):
if conn in ["+", "-"]:
if isinstance(param, int):
return datetime.timedelta(0, 0, param)
else:
return backend_utils.typecast_timestamp(param)
return param
@none_guard
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a datetime
- A scalar value, e.g. float
"""
conn = conn.strip()
try:
real_lhs = _sqlite_prepare_dtdelta_param(conn, lhs)
real_rhs = _sqlite_prepare_dtdelta_param(conn, rhs)
except (ValueError, TypeError):
return None
if conn == "+":
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
out = str(real_lhs + real_rhs)
elif conn == "-":
out = str(real_lhs - real_rhs)
elif conn == "*":
out = real_lhs * real_rhs
else:
out = real_lhs / real_rhs
return out
@none_guard
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000)
+ (left.minute * 60 * 1000000)
+ (left.second * 1000000)
+ (left.microsecond)
- (right.hour * 60 * 60 * 1000000)
- (right.minute * 60 * 1000000)
- (right.second * 1000000)
- (right.microsecond)
)
@none_guard
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return duration_microseconds(left - right)
@none_guard
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string)))
@none_guard
def _sqlite_lpad(text, length, fill_text):
if len(text) >= length:
return text[:length]
return (fill_text * length)[: length - len(text)] + text
@none_guard
def _sqlite_rpad(text, length, fill_text):
return (text + fill_text * length)[:length]
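# Illustrative example (not part of Django): _sqlite_lpad("7", 3, "0") returns
# "007" and _sqlite_rpad("7", 3, "0") returns "700", mirroring the SQL
# LPAD/RPAD functions registered above.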
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: altheim
# created: 2020-03-31
# modified: 2021-02-28
#
from colorama import init, Fore, Style
init()
try:
from gpiozero import MotionSensor
print('successfully imported gpiozero.')
except Exception:
print('unable to import gpiozero.')
from lib.logger import Logger, Level
from lib.event import Event
from lib.message import Message
# ..............................................................................
class MotionDetector():
'''
Uses an infrared PIR sensor to scan for cats, or other causes of motion.
'''
def __init__(self, config, message_factory, message_bus, level):
self._log = Logger('motion-detect', Level.INFO)
if config is None:
raise ValueError('no configuration provided.')
self._message_factory = message_factory
self._message_bus = message_bus
_config = config['ros'].get('motion_detect')
_pin = _config.get('pin')
_threshold_value = 0.1 # the value above which the device will be considered “on”
# set up pin where PIR is connected
self._sensor = MotionSensor(_pin, threshold=_threshold_value, pull_up=False)
self._sensor.when_motion = self._activated
self._sensor.when_no_motion = self._deactivated
self._disabling = False
self._enabled = False
self._closed = False
# arm behaviour
self._arm_movement_degree_step = 5.0
self._arm_up_delay = 0.09
self._arm_down_delay = 0.04
self._log.info('ready.')
# ..........................................................................
def _activated(self):
'''
The default function called when the sensor is activated.
'''
if self._enabled:
self._log.info(Fore.YELLOW + 'detected motion!')
self._message_bus.handle(self._message_factory.get_message(Event.MOTION_DETECT, True))
else:
self._log.info('motion detector not enabled.')
# ..........................................................................
def _deactivated(self):
'''
The default function called when the sensor is deactivated.
'''
if self._enabled:
self._log.info('deactivated motion detector.')
else:
self._log.debug('motion detector not enabled.')
# ..........................................................................
def enable(self):
self._log.debug('enabling...')
if self._closed:
self._log.warning('cannot enable: closed.')
return
self._enabled = True
self._log.debug('enabled.')
# ..........................................................................
def disable(self):
if self._disabling:
self._log.warning('already disabling.')
else:
self._disabling = True
self._enabled = False
self._log.debug('disabling...')
self._disabling = False
self._log.debug('disabled.')
# ..........................................................................
def close(self):
self.disable()
self._closed = True
#EOF
|
"""
This Python file contains database objects
and schema for CFLTools.
"""
from sqlalchemy import Column, ForeignKey, Integer, \
String, DateTime, Boolean, \
create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from cfltools.utilities import log_generator
# Instantiate the logger.
logger = log_generator(__name__)
# Define the sqlalchemy base class.
Base = declarative_base()
BaseSession = sessionmaker()
# Define global vars
# Lengths of various string fields.
# TODO: Adjust these lengths to be sensible.
IPv4_ADDR_LEN = 250
IPv6_ADDR_LEN = 250
INCIDENT_ID_LEN = 250
INCIDENT_NAME_LEN = 250
COUNTRY_CODE_LEN = 250
COUNTRY_LEN = 250
ASN_LEN = 250
ASN_DESC_LEN = 250
FOLDER_LOC_LEN = 500
DESC_LEN = 1000
SHORT_DESC_LEN = 1000
PHONE_LEN = 250
EMAIL_LEN = 250
MD5_LEN = 250
# Configuration file.
class Incident(Base):
"""
Database object to store information about
specific incidents.
"""
__tablename__ = 'incidents'
id = Column(Integer, primary_key=True)
incident_id = Column(Integer, unique=True)
incident_name = Column(String(INCIDENT_NAME_LEN))
folder_loc = Column(String(FOLDER_LOC_LEN))
description = Column(String(DESC_LEN))
# An incident is related to many IP Addresses.
ipaddrs = relationship("IPAddr", back_populates="incident")
    def __repr__(self):
        return (
            "<Incident(incident_id={}, incident_name={}, folder_loc={}, "
            "description={})>"
        ).format(self.incident_id, self.incident_name, self.folder_loc,
                 self.description)
class IPAddr(Base):
"""
Database object to store all IP addresses seen by the
system.
IP addresses may appear multiple times in the database.
    Each row represents one unique IP that appeared in one
    incident. If an IP appears in multiple incidents, it is
    listed once per incident, together with the number of
    occurrences in the logs related to that incident.
"""
__tablename__ = 'ipaddrs'
id = Column(Integer, primary_key=True)
ipv4 = Column(String(IPv4_ADDR_LEN))
ipv6 = Column(String(IPv6_ADDR_LEN))
number_occurances = Column(Integer)
start_time = Column(DateTime)
end_time = Column(DateTime)
whois_done = Column(Boolean, default=False)
is_tor_exit_node = Column(Boolean, default=False)
# An IP address is related to an ISP.
asn = Column(Integer, ForeignKey('isp.asn'))
isp = relationship("ISP", back_populates="ipaddrs")
# An IP address is related to an incident.
incident_id = Column(String(INCIDENT_ID_LEN), ForeignKey('incidents.incident_id'))
incident = relationship("Incident", back_populates="ipaddrs")
    def __repr__(self):
        return (
            "<IPAddr(ipv4={}, ipv6={}, number_occurances={}, incident_id={}, "
            "start_time={}, end_time={}, asn={}, "
            "whois_done={}, is_tor_exit_node={})>"
        ).format(self.ipv4, self.ipv6, self.number_occurances,
                 self.incident_id, self.start_time, self.end_time,
                 self.asn, self.whois_done, self.is_tor_exit_node)
class ISP(Base):
"""
Database to locally track whois data obtained from
previous queries. This is so that we don't have to
tax APIs with whois queries, and so that we can add
in LEO contact information for those ISPs that publish
it.
"""
__tablename__ = 'isp'
id = Column(Integer, primary_key=True)
asn = Column(Integer, unique=True)
description = Column(String(DESC_LEN))
contact_name = Column(String(SHORT_DESC_LEN))
online_service = Column(String(SHORT_DESC_LEN))
online_attn = Column(String(SHORT_DESC_LEN))
online_serv_address = Column(String(DESC_LEN))
phone = Column(String(PHONE_LEN))
fax = Column(String(PHONE_LEN))
email = Column(String(EMAIL_LEN))
notes = Column(String(DESC_LEN))
req_nda = Column(Boolean)
# An ASN is related to many IP addresses.
ipaddrs = relationship("IPAddr", back_populates="isp")
    def __repr__(self):
        return "<ISP(asn={}, online_service={})>".format(
            self.asn, self.online_service)
class SeenFile(Base):
"""
Database object to store information about an
already seen file. We'll use this later to avoid
double or redundant imports.
"""
__tablename__ = 'seenfiles'
id = Column(Integer, primary_key=True)
filename = Column(String(FOLDER_LOC_LEN))
md5 = Column(String(MD5_LEN))
incident_id = Column(Integer, unique=True)
    def __repr__(self):
        return "<SeenFile(filename={})>".format(self.filename)
def makesession(db_file=None):
"""
Creates a database session.
If the database parameter is not given, it will create a new
database in memory for testing.
"""
try:
if db_file is None:
logger.warning("Instantiating an in-memory-only database for testing.")
db_file = '/:memory:'
else:
from pathlib import Path
db_file = '/' + Path(db_file).as_posix()
logger.debug("Loading database: sqlite://%s", db_file)
engine = create_engine('sqlite://' + db_file, echo=False)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
    except Exception:
        logger.critical("Failed to instantiate database sqlite://%s", db_file)
        raise
    return session
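# Illustrative usage sketch (not part of the original module); the database
# path and the example record are hypothetical.
def _example_makesession_usage():
    session = makesession('/tmp/cfltools_example.sqlite')
    session.add(Incident(incident_id=1, incident_name='example incident'))
    session.commit()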
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The BitRub Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
import enum
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import hex_str_to_bytes
from . import segwit_addr
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
class AddressType(enum.Enum):
bech32 = 'bech32'
p2sh_segwit = 'p2sh-segwit'
legacy = 'legacy' # P2PKH
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(hex_str_to_bytes(str)).hex()
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert len(hash) == 20
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert len(hash) == 20
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
# Version number typically updated by running `invoke set_version <version>`.
# Run `invoke --help set_version` or see tasks.py for details.
VERSION = '2.9.3.dev20151009'
def get_version(naked=False):
if naked:
return re.split('(a|b|rc|.dev)', VERSION)[0]
return VERSION
def get_full_version(program=None, naked=False):
version = '%s %s (%s %s on %s)' % (program or '',
get_version(naked),
get_interpreter(),
sys.version.split()[0],
sys.platform)
return version.strip()
def get_interpreter():
if sys.platform.startswith('java'):
return 'Jython'
if sys.platform == 'cli':
return 'IronPython'
if 'PyPy' in sys.version:
return 'PyPy'
return 'Python'
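# Illustrative example (not part of the original module): with the VERSION
# above, get_version(naked=True) strips the pre-release suffix and returns
# '2.9.3', while get_version() returns the full '2.9.3.dev20151009'.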
|
# Generated by Django 2.2.13 on 2022-01-21 06:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('story', '0008_auto_20220121_1051'),
]
operations = [
migrations.RemoveField(
model_name='stories',
name='duration_raw',
),
]
|
# Generated by Django 3.1.2 on 2020-11-03 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patients', '0007_auto_20201103_1421'),
]
operations = [
migrations.AlterField(
model_name='patient',
name='able_to_walk_alone',
field=models.CharField(choices=[('Yes', 'Yes'), ('No', 'No')], max_length=225),
),
]
|
###
#
# Full history: see below
#
# Version: 1.0.0
# Date: 2020-04-15
# Author: Yves Vindevogel (vindevoy)
#
###
import cherrypy
import logging
import logging.config
import os
import yaml
from common.options import Options
# https://stackoverflow.com/questions/41879512/cherrypy-is-not-respecting-desired-logging-format
class LoggingLoader:
    # Nothing is logged here: logging itself is being configured, so no logger exists yet
@staticmethod
def configure():
cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
# DO NOT USE Content() here, it's not ready
settings_file = os.path.join(Options().data_dir, 'logging', 'settings.yml')
        with open(settings_file, 'r') as settings_fh:
            settings_yaml = yaml.load(settings_fh.read(), Loader=yaml.SafeLoader)
logging.config.dictConfig(settings_yaml)
Options().default_logging_level = settings_yaml['loggers']['']['level']
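# Illustrative sketch (not part of the original module): a minimal
# data/logging/settings.yml in logging.config.dictConfig format that the
# loader above would accept; the handler and formatter names are hypothetical.
#
#   version: 1
#   formatters:
#     simple:
#       format: '%(asctime)s %(levelname)s %(name)s %(message)s'
#   handlers:
#     console:
#       class: logging.StreamHandler
#       formatter: simple
#   loggers:
#     '':
#       level: INFO
#       handlers: [console]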
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='keras-trainer',
version='1.2.5',
description='A training abstraction for Keras models.',
author='Triage Technologies Inc.',
author_email='ai@triage.com',
url='https://www.triage.com/',
packages=find_packages(exclude=['tests', '.cache', '.venv', '.git', 'dist']),
install_requires=[
'Keras==2.2.4',
'h5py',
'pandas',
'Pillow',
'keras-model-specs',
'sklearn'
]
)
|
import decimal
import sys
from eth_utils import (
big_endian_to_int,
decode_hex,
int_to_big_endian,
to_normalized_address,
)
from eth_utils.toolz import (
complement,
)
from hypothesis import (
example,
given,
settings,
strategies as st,
)
import pytest
from eth_abi.constants import (
TT256M1,
)
from eth_abi.decoding import (
ContextFramesBytesIO,
UnsignedIntegerDecoder,
SignedIntegerDecoder,
UnsignedRealDecoder,
SignedRealDecoder,
UnsignedFixedDecoder,
SignedFixedDecoder,
StringDecoder,
BytesDecoder,
TupleDecoder,
BooleanDecoder,
AddressDecoder,
DynamicArrayDecoder,
)
from eth_abi.exceptions import (
InsufficientDataBytes,
NonEmptyPaddingBytes,
)
from eth_abi.registry import registry
from eth_abi.utils.padding import (
zpad32,
)
from eth_abi.utils.numeric import (
abi_decimal_context,
compute_signed_integer_bounds,
quantize_value,
ceil32,
)
def is_non_empty_non_null_byte_string(value):
return value and big_endian_to_int(value) != 0
def is_valid_padding_bytes(padding_bytes, data_bytes):
# Empty padding is always valid
if len(padding_bytes) == 0:
return True
leading_data_bit_is_one = (data_bytes[0] & 0b10000000) == 0b10000000
if leading_data_bit_is_one:
# All padding bits must be 1
if padding_bytes.replace(b'\xff', b'') == b'':
return True
else:
# All padding bits must be 0
if padding_bytes.replace(b'\x00', b'') == b'':
return True
return False
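# For example, a negative one-byte value such as 0x80 read out of two data
# bytes must be sign-extended with 0xff padding (b'\xff\x80'), while a
# non-negative value such as 0x7f uses 0x00 padding (b'\x00\x7f'); any other
# padding is rejected by the decoders as NonEmptyPaddingBytes.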
def all_bytes_equal(test_bytes, target):
if sys.version_info.major < 3:
return all(byte == chr(target) for byte in test_bytes)
else:
return all(byte == target for byte in test_bytes)
@settings(max_examples=250)
@given(
integer_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
def test_decode_unsigned_int(integer_bit_size, stream_bytes, data_byte_size):
if integer_bit_size % 8 != 0:
with pytest.raises(ValueError):
UnsignedIntegerDecoder(
value_bit_size=integer_bit_size,
data_byte_size=data_byte_size,
)
return
elif integer_bit_size > data_byte_size * 8:
with pytest.raises(ValueError):
UnsignedIntegerDecoder(
value_bit_size=integer_bit_size,
data_byte_size=data_byte_size,
)
return
else:
decoder = UnsignedIntegerDecoder(
value_bit_size=integer_bit_size,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
actual_value = big_endian_to_int(stream_bytes[:data_byte_size])
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
elif actual_value > 2 ** integer_bit_size - 1:
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
else:
decoded_value = decoder(stream)
assert decoded_value == actual_value
@settings(max_examples=250)
@given(
integer_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
@example(8, b'\x00\x80', 2)
@example(8, b'\xff\xff', 2)
def test_decode_signed_int(integer_bit_size, stream_bytes, data_byte_size):
if integer_bit_size % 8 != 0:
with pytest.raises(ValueError):
SignedIntegerDecoder(
value_bit_size=integer_bit_size,
data_byte_size=data_byte_size,
)
return
elif integer_bit_size > data_byte_size * 8:
with pytest.raises(ValueError):
SignedIntegerDecoder(
value_bit_size=integer_bit_size,
data_byte_size=data_byte_size,
)
return
else:
decoder = SignedIntegerDecoder(
value_bit_size=integer_bit_size,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
padding_bytes = data_byte_size - integer_bit_size // 8
raw_value = big_endian_to_int(stream_bytes[padding_bytes:data_byte_size])
if raw_value >= 2 ** (integer_bit_size - 1):
actual_value = raw_value - 2 ** integer_bit_size
else:
actual_value = raw_value
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
elif (
(actual_value >= 0 and not all_bytes_equal(stream_bytes[:padding_bytes], 0)) or
(actual_value < 0 and not all_bytes_equal(stream_bytes[:padding_bytes], 255))
):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
else:
decoded_value = decoder(stream)
assert decoded_value == actual_value
@settings(max_examples=250)
@given(
string_bytes=st.binary(min_size=0, max_size=256),
pad_size=st.integers(min_value=0, max_value=32),
)
def test_decode_bytes_and_string(string_bytes, pad_size):
size_bytes = zpad32(int_to_big_endian(len(string_bytes)))
padded_string_bytes = string_bytes + b'\x00' * pad_size
stream_bytes = size_bytes + padded_string_bytes
stream = ContextFramesBytesIO(stream_bytes)
decoder = StringDecoder()
if len(padded_string_bytes) < ceil32(len(string_bytes)):
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
decoded_value = decoder(stream)
assert decoded_value == string_bytes
@settings(max_examples=250)
@given(
stream_bytes=st.binary(min_size=1, max_size=32),
data_byte_size=st.integers(min_value=1, max_value=32),
)
def test_decode_boolean(stream_bytes, data_byte_size):
stream = ContextFramesBytesIO(stream_bytes)
decoder = BooleanDecoder(data_byte_size=data_byte_size)
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
padding_bytes = stream_bytes[:data_byte_size][:-1]
if is_non_empty_non_null_byte_string(padding_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
byte_value = stream_bytes[data_byte_size - 1]
if byte_value in {0, b'\x00'}:
actual_value = False
elif byte_value in {1, b'\x01'}:
actual_value = True
else:
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
decoded_value = decoder(stream)
assert decoded_value is actual_value
@settings(max_examples=250)
@given(
value_byte_size=st.integers(min_value=1, max_value=32),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
def test_decode_bytes_xx(value_byte_size, stream_bytes, data_byte_size):
if value_byte_size > data_byte_size:
with pytest.raises(ValueError):
BytesDecoder(
value_bit_size=value_byte_size * 8,
data_byte_size=data_byte_size,
)
return
else:
decoder = BytesDecoder(
value_bit_size=value_byte_size * 8,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
actual_value = stream_bytes[:value_byte_size]
padding_bytes = stream_bytes[value_byte_size:data_byte_size]
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
elif is_non_empty_non_null_byte_string(padding_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
else:
decoded_value = decoder(stream)
assert decoded_value == actual_value
@settings(max_examples=250)
@given(
address_bytes=st.binary(min_size=0, max_size=32),
padding_size=st.integers(min_value=10, max_value=14),
data_byte_size=st.integers(min_value=0, max_value=32),
)
def test_decode_address(address_bytes, padding_size, data_byte_size):
stream_bytes = b'\x00' * padding_size + address_bytes
if data_byte_size < 20:
with pytest.raises(ValueError):
AddressDecoder(
data_byte_size=data_byte_size,
)
return
else:
decoder = AddressDecoder(
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
padding_bytes = stream_bytes[:data_byte_size][:-20]
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
elif is_non_empty_non_null_byte_string(padding_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
else:
decoded_value = decoder(stream)
actual_value = to_normalized_address(stream_bytes[:data_byte_size][-20:])
assert decoded_value == actual_value
@settings(max_examples=250)
@given(
array_size=st.integers(min_value=0, max_value=32),
array_values=st.lists(st.integers(min_value=0, max_value=TT256M1), min_size=0, max_size=64).map(tuple),
)
def test_decode_array_of_unsigned_integers(array_size, array_values):
size_bytes = zpad32(int_to_big_endian(array_size))
values_bytes = b''.join((
zpad32(int_to_big_endian(v)) for v in array_values
))
stream_bytes = size_bytes + values_bytes
decoder = DynamicArrayDecoder(
item_decoder=UnsignedIntegerDecoder(value_bit_size=256),
)
stream = ContextFramesBytesIO(stream_bytes)
if len(array_values) < array_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
actual_values = decoder(stream)
assert actual_values == array_values[:array_size]
@pytest.mark.parametrize(
'types,data,expected',
(
(
('address', 'uint256'),
(
'0x'
'000000000000000000000000abf7d8b5c1322b3e553d2fac90ff006c30f1b875'
'0000000000000000000000000000000000000000000000000000005d21dba000'
),
('0xabf7d8b5c1322b3e553d2fac90ff006c30f1b875', 400000000000)
),
(
('uint256', 'bytes'),
(
'0x'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000040'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
),
(0, b''),
),
),
)
def test_tuple_decoder(types, data, expected):
decoders = [registry.get_decoder(t) for t in types]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(decode_hex(data))
actual = decoder(stream)
assert actual == expected
@settings(max_examples=250)
@given(
high_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
low_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
integer_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
def test_decode_unsigned_real(high_bit_size,
low_bit_size,
integer_bit_size,
stream_bytes,
data_byte_size):
if integer_bit_size > data_byte_size * 8:
with pytest.raises(ValueError):
UnsignedRealDecoder(
value_bit_size=integer_bit_size,
high_bit_size=high_bit_size,
low_bit_size=low_bit_size,
data_byte_size=data_byte_size,
)
return
elif high_bit_size + low_bit_size != integer_bit_size:
with pytest.raises(ValueError):
UnsignedRealDecoder(
value_bit_size=integer_bit_size,
high_bit_size=high_bit_size,
low_bit_size=low_bit_size,
data_byte_size=data_byte_size,
)
return
else:
decoder = UnsignedRealDecoder(
value_bit_size=integer_bit_size,
high_bit_size=high_bit_size,
low_bit_size=low_bit_size,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
padding_bytes = stream_bytes[:data_byte_size][:data_byte_size - integer_bit_size // 8]
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
elif is_non_empty_non_null_byte_string(padding_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
else:
decoded_value = decoder(stream)
unsigned_integer_value = big_endian_to_int(stream_bytes[:data_byte_size])
with decimal.localcontext(abi_decimal_context):
raw_real_value = decimal.Decimal(unsigned_integer_value) / 2 ** low_bit_size
actual_value = quantize_value(raw_real_value, low_bit_size)
assert decoded_value == actual_value
@settings(max_examples=250)
@given(
high_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
low_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
integer_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
@example(
high_bit_size=8,
low_bit_size=8,
integer_bit_size=16,
stream_bytes=b'\xff\xff\xff\xa9\xf5\xb3',
data_byte_size=3,
)
def test_decode_signed_real(high_bit_size,
low_bit_size,
integer_bit_size,
stream_bytes,
data_byte_size):
if integer_bit_size > data_byte_size * 8:
with pytest.raises(ValueError):
SignedRealDecoder(
value_bit_size=integer_bit_size,
high_bit_size=high_bit_size,
low_bit_size=low_bit_size,
data_byte_size=data_byte_size,
)
return
elif high_bit_size + low_bit_size != integer_bit_size:
with pytest.raises(ValueError):
SignedRealDecoder(
value_bit_size=integer_bit_size,
high_bit_size=high_bit_size,
low_bit_size=low_bit_size,
data_byte_size=data_byte_size,
)
return
else:
decoder = SignedRealDecoder(
value_bit_size=integer_bit_size,
high_bit_size=high_bit_size,
low_bit_size=low_bit_size,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
padding_offset = data_byte_size - integer_bit_size // 8
data_offset = padding_offset + integer_bit_size // 8
padding_bytes = stream_bytes[:data_byte_size][:padding_offset]
data_bytes = stream_bytes[:data_byte_size][padding_offset:data_offset]
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
elif not is_valid_padding_bytes(padding_bytes, data_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
else:
decoded_value = decoder(stream)
if padding_bytes:
if decoded_value >= 0:
assert bytes(set(padding_bytes)) == b'\x00'
else:
assert bytes(set(padding_bytes)) == b'\xff'
_, upper_bound = compute_signed_integer_bounds(high_bit_size + low_bit_size)
unsigned_integer_value = big_endian_to_int(data_bytes)
if unsigned_integer_value >= upper_bound:
signed_integer_value = unsigned_integer_value - 2 ** (high_bit_size + low_bit_size)
else:
signed_integer_value = unsigned_integer_value
with decimal.localcontext(abi_decimal_context):
raw_actual_value = decimal.Decimal(signed_integer_value) / 2 ** low_bit_size
actual_value = quantize_value(raw_actual_value, low_bit_size)
assert decoded_value == actual_value
@settings(max_examples=250)
@given(
value_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
frac_places=st.integers(min_value=1, max_value=80),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
def test_decode_unsigned_fixed(value_bit_size,
frac_places,
stream_bytes,
data_byte_size):
if value_bit_size > data_byte_size * 8:
with pytest.raises(ValueError):
UnsignedFixedDecoder(
value_bit_size=value_bit_size,
frac_places=frac_places,
data_byte_size=data_byte_size,
)
return
decoder = UnsignedFixedDecoder(
value_bit_size=value_bit_size,
frac_places=frac_places,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
padding_bytes = stream_bytes[:data_byte_size][:data_byte_size - value_bit_size // 8]
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
if is_non_empty_non_null_byte_string(padding_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
# Ensure no exceptions
actual_value = decoder(stream)
@settings(max_examples=250)
@given(
value_bit_size=st.integers(min_value=1, max_value=32).map(lambda v: v * 8),
frac_places=st.integers(min_value=1, max_value=80),
stream_bytes=st.binary(min_size=0, max_size=32),
data_byte_size=st.integers(min_value=0, max_value=32),
)
def test_decode_signed_fixed(value_bit_size,
frac_places,
stream_bytes,
data_byte_size):
if value_bit_size > data_byte_size * 8:
with pytest.raises(ValueError):
SignedFixedDecoder(
value_bit_size=value_bit_size,
frac_places=frac_places,
data_byte_size=data_byte_size,
)
return
decoder = SignedFixedDecoder(
value_bit_size=value_bit_size,
frac_places=frac_places,
data_byte_size=data_byte_size,
)
stream = ContextFramesBytesIO(stream_bytes)
padding_offset = data_byte_size - value_bit_size // 8
data_offset = padding_offset + value_bit_size // 8
padding_bytes = stream_bytes[:data_byte_size][:padding_offset]
data_bytes = stream_bytes[:data_byte_size][padding_offset:data_offset]
if len(stream_bytes) < data_byte_size:
with pytest.raises(InsufficientDataBytes):
decoder(stream)
return
if not is_valid_padding_bytes(padding_bytes, data_bytes):
with pytest.raises(NonEmptyPaddingBytes):
decoder(stream)
return
actual_value = decoder(stream)
if padding_bytes:
if actual_value >= 0:
assert bytes(set(padding_bytes)) == b'\x00'
else:
assert bytes(set(padding_bytes)) == b'\xff'
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import curve_fit
def exp_func(x, a, b):
#e = 1.602176634e-19
#k = 8.617333262145e-5
#T = 3.00e2
return a * np.exp(b * np.double(x))
#return a * np.exp(- e * np.double(x) / b / k / T)
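# Note (illustrative, not from the original script): the fit model a*exp(b*V) is matched
# against the Shockley diode law I = I_0*exp(q*V/(eta*k_B*T)), so the ideality
# ("nonideality") factor is recovered as eta = q/(k_B*T*b).  With k_B expressed in eV/K
# the elementary charge cancels (q = 1), which is why e is set to 1 in the fitting loops
# below.  For example, at T ~= 298 K, k_B*T ~= 0.0257 eV, so a fitted slope b ~= 19.5 1/V
# corresponds to eta ~= 2.0.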
specimen_name = '48_2_7-6'
input_filename = 'Sh_48_2_7-6_08079.csv'
#specimen_name = '48_3_1_1'
#input_filename = 'Sh_48_3_1_1_01079.csv'
positive_diapason_tuples = [(0.5, 0.8), (0.8, 1.25), (1.25, 2.0)]#, (0.55, 0.65)]
negative_diapason_tuples = [(0.3, 0.48), (0.5, 1.0)]
df = pd.read_csv(input_filename)
#{'Voltage, V': volt_vals, 'Current, A': cur_vals})
#df.to_csv('Sh_48_3_2_1_01079_full_high.csv')
cur_vals = df['Current, A'].to_list()
volt_vals = df['Voltage, V'].to_list()
print(cur_vals)
print(volt_vals)
#separate the positive branch of the IV-characteristic from the negative one
pos_cur_vals, pos_volt_vals = zip(*[(j, v) for (j, v) in zip(cur_vals, volt_vals)
if v >= 0.])
#now let's invert and fit the negative part of IV-characteristic
neg_cur_vals, neg_volt_vals = zip(*[(-j, -v) for (j, v) in zip(cur_vals, volt_vals)
if v <= 0.])
neg_cur_vals = list(neg_cur_vals)
neg_volt_vals = list(neg_volt_vals)
pos_cur_vals = list(pos_cur_vals)
pos_volt_vals = list(pos_volt_vals)
#plt.plot(volt_vals, cur_vals, 'go', linewidth=1, markersize=1.5, label='exp')
fig = plt.figure(figsize=(15, 20))
plt.plot(pos_volt_vals, pos_cur_vals, 'go', linewidth=1, markersize=1.5, label='positive branch')
plt.plot(neg_volt_vals, neg_cur_vals, 'bo', linewidth=1, markersize=1.5, label='negative inverted')
opt_parameters = []
#plt.plot(volt_vals, cur_vals, 'go', linewidth=1, markersize=1.5, label='exp')
for i in range(len(positive_diapason_tuples)):
    print('Diapason ' + str(i) + ' for fit')
    #select the voltage range (diapason) to fit
selected_cur_vals, selected_volt_vals = zip(*[(j, v) for (j, v) in zip(cur_vals, volt_vals)
if positive_diapason_tuples[i][0] <= v <= positive_diapason_tuples[i][1]])
#zip(cur_vals, volt_vals)[0]
selected_cur_vals = list(selected_cur_vals)
selected_volt_vals = list(selected_volt_vals)
popt, pcov = curve_fit(exp_func, selected_volt_vals, selected_cur_vals)
e = 1. # 1.602176634e-19 # C
k = 8.617333262145e-5 # eV/K
T = 273.15+25. # K
    print(popt)
    eta = np.double(e) / k / T / popt[1]
    print('Nonideality factor = ' + str(eta))
    opt_parameters.append([popt[0]*1e3, eta])#(popt)
    print(pcov)
    perr = np.sqrt(np.diag(pcov))
    print(perr)
x_fit = np.arange(min(selected_volt_vals), max(selected_volt_vals), 0.01) #np.arange(min(volt_vals), max(volt_vals), 0.01)
y_fit = exp_func(x_fit, popt[0], popt[1])
plt.plot(x_fit, y_fit, 'r-', linewidth=1, label='fit pos '+str(i))
plt.plot(x_fit, y_fit, 'r-', linewidth=10, alpha=0.3)
#plt.xlabel('Voltage, V')
#plt.ylabel('Current, A')
plt.yscale('log')
#plt.grid()
#plt.legend()
#plt.show()
print('\nNegative part fitting\n')
for i in range(len(negative_diapason_tuples)):
    print('Diapason ' + str(i) + ' for fit')
    #select the voltage range (diapason) to fit
selected_cur_vals, selected_volt_vals = zip(*[(j, v) for (j, v) in zip(neg_cur_vals, neg_volt_vals)
if negative_diapason_tuples[i][0] <= v <= negative_diapason_tuples[i][1]])
#zip(cur_vals, volt_vals)[0]
selected_cur_vals = list(selected_cur_vals)
selected_volt_vals = list(selected_volt_vals)
popt, pcov = curve_fit(exp_func, selected_volt_vals, selected_cur_vals)
e = 1. # 1.602176634e-19 # C
k = 8.617333262145e-5 # eV/K
T = 273.15+25. # K
    print(popt)
    eta = np.double(e) / k / T / popt[1]
    print('Nonideality factor = ' + str(eta))
    opt_parameters.append([popt[0]*1e3, eta])
    print(pcov)
    perr = np.sqrt(np.diag(pcov))
    print(perr)
x_fit = np.arange(min(selected_volt_vals), max(selected_volt_vals), 0.01) #np.arange(min(volt_vals), max(volt_vals), 0.01)
y_fit = exp_func(x_fit, popt[0], popt[1])
plt.plot(x_fit, y_fit, 'r-', linewidth=1, label='fit neg '+str(i))
plt.plot(x_fit, y_fit, 'r-', linewidth=10, alpha=0.3)
plt.xlabel('Voltage, V')
plt.ylabel('Current, A')
plt.title('Specimen ' + specimen_name)
plt.grid()
plt.legend(loc='lower right')
plt.show()
fig.savefig('Specimen ' + specimen_name + '.png')
#df_opt_param = pd.DataFrame
negative_diapason_tuples = [(-j, -v) for (j, v) in negative_diapason_tuples]
#print positive_diapason_tuples + negative_diapason_tuples
#print np.array(opt_parameters)[:,0]
t = {'Diapason, V': positive_diapason_tuples + negative_diapason_tuples,
'I_0, mA': np.array(opt_parameters)[:,0],
'Nonideality factor': np.array(opt_parameters)[:,1]}
df_opt_param = pd.DataFrame(t)
print(df_opt_param)
filename = r'opt_params.csv'
erase_before_writing = False
with open(filename, 'a') as f:
if erase_before_writing:
f.truncate()
f.write('\nSpecimen ' + specimen_name + '\n\n')
df_opt_param.to_csv(f, header=True, index=False)
|
from dataclasses import dataclass
from typing import List
import betterproto
@dataclass(eq=False, repr=False)
class UpdateRequest(betterproto.Message):
recommendationid: int = betterproto.uint64_field(1)
review_text: str = betterproto.string_field(2)
voted_up: bool = betterproto.bool_field(3)
is_public: bool = betterproto.bool_field(4)
language: str = betterproto.string_field(5)
is_in_early_access: bool = betterproto.bool_field(6)
received_compensation: bool = betterproto.bool_field(7)
comments_disabled: bool = betterproto.bool_field(8)
@dataclass(eq=False, repr=False)
class UpdateResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetIndividualRecommendationsRequest(betterproto.Message):
requests: List["GetIndividualRecommendationsRequestRecommendationRequest"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetIndividualRecommendationsRequestRecommendationRequest(betterproto.Message):
steamid: int = betterproto.uint64_field(1)
appid: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetIndividualRecommendationsResponse(betterproto.Message):
recommendations: List["RecommendationDetails"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class RecommendationDetails(betterproto.Message):
recommendationid: int = betterproto.uint64_field(1)
steamid: int = betterproto.uint64_field(2)
appid: int = betterproto.uint32_field(3)
review: str = betterproto.string_field(4)
time_created: int = betterproto.uint32_field(5)
time_updated: int = betterproto.uint32_field(6)
votes_up: int = betterproto.uint32_field(7)
votes_down: int = betterproto.uint32_field(8)
vote_score: float = betterproto.float_field(9)
language: str = betterproto.string_field(10)
comment_count: int = betterproto.uint32_field(11)
voted_up: bool = betterproto.bool_field(12)
is_public: bool = betterproto.bool_field(13)
moderator_hidden: bool = betterproto.bool_field(14)
flagged_by_developer: int = betterproto.int32_field(15)
report_score: int = betterproto.uint32_field(16)
steamid_moderator: int = betterproto.uint64_field(17)
steamid_developer: int = betterproto.uint64_field(18)
steamid_dev_responder: int = betterproto.uint64_field(19)
developer_response: str = betterproto.string_field(20)
time_developer_responded: int = betterproto.uint32_field(21)
developer_flag_cleared: bool = betterproto.bool_field(22)
written_during_early_access: bool = betterproto.bool_field(23)
votes_funny: int = betterproto.uint32_field(24)
received_compensation: bool = betterproto.bool_field(25)
unverified_purchase: bool = betterproto.bool_field(26)
review_quality: int = betterproto.int32_field(27)
weighted_vote_score: float = betterproto.float_field(28)
moderation_note: str = betterproto.string_field(29)
payment_method: int = betterproto.int32_field(30)
playtime_2weeks: int = betterproto.int32_field(31)
playtime_forever: int = betterproto.int32_field(32)
last_playtime: int = betterproto.int32_field(33)
comments_disabled: bool = betterproto.bool_field(34)
playtime_at_review: int = betterproto.int32_field(35)
approved_for_china: bool = betterproto.bool_field(36)
ban_check_result: int = betterproto.int32_field(37)
refunded: bool = betterproto.bool_field(38)
account_score_spend: int = betterproto.int32_field(39)
reactions: List["RecommendationLoyaltyReaction"] = betterproto.message_field(40)
ipaddress: str = betterproto.string_field(41)
@dataclass(eq=False, repr=False)
class RecommendationLoyaltyReaction(betterproto.Message):
reaction_type: int = betterproto.uint32_field(1)
count: int = betterproto.uint32_field(2)
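# Illustrative usage sketch (assumes a betterproto runtime is installed): the generated
# messages are plain dataclasses that serialize to the protobuf wire format.
#
#     req = UpdateRequest(recommendationid=123, review_text="Great game", voted_up=True)
#     payload = bytes(req)                     # encode to wire-format bytes
#     roundtrip = UpdateRequest().parse(payload)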
|
"""This module contains the general information for MgmtEntity ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class MgmtEntityConsts():
CHASSIS_DEVICE_IO_STATE1_OK = "ok"
CHASSIS_DEVICE_IO_STATE1_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE1_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE1_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE1_WRITE_ERROR = "writeError"
CHASSIS_DEVICE_IO_STATE2_OK = "ok"
CHASSIS_DEVICE_IO_STATE2_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE2_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE2_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE2_WRITE_ERROR = "writeError"
CHASSIS_DEVICE_IO_STATE3_OK = "ok"
CHASSIS_DEVICE_IO_STATE3_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE3_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE3_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE3_WRITE_ERROR = "writeError"
HA_READY_FALSE = "false"
HA_READY_NO = "no"
HA_READY_TRUE = "true"
HA_READY_YES = "yes"
ID_A = "A"
ID_B = "B"
ID_NONE = "NONE"
ID_MGMT = "mgmt"
LEAD_ID_FOR_AUTO_INSTALL_A = "A"
LEAD_ID_FOR_AUTO_INSTALL_B = "B"
LEAD_ID_FOR_AUTO_INSTALL_NONE = "NONE"
LEAD_ID_FOR_AUTO_INSTALL_MGMT = "mgmt"
SSH_KEY_STATUS_MATCHED = "matched"
SSH_KEY_STATUS_MISMATCHED = "mismatched"
SSH_KEY_STATUS_NONE = "none"
VERSION_MISMATCH_FALSE = "false"
VERSION_MISMATCH_NO = "no"
VERSION_MISMATCH_TRUE = "true"
VERSION_MISMATCH_YES = "yes"
class MgmtEntity(ManagedObject):
"""This is MgmtEntity class."""
consts = MgmtEntityConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("MgmtEntity", "mgmtEntity", "mgmt-entity-[id]", VersionMeta.Version111a, "InputOutput", 0x1f, [], ["read-only"], [u'computeSystem', u'topSystem'], [], ["Get"])
prop_meta = {
"chassis1": MoPropertyMeta("chassis1", "chassis1", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis2": MoPropertyMeta("chassis2", "chassis2", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis3": MoPropertyMeta("chassis3", "chassis3", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis_device_io_state1": MoPropertyMeta("chassis_device_io_state1", "chassisDeviceIoState1", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"chassis_device_io_state2": MoPropertyMeta("chassis_device_io_state2", "chassisDeviceIoState2", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"chassis_device_io_state3": MoPropertyMeta("chassis_device_io_state3", "chassisDeviceIoState3", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"ha_failure_reason": MoPropertyMeta("ha_failure_reason", "haFailureReason", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"ha_readiness": MoPropertyMeta("ha_readiness", "haReadiness", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"ha_ready": MoPropertyMeta("ha_ready", "haReady", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x4, None, None, None, ["A", "B", "NONE", "mgmt"], []),
"lead_id_for_auto_install": MoPropertyMeta("lead_id_for_auto_install", "leadIdForAutoInstall", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE", "mgmt"], []),
"leadership": MoPropertyMeta("leadership", "leadership", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"mgmt_services_state": MoPropertyMeta("mgmt_services_state", "mgmtServicesState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"problems": MoPropertyMeta("problems", "problems", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"ssh_auth_keys_csum": MoPropertyMeta("ssh_auth_keys_csum", "sshAuthKeysCsum", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ssh_auth_keys_size": MoPropertyMeta("ssh_auth_keys_size", "sshAuthKeysSize", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"ssh_key_status": MoPropertyMeta("ssh_key_status", "sshKeyStatus", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["matched", "mismatched", "none"], ["0-4294967295"]),
"ssh_root_pub_key_csum": MoPropertyMeta("ssh_root_pub_key_csum", "sshRootPubKeyCsum", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ssh_root_pub_key_size": MoPropertyMeta("ssh_root_pub_key_size", "sshRootPubKeySize", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"state": MoPropertyMeta("state", "state", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"umbilical_state": MoPropertyMeta("umbilical_state", "umbilicalState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"version_mismatch": MoPropertyMeta("version_mismatch", "versionMismatch", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
}
prop_map = {
"chassis1": "chassis1",
"chassis2": "chassis2",
"chassis3": "chassis3",
"chassisDeviceIoState1": "chassis_device_io_state1",
"chassisDeviceIoState2": "chassis_device_io_state2",
"chassisDeviceIoState3": "chassis_device_io_state3",
"childAction": "child_action",
"dn": "dn",
"haFailureReason": "ha_failure_reason",
"haReadiness": "ha_readiness",
"haReady": "ha_ready",
"id": "id",
"leadIdForAutoInstall": "lead_id_for_auto_install",
"leadership": "leadership",
"mgmtServicesState": "mgmt_services_state",
"problems": "problems",
"rn": "rn",
"sshAuthKeysCsum": "ssh_auth_keys_csum",
"sshAuthKeysSize": "ssh_auth_keys_size",
"sshKeyStatus": "ssh_key_status",
"sshRootPubKeyCsum": "ssh_root_pub_key_csum",
"sshRootPubKeySize": "ssh_root_pub_key_size",
"state": "state",
"status": "status",
"umbilicalState": "umbilical_state",
"versionMismatch": "version_mismatch",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.chassis1 = None
self.chassis2 = None
self.chassis3 = None
self.chassis_device_io_state1 = None
self.chassis_device_io_state2 = None
self.chassis_device_io_state3 = None
self.child_action = None
self.ha_failure_reason = None
self.ha_readiness = None
self.ha_ready = None
self.lead_id_for_auto_install = None
self.leadership = None
self.mgmt_services_state = None
self.problems = None
self.ssh_auth_keys_csum = None
self.ssh_auth_keys_size = None
self.ssh_key_status = None
self.ssh_root_pub_key_csum = None
self.ssh_root_pub_key_size = None
self.state = None
self.status = None
self.umbilical_state = None
self.version_mismatch = None
ManagedObject.__init__(self, "MgmtEntity", parent_mo_or_dn, **kwargs)
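# Illustrative note (a sketch of the usual ucscsdk naming convention, not SDK
# documentation): `id` is the only naming property, so the relative name follows the
# mo_meta pattern "mgmt-entity-[id]"; e.g. MgmtEntity(parent_mo_or_dn="sys", id="A")
# would resolve to rn "mgmt-entity-A" under the parent dn.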
|
"""Usage: planners_evaluation.py [options]
Compare performances of several planners
Options:
-h --help
  --generate <true_or_false>  Generate new data [default: True].
  --show <true_or_false>      Plot results [default: True].
  --directory <path>          Specify directory path [default: ./out/planners].
  --data_file <path>          Specify output data file name [default: data.csv].
  --budgets <start,end,N>     Computational budgets available to planners, in logspace [default: 1,3,100].
  --seeds <(s,)n>             Number of evaluations of each configuration, with an optional first seed [default: 10].
  --processes <p>             Number of processes [default: 4].
  --chunksize <c>             Size of data chunks each processor receives [default: 1].
  --range <start:end>         Range of budgets to be plotted.
"""
from ast import literal_eval
from pathlib import Path
import tqdm
from docopt import docopt
from collections import OrderedDict
from itertools import product
from multiprocessing.pool import Pool
import gym
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import logging
sns.set(font_scale=1.5, rc={'text.usetex': True})
from rl_agents.agents.common.factory import load_environment, agent_factory
from rl_agents.trainer.evaluation import Evaluation
logger = logging.getLogger(__name__)
gamma = 0.7
SEED_MAX = 1e9
def env_configs():
# return ['configs/CartPoleEnv/env.json']
return ['configs/HighwayEnv/env.json']
# return ['configs/GridWorld/collect.json']
# return ['configs/FiniteMDPEnv/env_garnet.json']
# return ['configs/SailingEnv/env.json']
# return [Path("configs") / "DummyEnv" / "line_env.json"]
def agent_configs():
agents = {
"random": {
"__class__": "<class 'rl_agents.agents.simple.random.RandomUniformAgent'>"
},
"KL-OLOP": {
"__class__": "<class 'rl_agents.agents.tree_search.olop.OLOPAgent'>",
"gamma": gamma,
"upper_bound": {
"type": "kullback-leibler",
"threshold": "1*np.log(time)"
},
"lazy_tree_construction": True,
"continuation_type": "uniform",
"env_preprocessors": [{"method":"simplify"}]
},
"OPD": {
"__class__": "<class 'rl_agents.agents.tree_search.deterministic.DeterministicPlannerAgent'>",
"env_preprocessors": [{"method":"simplify"}],
"gamma": gamma,
},
"MDP-GapE": {
"__class__": "<class 'rl_agents.agents.tree_search.mdp_gape.MDPGapEAgent'>",
"gamma": gamma,
"accuracy": 0,
"confidence": 1,
"upper_bound":
{
"type": "kullback-leibler",
"time": "global",
"threshold": "1*np.log(time)",
"transition_threshold": "0.1*np.log(time)"
},
"max_next_states_count": 2,
"continuation_type": "uniform",
"step_strategy": "reset",
},
"BRUE": {
"__class__": "<class 'rl_agents.agents.tree_search.brue.BRUEAgent'>",
"gamma": gamma,
"step_strategy": "reset",
},
"UCT": {
"__class__": "<class 'rl_agents.agents.tree_search.mcts.MCTSAgent'>",
"gamma": gamma,
"closed_loop": False,
"env_preprocessors": [{"method":"simplify"}]
},
"GBOP": {
"__class__": "<class 'rl_agents.agents.tree_search.graph_based_stochastic.StochasticGraphBasedPlannerAgent'>",
"gamma": gamma,
"upper_bound":
{
"type": "kullback-leibler",
"threshold": "0*np.log(time)",
"transition_threshold": "0.1*np.log(time)"
},
"max_next_states_count": 3,
"accuracy": 5e-2
},
"GBOP-D": {
"__class__": "<class 'rl_agents.agents.tree_search.graph_based.GraphBasedPlannerAgent'>",
"gamma": gamma,
},
"value_iteration": {
"__class__": "<class 'rl_agents.agents.dynamic_programming.value_iteration.ValueIterationAgent'>",
"gamma": gamma,
"iterations": int(3 / (1 - gamma))
}
}
return OrderedDict(agents)
def evaluate(experiment):
# Prepare workspace
seed, budget, agent_config, env_config, path = experiment
gym.logger.set_level(gym.logger.DISABLED)
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
# Make environment
env = load_environment(env_config)
# Make agent
agent_name, agent_config = agent_config
agent_config["budget"] = int(budget)
agent = agent_factory(env, agent_config)
logger.debug("Evaluating agent {} with budget {} on seed {}".format(agent_name, budget, seed))
# Compute true value
compute_regret = False
compute_return = True
if compute_regret:
env.seed(seed)
observation = env.reset()
vi = agent_factory(env, agent_configs()["value_iteration"])
best_action = vi.act(observation)
action = agent.act(observation)
q = vi.state_action_value
simple_regret = q[vi.mdp.state, best_action] - q[vi.mdp.state, action]
gap = q[vi.mdp.state, best_action] - np.sort(q[vi.mdp.state, :])[-2]
else:
simple_regret = 0
gap = 0
if compute_return:
# Evaluate
evaluation = Evaluation(env,
agent,
directory=Path("out") / "planners" / agent_name,
num_episodes=1,
sim_seed=seed,
display_env=False,
display_agent=False,
display_rewards=False)
evaluation.test()
rewards = evaluation.monitor.stats_recorder.episode_rewards_[0]
length = evaluation.monitor.stats_recorder.episode_lengths[0]
total_reward = np.sum(rewards)
cum_discount = lambda signal: np.sum([gamma**t * signal[t] for t in range(len(signal))])
return_ = cum_discount(rewards)
mean_return = np.mean([cum_discount(rewards[t:]) for t in range(len(rewards))])
else:
length = 0
total_reward = 0
return_ = 0
mean_return = 0
# Save results
result = {
"agent": agent_name,
"budget": budget,
"seed": seed,
"total_reward": total_reward,
"return": return_,
"mean_return": mean_return,
"length": length,
"simple_regret": simple_regret,
"gap": gap
}
df = pd.DataFrame.from_records([result])
with open(path, 'a') as f:
df.to_csv(f, sep=',', encoding='utf-8', header=f.tell() == 0, index=False)
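# Illustrative note: with gamma = 0.7, the recorded "return" is the discounted sum
# sum_t gamma**t * r_t over the episode rewards, while "mean_return" averages that
# discounted sum over every suffix of the episode -- a restatement of cum_discount in
# evaluate() above, not additional behaviour.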
def prepare_experiments(budgets, seeds, path):
budgets = np.unique(np.logspace(*literal_eval(budgets)).astype(int))
selected_agents = [
"KL-OLOP",
"OPD"
"UCT"
]
agents = {agent: config for agent, config in agent_configs().items() if agent in selected_agents}
seeds = seeds.split(",")
first_seed = int(seeds[0]) if len(seeds) == 2 else np.random.randint(0, SEED_MAX, dtype=int)
seeds_count = int(seeds[-1])
seeds = (first_seed + np.arange(seeds_count)).tolist()
envs = env_configs()
paths = [path]
experiments = list(product(seeds, budgets, agents.items(), envs, paths))
return experiments
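# Illustrative: "--budgets 1,3,100" expands above to np.unique(np.logspace(1, 3, 100).astype(int)),
# i.e. integer budgets spanning 10 to 1000; "--seeds 5,10" evaluates 10 seeds starting at
# seed 5, while a bare "--seeds 10" draws a random first seed.  This restates the parsing
# in prepare_experiments rather than adding behaviour.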
latex_names = {
"simple_regret": "simple regret $r_n$",
"total_reward": "total reward $R$",
"mean_return": "mean return $E[R]$",
"1/epsilon": r"${1}/{\epsilon}$",
"MDP-GapE": r"\texttt{MDP-GapE}",
"KL-OLOP": r"\texttt{KL-OLOP}",
"BRUE": r"\texttt{BRUE}",
"GBOP": r"\texttt{GBOP}",
"UCT": r"\texttt{UCT}",
"budget": r"budget $n$",
}
def rename_df(df):
df = df.rename(columns=latex_names)
for key, value in latex_names.items():
df["agent"] = df["agent"].replace(key, value)
return df
def rename(value, latex=True):
return latex_names.get(value, value) if latex else value
def plot_all(data_file, directory, data_range):
print("Reading data from {}".format(directory / data_file))
df = pd.read_csv(str(directory / data_file))
df = df[~df.agent.isin(['agent'])].apply(pd.to_numeric, errors='ignore')
df = df.sort_values(by="agent")
m = df.loc[df['simple_regret'] != np.inf, 'simple_regret'].max()
df['simple_regret'].replace(np.inf, m, inplace=True)
df = rename_df(df)
if data_range:
start, end = data_range.split(':')
df = df[df["budget"].between(int(start), int(end))]
print("Number of seeds found: {}".format(df.seed.nunique()))
with sns.axes_style("ticks"):
sns.set_palette("colorblind")
fig, ax = plt.subplots()
for field in ["total_reward"]:
ax.set(xscale="log")
if field in ["simple_regret"]:
ax.set_yscale("symlog", linthreshy=1e-3)
sns.lineplot(x=rename("budget"), y=rename(field), ax=ax, hue="agent", style="agent", data=df)
# ax.yaxis.set_minor_locator(LogLocator(base=10, subs=(1.0,)))
# ax.yaxis.grid(True, which='minor', linestyle='-')
plt.legend(loc="lower left")
field_path = directory / "{}.pdf".format(field)
fig.savefig(field_path, bbox_inches='tight')
field_path = directory / "{}.png".format(field)
fig.savefig(field_path, bbox_inches='tight')
print("Saving {} plot to {}".format(field, field_path))
custom_processing(df, directory)
def custom_processing(df, directory):
pass
def main(args):
raise NotImplementedError('The Monitor wrapper which previously recorded statistics has been replaced by '
'RecordEpisodeStatistics, so this file needs to be updated.')
if args["--generate"] == "True":
experiments = prepare_experiments(args["--budgets"], args['--seeds'],
str(Path(args["--directory"]) / args["--data_file"]))
chunksize = int(args["--chunksize"])
with Pool(processes=int(args["--processes"])) as p:
list(tqdm.tqdm(p.imap_unordered(evaluate, experiments, chunksize=chunksize), total=len(experiments)))
if args["--show"] == "True":
plot_all(args["--data_file"], Path(args["--directory"]), args["--range"])
if __name__ == "__main__":
arguments = docopt(__doc__)
main(arguments)
|
import contextlib
import copy
import pathlib
import xml.etree.ElementTree
from unittest import mock
import pytest
np = pytest.importorskip("numpy")
import operator
import os
import time
import warnings
from functools import reduce
from io import StringIO
from operator import add, sub
from threading import Lock
from numpy import nancumprod, nancumsum
from tlz import concat, countby, merge
from tlz.curried import identity
import dask
import dask.array as da
from dask.array.core import (
Array,
BlockView,
PerformanceWarning,
blockdims_from_blockshape,
broadcast_chunks,
broadcast_shapes,
broadcast_to,
common_blockdim,
concatenate,
concatenate3,
concatenate_axes,
dotmany,
from_array,
from_delayed,
from_func,
getter,
graph_from_arraylike,
normalize_chunks,
optimize,
stack,
store,
)
from dask.array.utils import assert_eq, same_keys
from dask.base import compute_as_if_collection, tokenize
from dask.blockwise import broadcast_dimensions
from dask.blockwise import make_blockwise_graph as top
from dask.blockwise import optimize_blockwise
from dask.delayed import Delayed, delayed
from dask.highlevelgraph import HighLevelGraph, MaterializedLayer
from dask.layers import Blockwise
from dask.utils import SerializableLock, apply, key_split, parse_bytes, tmpdir, tmpfile
from dask.utils_test import dec, hlg_layer_topological, inc
from ..chunk import getitem
from .test_dispatch import EncapsulateNDArray
@pytest.mark.parametrize("inline_array", [True, False])
def test_graph_from_arraylike(inline_array):
d = 2
chunk = (2, 3)
shape = tuple(d * n for n in chunk)
arr = np.ones(shape)
dsk = graph_from_arraylike(
arr, chunk, shape=shape, name="X", inline_array=inline_array
)
assert isinstance(dsk, HighLevelGraph)
if inline_array:
assert len(dsk.layers) == 1
assert isinstance(hlg_layer_topological(dsk, 0), Blockwise)
else:
assert len(dsk.layers) == 2
assert isinstance(hlg_layer_topological(dsk, 0), MaterializedLayer)
assert isinstance(hlg_layer_topological(dsk, 1), Blockwise)
dsk = dict(dsk)
# Somewhat odd membership check to avoid numpy elemwise __in__ overload
assert any(arr is v for v in dsk.values()) is not inline_array
def test_top():
assert top(inc, "z", "ij", "x", "ij", numblocks={"x": (2, 2)}) == {
("z", 0, 0): (inc, ("x", 0, 0)),
("z", 0, 1): (inc, ("x", 0, 1)),
("z", 1, 0): (inc, ("x", 1, 0)),
("z", 1, 1): (inc, ("x", 1, 1)),
}
assert top(
add, "z", "ij", "x", "ij", "y", "ij", numblocks={"x": (2, 2), "y": (2, 2)}
) == {
("z", 0, 0): (add, ("x", 0, 0), ("y", 0, 0)),
("z", 0, 1): (add, ("x", 0, 1), ("y", 0, 1)),
("z", 1, 0): (add, ("x", 1, 0), ("y", 1, 0)),
("z", 1, 1): (add, ("x", 1, 1), ("y", 1, 1)),
}
assert top(
dotmany, "z", "ik", "x", "ij", "y", "jk", numblocks={"x": (2, 2), "y": (2, 2)}
) == {
("z", 0, 0): (dotmany, [("x", 0, 0), ("x", 0, 1)], [("y", 0, 0), ("y", 1, 0)]),
("z", 0, 1): (dotmany, [("x", 0, 0), ("x", 0, 1)], [("y", 0, 1), ("y", 1, 1)]),
("z", 1, 0): (dotmany, [("x", 1, 0), ("x", 1, 1)], [("y", 0, 0), ("y", 1, 0)]),
("z", 1, 1): (dotmany, [("x", 1, 0), ("x", 1, 1)], [("y", 0, 1), ("y", 1, 1)]),
}
assert top(identity, "z", "", "x", "ij", numblocks={"x": (2, 2)}) == {
("z",): (identity, [[("x", 0, 0), ("x", 0, 1)], [("x", 1, 0), ("x", 1, 1)]])
}
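# Illustrative reading of the index notation exercised above (a sketch, not dask
# documentation): top(func, out_name, out_ind, *name/ind pairs, numblocks=...) emits one
# task per output block.  Indices shared by output and input ("ij"/"ij") align blocks
# element-wise, an index appearing only on the inputs ("ij"/"jk" -> "ik") is contracted by
# passing lists of blocks along that axis, and an empty output index ("") gathers every
# block into a single nested list, as the four assertions above demonstrate.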
def test_top_with_kwargs():
assert top(add, "z", "i", "x", "i", numblocks={"x": (2, 0)}, b=100) == {
("z", 0): (apply, add, [("x", 0)], {"b": 100}),
("z", 1): (apply, add, [("x", 1)], {"b": 100}),
}
def test_top_supports_broadcasting_rules():
assert top(
add, "z", "ij", "x", "ij", "y", "ij", numblocks={"x": (1, 2), "y": (2, 1)}
) == {
("z", 0, 0): (add, ("x", 0, 0), ("y", 0, 0)),
("z", 0, 1): (add, ("x", 0, 1), ("y", 0, 0)),
("z", 1, 0): (add, ("x", 0, 0), ("y", 1, 0)),
("z", 1, 1): (add, ("x", 0, 1), ("y", 1, 0)),
}
def test_top_literals():
assert top(add, "z", "ij", "x", "ij", 123, None, numblocks={"x": (2, 2)}) == {
("z", 0, 0): (add, ("x", 0, 0), 123),
("z", 0, 1): (add, ("x", 0, 1), 123),
("z", 1, 0): (add, ("x", 1, 0), 123),
("z", 1, 1): (add, ("x", 1, 1), 123),
}
def test_blockwise_literals():
x = da.ones((10, 10), chunks=(5, 5))
z = da.blockwise(add, "ij", x, "ij", 100, None, dtype=x.dtype)
assert_eq(z, x + 100)
z = da.blockwise(
lambda x, y, z: x * y + z, "ij", 2, None, x, "ij", 100, None, dtype=x.dtype
)
assert_eq(z, 2 * x + 100)
z = da.blockwise(getitem, "ij", x, "ij", slice(None), None, dtype=x.dtype)
assert_eq(z, x)
def test_blockwise_1_in_shape_I():
def test_f(a, b):
assert 1 in b.shape
p, k, N = 7, 2, 5
da.blockwise(
test_f,
"x",
da.zeros((2 * p, 9, k * N), chunks=(p, 3, k)),
"xzt",
da.zeros((2 * p, 9, 1), chunks=(p, 3, -1)),
"xzt",
concatenate=True,
dtype=float,
).compute()
def test_blockwise_1_in_shape_II():
def test_f(a, b):
assert 1 in b.shape
p, k, N = 7, 2, 5
da.blockwise(
test_f,
"x",
da.zeros((2 * p, 9, k * N, 8), chunks=(p, 9, k, 4)),
"xztu",
da.zeros((2 * p, 9, 1, 8), chunks=(p, 9, -1, 4)),
"xztu",
concatenate=True,
dtype=float,
).compute()
def test_blockwise_1_in_shape_III():
def test_f(a, b):
assert 1 in b.shape
k, N = 2, 5
da.blockwise(
test_f,
"x",
da.zeros((k * N, 9, 8), chunks=(k, 3, 4)),
"xtu",
da.zeros((1, 9, 8), chunks=(-1, 3, 4)),
"xtu",
concatenate=True,
dtype=float,
).compute()
def test_concatenate3_on_scalars():
assert_eq(concatenate3([1, 2]), np.array([1, 2]))
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
getx = graph_from_arraylike(x, (5, 5), shape=(20, 20), name="x")
geto = graph_from_arraylike(o, (5, 5), shape=(20, 20), name="o")
result = top(
dotmany, "out", "ik", "x", "ij", "o", "jk", numblocks={"x": (4, 4), "o": (4, 4)}
)
dsk = merge(getx, geto, result)
out = dask.get(dsk, [[("out", i, j) for j in range(4)] for i in range(4)])
assert_eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
getx = graph_from_arraylike(x, (5, 5), shape=(20, 20), name="x")
f = lambda x: x.T + 1
comp = top(f, "out", "ij", "x", "ji", numblocks={"x": (4, 4)})
dsk = merge(getx, comp)
out = dask.get(dsk, [[("out", i, j) for j in range(4)] for i in range(4)])
assert_eq(concatenate3(out), x.T + 1)
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [("x", "i")]
numblocks = {"x": ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {"i": (1,)}
def test_broadcast_dimensions():
argpairs = [("x", "ij"), ("y", "ij")]
d = {"x": ("Hello", 1), "y": (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {"i": "Hello", "j": (2, 3)}
def test_Array():
arr = object() # arraylike is unimportant since we never compute
shape = (1000, 1000)
chunks = (100, 100)
name = "x"
dsk = graph_from_arraylike(arr, chunks, shape, name)
a = Array(dsk, name, chunks, shape=shape, dtype="f8")
assert a.numblocks == (10, 10)
assert a.__dask_keys__() == [[("x", i, j) for j in range(10)] for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
with pytest.raises(ValueError):
Array(dsk, name, chunks, shape=shape)
with pytest.raises(TypeError):
Array(dsk, name, chunks, shape=shape, dtype="f8", meta=np.empty(0, 0))
def test_uneven_chunks():
a = Array({}, "x", chunks=(3, 3), shape=(10, 10), dtype="f8")
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_supports_singleton_block_dims():
arr = object() # arraylike is unimportant since we never compute
shape = (100, 10)
chunks = (10, 10)
name = "x"
dsk = graph_from_arraylike(arr, chunks, shape, name)
a = Array(dsk, name, chunks, shape=shape, dtype="f8")
assert set(concat(a.__dask_keys__())) == {("x", i, 0) for i in range(10)}
def test_keys():
dsk = {("x", i, j): () for i in range(5) for j in range(6)}
dx = Array(dsk, "x", chunks=(10, 10), shape=(50, 60), dtype="f8")
assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)] for i in range(5)]
# Cache works
assert dx.__dask_keys__() is dx.__dask_keys__()
# Test mutating names clears key cache
dx.dask = {("y", i, j): () for i in range(5) for j in range(6)}
dx._name = "y"
new_keys = [[(dx.name, i, j) for j in range(6)] for i in range(5)]
assert dx.__dask_keys__() == new_keys
assert np.array_equal(dx._key_array, np.array(new_keys, dtype="object"))
d = Array({}, "x", (), shape=(), dtype="f8")
assert d.__dask_keys__() == [("x",)]
def test_Array_computation():
a = Array({("x", 0, 0): np.eye(3)}, "x", shape=(3, 3), chunks=(3, 3), dtype="f8")
assert_eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_Array_numpy_gufunc_call__array_ufunc__01():
x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))
nx = x.compute()
ny = np.linalg._umath_linalg.inv(nx)
y = np.linalg._umath_linalg.inv(x)
assert_eq(ny, y)
def test_Array_numpy_gufunc_call__array_ufunc__02():
x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))
nx = x.compute()
nw, nv = np.linalg._umath_linalg.eig(nx)
w, v = np.linalg._umath_linalg.eig(x)
assert_eq(nw, w)
assert_eq(nv, v)
def test_stack():
a, b, c = (
Array(
graph_from_arraylike(object(), chunks=(2, 3), shape=(4, 6), name=name),
name,
chunks=(2, 3),
dtype="f8",
shape=(4, 6),
)
for name in "ABC"
)
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.chunksize == (1, 2, 3)
assert s.dask[(s.name, 0, 1, 0)] == (getitem, ("A", 1, 0), (None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getitem, ("C", 1, 0), (None, colon, colon))
assert same_keys(s, stack([a, b, c], axis=0))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.chunksize == (2, 1, 3)
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ("B", 0, 0), (colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getitem, ("B", 1, 0), (colon, None, colon))
assert same_keys(s2, stack([a, b, c], axis=1))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.chunksize == (2, 3, 1)
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ("A", 0, 1), (colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getitem, ("C", 1, 1), (colon, colon, None))
assert same_keys(s2, stack([a, b, c], axis=2))
pytest.raises(ValueError, lambda: stack([]))
pytest.raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == stack([a, b, c], axis=2).chunks
def test_stack_zero_size():
x = np.empty((2, 0, 3))
y = da.from_array(x, chunks=1)
result_np = np.concatenate([x, x])
result_da = da.concatenate([y, y])
assert_eq(result_np, result_da)
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
chunks = compute_as_if_collection(Array, s.dask, s.__dask_keys__())
assert chunks[0][0].shape == (1, 1)
def test_stack_scalars():
d = da.arange(4, chunks=2)
s = da.stack([d.mean(), d.sum()])
assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
def test_stack_promote_type():
i = np.arange(10, dtype="i4")
f = np.arange(10, dtype="f4")
di = da.from_array(i, chunks=5)
df = da.from_array(f, chunks=5)
res = da.stack([di, df])
assert_eq(res, np.stack([i, f]))
def test_stack_rechunk():
x = da.random.random(10, chunks=5)
y = da.random.random(10, chunks=4)
z = da.stack([x, y], axis=0)
assert z.shape == (2, 10)
assert z.chunks == ((1, 1), (4, 1, 3, 2))
assert_eq(z, np.stack([x.compute(), y.compute()], axis=0))
def test_stack_unknown_chunksizes():
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
a_df = pd.DataFrame({"x": np.arange(12)})
b_df = pd.DataFrame({"y": np.arange(12) * 10})
a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)
b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)
a_x = a_ddf.values
b_x = b_ddf.values
assert np.isnan(a_x.shape[0])
assert np.isnan(b_x.shape[0])
with pytest.raises(ValueError) as exc_info:
da.stack([a_x, b_x], axis=0)
assert "shape" in str(exc_info.value)
assert "nan" in str(exc_info.value)
c_x = da.stack([a_x, b_x], axis=0, allow_unknown_chunksizes=True)
assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=0))
with pytest.raises(ValueError) as exc_info:
da.stack([a_x, b_x], axis=1)
assert "shape" in str(exc_info.value)
assert "nan" in str(exc_info.value)
c_x = da.stack([a_x, b_x], axis=1, allow_unknown_chunksizes=True)
assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=1))
m_df = pd.DataFrame({"m": np.arange(12) * 100})
n_df = pd.DataFrame({"n": np.arange(12) * 1000})
m_ddf = dd.from_pandas(m_df, sort=False, npartitions=3)
n_ddf = dd.from_pandas(n_df, sort=False, npartitions=3)
m_x = m_ddf.values
n_x = n_ddf.values
assert np.isnan(m_x.shape[0])
assert np.isnan(n_x.shape[0])
with pytest.raises(ValueError) as exc_info:
da.stack([[a_x, b_x], [m_x, n_x]])
assert "shape" in str(exc_info.value)
assert "nan" in str(exc_info.value)
c_x = da.stack([[a_x, b_x], [m_x, n_x]], allow_unknown_chunksizes=True)
assert_eq(c_x, np.stack([[a_df.values, b_df.values], [m_df.values, n_df.values]]))
def test_concatenate():
a, b, c = (
Array(
graph_from_arraylike(object(), chunks=(2, 3), shape=(4, 6), name=name),
name,
chunks=(2, 3),
dtype="f8",
shape=(4, 6),
)
for name in "ABC"
)
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ("A", 0, 1)
assert x.dask[(x.name, 5, 0)] == ("C", 1, 0)
assert same_keys(x, concatenate([a, b, c], axis=0))
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ("A", 1, 0)
assert y.dask[(y.name, 1, 5)] == ("C", 1, 1)
assert same_keys(y, concatenate([a, b, c], axis=1))
assert set(b.dask.keys()).issubset(y.dask.keys())
z = concatenate([a], axis=0)
assert z.shape == a.shape
assert z.chunks == a.chunks
assert z.dask == a.dask
assert z is a
assert (
concatenate([a, b, c], axis=-1).chunks == concatenate([a, b, c], axis=1).chunks
)
pytest.raises(ValueError, lambda: concatenate([]))
pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))
@pytest.mark.parametrize(
"dtypes", [((">f8", ">f8"), "float64"), (("<f4", "<f8"), "float64")]
)
def test_concatenate_types(dtypes):
dts_in, dt_out = dtypes
arrs = [np.zeros(4, dtype=dt) for dt in dts_in]
darrs = [from_array(arr, chunks=(2,)) for arr in arrs]
x = concatenate(darrs, axis=0)
assert x.dtype == dt_out
def test_concatenate_unknown_axes():
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
a_df = pd.DataFrame({"x": np.arange(12)})
b_df = pd.DataFrame({"y": np.arange(12) * 10})
a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)
b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)
a_x = a_ddf.values
b_x = b_ddf.values
assert np.isnan(a_x.shape[0])
assert np.isnan(b_x.shape[0])
da.concatenate([a_x, b_x], axis=0) # works fine
with pytest.raises(ValueError) as exc_info:
da.concatenate([a_x, b_x], axis=1) # unknown chunks
assert "nan" in str(exc_info.value)
assert "allow_unknown_chunksize" in str(exc_info.value)
c_x = da.concatenate(
[a_x, b_x], axis=1, allow_unknown_chunksizes=True
) # unknown chunks
assert_eq(c_x, np.concatenate([a_df.values, b_df.values], axis=1))
def test_concatenate_rechunk():
x = da.random.random((6, 6), chunks=(3, 3))
y = da.random.random((6, 6), chunks=(2, 2))
z = da.concatenate([x, y], axis=0)
assert z.shape == (12, 6)
assert z.chunks == ((3, 3, 2, 2, 2), (2, 1, 1, 2))
assert_eq(z, np.concatenate([x.compute(), y.compute()], axis=0))
z = da.concatenate([x, y], axis=1)
assert z.shape == (6, 12)
assert z.chunks == ((2, 1, 1, 2), (3, 3, 2, 2, 2))
assert_eq(z, np.concatenate([x.compute(), y.compute()], axis=1))
def test_concatenate_fixlen_strings():
x = np.array(["a", "b", "c"])
y = np.array(["aa", "bb", "cc"])
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
assert_eq(np.concatenate([x, y]), da.concatenate([a, b]))
def test_concatenate_zero_size():
x = np.random.random(10)
y = da.from_array(x, chunks=3)
result_np = np.concatenate([x, x[:0]])
result_da = da.concatenate([y, y[:0]])
assert_eq(result_np, result_da)
assert result_da is y
# dtype of a size 0 arrays can affect the output dtype
result_np = np.concatenate([np.zeros(0, dtype=float), np.zeros(1, dtype=int)])
result_da = da.concatenate([da.zeros(0, dtype=float), da.zeros(1, dtype=int)])
assert_eq(result_np, result_da)
# All empty arrays case
result_np = np.concatenate([np.zeros(0), np.zeros(0)])
result_da = da.concatenate([da.zeros(0), da.zeros(0)])
assert_eq(result_np, result_da)
def test_block_simple_row_wise():
a1 = np.ones((2, 2))
a2 = 2 * a1
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([a1, a2])
result = da.block([d1, d2])
assert_eq(expected, result)
expected = np.block([a1, a2[:, :0]])
result = da.block([d1, d2[:, :0]])
assert result is d1
assert_eq(expected, result)
def test_block_simple_column_wise():
a1 = np.ones((2, 2))
a2 = 2 * a1
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1], [a2]])
result = da.block([[d1], [d2]])
assert_eq(expected, result)
def test_block_with_1d_arrays_row_wise():
# # # 1-D vectors are treated as row arrays
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([a1, a2])
result = da.block([d1, d2])
assert_eq(expected, result)
expected = np.block([a1, a2[:0]])
result = da.block([d1, d2[:0]])
assert result is d1
assert_eq(expected, result)
def test_block_with_1d_arrays_multiple_rows():
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1, a2], [a1, a2]])
result = da.block([[d1, d2], [d1, d2]])
assert_eq(expected, result)
def test_block_with_1d_arrays_column_wise():
# # # 1-D vectors are treated as row arrays
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1], [a2]])
result = da.block([[d1], [d2]])
assert_eq(expected, result)
def test_block_mixed_1d_and_2d():
a1 = np.ones((2, 2))
a2 = np.array([2, 2])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
    expected = np.block([[a1], [a2]])
    result = da.block([[d1], [d2]])
assert_eq(expected, result)
def test_block_complicated():
# a bit more complicated
a1 = np.array([[1, 1, 1]])
a2 = np.array([[2, 2, 2]])
a3 = np.array([[3, 3, 3, 3, 3, 3]])
a4 = np.array([4, 4, 4, 4, 4, 4])
a5 = np.array(5)
a6 = np.array([6, 6, 6, 6, 6])
a7 = np.zeros((2, 6))
d1 = da.asarray(a1)
d2 = da.asarray(a2)
d3 = da.asarray(a3)
d4 = da.asarray(a4)
d5 = da.asarray(a5)
d6 = da.asarray(a6)
d7 = da.asarray(a7)
expected = np.block([[a1, a2], [a3], [a4], [a5, a6], [a7]])
result = da.block([[d1, d2], [d3], [d4], [d5, d6], [d7]])
assert_eq(expected, result)
def test_block_nested():
a1 = np.array([1, 1, 1])
a2 = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
a3 = np.array([3, 3, 3])
a4 = np.array([4, 4, 4])
a5 = np.array(5)
a6 = np.array([6, 6, 6, 6, 6])
a7 = np.zeros((2, 6))
d1 = da.asarray(a1)
d2 = da.asarray(a2)
d3 = da.asarray(a3)
d4 = da.asarray(a4)
d5 = da.asarray(a5)
d6 = da.asarray(a6)
d7 = da.asarray(a7)
expected = np.block([[np.block([[a1], [a3], [a4]]), a2], [a5, a6], [a7]])
result = da.block([[da.block([[d1], [d3], [d4]]), d2], [d5, d6], [d7]])
assert_eq(expected, result)
def test_block_3d():
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
d000 = da.asarray(a000)
d100 = da.asarray(a100)
d010 = da.asarray(a010)
d001 = da.asarray(a001)
d011 = da.asarray(a011)
d101 = da.asarray(a101)
d110 = da.asarray(a110)
d111 = da.asarray(a111)
expected = np.block([[[a000, a001], [a010, a011]], [[a100, a101], [a110, a111]]])
result = da.block([[[d000, d001], [d010, d011]], [[d100, d101], [d110, d111]]])
assert_eq(expected, result)
expected = np.block(
[
[[a000, a001[:, :, :0]], [a010[:, :0, :], a011[:, :0, :0]]],
[[a100[:0, :, :], a101[:0, :, :0]], [a110[:0, :0, :], a111[:0, :0, :0]]],
]
)
result = da.block(
[
[[d000, d001[:, :, :0]], [d010[:, :0, :], d011[:, :0, :0]]],
[[d100[:0, :, :], d101[:0, :, :0]], [d110[:0, :0, :], d111[:0, :0, :0]]],
]
)
assert result is d000
assert_eq(expected, result)
def test_block_with_mismatched_shape():
a = np.array([0, 0])
b = np.eye(2)
for arrays in [[a, b], [b, a]]:
with pytest.raises(ValueError):
da.block(arrays)
def test_block_no_lists():
assert_eq(da.block(1), np.block(1))
assert_eq(da.block(np.eye(3)), np.block(np.eye(3)))
def test_block_invalid_nesting():
for arrays in [
[1, [2]],
[1, []],
[[1], 2],
[[], 2],
[[[1], [2]], [[3, 4]], [5]], # missing brackets
]:
with pytest.raises(ValueError) as e:
da.block(arrays)
e.match(r"depths are mismatched")
def test_block_empty_lists():
for arrays in [[], [[]], [[1], []]]:
with pytest.raises(ValueError) as e:
da.block(arrays)
e.match(r"empty")
def test_block_tuple():
for arrays in [([1, 2], [3, 4]), [(1, 2), (3, 4)]]:
with pytest.raises(TypeError) as e:
da.block(arrays)
e.match(r"tuple")
def test_broadcast_shapes():
with warnings.catch_warnings(record=True) as record:
assert () == broadcast_shapes()
assert (2, 5) == broadcast_shapes((2, 5))
assert (0, 5) == broadcast_shapes((0, 1), (1, 5))
assert np.allclose(
(2, np.nan), broadcast_shapes((1, np.nan), (2, 1)), equal_nan=True
)
assert np.allclose(
(2, np.nan), broadcast_shapes((2, 1), (1, np.nan)), equal_nan=True
)
assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())
assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))
assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))
assert not record
pytest.raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))
pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))
pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (1, np.nan)))
def test_elemwise_on_scalars():
x = np.arange(10, dtype=np.int64)
a = from_array(x, chunks=(5,))
assert len(a.__dask_keys__()) == 2
assert_eq(a.sum() ** 2, x.sum() ** 2)
y = np.arange(10, dtype=np.int32)
b = from_array(y, chunks=(5,))
result = a.sum() * b
# Dask 0-d arrays do not behave like numpy scalars for type promotion
assert result.dtype == np.int64
assert result.compute().dtype == np.int64
assert (x.sum() * y).dtype == np.int32
assert_eq((x.sum() * y).astype(np.int64), result)
def test_elemwise_with_ndarrays():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 3))
assert_eq(x + a, 2 * x)
assert_eq(a + x, 2 * x)
assert_eq(x + b, x + y)
assert_eq(b + x, x + y)
assert_eq(a + y, x + y)
assert_eq(y + a, x + y)
# Error on shape mismatch
pytest.raises(ValueError, lambda: a + y.T)
pytest.raises(ValueError, lambda: a + np.arange(2))
def test_elemwise_differently_chunked():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 2))
assert_eq(a + b, x + y)
assert_eq(b + a, x + y)
def test_elemwise_dtype():
values = [
da.from_array(np.ones(5, np.float32), chunks=3),
da.from_array(np.ones(5, np.int16), chunks=3),
da.from_array(np.ones(5, np.int64), chunks=3),
da.from_array(np.ones((), np.float64), chunks=()) * 1e200,
np.ones(5, np.float32),
1,
1.0,
1e200,
np.int64(1),
np.ones((), np.int64),
]
for x in values:
for y in values:
assert da.maximum(x, y).dtype == da.result_type(x, y)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert_eq(c, x + 1)
c = a + b
assert_eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b) ** 2 > 5
with pytest.warns(None): # ZeroDivisionWarning
assert_eq(expr, (3 / x * y) ** 2 > 5)
with pytest.warns(None): # OverflowWarning
c = da.exp(a)
assert_eq(c, np.exp(x))
assert_eq(abs(-a), a)
assert_eq(a, +x)
def test_operator_dtype_promotion():
x = np.arange(10, dtype=np.float32)
y = np.array([1])
a = from_array(x, chunks=(5,))
assert_eq(x + 1, a + 1) # still float32
assert_eq(x + 1e50, a + 1e50) # now float64
assert_eq(x + y, a + y) # also float64
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[("a", "i4"), ("b", "f4")])
y = from_array(x, chunks=(1,))
assert_eq(y["a"], x["a"])
assert_eq(y[["b", "a"]], x[["b", "a"]])
assert same_keys(y[["b", "a"]], y[["b", "a"]])
def test_field_access_with_shape():
dtype = [("col1", ("f4", (3, 2))), ("col2", ("f4", 3))]
data = np.ones((100, 50), dtype=dtype)
x = da.from_array(data, 10)
assert_eq(x["col1"], data["col1"])
assert_eq(x[["col1"]], data[["col1"]])
assert_eq(x["col2"], data["col2"])
assert_eq(x[["col1", "col2"]], data[["col1", "col2"]])
def test_matmul():
x = np.random.random((5, 5))
y = np.random.random((5, 2))
a = from_array(x, chunks=(1, 5))
b = from_array(y, chunks=(5, 1))
assert_eq(operator.matmul(a, b), a.dot(b))
assert_eq(operator.matmul(a, b), operator.matmul(x, y))
assert_eq(operator.matmul(a, y), operator.matmul(x, b))
list_vec = list(range(1, 6))
assert_eq(operator.matmul(list_vec, b), operator.matmul(list_vec, y))
assert_eq(operator.matmul(x, list_vec), operator.matmul(a, list_vec))
z = np.random.random((5, 5, 5))
c = from_array(z, chunks=(1, 5, 1))
assert_eq(operator.matmul(a, z), operator.matmul(x, c))
assert_eq(operator.matmul(z, a), operator.matmul(c, x))
def test_matmul_array_ufunc():
# regression test for https://github.com/dask/dask/issues/4353
x = np.random.random((5, 5))
y = np.random.random((5, 2))
a = from_array(x, chunks=(1, 5))
b = from_array(y, chunks=(5, 1))
result = b.__array_ufunc__(np.matmul, "__call__", a, b)
assert_eq(result, x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert_eq(x.T, a.T)
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [a.shape, (5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
xb = np.broadcast_to(x, shape)
ab = broadcast_to(a, shape)
assert_eq(xb, ab)
if a.shape == ab.shape:
assert a is ab
pytest.raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
pytest.raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_broadcast_to_array():
x = np.random.randint(10, size=(5, 1, 6))
for shape in [(5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
a = np.broadcast_to(x, shape)
d = broadcast_to(x, shape)
assert_eq(a, d)
def test_broadcast_to_scalar():
x = 5
for shape in [tuple(), (0,), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
a = np.broadcast_to(x, shape)
d = broadcast_to(x, shape)
assert_eq(a, d)
def test_broadcast_to_chunks():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape, chunks, expected_chunks in [
((5, 3, 6), (3, -1, 3), ((3, 2), (3,), (3, 3))),
((5, 3, 6), (3, 1, 3), ((3, 2), (1, 1, 1), (3, 3))),
((2, 5, 3, 6), (1, 3, 1, 3), ((1, 1), (3, 2), (1, 1, 1), (3, 3))),
]:
xb = np.broadcast_to(x, shape)
ab = broadcast_to(a, shape, chunks=chunks)
assert_eq(xb, ab)
assert ab.chunks == expected_chunks
with pytest.raises(ValueError):
broadcast_to(a, a.shape, chunks=((2, 3), (1,), (3, 3)))
with pytest.raises(ValueError):
broadcast_to(a, a.shape, chunks=((3, 2), (3,), (3, 3)))
with pytest.raises(ValueError):
broadcast_to(a, (5, 2, 6), chunks=((3, 2), (3,), (3, 3)))
def test_broadcast_arrays():
assert np.broadcast_arrays() == da.broadcast_arrays()
a = np.arange(4)
d_a = da.from_array(a, chunks=tuple(s // 2 for s in a.shape))
a_0 = np.arange(4)[None, :]
a_1 = np.arange(4)[:, None]
d_a_0 = d_a[None, :]
d_a_1 = d_a[:, None]
a_r = np.broadcast_arrays(a_0, a_1)
d_r = da.broadcast_arrays(d_a_0, d_a_1)
assert isinstance(d_r, list)
assert len(a_r) == len(d_r)
for e_a_r, e_d_r in zip(a_r, d_r):
assert_eq(e_a_r, e_d_r)
def test_broadcast_arrays_uneven_chunks():
x = da.ones(30, chunks=(3,))
y = da.ones(30, chunks=(5,))
z = np.broadcast_arrays(x, y)
assert_eq(z, z)
x = da.ones((1, 30), chunks=(1, 3))
y = da.ones(30, chunks=(5,))
z = np.broadcast_arrays(x, y)
assert_eq(z, z)
@pytest.mark.parametrize(
"u_shape, v_shape",
[
[tuple(), (2, 3)],
[(1,), (2, 3)],
[(1, 1), (2, 3)],
[(0, 3), (1, 3)],
[(2, 0), (2, 1)],
[(1, 0), (2, 1)],
[(0, 1), (1, 3)],
],
)
def test_broadcast_operator(u_shape, v_shape):
u = np.random.random(u_shape)
v = np.random.random(v_shape)
d_u = from_array(u, chunks=1)
d_v = from_array(v, chunks=1)
w = u * v
d_w = d_u * d_v
assert_eq(w, d_w)
@pytest.mark.parametrize(
"original_shape,new_shape,chunks",
[
((10,), (10,), (3, 3, 4)),
((10,), (10, 1, 1), 5),
((10,), (1, 10), 5),
((24,), (2, 3, 4), 12),
((1, 24), (2, 3, 4), 12),
((2, 3, 4), (24,), (1, 3, 4)),
((2, 3, 4), (24,), 4),
((2, 3, 4), (24, 1), 4),
((2, 3, 4), (1, 24), 4),
((4, 4, 1), (4, 4), 2),
((4, 4), (4, 4, 1), 2),
((1, 4, 4), (4, 4), 2),
((1, 4, 4), (4, 4, 1), 2),
((1, 4, 4), (1, 1, 4, 4), 2),
((4, 4), (1, 4, 4, 1), 2),
((4, 4), (1, 4, 4), 2),
((2, 3), (2, 3), (1, 2)),
((2, 3), (3, 2), 3),
((4, 2, 3), (4, 6), 4),
((3, 4, 5, 6), (3, 4, 5, 6), (2, 3, 4, 5)),
((), (1,), 1),
((1,), (), 1),
((24,), (3, 8), 24),
((24,), (4, 6), 6),
((24,), (4, 3, 2), 6),
((24,), (4, 6, 1), 6),
((24,), (4, 6), (6, 12, 6)),
((64, 4), (8, 8, 4), (16, 2)),
((4, 64), (4, 8, 4, 2), (2, 16)),
((4, 8, 4, 2), (2, 1, 2, 32, 2), (2, 4, 2, 2)),
((4, 1, 4), (4, 4), (2, 1, 2)),
((0, 10), (0, 5, 2), (5, 5)),
((5, 0, 2), (0, 10), (5, 2, 2)),
((0,), (2, 0, 2), (4,)),
((2, 0, 2), (0,), (4, 4, 4)),
],
)
def test_reshape(original_shape, new_shape, chunks):
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks=chunks)
xr = x.reshape(new_shape)
ar = a.reshape(new_shape)
if a.shape == new_shape:
assert a is ar
assert_eq(xr, ar)
def test_reshape_exceptions():
x = np.random.randint(10, size=(5,))
a = from_array(x, chunks=(2,))
with pytest.raises(ValueError):
da.reshape(a, (100,))
def test_reshape_splat():
x = da.ones((5, 5), chunks=(2, 2))
assert_eq(x.reshape((25,)), x.reshape(25))
def test_reshape_fails_for_dask_only():
cases = [((3, 4), (4, 3), 2)]
for original_shape, new_shape, chunks in cases:
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks=chunks)
assert x.reshape(new_shape).shape == new_shape
with pytest.raises(ValueError):
da.reshape(a, new_shape)
def test_reshape_unknown_dimensions():
for original_shape in [(24,), (2, 12), (2, 3, 4)]:
for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, 24)
assert_eq(x.reshape(new_shape), a.reshape(new_shape))
pytest.raises(ValueError, lambda: da.reshape(a, (-1, -1)))
@pytest.mark.parametrize(
"limit", # in bytes
[
None, # Default value: dask.config.get("array.chunk-size")
134217728, # 128 MiB (default value size on a typical laptop)
67108864, # 64 MiB (half the typical default value size)
],
)
@pytest.mark.parametrize(
"shape, chunks, reshape_size",
[
# Test reshape where output chunks would otherwise be too large
((300, 180, 4, 18483), (-1, -1, 1, 183), (300, 180, -1)),
# Test reshape where multiple chunks match between input and output
((300, 300, 4, 18483), (-1, -1, 1, 183), (300, 300, -1)),
],
)
def test_reshape_avoids_large_chunks(limit, shape, chunks, reshape_size):
array = da.random.random(shape, chunks=chunks)
if limit is None:
with dask.config.set(**{"array.slicing.split_large_chunks": True}):
result = array.reshape(*reshape_size, limit=limit)
else:
result = array.reshape(*reshape_size, limit=limit)
nbytes = array.dtype.itemsize
max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes
if limit is None:
limit = parse_bytes(dask.config.get("array.chunk-size"))
assert max_chunksize_in_bytes < limit
def test_reshape_warns_by_default_if_it_is_producing_large_chunks():
# Test reshape where output chunks would otherwise be too large
shape, chunks, reshape_size = (300, 180, 4, 18483), (-1, -1, 1, 183), (300, 180, -1)
array = da.random.random(shape, chunks=chunks)
with pytest.warns(PerformanceWarning) as record:
result = array.reshape(*reshape_size)
nbytes = array.dtype.itemsize
max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes
limit = parse_bytes(dask.config.get("array.chunk-size"))
assert max_chunksize_in_bytes > limit
assert len(record) == 1
with dask.config.set(**{"array.slicing.split_large_chunks": False}):
result = array.reshape(*reshape_size)
nbytes = array.dtype.itemsize
max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes
limit = parse_bytes(dask.config.get("array.chunk-size"))
assert max_chunksize_in_bytes > limit
with dask.config.set(**{"array.slicing.split_large_chunks": True}):
result = array.reshape(*reshape_size)
nbytes = array.dtype.itemsize
max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes
limit = parse_bytes(dask.config.get("array.chunk-size"))
assert max_chunksize_in_bytes < limit
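# A minimal sketch of where the limit used above comes from: the
# "array.chunk-size" config value, which can be overridden globally with
# dask.config.set or per call via the ``limit=`` keyword of reshape.
def _sketch_chunk_size_config():
    with dask.config.set({"array.chunk-size": "64MiB"}):
        assert parse_bytes(dask.config.get("array.chunk-size")) == 64 * 2 ** 20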
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert_eq(d, np.full((3, 4), 2))
def test_map_blocks():
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert_eq(e, x + 1)
e = d.map_blocks(inc, name="increment")
assert e.name.startswith("increment-")
assert d.map_blocks(inc, name="foo").name != d.map_blocks(dec, name="foo").name
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert_eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(
lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)), dtype=d.dtype
)
assert_eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype="i8")
d = from_array(x, chunks=(2,))
def func(block, block_id=None, c=0):
return np.ones_like(block) * sum(block_id) + c
out = d.map_blocks(func, dtype="i8")
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype="i8")
assert_eq(out, expected)
assert same_keys(d.map_blocks(func, dtype="i8"), out)
out = d.map_blocks(func, dtype="i8", c=1)
expected = expected + 1
assert_eq(out, expected)
assert same_keys(d.map_blocks(func, dtype="i8", c=1), out)
def test_map_blocks_block_info():
x = da.arange(50, chunks=10)
def func(a, b, c, block_info=None):
for idx in [0, 2, None]: # positions in args
assert block_info[idx]["shape"] == (50,)
assert block_info[idx]["num-chunks"] == (5,)
start, stop = block_info[idx]["array-location"][0]
assert stop - start == 10
assert 0 <= start <= 40
assert 10 <= stop <= 50
assert 0 <= block_info[idx]["chunk-location"][0] <= 4
assert block_info[None]["chunk-shape"] == (10,)
assert block_info[None]["dtype"] == x.dtype
return a + b + c
z = da.map_blocks(func, x, 100, x + 1, dtype=x.dtype)
assert_eq(z, x + x + 1 + 100)
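# A minimal sketch of the ``block_info`` mapping asserted above: each array
# argument's position maps to a dict describing that argument, and the ``None``
# key describes the output block.
def _sketch_block_info_layout():
    def show(x, block_info=None):
        arg = block_info[0]       # metadata for the first (only) array argument
        out = block_info[None]    # metadata for the output block
        assert {"shape", "num-chunks", "array-location", "chunk-location"} <= set(arg)
        assert {"shape", "chunk-shape", "dtype", "chunk-location"} <= set(out)
        return x
    a = da.arange(10, chunks=5)
    a.map_blocks(show, dtype=a.dtype).compute()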
def test_map_blocks_block_info_with_new_axis():
# https://github.com/dask/dask/issues/4298
values = da.from_array(np.array(["a", "a", "b", "c"]), 2)
def func(x, block_info=None):
assert block_info.keys() == {0, None}
assert block_info[0]["shape"] == (4,)
assert block_info[0]["num-chunks"] == (2,)
assert block_info[None]["shape"] == (4, 3)
assert block_info[None]["num-chunks"] == (2, 1)
assert block_info[None]["chunk-shape"] == (2, 3)
assert block_info[None]["dtype"] == np.dtype("f8")
assert block_info[0]["chunk-location"] in {(0,), (1,)}
if block_info[0]["chunk-location"] == (0,):
assert block_info[0]["array-location"] == [(0, 2)]
assert block_info[None]["chunk-location"] == (0, 0)
assert block_info[None]["array-location"] == [(0, 2), (0, 3)]
elif block_info[0]["chunk-location"] == (1,):
assert block_info[0]["array-location"] == [(2, 4)]
assert block_info[None]["chunk-location"] == (1, 0)
assert block_info[None]["array-location"] == [(2, 4), (0, 3)]
return np.ones((len(x), 3))
z = values.map_blocks(func, chunks=((2, 2), 3), new_axis=1, dtype="f8")
assert_eq(z, np.ones((4, 3), dtype="f8"))
def test_map_blocks_block_info_with_drop_axis():
# https://github.com/dask/dask/issues/4584
values = da.from_array(
np.array(
[[1, 2, 4], [8, 16, 32], [64, 128, 256], [1024, 2048, 4096]], dtype="u4"
),
(2, 1),
)
def func(x, block_info=None):
assert block_info.keys() == {0, None}
assert block_info[0]["shape"] == (4, 3)
# drop_axis concatenates along the dropped dimension, hence not (2, 3)
assert block_info[0]["num-chunks"] == (2, 1)
assert block_info[None]["shape"] == (4,)
assert block_info[None]["num-chunks"] == (2,)
assert block_info[None]["chunk-shape"] == (2,)
assert block_info[None]["dtype"] == np.dtype("u4")
assert block_info[0]["chunk-location"] in {(0, 0), (1, 0)}
if block_info[0]["chunk-location"] == (0, 0):
assert block_info[0]["array-location"] == [(0, 2), (0, 3)]
assert block_info[None]["chunk-location"] == (0,)
assert block_info[None]["array-location"] == [(0, 2)]
elif block_info[0]["chunk-location"] == (1, 0):
assert block_info[0]["array-location"] == [(2, 4), (0, 3)]
assert block_info[None]["chunk-location"] == (1,)
assert block_info[None]["array-location"] == [(2, 4)]
return np.sum(x, axis=1, dtype="u4")
z = values.map_blocks(func, drop_axis=1, dtype="u4")
assert_eq(z, np.array([7, 56, 448, 7168], dtype="u4"))
def test_map_blocks_block_info_with_broadcast():
expected0 = [
{
"shape": (3, 4),
"num-chunks": (1, 2),
"array-location": [(0, 3), (0, 2)],
"chunk-location": (0, 0),
},
{
"shape": (3, 4),
"num-chunks": (1, 2),
"array-location": [(0, 3), (2, 4)],
"chunk-location": (0, 1),
},
]
expected1 = [
{
"shape": (6, 2),
"num-chunks": (2, 1),
"array-location": [(0, 3), (0, 2)],
"chunk-location": (0, 0),
},
{
"shape": (6, 2),
"num-chunks": (2, 1),
"array-location": [(3, 6), (0, 2)],
"chunk-location": (1, 0),
},
]
expected2 = [
{
"shape": (4,),
"num-chunks": (2,),
"array-location": [(0, 2)],
"chunk-location": (0,),
},
{
"shape": (4,),
"num-chunks": (2,),
"array-location": [(2, 4)],
"chunk-location": (1,),
},
]
expected = [
{
0: expected0[0],
1: expected1[0],
2: expected2[0],
None: {
"shape": (6, 4),
"num-chunks": (2, 2),
"dtype": np.float_,
"chunk-shape": (3, 2),
"array-location": [(0, 3), (0, 2)],
"chunk-location": (0, 0),
},
},
{
0: expected0[1],
1: expected1[0],
2: expected2[1],
None: {
"shape": (6, 4),
"num-chunks": (2, 2),
"dtype": np.float_,
"chunk-shape": (3, 2),
"array-location": [(0, 3), (2, 4)],
"chunk-location": (0, 1),
},
},
{
0: expected0[0],
1: expected1[1],
2: expected2[0],
None: {
"shape": (6, 4),
"num-chunks": (2, 2),
"dtype": np.float_,
"chunk-shape": (3, 2),
"array-location": [(3, 6), (0, 2)],
"chunk-location": (1, 0),
},
},
{
0: expected0[1],
1: expected1[1],
2: expected2[1],
None: {
"shape": (6, 4),
"num-chunks": (2, 2),
"dtype": np.float_,
"chunk-shape": (3, 2),
"array-location": [(3, 6), (2, 4)],
"chunk-location": (1, 1),
},
},
]
def func(x, y, z, block_info=None):
for info in expected:
if block_info[None]["chunk-location"] == info[None]["chunk-location"]:
assert block_info == info
break
else:
assert False
return x + y + z
a = da.ones((3, 4), chunks=(3, 2))
b = da.ones((6, 2), chunks=(3, 2))
c = da.ones((4,), chunks=(2,))
d = da.map_blocks(func, a, b, c, chunks=((3, 3), (2, 2)), dtype=a.dtype)
assert d.chunks == ((3, 3), (2, 2))
assert_eq(d, 3 * np.ones((6, 4)))
def test_map_blocks_with_constants():
d = da.arange(10, chunks=3)
e = d.map_blocks(add, 100, dtype=d.dtype)
assert_eq(e, np.arange(10) + 100)
assert_eq(da.map_blocks(sub, d, 10, dtype=d.dtype), np.arange(10) - 10)
assert_eq(da.map_blocks(sub, 10, d, dtype=d.dtype), 10 - np.arange(10))
def test_map_blocks_with_kwargs():
d = da.arange(10, chunks=5)
result = d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype, chunks=(1,))
assert_eq(result, np.array([4, 9]))
def test_map_blocks_infer_chunks_broadcast():
dx = da.from_array([[1, 2, 3, 4]], chunks=((1,), (2, 2)))
dy = da.from_array([[10, 20], [30, 40]], chunks=((1, 1), (2,)))
result = da.map_blocks(lambda x, y: x + y, dx, dy)
assert result.chunks == ((1, 1), (2, 2))
assert_eq(result, np.array([[11, 22, 13, 24], [31, 42, 33, 44]]))
def test_map_blocks_with_chunks():
dx = da.ones((5, 3), chunks=(2, 2))
dy = da.ones((5, 3), chunks=(2, 2))
dz = da.map_blocks(np.add, dx, dy, chunks=dx.chunks)
assert_eq(dz, np.ones((5, 3)) * 2)
def test_map_blocks_dtype_inference():
x = np.arange(50).reshape((5, 10))
y = np.arange(10)
dx = da.from_array(x, chunks=5)
dy = da.from_array(y, chunks=5)
def foo(x, *args, **kwargs):
cast = kwargs.pop("cast", "i8")
return (x + sum(args)).astype(cast)
assert_eq(dx.map_blocks(foo, dy, 1), foo(dx, dy, 1))
assert_eq(dx.map_blocks(foo, dy, 1, cast="f8"), foo(dx, dy, 1, cast="f8"))
assert_eq(
dx.map_blocks(foo, dy, 1, cast="f8", dtype="f8"),
foo(dx, dy, 1, cast="f8", dtype="f8"),
)
def foo(x):
raise RuntimeError("Woops")
with pytest.raises(ValueError) as e:
dx.map_blocks(foo)
msg = str(e.value)
assert "dtype" in msg
def test_map_blocks_infer_newaxis():
x = da.ones((5, 3), chunks=(2, 2))
y = da.map_blocks(lambda x: x[None], x, chunks=((1,), (2, 2, 1), (2, 1)))
assert_eq(y, da.ones((1, 5, 3)))
def test_map_blocks_no_array_args():
def func(dtype, block_info=None):
loc = block_info[None]["array-location"]
return np.arange(loc[0][0], loc[0][1], dtype=dtype)
x = da.map_blocks(func, np.float32, chunks=((5, 3),), dtype=np.float32)
assert x.chunks == ((5, 3),)
assert_eq(x, np.arange(8, dtype=np.float32))
def test_map_blocks_unique_name_chunks_dtype():
def func(block_info=None):
loc = block_info[None]["array-location"]
dtype = block_info[None]["dtype"]
return np.arange(loc[0][0], loc[0][1], dtype=dtype)
x = da.map_blocks(func, chunks=((5, 3),), dtype=np.float32)
assert x.chunks == ((5, 3),)
assert_eq(x, np.arange(8, dtype=np.float32))
y = da.map_blocks(func, chunks=((2, 2, 1, 3),), dtype=np.float32)
assert y.chunks == ((2, 2, 1, 3),)
assert_eq(y, np.arange(8, dtype=np.float32))
assert x.name != y.name
z = da.map_blocks(func, chunks=((5, 3),), dtype=np.float64)
assert z.chunks == ((5, 3),)
assert_eq(z, np.arange(8, dtype=np.float64))
assert x.name != z.name
assert y.name != z.name
def test_map_blocks_unique_name_drop_axis():
def func(some_3d, block_info=None):
if not block_info:
return some_3d
dtype = block_info[None]["dtype"]
return np.zeros(block_info[None]["shape"], dtype=dtype)
input_arr = da.zeros((3, 4, 5), chunks=((3,), (4,), (5,)), dtype=np.float32)
x = da.map_blocks(func, input_arr, drop_axis=[0], dtype=np.float32)
assert x.chunks == ((4,), (5,))
assert_eq(x, np.zeros((4, 5), dtype=np.float32))
y = da.map_blocks(func, input_arr, drop_axis=[2], dtype=np.float32)
assert y.chunks == ((3,), (4,))
assert_eq(y, np.zeros((3, 4), dtype=np.float32))
assert x.name != y.name
def test_map_blocks_unique_name_new_axis():
def func(some_2d, block_info=None):
if not block_info:
return some_2d
dtype = block_info[None]["dtype"]
return np.zeros(block_info[None]["shape"], dtype=dtype)
input_arr = da.zeros((3, 4), chunks=((3,), (4,)), dtype=np.float32)
x = da.map_blocks(func, input_arr, new_axis=[0], dtype=np.float32)
assert x.chunks == ((1,), (3,), (4,))
assert_eq(x, np.zeros((1, 3, 4), dtype=np.float32))
y = da.map_blocks(func, input_arr, new_axis=[2], dtype=np.float32)
assert y.chunks == ((3,), (4,), (1,))
assert_eq(y, np.zeros((3, 4, 1), dtype=np.float32))
assert x.name != y.name
@pytest.mark.parametrize("func", [lambda x, y: x + y, lambda x, y, block_info: x + y])
def test_map_blocks_optimize_blockwise(func):
# Check that map_blocks layers can merge with elementwise layers
base = [da.full((1,), i, chunks=1) for i in range(4)]
a = base[0] + base[1]
b = da.map_blocks(func, a, base[2], dtype=np.int8)
c = b + base[3]
dsk = c.__dask_graph__()
optimized = optimize_blockwise(dsk)
# Everything should be fused into a single layer.
# If the lambda includes block_info, there will be two layers.
assert len(optimized.layers) == len(dsk.layers) - 6
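# A minimal sketch of what optimize_blockwise does to a HighLevelGraph: chains
# of blockwise layers collapse into fewer layers while the result is unchanged.
def _sketch_optimize_blockwise_fusion():
    x = da.ones((4,), chunks=2)
    y = ((x + 1) * 2) - 3
    graph = y.__dask_graph__()
    fused = optimize_blockwise(graph)
    assert len(fused.layers) <= len(graph.layers)
    assert_eq(y, (np.ones(4) + 1) * 2 - 3)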
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert key_split(d.name) in repr(d)
assert str(d.shape) in repr(d)
assert str(d.dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_repr_meta():
d = da.ones((4, 4), chunks=(2, 2))
assert "chunktype=numpy.ndarray" in repr(d)
# Test non-numpy meta
sparse = pytest.importorskip("sparse")
s = d.map_blocks(sparse.COO)
assert "chunktype=sparse.COO" in repr(s)
def test_repr_html_array_highlevelgraph():
pytest.importorskip("jinja2")
x = da.ones((9, 9), chunks=(3, 3)).T[0:4, 0:4]
hg = x.dask
assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None
for layer in hg.layers.values():
assert xml.etree.ElementTree.fromstring(layer._repr_html_()) is not None
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert_eq(d[..., 1], x[..., 1])
assert_eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert_eq(d[np.arange(8)], x)
assert_eq(d[np.ones(8, dtype=bool)], x)
assert_eq(d[np.array([1])], x[[1]])
assert_eq(d[np.array([True, False, True] + [False] * 5)], x[[0, 2]])
def test_slicing_flexible_type():
a = np.array([["a", "b"], ["c", "d"]])
b = da.from_array(a, 2)
assert_eq(a[:, 0], b[:, 0])
def test_slicing_with_object_dtype():
# https://github.com/dask/dask/issues/6892
d = da.from_array(np.array(["a", "b"], dtype=object), chunks=(1,))
assert d.dtype == d[(0,)].dtype
def test_dtype():
d = da.ones((4, 4), chunks=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
pytest.raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,) * 10, (3,))
assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)
def test_coerce():
d0 = da.from_array(np.array(1), chunks=(1,))
d1 = da.from_array(np.array([1]), chunks=(1,))
with dask.config.set(scheduler="sync"):
for d in d0, d1:
assert bool(d) is True
assert int(d) == 1
assert float(d) == 1.0
assert complex(d) == complex(1)
a2 = np.arange(2)
d2 = da.from_array(a2, chunks=(2,))
for func in (int, float, complex):
pytest.raises(TypeError, lambda: func(d2))
def test_bool():
arr = np.arange(100).reshape((10, 10))
darr = da.from_array(arr, chunks=(10, 10))
with pytest.raises(ValueError):
bool(darr)
bool(darr == darr)
def test_store_kwargs():
d = da.ones((10, 10), chunks=(2, 2))
a = d + 1
called = [False]
def get_func(*args, **kwargs):
assert kwargs.pop("foo") == "test kwarg"
r = dask.get(*args, **kwargs)
called[0] = True
return r
called[0] = False
at = np.zeros(shape=(10, 10))
store([a], [at], scheduler=get_func, foo="test kwarg")
assert called[0]
called[0] = False
at = np.zeros(shape=(10, 10))
a.store(at, scheduler=get_func, foo="test kwarg")
assert called[0]
called[0] = False
at = np.zeros(shape=(10, 10))
store([a], [at], scheduler=get_func, return_stored=True, foo="test kwarg")
assert called[0]
def test_store_delayed_target():
from dask.delayed import delayed
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
# empty buffers to be used as targets
targs = {}
def make_target(key):
a = np.empty((4, 4))
targs[key] = a
return a
# delayed calls to these targets
atd = delayed(make_target)("at")
btd = delayed(make_target)("bt")
# test not keeping result
st = store([a, b], [atd, btd])
at = targs["at"]
bt = targs["bt"]
assert st is None
assert_eq(at, a)
assert_eq(bt, b)
# test keeping result
for st_compute in [False, True]:
targs.clear()
st = store([a, b], [atd, btd], return_stored=True, compute=st_compute)
if st_compute:
assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in st)
st = dask.compute(*st)
at = targs["at"]
bt = targs["bt"]
assert st is not None
assert isinstance(st, tuple)
assert all([isinstance(v, np.ndarray) for v in st])
assert_eq(at, a)
assert_eq(bt, b)
assert_eq(st[0], a)
assert_eq(st[1], b)
pytest.raises(ValueError, lambda: store([a], [at, bt]))
pytest.raises(ValueError, lambda: store(at, at))
pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
st = store([a, b], [at, bt])
assert st is None
assert (at == 2).all()
assert (bt == 3).all()
pytest.raises(ValueError, lambda: store([a], [at, bt]))
pytest.raises(ValueError, lambda: store(at, at))
pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store_regions():
d = da.ones((4, 4, 4), dtype=int, chunks=(2, 2, 2))
a, b = d + 1, d + 2
a = a[:, 1:, :].astype(float)
region = (slice(None, None, 2), slice(None), [1, 2, 4, 5])
# Single region:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=region, compute=False)
assert isinstance(v, Delayed)
assert (at == 0).all() and (bt[region] == 0).all()
assert all([ev is None for ev in v.compute()])
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not (bt == 0).all()
assert not (at == 2).all() and not (at == 0).all()
# Multiple regions:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=[region, region], compute=False)
assert isinstance(v, Delayed)
assert (at == 0).all() and (bt[region] == 0).all()
assert all([ev is None for ev in v.compute()])
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not (bt == 0).all()
assert not (at == 2).all() and not (at == 0).all()
# Single region (keep result):
for st_compute in [False, True]:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store(
[a, b], [at, bt], regions=region, compute=st_compute, return_stored=True
)
assert isinstance(v, tuple)
assert all([isinstance(e, da.Array) for e in v])
if st_compute:
assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)
else:
assert (at == 0).all() and (bt[region] == 0).all()
ar, br = v
assert ar.dtype == a.dtype
assert br.dtype == b.dtype
assert ar.shape == a.shape
assert br.shape == b.shape
assert ar.chunks == a.chunks
assert br.chunks == b.chunks
ar, br = da.compute(ar, br)
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not (bt == 0).all()
assert not (at == 2).all() and not (at == 0).all()
assert (br == 3).all()
assert (ar == 2).all()
# Multiple regions (keep result):
for st_compute in [False, True]:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store(
[a, b],
[at, bt],
regions=[region, region],
compute=st_compute,
return_stored=True,
)
assert isinstance(v, tuple)
assert all([isinstance(e, da.Array) for e in v])
if st_compute:
assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)
else:
assert (at == 0).all() and (bt[region] == 0).all()
ar, br = v
assert ar.dtype == a.dtype
assert br.dtype == b.dtype
assert ar.shape == a.shape
assert br.shape == b.shape
assert ar.chunks == a.chunks
assert br.chunks == b.chunks
ar, br = da.compute(ar, br)
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not (bt == 0).all()
assert not (at == 2).all() and not (at == 0).all()
assert (br == 3).all()
assert (ar == 2).all()
def test_store_compute_false():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.zeros(shape=(4, 4))
bt = np.zeros(shape=(4, 4))
v = store([a, b], [at, bt], compute=False)
assert isinstance(v, Delayed)
# You need a well-formed HighLevelgraph for e.g. dask.graph_manipulation.bind
for layer in v.__dask_layers__():
assert layer in v.dask.layers
assert (at == 0).all() and (bt == 0).all()
assert all([ev is None for ev in v.compute()])
assert (at == 2).all() and (bt == 3).all()
at = np.zeros(shape=(4, 4))
bt = np.zeros(shape=(4, 4))
dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)
assert isinstance(dat, Array) and isinstance(dbt, Array)
assert (at == 0).all() and (bt == 0).all()
assert (dat.compute() == at).all() and (dbt.compute() == bt).all()
assert (at == 2).all() and (bt == 3).all()
def test_store_nocompute_regions():
x = da.ones(10, chunks=1)
y = np.zeros((2, 10))
d1 = da.store(x, y, regions=(0,), compute=False)
d2 = da.store(x, y, regions=(1,), compute=False)
assert d1.key != d2.key
class ThreadSafetyError(Exception):
pass
class NonthreadSafeStore:
def __init__(self):
self.in_use = False
def __setitem__(self, key, value):
if self.in_use:
raise ThreadSafetyError()
self.in_use = True
time.sleep(0.001)
self.in_use = False
class ThreadSafeStore:
def __init__(self):
self.concurrent_uses = 0
self.max_concurrent_uses = 0
def __setitem__(self, key, value):
self.concurrent_uses += 1
self.max_concurrent_uses = max(self.concurrent_uses, self.max_concurrent_uses)
time.sleep(0.01)
self.concurrent_uses -= 1
class CounterLock:
def __init__(self, *args, **kwargs):
self.lock = Lock(*args, **kwargs)
self.acquire_count = 0
self.release_count = 0
def acquire(self, *args, **kwargs):
self.acquire_count += 1
return self.lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
self.release_count += 1
return self.lock.release(*args, **kwargs)
def test_store_locks():
_Lock = type(Lock())
d = da.ones((10, 10), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.zeros(shape=(10, 10))
bt = np.zeros(shape=(10, 10))
lock = Lock()
v = store([a, b], [at, bt], compute=False, lock=lock)
assert isinstance(v, Delayed)
dsk = v.dask
locks = {vv for v in dsk.values() for vv in v if isinstance(vv, _Lock)}
assert locks == {lock}
# Ensure same lock applies over multiple stores
at = NonthreadSafeStore()
v = store([a, b], [at, at], lock=lock, scheduler="threads", num_workers=10)
assert v is None
# Don't assume thread safety by default
at = NonthreadSafeStore()
assert store(a, at, scheduler="threads", num_workers=10) is None
assert a.store(at, scheduler="threads", num_workers=10) is None
# Ensure locks can be removed
at = ThreadSafeStore()
for i in range(10):
st = a.store(at, lock=False, scheduler="threads", num_workers=10)
assert st is None
if at.max_concurrent_uses > 1:
break
if i == 9:
assert False
# Verify number of lock calls
nchunks = np.sum([np.prod([len(c) for c in e.chunks]) for e in [a, b]])
for c in (False, True):
at = np.zeros(shape=(10, 10))
bt = np.zeros(shape=(10, 10))
lock = CounterLock()
v = store([a, b], [at, bt], lock=lock, compute=c, return_stored=True)
assert all(isinstance(e, Array) for e in v)
da.compute(v)
# When `return_stored=True` and `compute=False`,
# the lock should be acquired only once for store and load steps
# as they are fused together into one step.
assert lock.acquire_count == lock.release_count
if c:
assert lock.acquire_count == 2 * nchunks
else:
assert lock.acquire_count == nchunks
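# A minimal sketch of the locking behaviour exercised above: a single lock
# passed to ``store`` serializes every chunk write, which is what makes a
# non-thread-safe target usable under the threaded scheduler.
def _sketch_store_with_lock():
    x = da.ones((4, 4), chunks=(2, 2))
    target = np.zeros((4, 4))
    store(x, target, lock=Lock(), scheduler="threads")
    assert (target == 1).all()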
def test_store_method_return():
d = da.ones((10, 10), chunks=(2, 2))
a = d + 1
for compute in [False, True]:
for return_stored in [False, True]:
at = np.zeros(shape=(10, 10))
r = a.store(
at, scheduler="threads", compute=compute, return_stored=return_stored
)
if return_stored:
assert isinstance(r, Array)
elif compute:
assert r is None
else:
assert isinstance(r, Delayed)
@pytest.mark.xfail(reason="can't lock with multiprocessing")
def test_store_multiprocessing_lock():
d = da.ones((10, 10), chunks=(2, 2))
a = d + 1
at = np.zeros(shape=(10, 10))
st = a.store(at, scheduler="processes", num_workers=10)
assert st is None
@pytest.mark.parametrize("return_stored", [False, True])
@pytest.mark.parametrize("delayed_target", [False, True])
def test_store_deterministic_keys(return_stored, delayed_target):
a = da.ones((10, 10), chunks=(2, 2))
at = np.zeros(shape=(10, 10))
if delayed_target:
at = delayed(at)
st1 = a.store(at, return_stored=return_stored, compute=False)
st2 = a.store(at, return_stored=return_stored, compute=False)
assert st1.dask.keys() == st2.dask.keys()
def test_to_hdf5():
h5py = pytest.importorskip("h5py")
x = da.ones((4, 4), chunks=(2, 2))
y = da.ones(4, chunks=2, dtype="i4")
with tmpfile(".hdf5") as fn:
x.to_hdf5(fn, "/x")
with h5py.File(fn, mode="r+") as f:
d = f["/x"]
assert_eq(d[:], x)
assert d.chunks == (2, 2)
with tmpfile(".hdf5") as fn:
x.to_hdf5(fn, "/x", chunks=None)
with h5py.File(fn, mode="r+") as f:
d = f["/x"]
assert_eq(d[:], x)
assert d.chunks is None
with tmpfile(".hdf5") as fn:
x.to_hdf5(fn, "/x", chunks=(1, 1))
with h5py.File(fn, mode="r+") as f:
d = f["/x"]
assert_eq(d[:], x)
assert d.chunks == (1, 1)
with tmpfile(".hdf5") as fn:
da.to_hdf5(fn, {"/x": x, "/y": y})
with h5py.File(fn, mode="r+") as f:
assert_eq(f["/x"][:], x)
assert f["/x"].chunks == (2, 2)
assert_eq(f["/y"][:], y)
assert f["/y"].chunks == (2,)
def test_to_dask_dataframe():
dd = pytest.importorskip("dask.dataframe")
a = da.ones((4,), chunks=(2,))
d = a.to_dask_dataframe()
assert isinstance(d, dd.Series)
a = da.ones((4, 4), chunks=(2, 2))
d = a.to_dask_dataframe()
assert isinstance(d, dd.DataFrame)
def test_np_array_with_zero_dimensions():
d = da.ones((4, 4), chunks=(2, 2))
assert_eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_dtype_complex():
x = np.arange(24).reshape((4, 6)).astype("f4")
y = np.arange(24).reshape((4, 6)).astype("i8")
z = np.arange(24).reshape((4, 6)).astype("i2")
a = da.from_array(x, chunks=(2, 3))
b = da.from_array(y, chunks=(2, 3))
c = da.from_array(z, chunks=(2, 3))
    def assert_eq(a, b):
        # shadows the module-level assert_eq on purpose; assert for real so the
        # dtype comparisons below can actually fail instead of being ignored
        assert isinstance(a, np.dtype) and isinstance(b, np.dtype) and str(a) == str(b)
assert_eq(a.dtype, x.dtype)
assert_eq(b.dtype, y.dtype)
assert_eq((a + 1).dtype, (x + 1).dtype)
assert_eq((a + b).dtype, (x + y).dtype)
assert_eq(a.T.dtype, x.T.dtype)
assert_eq(a[:3].dtype, x[:3].dtype)
assert_eq((a.dot(b.T)).dtype, (x.dot(y.T)).dtype)
assert_eq(stack([a, b]).dtype, np.vstack([x, y]).dtype)
assert_eq(concatenate([a, b]).dtype, np.concatenate([x, y]).dtype)
assert_eq(b.std().dtype, y.std().dtype)
assert_eq(c.sum().dtype, z.sum().dtype)
assert_eq(a.min().dtype, a.min().dtype)
assert_eq(b.std().dtype, b.std().dtype)
assert_eq(a.argmin(axis=0).dtype, a.argmin(axis=0).dtype)
assert_eq(da.sin(c).dtype, np.sin(z).dtype)
assert_eq(da.exp(b).dtype, np.exp(y).dtype)
assert_eq(da.floor(a).dtype, np.floor(x).dtype)
assert_eq(da.isnan(b).dtype, np.isnan(y).dtype)
with contextlib.suppress(ImportError):
assert da.isnull(b).dtype == "bool"
assert da.notnull(b).dtype == "bool"
x = np.array([("a", 1)], dtype=[("text", "S1"), ("numbers", "i4")])
d = da.from_array(x, chunks=(1,))
assert_eq(d["text"].dtype, x["text"].dtype)
assert_eq(d[["numbers", "text"]].dtype, x[["numbers", "text"]].dtype)
def test_astype():
x = np.ones((5, 5), dtype="f8")
d = da.from_array(x, chunks=(2, 2))
assert d.astype("i8").dtype == "i8"
assert_eq(d.astype("i8"), x.astype("i8"))
assert same_keys(d.astype("i8"), d.astype("i8"))
with pytest.raises(TypeError):
d.astype("i8", casting="safe")
with pytest.raises(TypeError):
d.astype("i8", not_a_real_kwarg="foo")
# smoketest with kwargs
assert_eq(d.astype("i8", copy=False), x.astype("i8", copy=False))
# Check it's a noop
assert d.astype("f8") is d
def test_arithmetic():
x = np.arange(5).astype("f4") + 2
y = np.arange(5).astype("i8") + 2
z = np.arange(5).astype("i4") + 2
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
c = da.from_array(z, chunks=(2,))
assert_eq(a + b, x + y)
assert_eq(a * b, x * y)
assert_eq(a - b, x - y)
assert_eq(a / b, x / y)
assert_eq(b & b, y & y)
assert_eq(b | b, y | y)
assert_eq(b ^ b, y ^ y)
assert_eq(a // b, x // y)
assert_eq(a ** b, x ** y)
assert_eq(a % b, x % y)
assert_eq(a > b, x > y)
assert_eq(a < b, x < y)
assert_eq(a >= b, x >= y)
assert_eq(a <= b, x <= y)
assert_eq(a == b, x == y)
assert_eq(a != b, x != y)
assert_eq(a + 2, x + 2)
assert_eq(a * 2, x * 2)
assert_eq(a - 2, x - 2)
assert_eq(a / 2, x / 2)
assert_eq(b & True, y & True)
assert_eq(b | True, y | True)
assert_eq(b ^ True, y ^ True)
assert_eq(a // 2, x // 2)
assert_eq(a ** 2, x ** 2)
assert_eq(a % 2, x % 2)
assert_eq(a > 2, x > 2)
assert_eq(a < 2, x < 2)
assert_eq(a >= 2, x >= 2)
assert_eq(a <= 2, x <= 2)
assert_eq(a == 2, x == 2)
assert_eq(a != 2, x != 2)
assert_eq(2 + b, 2 + y)
assert_eq(2 * b, 2 * y)
assert_eq(2 - b, 2 - y)
assert_eq(2 / b, 2 / y)
assert_eq(True & b, True & y)
assert_eq(True | b, True | y)
assert_eq(True ^ b, True ^ y)
assert_eq(2 // b, 2 // y)
assert_eq(2 ** b, 2 ** y)
assert_eq(2 % b, 2 % y)
assert_eq(2 > b, 2 > y)
assert_eq(2 < b, 2 < y)
assert_eq(2 >= b, 2 >= y)
assert_eq(2 <= b, 2 <= y)
assert_eq(2 == b, 2 == y)
assert_eq(2 != b, 2 != y)
assert_eq(-a, -x)
assert_eq(abs(a), abs(x))
assert_eq(~(a == b), ~(x == y))
assert_eq(~(a == b), ~(x == y))
assert_eq(da.logaddexp(a, b), np.logaddexp(x, y))
assert_eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
with pytest.warns(None): # Overflow warning
assert_eq(da.exp(b), np.exp(y))
assert_eq(da.log(a), np.log(x))
assert_eq(da.log10(a), np.log10(x))
assert_eq(da.log1p(a), np.log1p(x))
with pytest.warns(None): # Overflow warning
assert_eq(da.expm1(b), np.expm1(y))
assert_eq(da.sqrt(a), np.sqrt(x))
assert_eq(da.square(a), np.square(x))
assert_eq(da.sin(a), np.sin(x))
assert_eq(da.cos(b), np.cos(y))
assert_eq(da.tan(a), np.tan(x))
assert_eq(da.arcsin(b / 10), np.arcsin(y / 10))
assert_eq(da.arccos(b / 10), np.arccos(y / 10))
assert_eq(da.arctan(b / 10), np.arctan(y / 10))
assert_eq(da.arctan2(b * 10, a), np.arctan2(y * 10, x))
assert_eq(da.hypot(b, a), np.hypot(y, x))
assert_eq(da.sinh(a), np.sinh(x))
with pytest.warns(None): # Overflow warning
assert_eq(da.cosh(b), np.cosh(y))
assert_eq(da.tanh(a), np.tanh(x))
assert_eq(da.arcsinh(b * 10), np.arcsinh(y * 10))
assert_eq(da.arccosh(b * 10), np.arccosh(y * 10))
assert_eq(da.arctanh(b / 10), np.arctanh(y / 10))
assert_eq(da.deg2rad(a), np.deg2rad(x))
assert_eq(da.rad2deg(a), np.rad2deg(x))
assert_eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
assert_eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
assert_eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
assert_eq(da.logical_not(a < 1), np.logical_not(x < 1))
assert_eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))
assert_eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))
assert_eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))
assert_eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))
assert_eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
assert_eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
assert_eq(da.isfinite(a), np.isfinite(x))
assert_eq(da.isinf(a), np.isinf(x))
assert_eq(da.isnan(a), np.isnan(x))
assert_eq(da.signbit(a - 3), np.signbit(x - 3))
assert_eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
assert_eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
with pytest.warns(None): # overflow warning
assert_eq(da.ldexp(c, c), np.ldexp(z, z))
assert_eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
assert_eq(da.floor(a * 0.5), np.floor(x * 0.5))
assert_eq(da.ceil(a), np.ceil(x))
assert_eq(da.trunc(a / 2), np.trunc(x / 2))
assert_eq(da.degrees(b), np.degrees(y))
assert_eq(da.radians(a), np.radians(x))
assert_eq(da.rint(a + 0.3), np.rint(x + 0.3))
assert_eq(da.fix(a - 2.5), np.fix(x - 2.5))
assert_eq(da.angle(a + 1j), np.angle(x + 1j))
assert_eq(da.real(a + 1j), np.real(x + 1j))
assert_eq((a + 1j).real, np.real(x + 1j))
assert_eq(da.imag(a + 1j), np.imag(x + 1j))
assert_eq((a + 1j).imag, np.imag(x + 1j))
assert_eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
assert_eq((a + 1j * b).conj(), (x + 1j * y).conj())
assert_eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
assert_eq(b.clip(1, 4), y.clip(1, 4))
assert_eq(da.fabs(b), np.fabs(y))
assert_eq(da.sign(b - 2), np.sign(y - 2))
assert_eq(da.absolute(b - 2), np.absolute(y - 2))
assert_eq(da.absolute(b - 2 + 1j), np.absolute(y - 2 + 1j))
l1, l2 = da.frexp(a)
r1, r2 = np.frexp(x)
assert_eq(l1, r1)
assert_eq(l2, r2)
l1, l2 = da.modf(a)
r1, r2 = np.modf(x)
assert_eq(l1, r1)
assert_eq(l2, r2)
assert_eq(da.around(a, -1), np.around(x, -1))
def test_elemwise_consistent_names():
a = da.from_array(np.arange(5, dtype="f4"), chunks=(2,))
b = da.from_array(np.arange(5, dtype="f4"), chunks=(2,))
assert same_keys(a + b, a + b)
assert same_keys(a + 2, a + 2)
assert same_keys(da.exp(a), da.exp(a))
assert same_keys(da.exp(a, dtype="f8"), da.exp(a, dtype="f8"))
assert same_keys(da.maximum(a, b), da.maximum(a, b))
def test_optimize():
x = np.arange(5).astype("f4")
a = da.from_array(x, chunks=(2,))
expr = a[1:4] + 1
result = optimize(expr.dask, expr.__dask_keys__())
assert isinstance(result, dict)
assert all(key in result for key in expr.__dask_keys__())
def test_slicing_with_non_ndarrays():
class ARangeSlice:
dtype = np.dtype("i8")
ndim = 1
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __array__(self):
return np.arange(self.start, self.stop)
class ARangeSlicable:
dtype = np.dtype("i8")
ndim = 1
def __init__(self, n):
self.n = n
@property
def shape(self):
return (self.n,)
def __getitem__(self, key):
return ARangeSlice(key[0].start, key[0].stop)
x = da.from_array(ARangeSlicable(10), chunks=(4,))
assert_eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_getter():
assert type(getter(np.matrix([[1]]), 0)) is np.ndarray
assert type(getter(np.matrix([[1]]), 0, asarray=False)) is np.matrix
assert_eq(getter([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))
assert_eq(getter(np.arange(5), (None, slice(None, None))), np.arange(5)[None, :])
def test_size():
x = da.ones((10, 2), chunks=(3, 1))
assert x.size == np.array(x).size
assert isinstance(x.size, int)
def test_nbytes():
x = da.ones((10, 2), chunks=(3, 1))
assert x.nbytes == np.array(x).nbytes
def test_itemsize():
x = da.ones((10, 2), chunks=(3, 1))
assert x.itemsize == 8
def test_Array_normalizes_dtype():
x = da.ones((3,), chunks=(1,), dtype=int)
assert isinstance(x.dtype, np.dtype)
@pytest.mark.parametrize("inline_array", [True, False])
def test_from_array_with_lock(inline_array):
x = np.arange(10)
class FussyLock(SerializableLock):
def acquire(self, blocking=True, timeout=-1):
if self.locked():
raise RuntimeError("I am locked")
return super().acquire(blocking, timeout)
lock = FussyLock()
d = da.from_array(x, chunks=5, lock=lock, inline_array=inline_array)
lock.acquire()
with pytest.raises(RuntimeError):
d.compute()
lock.release()
assert_eq(d, x)
lock = CounterLock()
e = da.from_array(x, chunks=5, lock=lock, inline_array=inline_array)
assert_eq(e, x)
# Note: the specific counts for composite arithmetic operations can vary
# significantly based on the complexity of the computation, whether we are inlining,
# and optimization fusion settings. But for this simple comparison it seems pretty
# stable.
assert lock.release_count == 2
assert lock.acquire_count == 2
class MyArray:
def __init__(self, x):
self.x = x
self.dtype = x.dtype
self.shape = x.shape
self.ndim = len(x.shape)
def __getitem__(self, i):
return self.x[i]
@pytest.mark.parametrize(
"x,chunks",
[
(np.arange(25).reshape((5, 5)), (5, 5)),
(np.arange(25).reshape((5, 5)), -1),
(np.array([[1]]), 1),
(np.array(1), 1),
],
)
@pytest.mark.parametrize("inline_array", [True, False])
def test_from_array_tasks_always_call_getter(x, chunks, inline_array):
dx = da.from_array(
MyArray(x), chunks=chunks, asarray=False, inline_array=inline_array
)
assert_eq(x, dx)
def test_from_array_ndarray_onechunk():
"""ndarray with a single chunk produces a minimal single key dict"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=-1)
assert_eq(x, dx)
assert len(dx.dask) == 1
assert dx.dask[dx.name, 0, 0] is x
def test_from_array_ndarray_getitem():
"""For ndarray, don't use getter / getter_nofancy; use the cleaner
operator.getitem"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=(1, 2))
assert_eq(x, dx)
assert (dx.dask[dx.name, 0, 0] == np.array([[1, 2]])).all()
@pytest.mark.parametrize("x", [[1, 2], (1, 2), memoryview(b"abc")])
def test_from_array_list(x):
"""Lists, tuples, and memoryviews are automatically converted to ndarray"""
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(dx.dask[dx.name, 0], np.ndarray)
dx = da.from_array(x, chunks=1)
assert_eq(np.array(x), dx)
assert dx.dask[dx.name, 0][0] == x[0]
# On MacOS Python 3.9, the order of the np.ScalarType tuple randomly changes across
# interpreter restarts, thus causing pytest-xdist failures; setting PYTHONHASHSEED does
# not help
@pytest.mark.parametrize(
"type_", sorted((t for t in np.ScalarType if t is not memoryview), key=str)
)
def test_from_array_scalar(type_):
"""Python and numpy scalars are automatically converted to ndarray"""
if type_ == np.datetime64:
x = np.datetime64("2000-01-01")
else:
x = type_(1)
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(
dx.dask[
dx.name,
],
np.ndarray,
)
@pytest.mark.parametrize("asarray,cls", [(True, np.ndarray), (False, np.matrix)])
@pytest.mark.parametrize("inline_array", [True, False])
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_from_array_no_asarray(asarray, cls, inline_array):
def assert_chunks_are_of_type(x):
chunks = compute_as_if_collection(Array, x.dask, x.__dask_keys__())
# If it's a tuple of tuples we want to concat, but if it's a tuple
# of 1d arrays, we just want to iterate directly
for c in concat(chunks) if isinstance(chunks[0], tuple) else chunks:
assert type(c) is cls
x = np.matrix(np.arange(100).reshape((10, 10)))
dx = da.from_array(x, chunks=(5, 5), asarray=asarray, inline_array=inline_array)
assert_chunks_are_of_type(dx)
assert_chunks_are_of_type(dx[0:5])
assert_chunks_are_of_type(dx[0:5][:, 0])
def test_from_array_getitem():
x = np.arange(10)
def my_getitem(x, ind):
return x[ind]
y = da.from_array(x, chunks=(5,), getitem=my_getitem)
for k, v in y.dask.items():
if isinstance(v, tuple):
assert v[0] is my_getitem
assert_eq(x, y)
def test_from_array_minus_one():
x = np.arange(10)
y = da.from_array(x, -1)
assert y.chunks == ((10,),)
assert_eq(x, y)
def test_from_array_copy():
# Regression test for https://github.com/dask/dask/issues/3751
x = np.arange(10)
y = da.from_array(x, -1)
assert y.npartitions == 1
y_c = y.copy()
assert y is not y_c
assert y.compute() is not y_c.compute()
def test_from_array_dask_array():
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=(1, 2))
with pytest.raises(ValueError):
da.from_array(dx)
def test_from_array_dask_collection_warns():
class CustomCollection(np.ndarray):
def __dask_graph__(self):
return {"bar": 1}
x = CustomCollection([1, 2, 3])
with pytest.warns(UserWarning):
da.from_array(x)
# Ensure da.array warns too
with pytest.warns(UserWarning):
da.array(x)
def test_from_array_inline():
class MyArray(np.ndarray):
pass
a = np.array([1, 2, 3]).view(MyArray)
dsk = dict(da.from_array(a, name="my-array", inline_array=False).dask)
assert dsk["original-my-array"] is a
dsk = dict(da.from_array(a, name="my-array", inline_array=True).dask)
assert "original-my-array" not in dsk
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray(asarray):
assert_eq(asarray([1, 2, 3]), np.asarray([1, 2, 3]))
x = asarray([1, 2, 3])
assert asarray(x) is x
y = [x[0], 2, x[2]]
assert_eq(asarray(y), x)
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray_dask_dataframe(asarray):
# https://github.com/dask/dask/issues/3885
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
s = dd.from_pandas(pd.Series([1, 2, 3, 4]), 2)
result = asarray(s)
expected = s.values
assert_eq(result, expected)
df = s.to_frame(name="s")
result = asarray(df)
expected = df.values
assert_eq(result, expected)
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
@pytest.mark.parametrize("inline_array", [True, False])
def test_asarray_h5py(asarray, inline_array):
h5py = pytest.importorskip("h5py")
with tmpfile(".hdf5") as fn:
with h5py.File(fn, mode="a") as f:
d = f.create_dataset("/x", shape=(2, 2), dtype=float)
x = asarray(d, inline_array=inline_array)
# Check for the array in the dsk
dsk = dict(x.dask)
assert (d in dsk.values()) is not inline_array
assert not any(isinstance(v, np.ndarray) for v in dsk.values())
def test_asarray_chunks():
with dask.config.set({"array.chunk-size": "100 B"}):
x = np.ones(1000)
d = da.asarray(x)
assert d.npartitions > 1
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_asanyarray():
x = np.matrix([1, 2, 3])
dx = da.asanyarray(x)
assert dx.numblocks == (1, 1)
chunks = compute_as_if_collection(Array, dx.dask, dx.__dask_keys__())
assert isinstance(chunks[0][0], np.matrix)
assert da.asanyarray(dx) is dx
def test_asanyarray_dataframe():
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2)
x = np.asanyarray(df)
dx = da.asanyarray(ddf)
assert isinstance(dx, da.Array)
assert_eq(x, dx)
x = np.asanyarray(df.x)
dx = da.asanyarray(ddf.x)
assert isinstance(dx, da.Array)
assert_eq(x, dx)
def test_asanyarray_datetime64():
x = np.array(["2000-01-01"], dtype="datetime64")
dx = da.asanyarray(x)
assert isinstance(dx, da.Array)
assert_eq(x, dx)
def test_from_func():
x = np.arange(10)
f = lambda n: n * x
d = from_func(f, (10,), x.dtype, kwargs={"n": 2})
assert d.shape == x.shape
assert d.dtype == x.dtype
assert_eq(d, 2 * x)
assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={"n": 2}))
def test_concatenate3_2():
x = np.array([1, 2])
assert_eq(concatenate3([x, x, x]), np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (
concatenate3([[x, x, x], [x, x, x]])
== np.array([[1, 2, 1, 2, 1, 2], [1, 2, 1, 2, 1, 2]])
).all()
assert (
concatenate3([[x, x], [x, x], [x, x]])
== np.array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]])
).all()
x = np.arange(12).reshape((2, 2, 3))
assert_eq(
concatenate3([[[x, x, x], [x, x, x]], [[x, x, x], [x, x, x]]]),
np.array(
[
[
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
],
[
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
],
[
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
],
[
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11],
],
]
),
)
@pytest.mark.parametrize("one_d", [True, False])
@mock.patch.object(da.core, "_concatenate2", wraps=da.core._concatenate2)
def test_concatenate3_nep18_dispatching(mock_concatenate2, one_d):
x = EncapsulateNDArray(np.arange(10))
concat = [x, x] if one_d else [[x[None]], [x[None]]]
result = concatenate3(concat)
assert type(result) is type(x)
mock_concatenate2.assert_called()
mock_concatenate2.reset_mock()
# When all the inputs are supported by plain `np.concatenate`, we should take the concatenate3
# fastpath of allocating the full array up front and writing blocks into it.
concat = [x.arr, x.arr] if one_d else [[x.arr[None]], [x.arr[None]]]
plain_np_result = concatenate3(concat)
mock_concatenate2.assert_not_called()
assert type(plain_np_result) is np.ndarray
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert_eq(
da.core.map_blocks(lambda a, b: a + 2 * b, d, e, dtype=d.dtype), x + 2 * y
)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
func = lambda a, b: a + 2 * b
res = da.core.map_blocks(func, d, f, dtype=d.dtype)
assert_eq(res, x + 2 * z)
assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)
assert_eq(da.map_blocks(func, f, d, dtype=d.dtype), z + 2 * x)
def test_from_array_with_missing_chunks():
x = np.random.randn(2, 4, 3)
d = da.from_array(x, chunks=(None, 2, None))
assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
def test_normalize_chunks():
assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))
assert normalize_chunks(((3, 3), (8,)), (6, 8)) == ((3, 3), (8,))
assert normalize_chunks((4, 5), (9,)) == ((4, 5),)
assert normalize_chunks((4, 5), (9, 9)) == ((4, 4, 1), (5, 4))
assert normalize_chunks(-1, (5, 5)) == ((5,), (5,))
assert normalize_chunks((3, -1), (5, 5)) == ((3, 2), (5,))
assert normalize_chunks((3, None), (5, 5)) == ((3, 2), (5,))
assert normalize_chunks({0: 3}, (5, 5)) == ((3, 2), (5,))
assert normalize_chunks([[2, 2], [3, 3]]) == ((2, 2), (3, 3))
assert normalize_chunks(10, (30, 5)) == ((10, 10, 10), (5,))
assert normalize_chunks((), (0, 0)) == ((0,), (0,))
assert normalize_chunks(-1, (0, 3)) == ((0,), (3,))
assert normalize_chunks("auto", shape=(20,), limit=5, dtype="uint8") == (
(5, 5, 5, 5),
)
assert normalize_chunks(("auto", None), (5, 5), dtype=int) == ((5,), (5,))
with pytest.raises(ValueError):
normalize_chunks(((10,),), (11,))
with pytest.raises(ValueError):
normalize_chunks(((5,), (5,)), (5,))
def test_align_chunks_to_previous_chunks():
chunks = normalize_chunks(
"auto", shape=(2000,), previous_chunks=(512,), limit="600 B", dtype=np.uint8
)
assert chunks == ((512, 512, 512, 2000 - 512 * 3),)
chunks = normalize_chunks(
"auto", shape=(2000,), previous_chunks=(128,), limit="600 B", dtype=np.uint8
)
assert chunks == ((512, 512, 512, 2000 - 512 * 3),)
chunks = normalize_chunks(
"auto", shape=(2000,), previous_chunks=(512,), limit="1200 B", dtype=np.uint8
)
assert chunks == ((1024, 2000 - 1024),)
chunks = normalize_chunks(
"auto",
shape=(3, 10211, 10376),
previous_chunks=(1, 512, 512),
limit="1MiB",
dtype=np.float32,
)
assert chunks[0] == (1, 1, 1)
assert all(c % 512 == 0 for c in chunks[1][:-1])
assert all(c % 512 == 0 for c in chunks[2][:-1])
def test_raise_on_no_chunks():
x = da.ones(6, chunks=3)
try:
Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
assert False
except ValueError as e:
assert "dask" in str(e)
assert ".org" in str(e)
def test_chunks_is_immutable():
x = da.ones(6, chunks=3)
try:
x.chunks = 2
assert False
except TypeError as e:
assert "rechunk(2)" in str(e)
def test_raise_on_bad_kwargs():
x = da.ones(5, chunks=3)
try:
da.minimum(x, foo=None)
except TypeError as e:
assert "minimum" in str(e)
assert "foo" in str(e)
def test_long_slice():
x = np.arange(10000)
d = da.from_array(x, chunks=1)
assert_eq(d[8000:8200], x[8000:8200])
def test_h5py_newaxis():
h5py = pytest.importorskip("h5py")
with tmpfile("h5") as fn:
with h5py.File(fn, mode="a") as f:
x = f.create_dataset("/x", shape=(10, 10), dtype="f8")
d = da.from_array(x, chunks=(5, 5))
assert d[None, :, :].compute(scheduler="sync").shape == (1, 10, 10)
assert d[:, None, :].compute(scheduler="sync").shape == (10, 1, 10)
assert d[:, :, None].compute(scheduler="sync").shape == (10, 10, 1)
assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
assert_eq(da.ones(4, chunks=2)[...], np.ones(4))
def test_point_slicing():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])
result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
from dask.array.core import _get_axis, _vindex_transpose
x = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))
d = da.from_array(x, chunks=(2, 3, 3, 4))
inds = [
[[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
[[1, 2, 3], None, [4, 3, 2], None],
[[1, 2, 3], [3, 2, 1]],
[[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
[[], [], [], None],
[np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
[None, None, [1, 2, 3], [4, 3, 2]],
[None, [0, 2, 3], None, [0, 3, 2]],
]
for ind in inds:
slc = [
i if isinstance(i, (np.ndarray, list)) else slice(None, None) for i in ind
]
result = d.vindex[tuple(slc)]
# Rotate the expected result accordingly
axis = _get_axis(ind)
expected = _vindex_transpose(x[tuple(slc)], axis)
assert_eq(result, expected)
# Always have the first axis be the length of the points
k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
assert result.shape[0] == k
def test_slice_with_floats():
d = da.ones((5,), chunks=(3,))
with pytest.raises(IndexError):
d[1.5]
with pytest.raises(IndexError):
d[0:1.5]
with pytest.raises(IndexError):
d[[1, 1.5]]
def test_slice_with_integer_types():
x = np.arange(10)
dx = da.from_array(x, chunks=5)
inds = np.array([0, 3, 6], dtype="u8")
assert_eq(dx[inds], x[inds])
assert_eq(dx[inds.astype("u4")], x[inds.astype("u4")])
inds = np.array([0, 3, 6], dtype=np.int64)
assert_eq(dx[inds], x[inds])
assert_eq(dx[inds.astype("u4")], x[inds.astype("u4")])
def test_index_with_integer_types():
x = np.arange(10)
dx = da.from_array(x, chunks=5)
inds = int(3)
assert_eq(dx[inds], x[inds])
inds = np.int64(3)
assert_eq(dx[inds], x[inds])
def test_vindex_basic():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
# cases where basic and advanced indexing coincide
result = d.vindex[0]
assert_eq(result, x[0])
result = d.vindex[0, 1]
assert_eq(result, x[0, 1])
result = d.vindex[[0, 1], ::-1] # slices last
assert_eq(result, x[:2, ::-1])
def test_vindex_nd():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[[0, 1], [6, 0]], [[0, 1], [0, 7]]]
assert_eq(result, x[[[0, 1], [6, 0]], [[0, 1], [0, 7]]])
result = d.vindex[np.arange(7)[:, None], np.arange(8)[None, :]]
assert_eq(result, x)
result = d.vindex[np.arange(7)[None, :], np.arange(8)[:, None]]
assert_eq(result, x.T)
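# A minimal sketch of the vindex semantics checked above: index arrays are
# matched element by element (pointwise), exactly like NumPy "vectorized"
# fancy indexing, rather than combined as an outer product.
def _sketch_vindex_pointwise():
    x = np.arange(12).reshape((3, 4))
    d = da.from_array(x, chunks=2)
    # picks the elements (0, 1) and (2, 3)
    assert_eq(d.vindex[[0, 2], [1, 3]], x[[0, 2], [1, 3]])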
def test_vindex_negative():
x = np.arange(10)
d = da.from_array(x, chunks=(5, 5))
result = d.vindex[np.array([0, -1])]
assert_eq(result, x[np.array([0, -1])])
def test_vindex_errors():
d = da.ones((5, 5, 5), chunks=(3, 3, 3))
pytest.raises(IndexError, lambda: d.vindex[np.newaxis])
pytest.raises(IndexError, lambda: d.vindex[[1, 2], [1, 2, 3]])
pytest.raises(IndexError, lambda: d.vindex[[True] * 5])
pytest.raises(IndexError, lambda: d.vindex[[0], [5]])
pytest.raises(IndexError, lambda: d.vindex[[0], [-6]])
def test_vindex_merge():
from dask.array.core import _vindex_merge
locations = [1], [2, 0]
values = [np.array([[1, 2, 3]]), np.array([[10, 20, 30], [40, 50, 60]])]
assert (
_vindex_merge(locations, values)
== np.array([[40, 50, 60], [1, 2, 3], [10, 20, 30]])
).all()
def test_vindex_identity():
rng = da.random.RandomState(42)
a, b = 10, 20
x = rng.random(a, chunks=a // 2)
assert x is x.vindex[:]
assert x is x.vindex[:a]
pytest.raises(IndexError, lambda: x.vindex[: a - 1])
pytest.raises(IndexError, lambda: x.vindex[1:])
pytest.raises(IndexError, lambda: x.vindex[0:a:2])
x = rng.random((a, b), chunks=(a // 2, b // 2))
assert x is x.vindex[:, :]
assert x is x.vindex[:a, :b]
pytest.raises(IndexError, lambda: x.vindex[:, : b - 1])
pytest.raises(IndexError, lambda: x.vindex[:, 1:])
pytest.raises(IndexError, lambda: x.vindex[:, 0:b:2])
def test_empty_array():
assert_eq(np.arange(0), da.arange(0, chunks=5))
def test_memmap():
with tmpfile("npy") as fn_1:
with tmpfile("npy") as fn_2:
try:
x = da.arange(100, chunks=15)
target = np.memmap(fn_1, shape=x.shape, mode="w+", dtype=x.dtype)
x.store(target)
assert_eq(target, x, check_type=False)
np.save(fn_2, target)
assert_eq(np.load(fn_2, mmap_mode="r"), x, check_type=False)
finally:
target._mmap.close()
def test_to_npy_stack():
x = np.arange(5 * 10 * 10).reshape((5, 10, 10))
d = da.from_array(x, chunks=(2, 4, 4))
with tmpdir() as dirname:
stackdir = os.path.join(dirname, "test")
da.to_npy_stack(stackdir, d, axis=0)
assert os.path.exists(os.path.join(stackdir, "0.npy"))
assert (np.load(os.path.join(stackdir, "1.npy")) == x[2:4]).all()
e = da.from_npy_stack(stackdir)
assert_eq(d, e)
def test_view():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(2, 3))
assert_eq(x.view(), d.view())
assert_eq(x.view("i4"), d.view("i4"))
assert_eq(x.view("i2"), d.view("i2"))
assert all(isinstance(s, int) for s in d.shape)
x = np.arange(8, dtype="i1")
d = da.from_array(x, chunks=(4,))
assert_eq(x.view("i4"), d.view("i4"))
with pytest.raises(ValueError):
x = np.arange(8, dtype="i1")
d = da.from_array(x, chunks=(3,))
d.view("i4")
with pytest.raises(ValueError):
d.view("i4", order="asdf")
def test_view_fortran():
x = np.asfortranarray(np.arange(64).reshape((8, 8)))
d = da.from_array(x, chunks=(2, 3))
assert_eq(x.T.view("i4").T, d.view("i4", order="F"))
assert_eq(x.T.view("i2").T, d.view("i2", order="F"))
def test_h5py_tokenize():
h5py = pytest.importorskip("h5py")
with tmpfile("hdf5") as fn1:
with tmpfile("hdf5") as fn2:
f = h5py.File(fn1, mode="a")
g = h5py.File(fn2, mode="a")
f["x"] = np.arange(10).astype(float)
g["x"] = np.ones(10).astype(float)
x1 = f["x"]
x2 = g["x"]
assert tokenize(x1) != tokenize(x2)
def test_map_blocks_with_changed_dimension():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(7, 4))
e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0, dtype=d.dtype)
assert e.chunks == ((4, 4),)
assert_eq(e, x.sum(axis=0))
# Provided chunks have wrong shape
with pytest.raises(ValueError):
d.map_blocks(lambda b: b.sum(axis=0), chunks=(), drop_axis=0)
with pytest.raises(ValueError):
d.map_blocks(lambda b: b.sum(axis=0), chunks=((4, 4, 4),), drop_axis=0)
with pytest.raises(ValueError):
d.map_blocks(lambda b: b.sum(axis=1), chunks=((3, 4),), drop_axis=1)
d = da.from_array(x, chunks=(4, 8))
e = d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)
assert e.chunks == ((4, 3),)
assert_eq(e, x.sum(axis=1))
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = d.map_blocks(
lambda b: b[None, :, :, None],
chunks=(1, 4, 4, 1),
new_axis=[0, 3],
dtype=d.dtype,
)
assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
assert_eq(e, x[None, :, :, None])
e = d.map_blocks(lambda b: b[None, :, :, None], new_axis=[0, 3], dtype=d.dtype)
assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
assert_eq(e, x[None, :, :, None])
# Adding axis with a gap
with pytest.raises(ValueError):
d.map_blocks(lambda b: b, new_axis=(3, 4))
# Both new_axis and drop_axis
d = da.from_array(x, chunks=(8, 4))
e = d.map_blocks(
lambda b: b.sum(axis=0)[:, None, None],
drop_axis=0,
new_axis=(1, 2),
dtype=d.dtype,
)
assert e.chunks == ((4, 4), (1,), (1,))
assert_eq(e, x.sum(axis=0)[:, None, None])
d = da.from_array(x, chunks=(4, 8))
e = d.map_blocks(
lambda b: b.sum(axis=1)[:, None, None],
drop_axis=1,
new_axis=(1, 2),
dtype=d.dtype,
)
assert e.chunks == ((4, 4), (1,), (1,))
assert_eq(e, x.sum(axis=1)[:, None, None])
def test_map_blocks_with_negative_drop_axis():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(7, 4))
for drop_axis in [0, -2]:
# test with equivalent positive and negative drop_axis
e = d.map_blocks(
lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype
)
assert e.chunks == ((4, 4),)
assert_eq(e, x.sum(axis=0))
def test_map_blocks_with_invalid_drop_axis():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(7, 4))
for drop_axis in [x.ndim, -x.ndim - 1]:
with pytest.raises(ValueError):
d.map_blocks(
lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype
)
def test_map_blocks_with_changed_dimension_and_broadcast_chunks():
# https://github.com/dask/dask/issues/4299
a = da.from_array([1, 2, 3], 3)
b = da.from_array(np.array([0, 1, 2, 0, 1, 2]), chunks=3)
result = da.map_blocks(operator.add, a, b, chunks=b.chunks)
expected = da.from_array(np.array([1, 3, 5, 1, 3, 5]), chunks=3)
assert_eq(result, expected)
def test_broadcast_chunks():
assert broadcast_chunks() == ()
assert broadcast_chunks(((2, 3),)) == ((2, 3),)
assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)
a = ((10, 10, 10), (5, 5))
b = ((5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))
assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5))
a = ((10, 10, 10), (5, 5))
b = ((1,), (5, 5))
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))
a = ((10, 10, 10), (5, 5))
b = ((3, 3), (5, 5))
with pytest.raises(ValueError):
broadcast_chunks(a, b)
a = ((1,), (5, 5))
b = ((1,), (5, 5))
assert broadcast_chunks(a, b) == a
a = ((1,), (np.nan, np.nan, np.nan))
b = ((3, 3), (1,))
r = broadcast_chunks(a, b)
assert r[0] == b[0] and np.allclose(r[1], a[1], equal_nan=True)
a = ((3, 3), (1,))
b = ((1,), (np.nan, np.nan, np.nan))
r = broadcast_chunks(a, b)
assert r[0] == a[0] and np.allclose(r[1], b[1], equal_nan=True)
a = ((3, 3), (5, 5))
b = ((1,), (np.nan, np.nan, np.nan))
with pytest.raises(ValueError):
broadcast_chunks(a, b)
def test_chunks_error():
x = np.ones((10, 10))
with pytest.raises(ValueError):
da.from_array(x, chunks=(5,))
def test_array_compute_forward_kwargs():
x = da.arange(10, chunks=2).sum()
x.compute(bogus_keyword=10)
def test_dont_fuse_outputs():
dsk = {("x", 0): np.array([1, 2]), ("x", 1): (inc, ("x", 0))}
a = da.Array(dsk, "x", chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)
assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))
def test_dont_dealias_outputs():
dsk = {
("x", 0, 0): np.ones((2, 2)),
("x", 0, 1): np.ones((2, 2)),
("x", 1, 0): np.ones((2, 2)),
("x", 1, 1): ("x", 0, 0),
}
a = da.Array(dsk, "x", chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)
assert_eq(a, np.ones((4, 4)))
def test_timedelta_op():
x = np.array([np.timedelta64(10, "h")])
y = np.timedelta64(1, "h")
a = da.from_array(x, chunks=(1,)) / y
assert a.compute() == x / y
def test_to_delayed():
x = da.random.random((4, 4), chunks=(2, 2))
y = x + 10
[[a, b], [c, d]] = y.to_delayed()
assert_eq(a.compute(), y[:2, :2])
s = 2
x = da.from_array(np.array(s), chunks=0)
a = x.to_delayed()[tuple()]
assert a.compute() == s
def test_to_delayed_optimize_graph():
x = da.ones((4, 4), chunks=(2, 2))
y = x[1:][1:][1:][:, 1:][:, 1:][:, 1:]
# optimizations
d = y.to_delayed().flatten().tolist()[0]
assert len([k for k in d.dask if k[0].startswith("getitem")]) == 1
assert d.key == (y.name, 0, 0)
assert d.dask.layers.keys() == {"delayed-" + y.name}
assert d.dask.dependencies == {"delayed-" + y.name: set()}
assert d.__dask_layers__() == ("delayed-" + y.name,)
# no optimizations
d2 = y.to_delayed(optimize_graph=False).flatten().tolist()[0]
assert d2.dask is y.dask
assert d2.key == (y.name, 0, 0)
assert d2.__dask_layers__() == y.__dask_layers__()
assert (d.compute() == d2.compute()).all()
def test_cumulative():
x = da.arange(20, chunks=5)
assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())
assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())
assert_eq(da.nancumsum(x, axis=0), nancumsum(np.arange(20)))
assert_eq(da.nancumprod(x, axis=0), nancumprod(np.arange(20)))
a = np.random.random(20)
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=5)
assert_eq(da.nancumsum(x, axis=0), nancumsum(a))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a))
a = np.random.random((20, 24))
x = da.from_array(a, chunks=(6, 5))
assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))
assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))
assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))
assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))
assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))
a = np.random.random((20, 24))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=(6, 5))
assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))
a = np.random.random((20, 24, 13))
x = da.from_array(a, chunks=(6, 5, 4))
for axis in [0, 1, 2, -1, -2, -3]:
assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))
assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))
assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))
a = np.random.random((20, 24, 13))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=(6, 5, 4))
for axis in [0, 1, 2, -1, -2, -3]:
assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))
with pytest.raises(ValueError):
x.cumsum(axis=3)
with pytest.raises(ValueError):
x.cumsum(axis=-4)
def test_from_delayed():
v = delayed(np.ones)((5, 3))
x = from_delayed(v, shape=(5, 3), dtype=np.ones(0).dtype)
assert isinstance(x, Array)
assert_eq(x, np.ones((5, 3)))
def test_from_delayed_meta():
v = delayed(np.ones)((5, 3))
x = from_delayed(v, shape=(5, 3), meta=np.ones(0))
assert isinstance(x, Array)
assert isinstance(x._meta, np.ndarray)
def test_A_property():
x = da.ones(5, chunks=(2,))
assert x.A is x
def test_copy_mutate():
x = da.arange(5, chunks=(2,))
y = x.copy()
memo = {}
y2 = copy.deepcopy(x, memo=memo)
x[x % 2 == 0] = -1
xx = np.arange(5)
xx[xx % 2 == 0] = -1
assert_eq(x, xx)
assert_eq(y, np.arange(5))
assert_eq(y2, np.arange(5))
assert memo[id(x)] is y2
def test_npartitions():
assert da.ones(5, chunks=(2,)).npartitions == 3
assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6
def test_astype_gh1151():
a = np.arange(5).astype(np.int32)
b = da.from_array(a, (1,))
assert_eq(a.astype(np.int16), b.astype(np.int16))
def test_elemwise_name():
assert (da.ones(5, chunks=2) + 1).name.startswith("add-")
def test_map_blocks_name():
assert da.ones(5, chunks=2).map_blocks(inc).name.startswith("inc-")
def test_map_blocks_token_deprecated():
with pytest.warns(FutureWarning, match="use `name=` instead"):
x = da.ones(5, chunks=2).map_blocks(inc, token="foo")
assert x.name.startswith("foo-")
def test_from_array_names():
pytest.importorskip("distributed")
x = np.ones(10)
d = da.from_array(x, chunks=2)
names = countby(key_split, d.dask)
assert set(names.values()) == {5}
@pytest.mark.parametrize(
"array", [da.arange(100, chunks=25), da.ones((10, 10), chunks=25)]
)
def test_array_picklable(array):
from pickle import dumps, loads
a2 = loads(dumps(array))
assert_eq(array, a2)
def test_from_array_raises_on_bad_chunks():
x = np.ones(10)
with pytest.raises(ValueError):
da.from_array(x, chunks=(5, 5, 5))
# with pytest.raises(ValueError):
# da.from_array(x, chunks=100)
with pytest.raises(ValueError):
da.from_array(x, chunks=((5, 5, 5),))
def test_concatenate_axes():
x = np.ones((2, 2, 2))
assert_eq(concatenate_axes([x, x], axes=[0]), np.ones((4, 2, 2)))
assert_eq(concatenate_axes([x, x, x], axes=[0]), np.ones((6, 2, 2)))
assert_eq(concatenate_axes([x, x], axes=[1]), np.ones((2, 4, 2)))
assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 1]), np.ones((4, 4, 2)))
assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 2]), np.ones((4, 2, 4)))
assert_eq(concatenate_axes([[x, x, x], [x, x, x]], axes=[1, 2]), np.ones((2, 4, 6)))
with pytest.raises(ValueError):
concatenate_axes(
[[x, x], [x, x]], axes=[0]
) # not all nested lists accounted for
with pytest.raises(ValueError):
concatenate_axes([x, x], axes=[0, 1, 2, 3]) # too many axes
def test_blockwise_concatenate():
x = da.ones((4, 4, 4), chunks=(2, 2, 2))
y = da.ones((4, 4), chunks=(2, 2))
def f(a, b):
assert isinstance(a, np.ndarray)
assert isinstance(b, np.ndarray)
assert a.shape == (2, 4, 4)
assert b.shape == (4, 4)
return (a + b).sum(axis=(1, 2))
z = da.blockwise(f, "i", x, "ijk", y, "jk", concatenate=True, dtype=x.dtype)
assert_eq(z, np.ones(4) * 32)
z = da.blockwise(add, "ij", y, "ij", y, "ij", concatenate=True, dtype=x.dtype)
assert_eq(z, np.ones((4, 4)) * 2)
def f(a, b, c):
assert isinstance(a, np.ndarray)
assert isinstance(b, np.ndarray)
assert isinstance(c, np.ndarray)
assert a.shape == (4, 2, 4)
assert b.shape == (4, 4)
assert c.shape == (4, 2)
return np.ones(2)
z = da.blockwise(
f, "j", x, "ijk", y, "ki", y, "ij", concatenate=True, dtype=x.dtype
)
assert_eq(z, np.ones(4), check_shape=False)
def test_common_blockdim():
assert common_blockdim([(5,), (5,)]) == (5,)
assert common_blockdim([(5,), (2, 3)]) == (2, 3)
assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == (2, 3, 2, 3)
assert common_blockdim([(1, 2), (2, 1)]) == (1, 1, 1)
assert common_blockdim([(1, 2, 2), (2, 1, 2), (2, 2, 1)]) == (1, 1, 1, 1, 1)
def test_uneven_chunks_that_fit_neatly():
x = da.arange(10, chunks=((5, 5),))
y = da.ones(10, chunks=((5, 2, 3),))
assert_eq(x + y, np.arange(10) + np.ones(10))
z = x + y
assert z.chunks == ((5, 2, 3),)
def test_elemwise_uneven_chunks():
x = da.arange(10, chunks=((4, 6),))
y = da.ones(10, chunks=((6, 4),))
assert_eq(x + y, np.arange(10) + np.ones(10))
z = x + y
assert z.chunks == ((4, 2, 4),)
x = da.random.random((10, 10), chunks=((4, 6), (5, 2, 3)))
y = da.random.random((4, 10, 10), chunks=((2, 2), (6, 4), (2, 3, 5)))
z = x + y
assert_eq(x + y, x.compute() + y.compute())
assert z.chunks == ((2, 2), (4, 2, 4), (2, 3, 2, 3))
def test_uneven_chunks_blockwise():
x = da.random.random((10, 10), chunks=((2, 3, 2, 3), (5, 5)))
y = da.random.random((10, 10), chunks=((4, 4, 2), (4, 2, 4)))
z = da.blockwise(np.dot, "ik", x, "ij", y, "jk", dtype=x.dtype, concatenate=True)
assert z.chunks == (x.chunks[0], y.chunks[1])
assert_eq(z, x.compute().dot(y))
def test_warn_bad_rechunking():
x = da.ones((20, 20), chunks=(20, 1))
y = da.ones((20, 20), chunks=(1, 20))
with pytest.warns(da.core.PerformanceWarning, match="factor of 20"):
x + y
def test_concatenate_stack_dont_warn():
with warnings.catch_warnings(record=True) as record:
da.concatenate([da.ones(2, chunks=1)] * 62)
assert not record
with warnings.catch_warnings(record=True) as record:
da.stack([da.ones(2, chunks=1)] * 62)
assert not record
def test_map_blocks_delayed():
x = da.ones((10, 10), chunks=(5, 5))
y = np.ones((5, 5))
z = x.map_blocks(add, y, dtype=x.dtype)
yy = delayed(y)
zz = x.map_blocks(add, yy, dtype=x.dtype)
assert_eq(z, zz)
assert yy.key in zz.dask
def test_no_chunks():
X = np.arange(11)
dsk = {("x", 0): np.arange(5), ("x", 1): np.arange(5, 11)}
x = Array(dsk, "x", ((np.nan, np.nan),), np.arange(1).dtype)
assert_eq(x + 1, X + 1)
assert_eq(x.sum(), X.sum())
assert_eq((x + 1).std(), (X + 1).std())
assert_eq((x + x).std(), (X + X).std())
assert_eq((x + x).std(keepdims=True), (X + X).std(keepdims=True))
def test_no_chunks_2d():
X = np.arange(24).reshape((4, 6))
x = da.from_array(X, chunks=(2, 2))
x._chunks = ((np.nan, np.nan), (np.nan, np.nan, np.nan))
with pytest.warns(None): # zero division warning
assert_eq(da.log(x), np.log(X))
assert_eq(x.T, X.T)
assert_eq(x.sum(axis=0, keepdims=True), X.sum(axis=0, keepdims=True))
assert_eq(x.sum(axis=1, keepdims=True), X.sum(axis=1, keepdims=True))
assert_eq(x.dot(x.T + 1), X.dot(X.T + 1))
def test_no_chunks_yes_chunks():
X = np.arange(24).reshape((4, 6))
x = da.from_array(X, chunks=(2, 2))
x._chunks = ((2, 2), (np.nan, np.nan, np.nan))
assert (x + 1).chunks == ((2, 2), (np.nan, np.nan, np.nan))
assert (x.T).chunks == ((np.nan, np.nan, np.nan), (2, 2))
assert (x.dot(x.T)).chunks == ((2, 2), (2, 2))
def test_raise_informative_errors_no_chunks():
X = np.arange(10)
a = da.from_array(X, chunks=(5, 5))
a._chunks = ((np.nan, np.nan),)
b = da.from_array(X, chunks=(4, 4, 2))
b._chunks = ((np.nan, np.nan, np.nan),)
for op in [
lambda: a + b,
lambda: a[1],
lambda: a[::2],
lambda: a[-5],
lambda: a.rechunk(3),
lambda: a.reshape(2, 5),
]:
with pytest.raises(ValueError) as e:
op()
if "chunk" not in str(e.value) or "unknown" not in str(e.value):
op()
def test_no_chunks_slicing_2d():
X = np.arange(24).reshape((4, 6))
x = da.from_array(X, chunks=(2, 2))
x._chunks = ((2, 2), (np.nan, np.nan, np.nan))
assert_eq(x[0], X[0])
for op in [lambda: x[:, 4], lambda: x[:, ::2], lambda: x[0, 2:4]]:
with pytest.raises(ValueError, match="chunk sizes are unknown"):
op()
def test_index_array_with_array_1d():
x = np.arange(10)
dx = da.from_array(x, chunks=(5,))
dx._chunks = ((np.nan, np.nan),)
assert_eq(x[x > 6], dx[dx > 6])
assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])
dy = da.ones(11, chunks=(3,))
with pytest.raises(ValueError):
dx[dy > 5]
def test_index_array_with_array_2d():
x = np.arange(24).reshape((4, 6))
dx = da.from_array(x, chunks=(2, 2))
assert_eq(x[x > 6], dx[dx > 6])
assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])
# Test with unknown chunks
dx._chunks = ((2, 2), (np.nan, np.nan, np.nan))
with pytest.warns(UserWarning, match="different ordering") as record:
assert sorted(x[x % 2 == 0].tolist()) == sorted(
dx[dx % 2 == 0].compute().tolist()
)
assert sorted(x[x > 6].tolist()) == sorted(dx[dx > 6].compute().tolist())
assert len(record) == 2
@pytest.mark.xfail(reason="Chunking does not align well")
def test_index_array_with_array_3d_2d():
x = np.arange(4 ** 3).reshape((4, 4, 4))
dx = da.from_array(x, chunks=(2, 2, 2))
ind = np.random.random((4, 4)) > 0.5
ind = np.arange(4 ** 2).reshape((4, 4)) % 2 == 0
dind = da.from_array(ind, (2, 2))
assert_eq(x[ind], dx[dind])
assert_eq(x[:, ind], dx[:, dind])
def test_setitem_1d():
x = np.arange(10)
dx = da.from_array(x.copy(), chunks=(5,))
x[x > 6] = -1
x[x % 2 == 0] = -2
dx[dx > 6] = -1
dx[dx % 2 == 0] = -2
assert_eq(x, dx)
def test_setitem_2d():
x = np.arange(24).reshape((4, 6))
dx = da.from_array(x.copy(), chunks=(2, 2))
x[x > 6] = -1
x[x % 2 == 0] = -2
dx[dx > 6] = -1
dx[dx % 2 == 0] = -2
assert_eq(x, dx)
def test_setitem_extended_API_0d():
# 0-d array
x = np.array(9)
dx = da.from_array(9)
x[()] = -1
dx[()] = -1
assert_eq(x, dx.compute())
x[...] = -11
dx[...] = -11
assert_eq(x, dx.compute())
@pytest.mark.parametrize(
"index, value",
[
[Ellipsis, -1],
[slice(2, 8, 2), -2],
[slice(8, None, 2), -3],
[slice(8, None, 2), [-30]],
[slice(1, None, -2), -4],
[slice(1, None, -2), [-40]],
[slice(3, None, 2), -5],
[slice(-3, None, -2), -6],
[slice(1, None, -2), -4],
[slice(3, None, 2), -5],
[slice(3, None, 2), [10, 11, 12, 13]],
[slice(-4, None, -2), [14, 15, 16, 17]],
],
)
def test_setitem_extended_API_1d(index, value):
# 1-d array
x = np.arange(10)
dx = da.from_array(x, chunks=(4, 6))
dx[index] = value
x[index] = value
assert_eq(x, dx.compute())
@pytest.mark.parametrize(
"index, value",
[
[Ellipsis, -1],
[(slice(None, None, 2), slice(None, None, -1)), -1],
[slice(1, None, 2), -1],
[[4, 3, 1], -1],
[(Ellipsis, 4), -1],
[5, -1],
[(slice(None), 2), range(6)],
[3, range(10)],
[(slice(None), [3, 5, 6]), [-30, -31, -32]],
[([-1, 0, 1], 2), [-30, -31, -32]],
[(slice(None, 2), slice(None, 3)), [-50, -51, -52]],
[(slice(None), [6, 1, 3]), [-60, -61, -62]],
[(slice(1, 3), slice(1, 4)), [[-70, -71, -72]]],
[(slice(None), [9, 8, 8]), [-80, -81, 91]],
[([True, False, False, False, True, False], 2), -1],
[(3, [True, True, False, True, True, False, True, False, True, True]), -1],
[(np.array([False, False, True, True, False, False]), slice(5, 7)), -1],
[
(
4,
da.from_array(
[False, False, True, True, False, False, True, False, False, True]
),
),
-1,
],
[
(
slice(2, 4),
da.from_array(
[False, False, True, True, False, False, True, False, False, True]
),
),
[[-100, -101, -102, -103], [-200, -201, -202, -203]],
],
[slice(5, None, 2), -99],
[slice(5, None, 2), range(1, 11)],
[slice(1, None, -2), -98],
[slice(1, None, -2), range(11, 21)],
],
)
def test_setitem_extended_API_2d(index, value):
# 2-d array
x = np.ma.arange(60).reshape((6, 10))
dx = da.from_array(x, chunks=(2, 3))
dx[index] = value
x[index] = value
assert_eq(x, dx.compute())
def test_setitem_extended_API_2d_rhs_func_of_lhs():
# Cases:
# * RHS and/or indices are a function of the LHS
# * Indices have unknown chunk sizes
# * RHS has extra leading size 1 dimensions compared to LHS
x = np.arange(60).reshape((6, 10))
chunks = (2, 3)
dx = da.from_array(x, chunks=chunks)
dx[2:4, dx[0] > 3] = -5
x[2:4, x[0] > 3] = -5
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[2, dx[0] < -2] = -7
x[2, x[0] < -2] = -7
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[dx % 2 == 0] = -8
x[x % 2 == 0] = -8
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[dx % 2 == 0] = -8
x[x % 2 == 0] = -8
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[3:5, 5:1:-2] = -dx[:2, 4:1:-2]
x[3:5, 5:1:-2] = -x[:2, 4:1:-2]
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[0, 1:3] = -dx[0, 4:2:-1]
x[0, 1:3] = -x[0, 4:2:-1]
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[...] = dx
x[...] = x
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[...] = dx[...]
x[...] = x[...]
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[0] = dx[-1]
x[0] = x[-1]
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[0, :] = dx[-2, :]
x[0, :] = x[-2, :]
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[:, 1] = dx[:, -3]
x[:, 1] = x[:, -3]
assert_eq(x, dx.compute())
index = da.from_array([0, 2], chunks=(2,))
dx = da.from_array(x, chunks=chunks)
dx[index, 8] = [99, 88]
x[[0, 2], 8] = [99, 88]
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=chunks)
dx[:, index] = dx[:, :2]
x[:, [0, 2]] = x[:, :2]
assert_eq(x, dx.compute())
index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
dx = da.from_array(x, chunks=chunks)
dx[index, 7] = [-23, -33]
x[index.compute(), 7] = [-23, -33]
assert_eq(x, dx.compute())
index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
dx = da.from_array(x, chunks=chunks)
dx[(index,)] = -34
x[(index.compute(),)] = -34
assert_eq(x, dx.compute())
index = index - 4
dx = da.from_array(x, chunks=chunks)
dx[index, 7] = [-43, -53]
x[index.compute(), 7] = [-43, -53]
assert_eq(x, dx.compute())
index = da.from_array([0, -1], chunks=(1,))
x[[0, -1]] = 9999
dx[(index,)] = 9999
assert_eq(x, dx.compute())
dx = da.from_array(x, chunks=(-1, -1))
dx[...] = da.from_array(x, chunks=chunks)
assert_eq(x, dx.compute())
# RHS has extra leading size 1 dimensions compared to LHS
dx = da.from_array(x.copy(), chunks=(2, 3))
v = x.reshape((1, 1) + x.shape)
x[...] = v
dx[...] = v
assert_eq(x, dx.compute())
index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
v = -np.arange(12).reshape(1, 1, 6, 2)
x[:, [0, 1]] = v
dx[:, index] = v
assert_eq(x, dx.compute())
@pytest.mark.parametrize(
"index, value",
[
[(1, slice(1, 7, 2)), np.ma.masked],
[(slice(1, 5, 2), [7, 5]), np.ma.masked_all((2, 2))],
],
)
def test_setitem_extended_API_2d_mask(index, value):
x = np.ma.arange(60).reshape((6, 10))
dx = da.from_array(x.data, chunks=(2, 3))
dx[index] = value
x[index] = value
dx = dx.persist()
assert_eq(x, dx.compute())
assert_eq(x.mask, da.ma.getmaskarray(dx).compute())
def test_setitem_on_read_only_blocks():
# Outputs of broadcast_trick-style functions contain read-only
# arrays
dx = da.empty((4, 6), dtype=float, chunks=(2, 2))
dx[0] = 99
assert_eq(dx[0, 0], 99.0)
dx[0:2] = 88
    assert_eq(dx[0, 0], 88.0)
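# Background for the test above: dask's broadcast_trick-style creation routines
# (da.empty, da.ones, ...) build each block by broadcasting a scalar, and NumPy
# marks such broadcast views read-only, so the assignment path must cope with
# non-writeable blocks (e.g. by copying before writing). A quick NumPy-level
# illustration, independent of dask internals:
#
#     np.broadcast_to(np.float64(0.0), (2, 2)).flags.writeable   # -> False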
def test_setitem_errs():
x = da.ones((4, 4), chunks=(2, 2))
with pytest.raises(ValueError):
x[x > 1] = x
# Shape mismatch
with pytest.raises(ValueError):
x[[True, True, False, False], 0] = [2, 3, 4]
with pytest.raises(ValueError):
x[[True, True, True, False], 0] = [2, 3]
with pytest.raises(ValueError):
x[0, [True, True, True, False]] = [2, 3]
with pytest.raises(ValueError):
x[0, [True, True, True, False]] = [1, 2, 3, 4, 5]
with pytest.raises(ValueError):
x[da.from_array([True, True, True, False]), 0] = [1, 2, 3, 4, 5]
with pytest.raises(ValueError):
x[0, da.from_array([True, False, False, True])] = [1, 2, 3, 4, 5]
with pytest.raises(ValueError):
x[:, 0] = [2, 3, 4]
with pytest.raises(ValueError):
x[0, :] = [1, 2, 3, 4, 5]
x = da.ones((4, 4), chunks=(2, 2))
# Too many indices
with pytest.raises(IndexError):
x[:, :, :] = 2
# 2-d boolean indexing a single dimension
with pytest.raises(IndexError):
x[[[True, True, False, False]], 0] = 5
# Too many/not enough booleans
with pytest.raises(IndexError):
x[[True, True, False]] = 5
with pytest.raises(IndexError):
x[[False, True, True, True, False]] = 5
# 2-d indexing a single dimension
with pytest.raises(IndexError):
x[[[1, 2, 3]], 0] = 5
# Multiple 1-d boolean/integer arrays
with pytest.raises(NotImplementedError):
x[[1, 2], [2, 3]] = 6
with pytest.raises(NotImplementedError):
x[[True, True, False, False], [2, 3]] = 5
with pytest.raises(NotImplementedError):
x[[True, True, False, False], [False, True, False, False]] = 7
# scalar boolean indexing
with pytest.raises(NotImplementedError):
x[True] = 5
with pytest.raises(NotImplementedError):
x[np.array(True)] = 5
with pytest.raises(NotImplementedError):
x[0, da.from_array(True)] = 5
# Scalar arrays
y = da.from_array(np.array(1))
with pytest.raises(IndexError):
y[:] = 2
    # RHS has non-broadcastable extra leading dimensions
x = np.arange(12).reshape((3, 4))
dx = da.from_array(x, chunks=(2, 2))
with pytest.raises(ValueError):
dx[...] = np.arange(24).reshape((2, 1, 3, 4))
# RHS doesn't have chunks set
dx = da.unique(da.random.random([10]))
with pytest.raises(ValueError, match="Arrays chunk sizes are unknown"):
dx[0] = 0
def test_zero_slice_dtypes():
x = da.arange(5, chunks=1)
y = x[[]]
assert y.dtype == x.dtype
assert y.shape == (0,)
assert_eq(x[[]], np.arange(5)[[]])
def test_zero_sized_array_rechunk():
x = da.arange(5, chunks=1)[:0]
y = da.blockwise(identity, "i", x, "i", dtype=x.dtype)
assert_eq(x, y)
def test_blockwise_zero_shape():
da.blockwise(
lambda x: x,
"i",
da.arange(10, chunks=10),
"i",
da.from_array(np.ones((0, 2)), ((0,), 2)),
"ab",
da.from_array(np.ones((0,)), ((0,),)),
"a",
dtype="float64",
)
def test_blockwise_zero_shape_new_axes():
da.blockwise(
lambda x: np.ones(42),
"i",
da.from_array(np.ones((0, 2)), ((0,), 2)),
"ab",
da.from_array(np.ones((0,)), ((0,),)),
"a",
dtype="float64",
new_axes={"i": 42},
)
def test_broadcast_against_zero_shape():
assert_eq(da.arange(1, chunks=1)[:0] + 0, np.arange(1)[:0] + 0)
assert_eq(da.arange(1, chunks=1)[:0] + 0.1, np.arange(1)[:0] + 0.1)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0, np.ones((5, 5))[:0] + 0)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0.1, np.ones((5, 5))[:0] + 0.1)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0, np.ones((5, 5))[:, :0] + 0)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0.1, np.ones((5, 5))[:, :0] + 0.1)
def test_from_array_name():
x = np.array([1, 2, 3, 4, 5])
chunks = x.shape
    # Default is to tokenize the array
dx = da.from_array(x, chunks=chunks)
hashed_name = dx.name
assert da.from_array(x, chunks=chunks).name == hashed_name
# Specify name directly
assert da.from_array(x, chunks=chunks, name="x").name == "x"
# False gives a random name
dx2 = da.from_array(x, chunks=chunks, name=False)
dx3 = da.from_array(x, chunks=chunks, name=False)
assert dx2.name != hashed_name
assert dx3.name != hashed_name
assert dx2.name != dx3.name
def test_concatenate_errs():
with pytest.raises(ValueError, match=r"Shapes.*\(2, 1\)"):
da.concatenate(
[da.zeros((2, 1), chunks=(2, 1)), da.zeros((2, 3), chunks=(2, 3))]
)
with pytest.raises(ValueError):
da.concatenate(
[da.zeros((1, 2), chunks=(1, 2)), da.zeros((3, 2), chunks=(3, 2))], axis=1
)
def test_stack_errs():
with pytest.raises(ValueError) as e:
da.stack([da.zeros((2,), chunks=2)] * 10 + [da.zeros((3,), chunks=3)] * 10)
assert (
str(e.value)
== "Stacked arrays must have the same shape. The first array had shape (2,), while array 11 has shape (3,)."
)
assert len(str(e.value)) < 105
def test_blockwise_with_numpy_arrays():
x = np.ones(10)
y = da.ones(10, chunks=(5,))
assert_eq(x + y, x + x)
s = da.sum(x)
assert any(x is v for v in s.dask.values())
@pytest.mark.parametrize("chunks", (100, 6))
@pytest.mark.parametrize("other", [[0, 0, 1], [2, 1, 3], (0, 0, 1)])
def test_elemwise_with_lists(chunks, other):
x = np.arange(12).reshape((4, 3))
d = da.arange(12, chunks=chunks).reshape((4, 3))
x2 = np.vstack([x[:, 0], x[:, 1], x[:, 2]]).T
d2 = da.vstack([d[:, 0], d[:, 1], d[:, 2]]).T
assert_eq(x2, d2)
x3 = x2 * other
d3 = d2 * other
assert_eq(x3, d3)
def test_constructor_plugin():
L = []
L2 = []
with dask.config.set(array_plugins=[L.append, L2.append]):
x = da.ones(10, chunks=5)
y = x + 1
assert L == L2 == [x, y]
with dask.config.set(array_plugins=[lambda x: x.compute()]):
x = da.ones(10, chunks=5)
y = x + 1
assert isinstance(y, np.ndarray)
assert len(L) == 2
def test_no_warnings_on_metadata():
x = da.ones(5, chunks=3)
with warnings.catch_warnings(record=True) as record:
da.arccos(x)
assert not record
def test_delayed_array_key_hygeine():
a = da.zeros((1,), chunks=(1,))
d = delayed(identity)(a)
b = da.from_delayed(d, shape=a.shape, dtype=a.dtype)
assert_eq(a, b)
def test_empty_chunks_in_array_len():
x = da.ones((), chunks=())
with pytest.raises(TypeError) as exc_info:
len(x)
err_msg = "len() of unsized object"
assert err_msg in str(exc_info.value)
@pytest.mark.parametrize("dtype", [None, [("a", "f4"), ("b", object)]])
def test_meta(dtype):
a = da.zeros((1,), chunks=(1,))
assert a._meta.dtype == a.dtype
assert isinstance(a._meta, np.ndarray)
assert a.nbytes < 1000
@pytest.mark.parametrize(
"shape,limit,expected",
[
(100, 10, (10,) * 10),
(20, 10, (10, 10)),
(20, 5, (5, 5, 5, 5)),
(24, 5, (4, 4, 4, 4, 4, 4)), # common factor is close, use it
(23, 5, (5, 5, 5, 5, 3)), # relatively prime, don't use 1s
(1000, 167, (125,) * 8), # find close value
],
)
def test_normalize_chunks_auto_1d(shape, limit, expected):
result = normalize_chunks("auto", (shape,), limit=limit, dtype=np.uint8)
assert result == (expected,)
@pytest.mark.parametrize(
"shape,chunks,limit,expected",
[
((20, 20), ("auto", 2), 20, ((10, 10), (2,) * 10)),
(
(20, 20),
("auto", (2, 2, 2, 2, 2, 5, 5)),
20,
((4, 4, 4, 4, 4), (2, 2, 2, 2, 2, 5, 5)),
),
((1, 20), "auto", 10, ((1,), (10, 10))),
],
)
def test_normalize_chunks_auto_2d(shape, chunks, limit, expected):
result = normalize_chunks(chunks, shape, limit=limit, dtype="uint8")
assert result == expected
def test_normalize_chunks_auto_3d():
result = normalize_chunks(
("auto", "auto", 2), (20, 20, 20), limit=200, dtype="uint8"
)
expected = ((10, 10), (10, 10), (2,) * 10)
assert result == expected
result = normalize_chunks("auto", (20, 20, 20), limit=8, dtype="uint8")
expected = ((2,) * 10,) * 3
assert result == expected
def test_constructors_chunks_dict():
x = da.ones((20, 20), chunks={0: 10, 1: 5})
assert x.chunks == ((10, 10), (5, 5, 5, 5))
x = da.ones((20, 20), chunks={0: 10, 1: "auto"})
assert x.chunks == ((10, 10), (20,))
def test_from_array_chunks_dict():
with dask.config.set({"array.chunk-size": "128kiB"}):
x = np.empty((100, 100, 100))
y = da.from_array(x, chunks={0: 10, 1: -1, 2: "auto"})
z = da.from_array(x, chunks=(10, 100, 10))
assert y.chunks == z.chunks
@pytest.mark.parametrize("dtype", [object, [("a", object), ("b", int)]])
def test_normalize_chunks_object_dtype(dtype):
x = np.array(["a", "abc"], dtype=object)
with pytest.raises(NotImplementedError):
da.from_array(x, chunks="auto")
def test_normalize_chunks_tuples_of_tuples():
result = normalize_chunks(((2, 3, 5), "auto"), (10, 10), limit=10, dtype=np.uint8)
expected = ((2, 3, 5), (2, 2, 2, 2, 2))
assert result == expected
def test_normalize_chunks_nan():
with pytest.raises(ValueError) as info:
normalize_chunks("auto", (np.nan,), limit=10, dtype=np.uint8)
assert "auto" in str(info.value)
with pytest.raises(ValueError) as info:
normalize_chunks(((np.nan, np.nan), "auto"), (10, 10), limit=10, dtype=np.uint8)
assert "auto" in str(info.value)
def test_pandas_from_dask_array():
pd = pytest.importorskip("pandas")
from dask.dataframe._compat import PANDAS_GT_130, PANDAS_GT_131
a = da.ones((12,), chunks=4)
s = pd.Series(a, index=range(12))
if PANDAS_GT_130 and not PANDAS_GT_131:
# https://github.com/pandas-dev/pandas/issues/38645
assert s.dtype != a.dtype
else:
assert s.dtype == a.dtype
assert_eq(s.values, a)
def test_from_zarr_unique_name():
zarr = pytest.importorskip("zarr")
a = zarr.array([1, 2, 3])
b = zarr.array([4, 5, 6])
assert da.from_zarr(a).name != da.from_zarr(b).name
def test_from_zarr_name():
zarr = pytest.importorskip("zarr")
a = zarr.array([1, 2, 3])
assert da.from_zarr(a, name="foo").name == "foo"
def test_zarr_roundtrip():
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_roundtrip_with_path_like():
pytest.importorskip("zarr")
with tmpdir() as d:
path = pathlib.Path(d)
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(path)
a2 = da.from_zarr(path)
assert_eq(a, a2)
assert a2.chunks == a.chunks
@pytest.mark.parametrize("compute", [False, True])
def test_zarr_return_stored(compute):
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a2 = a.to_zarr(d, compute=compute, return_stored=True)
assert isinstance(a2, Array)
assert_eq(a, a2, check_graph=False)
assert a2.chunks == a.chunks
@pytest.mark.parametrize("inline_array", [True, False])
def test_zarr_inline_array(inline_array):
zarr = pytest.importorskip("zarr")
a = zarr.array([1, 2, 3])
dsk = dict(da.from_zarr(a, inline_array=inline_array).dask)
assert len(dsk) == (0 if inline_array else 1) + 1
assert (a in dsk.values()) is not inline_array
def test_zarr_existing_array():
zarr = pytest.importorskip("zarr")
c = (1, 1)
a = da.ones((3, 3), chunks=c)
z = zarr.zeros_like(a, chunks=c)
a.to_zarr(z)
a2 = da.from_zarr(z)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_to_zarr_unknown_chunks_raises():
pytest.importorskip("zarr")
a = da.random.random((10,), chunks=(3,))
a = a[a > 0.5]
with pytest.raises(ValueError, match="unknown chunk sizes"):
a.to_zarr({})
def test_read_zarr_chunks():
pytest.importorskip("zarr")
a = da.zeros((9,), chunks=(3,))
with tmpdir() as d:
a.to_zarr(d)
arr = da.from_zarr(d, chunks=(5,))
assert arr.chunks == ((5, 4),)
def test_zarr_pass_mapper():
pytest.importorskip("zarr")
import zarr.storage
with tmpdir() as d:
mapper = zarr.storage.DirectoryStore(d)
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(mapper)
a2 = da.from_zarr(mapper)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_group():
zarr = pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d, component="test")
with pytest.raises((OSError, ValueError)):
a.to_zarr(d, component="test", overwrite=False)
a.to_zarr(d, component="test", overwrite=True)
# second time is fine, group exists
a.to_zarr(d, component="test2", overwrite=False)
a.to_zarr(d, component="nested/test", overwrite=False)
group = zarr.open_group(d, mode="r")
assert list(group) == ["nested", "test", "test2"]
assert "test" in group["nested"]
a2 = da.from_zarr(d, component="test")
assert_eq(a, a2)
assert a2.chunks == a.chunks
@pytest.mark.parametrize(
"data",
[
[(), True],
[((1,),), True],
[((1, 1, 1),), True],
[((1,), (1,)), True],
[((2, 2, 1),), True],
[((2, 2, 3),), False],
[((1, 1, 1), (2, 2, 3)), False],
[((1, 2, 1),), False],
],
)
def test_regular_chunks(data):
chunkset, expected = data
assert da.core._check_regular_chunks(chunkset) == expected
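# A minimal sketch of the rule the parametrized cases above appear to encode. This
# is an assumption inferred from those cases, not a statement of the private
# helper's contract: within each dimension every chunk must equal the first one,
# except that the final chunk may be a smaller trailing remainder.
def _sketch_is_regular(chunkset):
    for dim in chunkset:
        # all non-final chunks must match the first chunk
        if len(dim) > 1 and any(c != dim[0] for c in dim[:-1]):
            return False
        # the final chunk may only be smaller, never larger
        if len(dim) > 1 and dim[-1] > dim[0]:
            return False
    return True
# _sketch_is_regular(((2, 2, 1),)) -> True; _sketch_is_regular(((1, 2, 1),)) -> False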
def test_zarr_nocompute():
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
out = a.to_zarr(d, compute=False)
assert isinstance(out, Delayed)
dask.compute(out)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_regions():
zarr = pytest.importorskip("zarr")
a = da.arange(16).reshape((4, 4)).rechunk(2)
z = zarr.zeros_like(a, chunks=2)
a[:2, :2].to_zarr(z, region=(slice(2), slice(2)))
a2 = da.from_zarr(z)
expected = [[0, 1, 0, 0], [4, 5, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
assert_eq(a2, expected)
assert a2.chunks == a.chunks
a[:3, 3:4].to_zarr(z, region=(slice(1, 4), slice(2, 3)))
a2 = da.from_zarr(z)
expected = [[0, 1, 0, 0], [4, 5, 3, 0], [0, 0, 7, 0], [0, 0, 11, 0]]
assert_eq(a2, expected)
assert a2.chunks == a.chunks
a[3:, 3:].to_zarr(z, region=(slice(2, 3), slice(1, 2)))
a2 = da.from_zarr(z)
expected = [[0, 1, 0, 0], [4, 5, 3, 0], [0, 15, 7, 0], [0, 0, 11, 0]]
assert_eq(a2, expected)
assert a2.chunks == a.chunks
with pytest.raises(ValueError):
with tmpdir() as d:
a.to_zarr(d, region=(slice(2), slice(2)))
def test_tiledb_roundtrip():
tiledb = pytest.importorskip("tiledb")
# 1) load with default chunking
# 2) load from existing tiledb.DenseArray
# 3) write to existing tiledb.DenseArray
a = da.random.random((3, 3))
with tmpdir() as uri:
da.to_tiledb(a, uri)
tdb = da.from_tiledb(uri)
assert_eq(a, tdb)
assert a.chunks == tdb.chunks
# from tiledb.array
with tiledb.open(uri) as t:
tdb2 = da.from_tiledb(t)
assert_eq(a, tdb2)
with tmpdir() as uri2:
with tiledb.empty_like(uri2, a) as t:
a.to_tiledb(t)
assert_eq(da.from_tiledb(uri2), a)
# specific chunking
with tmpdir() as uri:
a = da.random.random((3, 3), chunks=(1, 1))
a.to_tiledb(uri)
tdb = da.from_tiledb(uri)
assert_eq(a, tdb)
assert a.chunks == tdb.chunks
def test_tiledb_multiattr():
tiledb = pytest.importorskip("tiledb")
dom = tiledb.Domain(
tiledb.Dim("x", (0, 1000), tile=100), tiledb.Dim("y", (0, 1000), tile=100)
)
schema = tiledb.ArraySchema(
attrs=(tiledb.Attr("attr1"), tiledb.Attr("attr2")), domain=dom
)
with tmpdir() as uri:
tiledb.DenseArray.create(uri, schema)
tdb = tiledb.DenseArray(uri, "w")
ar1 = np.random.randn(*tdb.schema.shape)
ar2 = np.random.randn(*tdb.schema.shape)
tdb[:] = {"attr1": ar1, "attr2": ar2}
tdb = tiledb.DenseArray(uri, "r")
# basic round-trip from dask.array
d = da.from_tiledb(uri, attribute="attr2")
assert_eq(d, ar2)
# smoke-test computation directly on the TileDB view
d = da.from_tiledb(uri, attribute="attr2")
assert_eq(np.mean(ar2), d.mean().compute(scheduler="threads"))
def test_blockview():
x = da.arange(10, chunks=2)
blockview = BlockView(x)
assert x.blocks == blockview
assert isinstance(blockview[0], da.Array)
assert_eq(blockview[0], x[:2])
assert_eq(blockview[-1], x[-2:])
assert_eq(blockview[:3], x[:6])
assert_eq(blockview[[0, 1, 2]], x[:6])
assert_eq(blockview[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
assert_eq(blockview.shape, tuple(map(len, x.chunks)))
assert_eq(blockview.size, np.prod(blockview.shape))
assert_eq(
blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]
)
x = da.random.random((20, 20), chunks=(4, 5))
blockview = BlockView(x)
assert_eq(blockview[0], x[:4])
assert_eq(blockview[0, :3], x[:4, :15])
assert_eq(blockview[:, :3], x[:, :15])
assert_eq(blockview.shape, tuple(map(len, x.chunks)))
assert_eq(blockview.size, np.prod(blockview.shape))
assert_eq(
blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]
)
x = da.ones((40, 40, 40), chunks=(10, 10, 10))
blockview = BlockView(x)
assert_eq(blockview[0, :, 0], np.ones((10, 40, 10)))
assert_eq(blockview.shape, tuple(map(len, x.chunks)))
assert_eq(blockview.size, np.prod(blockview.shape))
assert_eq(
blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]
)
x = da.ones((2, 2), chunks=1)
with pytest.raises(ValueError):
blockview[[0, 1], [0, 1]]
with pytest.raises(ValueError):
blockview[np.array([0, 1]), [0, 1]]
with pytest.raises(ValueError) as info:
blockview[np.array([0, 1]), np.array([0, 1])]
assert "list" in str(info.value)
with pytest.raises(ValueError) as info:
blockview[None, :, :]
assert "newaxis" in str(info.value) and "not supported" in str(info.value)
with pytest.raises(IndexError) as info:
blockview[100, 100]
def test_blocks_indexer():
x = da.arange(10, chunks=2)
assert isinstance(x.blocks[0], da.Array)
assert_eq(x.blocks[0], x[:2])
assert_eq(x.blocks[-1], x[-2:])
assert_eq(x.blocks[:3], x[:6])
assert_eq(x.blocks[[0, 1, 2]], x[:6])
assert_eq(x.blocks[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
x = da.random.random((20, 20), chunks=(4, 5))
assert_eq(x.blocks[0], x[:4])
assert_eq(x.blocks[0, :3], x[:4, :15])
assert_eq(x.blocks[:, :3], x[:, :15])
x = da.ones((40, 40, 40), chunks=(10, 10, 10))
assert_eq(x.blocks[0, :, 0], np.ones((10, 40, 10)))
x = da.ones((2, 2), chunks=1)
with pytest.raises(ValueError):
x.blocks[[0, 1], [0, 1]]
with pytest.raises(ValueError):
x.blocks[np.array([0, 1]), [0, 1]]
with pytest.raises(ValueError) as info:
x.blocks[np.array([0, 1]), np.array([0, 1])]
assert "list" in str(info.value)
with pytest.raises(ValueError) as info:
x.blocks[None, :, :]
assert "newaxis" in str(info.value) and "not supported" in str(info.value)
with pytest.raises(IndexError) as info:
x.blocks[100, 100]
def test_partitions_indexer():
# .partitions is an alias of .blocks for dask arrays
x = da.arange(10, chunks=2)
assert isinstance(x.partitions[0], da.Array)
assert_eq(x.partitions[0], x[:2])
assert_eq(x.partitions[-1], x[-2:])
assert_eq(x.partitions[:3], x[:6])
assert_eq(x.partitions[[0, 1, 2]], x[:6])
assert_eq(x.partitions[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
x = da.random.random((20, 20), chunks=(4, 5))
assert_eq(x.partitions[0], x[:4])
assert_eq(x.partitions[0, :3], x[:4, :15])
assert_eq(x.partitions[:, :3], x[:, :15])
x = da.ones((40, 40, 40), chunks=(10, 10, 10))
assert_eq(x.partitions[0, :, 0], np.ones((10, 40, 10)))
x = da.ones((2, 2), chunks=1)
with pytest.raises(ValueError):
x.partitions[[0, 1], [0, 1]]
with pytest.raises(ValueError):
x.partitions[np.array([0, 1]), [0, 1]]
with pytest.raises(ValueError) as info:
x.partitions[np.array([0, 1]), np.array([0, 1])]
assert "list" in str(info.value)
with pytest.raises(ValueError) as info:
x.partitions[None, :, :]
assert "newaxis" in str(info.value) and "not supported" in str(info.value)
with pytest.raises(IndexError) as info:
x.partitions[100, 100]
@pytest.mark.filterwarnings("ignore:the matrix subclass:PendingDeprecationWarning")
def test_dask_array_holds_scipy_sparse_containers():
pytest.importorskip("scipy.sparse")
import scipy.sparse
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
xx = x.compute()
y = x.map_blocks(scipy.sparse.csr_matrix)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler="single-threaded")
assert all(isinstance(v, scipy.sparse.csr_matrix) for v in values)
yy = y.compute(scheduler="single-threaded")
assert isinstance(yy, scipy.sparse.spmatrix)
assert (yy == xx).all()
z = x.T.map_blocks(scipy.sparse.csr_matrix)
zz = z.compute(scheduler="single-threaded")
assert isinstance(zz, scipy.sparse.spmatrix)
assert (zz == xx.T).all()
@pytest.mark.parametrize("axis", [0, 1])
def test_scipy_sparse_concatenate(axis):
pytest.importorskip("scipy.sparse")
import scipy.sparse
rs = da.random.RandomState(RandomState=np.random.RandomState)
xs = []
ys = []
for i in range(2):
x = rs.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
xs.append(x)
ys.append(x.map_blocks(scipy.sparse.csr_matrix))
z = da.concatenate(ys, axis=axis)
z = z.compute()
if axis == 0:
sp_concatenate = scipy.sparse.vstack
elif axis == 1:
sp_concatenate = scipy.sparse.hstack
z_expected = sp_concatenate([scipy.sparse.csr_matrix(e.compute()) for e in xs])
assert (z != z_expected).nnz == 0
def test_3851():
    with warnings.catch_warnings(record=True) as record:
Y = da.random.random((10, 10), chunks="auto")
da.argmax(Y, axis=0).compute()
assert not record
def test_3925():
x = da.from_array(np.array(["a", "b", "c"], dtype=object), chunks=-1)
assert (x[0] == x[0]).compute(scheduler="sync")
def test_map_blocks_large_inputs_delayed():
a = da.ones(10, chunks=(5,))
b = np.ones(1000000)
c = a.map_blocks(add, b)
assert any(b is v for v in c.dask.values())
assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence
d = a.map_blocks(lambda x, y: x + y.sum(), y=b)
assert_eq(d, d)
assert any(b is v for v in d.dask.values())
    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence
def test_blockwise_large_inputs_delayed():
a = da.ones(10, chunks=(5,))
b = np.ones(1000000)
c = da.blockwise(add, "i", a, "i", b, None, dtype=a.dtype)
assert any(b is v for v in c.dask.values())
assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence
d = da.blockwise(lambda x, y: x + y, "i", a, "i", y=b, dtype=a.dtype)
assert any(b is v for v in d.dask.values())
    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence
def test_slice_reversed():
x = da.ones(10, chunks=-1)
y = x[6:3]
assert_eq(y, np.ones(0))
def test_map_blocks_chunks():
x = da.arange(400, chunks=(100,))
y = da.arange(40, chunks=(10,))
def func(a, b):
return np.array([a.max(), b.max()])
assert_eq(
da.map_blocks(func, x, y, chunks=(2,), dtype=x.dtype),
np.array([99, 9, 199, 19, 299, 29, 399, 39]),
)
def test_nbytes_auto():
chunks = normalize_chunks("800B", shape=(500,), dtype="float64")
assert chunks == ((100, 100, 100, 100, 100),)
chunks = normalize_chunks("200B", shape=(10, 10), dtype="float64")
assert chunks == ((5, 5), (5, 5))
chunks = normalize_chunks((5, "200B"), shape=(10, 10), dtype="float64")
assert chunks == ((5, 5), (5, 5))
chunks = normalize_chunks("33B", shape=(10, 10), dtype="float64")
assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2))
chunks = normalize_chunks("1800B", shape=(10, 20, 30), dtype="float64")
assert chunks == ((5, 5), (5, 5, 5, 5), (6, 6, 6, 6, 6))
with pytest.raises(ValueError):
normalize_chunks("10B", shape=(10,), limit=20, dtype="float64")
with pytest.raises(ValueError):
normalize_chunks("100B", shape=(10, 10), limit=20, dtype="float64")
with pytest.raises(ValueError):
normalize_chunks(("100B", "10B"), shape=(10, 10), dtype="float64")
with pytest.raises(ValueError):
normalize_chunks(("10B", "10B"), shape=(10, 10), limit=20, dtype="float64")
def test_auto_chunks_h5py():
h5py = pytest.importorskip("h5py")
with tmpfile(".hdf5") as fn:
with h5py.File(fn, mode="a") as f:
d = f.create_dataset(
"/x", shape=(1000, 1000), chunks=(32, 64), dtype="float64"
)
d[:] = 1
with h5py.File(fn, mode="a") as f:
d = f["x"]
with dask.config.set({"array.chunk-size": "1 MiB"}):
x = da.from_array(d)
assert isinstance(x._meta, np.ndarray)
assert x.chunks == ((256, 256, 256, 232), (512, 488))
def test_no_warnings_from_blockwise():
with pytest.warns(None) as record:
x = da.ones((3, 10, 10), chunks=(3, 2, 2))
da.map_blocks(lambda y: np.mean(y, axis=0), x, dtype=x.dtype, drop_axis=0)
assert not record
with pytest.warns(None) as record:
x = da.ones((15, 15), chunks=(5, 5))
(x.dot(x.T + 1) - x.mean(axis=0)).std()
assert not record
with pytest.warns(None) as record:
x = da.ones((1,), chunks=(1,))
1 / x[0]
assert not record
def test_from_array_meta():
sparse = pytest.importorskip("sparse")
x = np.ones(10)
meta = sparse.COO.from_numpy(x)
y = da.from_array(x, meta=meta)
assert isinstance(y._meta, sparse.COO)
def test_compute_chunk_sizes():
x = da.from_array(np.linspace(-1, 1, num=50), chunks=10)
y = x[x < 0]
assert np.isnan(y.shape[0])
assert y.chunks == ((np.nan,) * 5,)
z = y.compute_chunk_sizes()
assert y is z
assert z.chunks == ((10, 10, 5, 0, 0),)
assert len(z) == 25
# check that dtype of chunk dimensions is `int`
assert isinstance(z.chunks[0][0], int)
def test_compute_chunk_sizes_2d_array():
X = np.linspace(-1, 1, num=9 * 4).reshape(9, 4)
X = da.from_array(X, chunks=(3, 4))
idx = X.sum(axis=1) > 0
Y = X[idx]
# This is very similar to the DataFrame->Array conversion
assert np.isnan(Y.shape[0]) and Y.shape[1] == 4
assert Y.chunks == ((np.nan, np.nan, np.nan), (4,))
Z = Y.compute_chunk_sizes()
assert Y is Z
assert Z.chunks == ((0, 1, 3), (4,))
assert Z.shape == (4, 4)
def test_compute_chunk_sizes_3d_array(N=8):
X = np.linspace(-1, 2, num=8 * 8 * 8).reshape(8, 8, 8)
X = da.from_array(X, chunks=(4, 4, 4))
idx = X.sum(axis=0).sum(axis=0) > 0
Y = X[idx]
idx = X.sum(axis=1).sum(axis=1) < 0
Y = Y[:, idx]
idx = X.sum(axis=2).sum(axis=1) > 0.1
Y = Y[:, :, idx]
# Checking to make sure shapes are different on outputs
assert Y.compute().shape == (8, 3, 5)
assert X.compute().shape == (8, 8, 8)
assert Y.chunks == ((np.nan, np.nan),) * 3
assert all(np.isnan(s) for s in Y.shape)
Z = Y.compute_chunk_sizes()
assert Z is Y
assert Z.shape == (8, 3, 5)
assert Z.chunks == ((4, 4), (3, 0), (1, 4))
def _known(num=50):
return da.from_array(np.linspace(-1, 1, num=num), chunks=10)
@pytest.fixture()
def unknown():
x = _known()
y = x[x < 0]
assert y.chunks == ((np.nan,) * 5,)
return y
def test_compute_chunk_sizes_warning_fixes_rechunk(unknown):
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
y.rechunk("auto")
y.compute_chunk_sizes()
y.rechunk("auto")
def test_compute_chunk_sizes_warning_fixes_to_zarr(unknown):
pytest.importorskip("zarr")
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
with StringIO() as f:
y.to_zarr(f)
y.compute_chunk_sizes()
with pytest.raises(ValueError, match="irregular chunking"):
with StringIO() as f:
y.to_zarr(f)
def test_compute_chunk_sizes_warning_fixes_to_svg(unknown):
y = unknown
with pytest.raises(NotImplementedError, match="compute_chunk_sizes"):
y.to_svg()
y.compute_chunk_sizes()
y.to_svg()
def test_compute_chunk_sizes_warning_fixes_concatenate():
x = _known(num=100).reshape(10, 10)
idx = x.sum(axis=0) > 0
y1 = x[idx]
y2 = x[idx]
with pytest.raises(ValueError, match="compute_chunk_sizes"):
da.concatenate((y1, y2), axis=1)
y1.compute_chunk_sizes()
y2.compute_chunk_sizes()
da.concatenate((y1, y2), axis=1)
def test_compute_chunk_sizes_warning_fixes_reduction(unknown):
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
da.argmin(y)
y.compute_chunk_sizes()
da.argmin(y)
def test_compute_chunk_sizes_warning_fixes_reshape(unknown):
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
da.reshape(y, (5, 5))
y.compute_chunk_sizes()
da.reshape(y, (5, 5))
def test_compute_chunk_sizes_warning_fixes_slicing():
x = _known(num=100).reshape(10, 10)
y = x[x.sum(axis=0) < 0]
with pytest.raises(ValueError, match="compute_chunk_sizes"):
y[:3, :]
y.compute_chunk_sizes()
y[:3, :]
def test_rechunk_auto():
x = da.ones(10, chunks=(1,))
y = x.rechunk()
assert y.npartitions == 1
def test_chunk_assignment_invalidates_cached_properties():
x = da.ones((4,), chunks=(1,))
y = x.copy()
# change chunks directly, which should change all of the tested properties
y._chunks = ((2, 2), (0, 0, 0, 0))
assert not x.ndim == y.ndim
assert not x.shape == y.shape
assert not x.size == y.size
assert not x.numblocks == y.numblocks
assert not x.npartitions == y.npartitions
assert not x.__dask_keys__() == y.__dask_keys__()
assert not np.array_equal(x._key_array, y._key_array)
def test_map_blocks_series():
pd = pytest.importorskip("pandas")
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq as dd_assert_eq
x = da.ones(10, chunks=(5,))
s = x.map_blocks(pd.Series)
assert isinstance(s, dd.Series)
assert s.npartitions == x.npartitions
dd_assert_eq(s, s)
@pytest.mark.xfail(reason="need to remove singleton index dimension")
def test_map_blocks_dataframe():
pd = pytest.importorskip("pandas")
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq as dd_assert_eq
x = da.ones((10, 2), chunks=(5, 2))
s = x.map_blocks(pd.DataFrame)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == x.npartitions
dd_assert_eq(s, s)
def test_dask_layers():
a = da.ones(1)
assert a.dask.layers.keys() == {a.name}
assert a.dask.dependencies == {a.name: set()}
assert a.__dask_layers__() == (a.name,)
b = a + 1
assert b.dask.layers.keys() == {a.name, b.name}
assert b.dask.dependencies == {a.name: set(), b.name: {a.name}}
assert b.__dask_layers__() == (b.name,)
def test_len_object_with_unknown_size():
a = da.random.random(size=(20, 2))
b = a[a < 0.5]
with pytest.raises(ValueError, match="on object with unknown chunk size"):
assert len(b)
|
"""
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
import os
import sys
import subprocess
import re
import textwrap
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler # noqa: F401
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from distutils.errors import UnknownFileError
from numpy.distutils.misc_util import (msvc_runtime_library,
msvc_runtime_version,
msvc_runtime_major,
get_build_architecture)
def get_msvcr_replacement():
"""Replacement for outdated version of get_msvcr from cygwinccompiler"""
msvcr = msvc_runtime_library()
return [] if msvcr is None else [msvcr]
# monkey-patch cygwinccompiler with our updated version from misc_util
# to avoid getting an exception raised on Python 3.5
distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement
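# Rough illustration of what the replacement returns. The concrete names depend on
# the interpreter's build and on msvc_runtime_library(), so treat these values as
# assumptions rather than guarantees:
#
#     distutils.cygwinccompiler.get_msvcr()
#     # -> e.g. ['msvcr90'] for a Python built with MSVC 9.0, or [] when the
#     #    runtime cannot be determined, instead of raising ValueError for
#     #    compiler versions the stock get_msvcr does not recognise.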
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
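# Illustrative `objdump -p` output these patterns are meant to anchor on and parse.
# The exact formatting varies between binutils versions, so this is an assumption
# about the general shape rather than a verbatim transcript:
#
#     [Ordinal/Name Pointer] Table
#         [   1] PyArg_ParseTuple
#         [   2] PyArg_ParseTupleAndKeywords
#
# _START locates the table header; _TABLE then captures the ordinal and the symbol
# name from each following line until the format no longer matches.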
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
""" A modified MingW32 compiler compatible with an MSVC built Python.
"""
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
dry_run, force)
        # we need to support gcc 3.2, whose version string doesn't match the
        # standard get_versions regex
if self.gcc_version is None:
p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
stdout=subprocess.PIPE)
out_string = p.stdout.read()
p.stdout.close()
result = re.search(r'(\d+\.\d+)', out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if self.linker_dll == 'dllwrap':
# Commented out '--driver-name g++' part that fixes weird
# g++.exe: g++: No such file or directory
# error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
# If the --driver-name part is required for some environment
# then make the inclusion of this part specific to that
# environment.
self.linker = 'dllwrap' # --driver-name g++'
elif self.linker_dll == 'gcc':
self.linker = 'g++'
# **changes: eric jones 4/11/01
# 1. Check for import library on Windows. Build if it doesn't exist.
build_import_library()
# Check for custom msvc runtime library on Windows. Build if it doesn't exist.
msvcr_success = build_msvcr_library()
msvcr_dbg_success = build_msvcr_library(debug=True)
if msvcr_success or msvcr_dbg_success:
# add preprocessor statement for using customized msvcr lib
self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
# Define the MSVC version as hint for MinGW
msvcr_version = msvc_runtime_version()
if msvcr_version:
self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)
# MS_WIN64 should be defined when building for amd64 on windows,
        # but the Python headers define it only for MS compilers, which has all
        # kinds of bad consequences, like using Py_InitModule4 instead of
        # Py_InitModule4_64, etc. So we add it here
if get_build_architecture() == 'AMD64':
if self.gcc_version < "4.0":
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'
' -Wall -Wstrict-prototypes',
linker_exe='gcc -g -mno-cygwin',
linker_so='gcc -g -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
linker_exe='gcc -g',
linker_so='gcc -g -shared')
else:
if self.gcc_version <= "3.0.0":
self.set_executables(
compiler='gcc -mno-cygwin -O2 -w',
compiler_so='gcc -mno-cygwin -mdll -O2 -w'
' -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='%s -mno-cygwin -mdll -static %s' %
(self.linker, entry_point))
elif self.gcc_version < "4.0":
self.set_executables(
compiler='gcc -mno-cygwin -O2 -Wall',
compiler_so='gcc -mno-cygwin -O2 -Wall'
' -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='g++ -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(compiler='gcc -O2 -Wall',
compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ ',
linker_so='g++ -shared')
# added for python2.3 support
# we can't pass it through set_executables because pre 2.2 would fail
self.compiler_cxx = ['g++']
# Maybe we should also append -mthreads, but then the finished dlls
# need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
# thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
return
# __init__ ()
def link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols = None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# Include the appropriate MSVC runtime library if Python was built
# with MSVC >= 7.0 (MinGW standard is msvcrt)
runtime_library = msvc_runtime_library()
if runtime_library:
if not libraries:
libraries = []
libraries.append(runtime_library)
args = (self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, #export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
if self.gcc_version < "3.0.0":
func = distutils.cygwinccompiler.CygwinCCompiler.link
else:
func = UnixCCompiler.link
func(*args[:func.__code__.co_argcount])
return
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
# added these lines to strip off windows drive letters
# without it, .o files are placed next to .c files
# instead of the build directory
drv, base = os.path.splitdrive(base)
if drv:
base = base[1:]
if ext not in (self.src_extensions + ['.rc', '.res']):
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def find_python_dll():
# We can't do much here:
# - find it in the virtualenv (sys.prefix)
# - find it in python main dir (sys.base_prefix, if in a virtualenv)
# - sys.real_prefix is main dir for virtualenvs in Python 2.7
# - in system32,
    # - otherwise (WinSxS), I don't know how to get it.
stems = [sys.prefix]
if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
stems.append(sys.real_prefix)
sub_dirs = ['', 'lib', 'bin']
# generate possible combinations of directory trees and sub-directories
lib_dirs = []
for stem in stems:
for folder in sub_dirs:
lib_dirs.append(os.path.join(stem, folder))
# add system directory as well
if 'SYSTEMROOT' in os.environ:
lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))
# search in the file system for possible candidates
major_version, minor_version = tuple(sys.version_info[:2])
patterns = ['python%d%d.dll']
for pat in patterns:
dllname = pat % (major_version, minor_version)
print("Looking for %s" % dllname)
for folder in lib_dirs:
dll = os.path.join(folder, dllname)
if os.path.exists(dll):
return dll
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
return st.stdout.readlines()
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
into the given def file.
The .def file will be overwritten"""
dump = dump_table(dll)
for i in range(len(dump)):
if _START.match(dump[i].decode()):
break
else:
raise ValueError("Symbol table not found")
syms = []
for j in range(i+1, len(dump)):
m = _TABLE.match(dump[j].decode())
if m:
syms.append((int(m.group(1).strip()), m.group(2)))
else:
break
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
d = open(dfile, 'w')
d.write('LIBRARY %s\n' % os.path.basename(dll))
d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
d.write(';DATA PRELOAD SINGLE\n')
d.write('\nEXPORTS\n')
for s in syms:
#d.write('@%d %s\n' % (s[0], s[1]))
d.write('%s\n' % s[1])
d.close()
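# The resulting .def file is a plain module-definition listing, roughly of the form
# below. Symbol names are illustrative; ordinals are parsed but currently not
# written out, as the commented-out '@%d' line above shows:
#
#     LIBRARY python39.dll
#     ;CODE PRELOAD MOVEABLE DISCARDABLE
#     ;DATA PRELOAD SINGLE
#
#     EXPORTS
#     PyArg_ParseTuple
#     PyArg_ParseTupleAndKeywords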
def find_dll(dll_name):
arch = {'AMD64' : 'amd64',
'Intel' : 'x86'}[get_build_architecture()]
def _find_dll_in_winsxs(dll_name):
# Walk through the WinSxS directory to find the dll.
winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
'winsxs')
if not os.path.exists(winsxs_path):
return None
for root, dirs, files in os.walk(winsxs_path):
if dll_name in files and arch in root:
return os.path.join(root, dll_name)
return None
def _find_dll_in_path(dll_name):
# First, look in the Python directory, then scan PATH for
# the given dll name.
for path in [sys.prefix] + os.environ['PATH'].split(';'):
filepath = os.path.join(path, dll_name)
if os.path.exists(filepath):
return os.path.abspath(filepath)
return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
def build_msvcr_library(debug=False):
if os.name != 'nt':
return False
    # If the version number is None, then we couldn't find the MSVC runtime at
    # all, because we are running on a custom-compiled Python distribution;
    # trust that the compiler is the same as the one available to us now, and
    # that it is capable of linking with the correct runtime without any extra
    # options.
msvcr_ver = msvc_runtime_major()
if msvcr_ver is None:
log.debug('Skip building import library: '
'Runtime is not compiled with MSVC')
return False
# Skip using a custom library for versions < MSVC 8.0
if msvcr_ver < 80:
log.debug('Skip building msvcr library:'
' custom functionality not present')
return False
msvcr_name = msvc_runtime_library()
if debug:
msvcr_name += 'd'
# Skip if custom library already exists
out_name = "lib%s.a" % msvcr_name
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building msvcr library: "%s" exists' %
(out_file,))
return True
# Find the msvcr dll
msvcr_dll_name = msvcr_name + '.dll'
dll_file = find_dll(msvcr_dll_name)
if not dll_file:
log.warn('Cannot build msvcr library: "%s" not found' %
msvcr_dll_name)
return False
def_name = "lib%s.def" % msvcr_name
def_file = os.path.join(sys.prefix, 'libs', def_name)
log.info('Building msvcr library: "%s" (from %s)' \
% (out_file, dll_file))
# Generate a symbol definition file from the msvcr dll
generate_def(dll_file, def_file)
# Create a custom mingw library for the given symbol definitions
cmd = ['dlltool', '-d', def_file, '-l', out_file]
retcode = subprocess.call(cmd)
# Clean up symbol definitions
os.remove(def_file)
return (not retcode)
def build_import_library():
if os.name != 'nt':
return
arch = get_build_architecture()
if arch == 'AMD64':
return _build_import_library_amd64()
elif arch == 'Intel':
return _build_import_library_x86()
else:
raise ValueError("Unhandled arch %s" % arch)
def _check_for_import_lib():
"""Check if an import library for the Python runtime already exists."""
major_version, minor_version = tuple(sys.version_info[:2])
# patterns for the file name of the library itself
patterns = ['libpython%d%d.a',
'libpython%d%d.dll.a',
'libpython%d.%d.dll.a']
# directory trees that may contain the library
stems = [sys.prefix]
if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
stems.append(sys.real_prefix)
# possible subdirectories within those trees where it is placed
sub_dirs = ['libs', 'lib']
# generate a list of candidate locations
candidates = []
for pat in patterns:
filename = pat % (major_version, minor_version)
for stem_dir in stems:
for folder in sub_dirs:
candidates.append(os.path.join(stem_dir, folder, filename))
# test the filesystem to see if we can find any of these
for fullname in candidates:
if os.path.isfile(fullname):
# already exists, in location given
return (True, fullname)
# needs to be built, preferred location given first
return (False, candidates[0])
def _build_import_library_amd64():
out_exists, out_file = _check_for_import_lib()
if out_exists:
log.debug('Skip building import library: "%s" exists', out_file)
return
# get the runtime dll for which we are building import library
dll_file = find_python_dll()
log.info('Building import library (arch=AMD64): "%s" (from %s)' %
(out_file, dll_file))
# generate symbol list from this library
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
generate_def(dll_file, def_file)
# generate import library from this symbol list
cmd = ['dlltool', '-d', def_file, '-l', out_file]
subprocess.Popen(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
"""
out_exists, out_file = _check_for_import_lib()
if out_exists:
log.debug('Skip building import library: "%s" exists', out_file)
return
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
lib_file = os.path.join(sys.prefix, 'libs', lib_name)
if not os.path.isfile(lib_file):
# didn't find library file in virtualenv, try base distribution, too,
# and use that instead if found there. for Python 2.7 venvs, the base
# directory is in attribute real_prefix instead of base_prefix.
if hasattr(sys, 'base_prefix'):
base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
elif hasattr(sys, 'real_prefix'):
base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
else:
base_lib = '' # os.path.isfile('') == False
if os.path.isfile(base_lib):
lib_file = base_lib
else:
log.warn('Cannot build import library: "%s" not found', lib_file)
return
log.info('Building import library (ARCH=x86): "%s"', out_file)
from numpy.distutils import lib2def
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
nm_output = lib2def.getnm(nm_cmd)
dlist, flist = lib2def.parse_nm(nm_output)
lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
dll_name = find_python_dll ()
args = (dll_name, def_file, out_file)
cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args
status = os.system(cmd)
# for now, fail silently
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with Visual Studio manifests. Manifests are a mechanism to
# enforce strong DLL versioning on Windows and have nothing to do with the
# distutils MANIFEST. Manifests are XML files with version info, used by the
# OS loader; they are necessary when linking against a DLL that is not in the
# system path. In particular, the official Python 2.6 binary is built against
# MS runtime 9 (the one from VS 2008), which is not available on most Windows
# systems; the Python 2.6 installer does install it in the WinSxS (side-by-side)
# directory, but a manifest is required for the loader to find it there. This
# is a big mess; thanks, MS, for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by Python. I
# submitted a patch to get this version, but it was only included for Python
# 2.6.1 and above. So for versions below that, we use a "best guess".
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
try:
import msvcrt
        # I took one version from my SxS directory: no idea if it is the right
        # one, and we can't retrieve it from Python.
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
_MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
del major, minor, rest
except ImportError:
        # If we are here, it means Python was not built with MSVC. Not sure
        # what to do in that case: manifest building will fail, but it should
        # not be needed in that case anyway.
log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
"""Given a major and minor version of the MSVCR, returns the
corresponding XML file."""
try:
fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
except KeyError:
raise ValueError("Version %d,%d of MSVCRT not supported yet" %
(maj, min))
    # Don't be fooled: it looks like XML, but it is not. In particular, it
    # should not have any whitespace before the start, and its size should be
    # divisible by 4, most likely due to alignment constraints when the xml is
    # embedded in the binary...
    # This template was copied directly from the python 2.6 binary (using
    # strings.exe from mingw on python.exe).
template = textwrap.dedent("""\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>""")
return template % {'fullver': fullver, 'maj': maj, 'min': min}
def manifest_rc(name, type='dll'):
"""Return the rc file used to generate the res file which will be embedded
as manifest for given manifest file name, of given type ('dll' or
'exe').
Parameters
----------
name : str
name of the manifest file to embed
type : str {'dll', 'exe'}
type of the binary which will embed the manifest
"""
if type == 'dll':
rctype = 2
elif type == 'exe':
rctype = 1
else:
raise ValueError("Type %s not supported" % type)
return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
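# Usage sketch (not part of the original module): emit the manifest XML for
# MSVCR 9.0 plus an .rc stub that embeds it into a DLL. The file names are
# illustrative assumptions, and msvc_manifest_xml only knows this version when
# '90' is present in _MSVCRVER_TO_FULLVER (i.e. on win32).
def _example_write_manifest(man_path='msvcr90.manifest', rc_path='msvcr90.rc'):
    with open(man_path, 'w') as f:
        f.write(msvc_manifest_xml(9, 0))
    with open(rc_path, 'w') as f:
        f.write(manifest_rc(man_path, type='dll'))
    return man_path, rc_path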
def check_embedded_msvcr_match_linked(msver):
"""msver is the ms runtime version used for the MANIFEST."""
# check msvcr major version are the same for linking and
# embedding
maj = msvc_runtime_major()
if maj:
if not maj == int(msver):
raise ValueError(
"Discrepancy between linked msvcr " \
"(%d) and the one about to be embedded " \
"(%d)" % (int(msver), maj))
def configtest_name(config):
base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
return os.path.splitext(base)[0]
def manifest_name(config):
    # Get configtest name (including suffix)
root = configtest_name(config)
exext = config.compiler.exe_extension
return root + exext + ".manifest"
def rc_name(config):
# Get configtest name (including suffix)
root = configtest_name(config)
return root + ".rc"
def generate_manifest(config):
msver = get_build_msvc_version()
if msver is not None:
if msver >= 8:
check_embedded_msvcr_match_linked(msver)
ma = int(msver)
mi = int((msver - ma) * 10)
# Write the manifest file
manxml = msvc_manifest_xml(ma, mi)
man = open(manifest_name(config), "w")
config.temp_files.append(manifest_name(config))
man.write(manxml)
man.close()
|
#!/usr/bin/env python
#
# Public Domain 2014-2015 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import os, re, sys
from collections import defaultdict
from glob import glob
import json
from datetime import datetime
try:
from stat_data \
import groups, no_scale_per_second_list, no_clear_list, prefix_list
except ImportError:
print >>sys.stderr, "Could not import stat_data.py, it should be" \
"in the same directory as %s" % sys.argv[0]
sys.exit(-1)
thisyear = datetime.today().year
def parsetime(s):
return datetime.strptime(s, "%b %d %H:%M:%S").replace(year=thisyear)
if sys.version_info<(2,7,0):
print >>sys.stderr, "You need python 2.7 or later to run this script"
sys.exit(-1)
# Munge a set of entries for a title into plottable per-second data.
def munge(args, title, values):
t0, v0 = values[0]
start_time = parsetime(t0)
ylabel = ' '.join(title.split(' ')).lower()
if title.split(' ')[1] != 'spinlock' and \
title.split(' ', 1)[1] in no_scale_per_second_list:
seconds = 1
else:
t1, v1 = values[1]
seconds = (parsetime(t1) - start_time).seconds
ylabel += ' per second'
if seconds == 0:
seconds = 1
stats_cleared = False
if args.clear or title.split(' ', 1)[1] in no_clear_list:
stats_cleared = True
# Split the values into a dictionary of y-axis values keyed by the x axis
ydata = {}
last_value = 0.0
for t, v in sorted(values):
float_v = float(v)
if not stats_cleared:
float_v = float_v - last_value
# Sometimes WiredTiger stats go backwards without clear, assume
# that means nothing happened
if float_v < 0:
float_v = 0.0
last_value = float(v)
ydata[t] = float_v / seconds
return ylabel, ydata
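# Illustration (not part of the original script; numbers are made up): given two
# samples one second apart with stats that were not cleared,
#   values = [("Jan 01 00:00:00", "10"), ("Jan 01 00:00:01", "25")]
# munge() reports per-second deltas, so the second timestamp maps to
# (25 - 10) / 1 = 15.0 and the y-axis label gains a "per second" suffix.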
# Parse the command line
import argparse
def main():
    parser = argparse.ArgumentParser(description='Create graphs from '
        'WiredTiger statistics.')
parser.add_argument('--all', '-A', action='store_true',
help='generate separate html files for each stats group')
parser.add_argument('--clear', action='store_true',
help='WiredTiger stats gathered with clear set')
parser.add_argument('--include', '-I', metavar='regexp',
type=re.compile, action='append',
help='only include series with titles matching regexp')
parser.add_argument('--list', action='store_true',
help='only list the parsed series, does not create html file')
parser.add_argument('--output', '-o', metavar='file', default='wtstats',
help='HTML output file prefix')
parser.add_argument('--json', action='store_true',
help='additionally output data series in json format')
parser.add_argument('files', metavar='file', nargs='+',
        help='input files or directories generated by WiredTiger statistics '
            'logging')
args = parser.parse_args()
# Read the input file(s) into a dictionary of lists.
def getfiles(l):
for f in l:
if os.path.isfile(f):
yield f
elif os.path.isdir(f):
for s in glob(os.path.join(f, 'WiredTigerStat*')):
print 'Processing ' + s
yield s
d = defaultdict(list)
for f in getfiles(args.files):
for line in open(f, 'rU'):
month, day, time, v, title = line.strip('\n').split(" ", 4)
d[title].append((month + " " + day + " " + time, v))
# Process the series, eliminate constants
for title, values in sorted(d.iteritems()):
skip = True
t0, v0 = values[0]
for t, v in values:
if v != v0:
skip = False
break
if skip:
#print "Skipping", title
del d[title]
# Common prefix / suffix elimination
prefix = suffix = None
def common_prefix(a, b):
while not b.startswith(a):
a = a[:-1]
return a
def common_suffix(a, b):
while not a.endswith(b):
b = b[1:]
return b
def output_series(results, prefix=None, grouplist=[]):
# add .html ending if not present
filename, ext = os.path.splitext(args.output)
if ext == '':
ext = '.html'
# open the output file based on prefix
if prefix == None:
outputname = filename + ext
elif len(grouplist) == 0:
outputname = filename +'.' + prefix + ext
else:
outputname = filename +'.group.' + prefix + ext
if prefix != None and len(grouplist) == 0:
this_series = []
for title, ydata in results:
if not prefix in title:
continue
#print 'Appending to dataset: ' + title
this_series.append((title, ydata))
elif prefix != None and len(grouplist) > 0:
this_series = []
for title, ydata in results:
for subgroup in grouplist:
if not subgroup in title:
continue
# print 'Appending to dataset: ' + title
this_series.append((title, ydata))
else:
this_series = results
if len(this_series) == 0:
print 'Output: ' + outputname + ' has no data. Do not create.'
return
json_output = { "series": [] }
for title, ydata in this_series:
json_output["series"].append({
"key": title,
"values": ydata,
});
# load template
this_path = os.path.dirname(os.path.realpath(__file__))
srcfile = os.path.join(this_path, 'wtstats.html.template')
try:
srcfile = open(srcfile)
contents = srcfile.read()
except IOError:
print >>sys.stderr, "Cannot find template file 'wtstats.html." \
"template'. See ./template/README.md for more information."
sys.exit(-1)
srcfile.close()
# if --json write data to <filename>.json
if args.json:
jsonfile = filename + '.json'
with open(jsonfile, 'w') as f:
json.dump(json_output, f)
print "created %s" % jsonfile
# write output file
dstfile = open(outputname, 'wt')
replaced_contents = contents.replace('"### INSERT DATA HERE ###"',
json.dumps(json_output))
dstfile.write(replaced_contents)
dstfile.close()
print "created %s" % dstfile.name
# Split out the data, convert timestamps
results = []
for title, values in sorted(d.iteritems()):
title, ydata = munge(args, title, values)
# Ignore entries if a list of regular expressions was given
if args.include and not [r for r in args.include if r.search(title)]:
continue
prefix = title if prefix is None else common_prefix(prefix, title)
suffix = title if suffix is None else common_suffix(title, suffix)
results.append((title, ydata))
# Process titles, eliminate common prefixes and suffixes
if prefix or suffix:
new_results = []
for title, ydata in results:
title = title[len(prefix):]
if suffix:
title = title[:-len(suffix)]
new_results.append((title, ydata))
results = new_results
# Are we just listing the results?
if args.list:
for title, ydata in results:
print title
sys.exit(0)
output_series(results)
# If the user wants the stats split up by prefix type do so.
if args.all:
for prefix in prefix_list:
output_series(results, prefix)
for group in groups.keys():
output_series(results, group, groups[group])
if __name__ == '__main__':
main()
|
import textwrap
from datetime import datetime, timedelta
import uuid
from elasticsearch import Elasticsearch
from airflow import DAG # noqa
from airflow import macros # noqa
from airflow.operators.python_operator import PythonOperator # noqa
from pyhocon import ConfigFactory
from databuilder.extractor.neo4j_search_data_extractor import Neo4jSearchDataExtractor
from databuilder.extractor.athena_metadata_extractor import AthenaMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.publisher.elasticsearch_publisher import ElasticsearchPublisher
from databuilder.extractor.neo4j_extractor import Neo4jExtractor
from databuilder.job.job import DefaultJob
from databuilder.loader.file_system_elasticsearch_json_loader import FSElasticsearchJSONLoader
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.publisher import neo4j_csv_publisher
from databuilder.publisher.neo4j_csv_publisher import Neo4jCsvPublisher
from databuilder.task.task import DefaultTask
from databuilder.transformer.base_transformer import NoopTransformer
dag_args = {
'concurrency': 10,
# One dagrun at a time
'max_active_runs': 1,
    # Daily at 11:00 UTC (3 AM PST / 4 AM PDT)
'schedule_interval': '0 11 * * *',
'catchup': False
}
default_args = {
'owner': 'amundsen',
'start_date': datetime(2018, 6, 18),
'depends_on_past': False,
'email': [''],
'email_on_failure': False,
'email_on_retry': False,
'retries': 3,
'priority_weight': 10,
'retry_delay': timedelta(minutes=5),
'execution_timeout': timedelta(minutes=120)
}
# NEO4J cluster endpoints
NEO4J_ENDPOINT = 'bolt://127.0.0.1:7687'
neo4j_endpoint = NEO4J_ENDPOINT
neo4j_user = 'neo4j'
neo4j_password = 'test'
es = Elasticsearch([
{'host': '127.0.0.1'},
])
# TODO: user provides a list of schemas for indexing
SUPPORTED_SCHEMAS = ['sampledb']
# String format - ('schema1', 'schema2', ..., 'schemaN')
SUPPORTED_SCHEMA_SQL_IN_CLAUSE = "('{schemas}')".format(schemas="', '".join(SUPPORTED_SCHEMAS))
OPTIONAL_TABLE_NAMES = ''
AWS_ACCESS = 'YOUR_ACCESS_KEY'
AWS_SECRET = 'YOUR_SECRET_KEY'
def connection_string():
access_key = AWS_ACCESS
secret = AWS_SECRET
host = 'athena.us-east-1.amazonaws.com'
extras = 's3_staging_dir=s3://aws-athena-query-results-032106861074-us-east-1/'
return "awsathena+rest://%s:%s@%s:443/?%s" % (access_key, secret, host, extras)
def create_table_extract_job(**kwargs):
where_clause_suffix = textwrap.dedent("""
where table_schema in {schemas}
""").format(schemas=SUPPORTED_SCHEMA_SQL_IN_CLAUSE)
tmp_folder = '/var/tmp/amundsen/table_metadata'
node_files_folder = '{tmp_folder}/nodes/'.format(tmp_folder=tmp_folder)
relationship_files_folder = '{tmp_folder}/relationships/'.format(tmp_folder=tmp_folder)
job_config = ConfigFactory.from_dict({
'extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY):
where_clause_suffix,
'extractor.athena_metadata.extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
connection_string(),
'extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.CATALOG_KEY): "'AwsDataCatalog'",
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
node_files_folder,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
node_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
relationship_files_folder,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
neo4j_endpoint,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
neo4j_user,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
neo4j_password,
'publisher.neo4j.{}'.format(neo4j_csv_publisher.JOB_PUBLISH_TAG):
'unique_tag', # should use unique tag here like {ds}
})
job = DefaultJob(conf=job_config,
task=DefaultTask(extractor=AthenaMetadataExtractor(), loader=FsNeo4jCSVLoader(),
transformer=NoopTransformer()),
publisher=Neo4jCsvPublisher())
job.launch()
def create_es_publisher_sample_job():
# loader saves data to this location and publisher reads it from here
extracted_search_data_path = '/var/tmp/amundsen/search_data.json'
task = DefaultTask(loader=FSElasticsearchJSONLoader(),
extractor=Neo4jSearchDataExtractor(),
transformer=NoopTransformer())
# elastic search client instance
elasticsearch_client = es
# unique name of new index in Elasticsearch
elasticsearch_new_index_key = 'tables' + str(uuid.uuid4())
# related to mapping type from /databuilder/publisher/elasticsearch_publisher.py#L38
elasticsearch_new_index_key_type = 'table'
# alias for Elasticsearch used in amundsensearchlibrary/search_service/config.py as an index
elasticsearch_index_alias = 'table_search_index'
job_config = ConfigFactory.from_dict({
'extractor.search_data.extractor.neo4j.{}'.format(Neo4jExtractor.GRAPH_URL_CONFIG_KEY): neo4j_endpoint,
'extractor.search_data.extractor.neo4j.{}'.format(Neo4jExtractor.MODEL_CLASS_CONFIG_KEY):
'databuilder.models.table_elasticsearch_document.TableESDocument',
'extractor.search_data.extractor.neo4j.{}'.format(Neo4jExtractor.NEO4J_AUTH_USER): neo4j_user,
'extractor.search_data.extractor.neo4j.{}'.format(Neo4jExtractor.NEO4J_AUTH_PW): neo4j_password,
'loader.filesystem.elasticsearch.{}'.format(FSElasticsearchJSONLoader.FILE_PATH_CONFIG_KEY):
extracted_search_data_path,
'loader.filesystem.elasticsearch.{}'.format(FSElasticsearchJSONLoader.FILE_MODE_CONFIG_KEY): 'w',
'publisher.elasticsearch.{}'.format(ElasticsearchPublisher.FILE_PATH_CONFIG_KEY):
extracted_search_data_path,
'publisher.elasticsearch.{}'.format(ElasticsearchPublisher.FILE_MODE_CONFIG_KEY): 'r',
'publisher.elasticsearch.{}'.format(ElasticsearchPublisher.ELASTICSEARCH_CLIENT_CONFIG_KEY):
elasticsearch_client,
'publisher.elasticsearch.{}'.format(ElasticsearchPublisher.ELASTICSEARCH_NEW_INDEX_CONFIG_KEY):
elasticsearch_new_index_key,
'publisher.elasticsearch.{}'.format(ElasticsearchPublisher.ELASTICSEARCH_DOC_TYPE_CONFIG_KEY):
elasticsearch_new_index_key_type,
'publisher.elasticsearch.{}'.format(ElasticsearchPublisher.ELASTICSEARCH_ALIAS_CONFIG_KEY):
elasticsearch_index_alias
})
job = DefaultJob(conf=job_config,
task=task,
publisher=ElasticsearchPublisher())
job.launch()
with DAG('amundsen_databuilder', default_args=default_args, **dag_args) as dag:
create_table_extract_job()
# create_table_extract_job = PythonOperator(
# task_id='create_table_extract_job',
# python_callable=create_table_extract_job
# )
create_es_index_job = PythonOperator(
task_id='create_es_publisher_sample_job',
python_callable=create_es_publisher_sample_job
)
create_es_publisher_sample_job()
|
from moha.system.wavefunction.base import BaseWaveFunction
from moha.system.basis_set.ci_basis_set import CIBasisSet
import numpy as np
class CIWaveFunction(BaseWaveFunction):
"""Configuration interaction wavefunction class.
Attributes
----------
nelec : int
Number of electrons.
occ : dict
Occupation number of the wavefunction.
nspatial : int
Number of spatial orbitals.
basis_set : Basis
Basis set of the wavefunction.
coefficients : np.ndarray
        Coefficients of the wavefunction.
Properties
----------
ncoefficients : int
Number of coefficients.
    nspin : int
        Number of spin orbitals.
    spin : int
        Spin of the wavefunction.
    seniority : int
        Seniority of the wavefunction.
Methods
-------
__init__(self, nelec, nspatial, basis_set=None, coefficients=None)
Initialize the wavefunction.
assign_nelec(self, nelec)
Assign the number of electrons.
assign_nspatial(self, nspatial)
Assign the number of spatial orbitals.
assign_occ(self, occ)
Assign the occupation number of the wavefunction.
assign_basis_set(self, basis_set)
Assign basis set of the wavefunction.
assign_coefficients(self, coefficients)
Assign coefficients of the wavefunction.
"""
def __init__(self,nelec,nspatial,occ={},basis_set=None,coefficients=None):
"""Initialize the wavefunction.
Parameters
----------
nelec : int
Number of electrons.
        nspatial : int
            Number of spatial orbitals.
        occ : dict
            Occupation number of the wavefunction.
        basis_set : CIBasisSet
            Basis set of the wavefunction.
        coefficients : np.ndarray
            Coefficients of the wavefunction.
"""
super().__init__(nelec,nspatial,occ,basis_set,coefficients)
@property
def configuration(self):
"""Return the cofiguration of the wavefunction.
Returns
-------
c : dict
Configuration of the wavefunction.
"""
c = {}
for spin in self.occ:
c[spin] = [1]*self.occ[spin] + [0]*(self.nspatial - self.occ[spin])
return c
@property
def ncoefficients(self):
"""Return the number of wavefunction coefficients.
Returns
-------
ncoefficients : int
Number of coefficients.
Raises
------
TypeError
If coefficients is not a np.ndarray instance.
"""
if not isinstance(self.coefficients,np.ndarray):
raise TypeError("Coefficients is not a np.ndarray instance.")
return self.coefficients.size
@property
def seniority(self):
"""Return the seniority of the wavefunction.
Seniority of a Slater determinant is its number of unpaired electrons. The seniority of the
wavefunction is the expected number of unpaired electrons.
Returns
-------
seniority : int
Seniority of the wavefunction.
Notes
-----
`None` means that all possible seniority are allowed.
"""
return None
def assign_basis_set(self, basis_set):
"""Assign the basis_set of the wavefunction.
Parameters
----------
basis_set
Basis set of the wavefunction.
Raises
------
TypeError
If basis set is not a CIBasisSet instance.
"""
if not isinstance(basis_set,CIBasisSet):
raise TypeError("Basis set must be CIBasisSet instance.")
self.basis_set = basis_set
def assign_coefficients(self, coefficients):
"""Assign the coefficients of the wavefunction.
Parameters
----------
coefficients
Parameters of the wavefunction.
Raises
------
TypeError
If coefficients is not a np.ndarray.
"""
if coefficients is None:
coefficients = np.zeros((self.nspatial))
elif not isinstance(coefficients,np.ndarray):
raise TypeError("Coefficients is not a np.ndarray instance.")
self.coefficients = coefficients
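# Minimal usage sketch (illustrative; assumes the base class accepts
# basis_set=None, otherwise a CIBasisSet instance must be supplied). For 4
# electrons in 6 spatial orbitals with occ = {"alpha": 2, "beta": 2}, the
# `configuration` property builds [1, 1, 0, 0, 0, 0] for each spin channel:
#
#   wfn = CIWaveFunction(nelec=4, nspatial=6, occ={"alpha": 2, "beta": 2})
#   wfn.configuration  # {"alpha": [1, 1, 0, 0, 0, 0], "beta": [1, 1, 0, 0, 0, 0]}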
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to create SSL splits from a dataset.
"""
import json
import os
from collections import defaultdict
import numpy as np
import tensorflow as tf
from absl import app
from tqdm import trange, tqdm
from libml import utils
def get_class(serialized_example):
return tf.compat.v1.io.parse_single_example(serialized_example, features={'label': tf.compat.v1.io.FixedLenFeature([], tf.int64)})['label']
#Just returns the label of an item
def main(argv):
argv.pop(0)
if any(not tf.compat.v1.gfile.Exists(f) for f in argv[1:]):
raise FileNotFoundError(argv[1:])
target = argv[0]
input_files = argv[1:]
count = 0
id_class = []
class_id = defaultdict(list)
print('Computing class distribution')
tf.compat.v1.disable_eager_execution() #Added
dataset = tf.compat.v1.data.TFRecordDataset(input_files).map(get_class, 4).batch(1 << 10)
it = dataset.make_one_shot_iterator().get_next()
try:
with tf.compat.v1.Session() as session, tqdm(leave=False) as t:
while 1:
old_count = count
for i in session.run(it):
id_class.append(i) #id_class = [ 1, 0, 4, 3, 8, ...]
class_id[i].append(count)
count += 1
t.update(count - old_count)
except tf.errors.OutOfRangeError:
pass
print('%d records found' % count)
nclass = len(class_id) #class_id = {0: [] , 1: [], ...}
assert min(class_id.keys()) == 0 and max(class_id.keys()) == (nclass - 1)
train_stats = np.array([len(class_id[i]) for i in range(nclass)], np.float64) #Number of Images belonging to each class...
train_stats /= train_stats.max()
if 'stl10' in argv[1]:
# All of the unlabeled data is given label 0, but we know that
# STL has equally distributed data among the 10 classes.
train_stats[:] = 1
print(' Stats', ' '.join(['%.2f' % (100 * x) for x in train_stats]))
del class_id
    print('Creating unlabeled dataset in %s' % target)
npos = np.zeros(nclass, np.int64) #[0, 0, 0, ...]
class_data = [[] for _ in range(nclass)]
unlabel = []
tf.compat.v1.gfile.MakeDirs(os.path.dirname(target))
with tf.compat.v1.python_io.TFRecordWriter(target + '-unlabel.tfrecord') as writer_unlabel:
pos, loop = 0, trange(count, desc='Writing records')
for input_file in input_files: #can be skipped for us...
for record in tf.compat.v1.python_io.tf_record_iterator(input_file):
class_data[id_class[pos]].append((pos, record)) #class_data = [ [(1, 1th record)] [] [(0, 0th record)] [] [] ... []]
while True:
c = np.argmax(train_stats - npos / max(npos.max(), 1))
if class_data[c]:
p, v = class_data[c].pop(0)
unlabel.append(p)
writer_unlabel.write(v)
npos[c] += 1 #npos saves for each class, how many went to unlabelled...
else:
break
pos += 1
loop.update()
for remain in class_data:
for p, v in remain:
unlabel.append(p)
writer_unlabel.write(v)
loop.close()
with tf.compat.v1.gfile.Open(target + '-unlabel.json', 'w') as writer:
writer.write(json.dumps(dict(distribution=train_stats.tolist(), indexes=unlabel), indent=2, sort_keys=True))
if __name__ == '__main__':
utils.setup_tf()
app.run(main)
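# Illustration (not part of the original script; numbers are made up): the
# class-balancing step above repeatedly picks the class whose share of written
# records lags furthest behind its target share. For example:
#   train_stats = np.array([1.0, 0.5])  # class 0 is twice as frequent as class 1
#   npos        = np.array([3, 1])      # records already written per class
#   np.argmax(train_stats - npos / max(npos.max(), 1))  # -> 1, class 1 lags its target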
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
PROJECT_NAME:TradingBot
NAME:Huobi
AUTHOR:Tong
Create Date:2018/7/25
'''
import time
from Service.Market_API.Utils import *
class huobi_rest_client:
'''
Market data API
'''
    # Get kline (candlestick) data
    def get_kline(self, symbol, period, size=150):
"""
:param symbol
        :param period: allowed values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
        :param size: allowed values: [1, 2000]
:return:
"""
params = {'symbol': symbol,
'period': period,
'size': size}
url = MARKET_URL + '/market/history/kline'
return http_get_request(url, params)
    # Get market depth
def get_depth(symbol, type):
"""
:param symbol
        :param type: allowed values: {percent10, step0, step1, step2, step3, step4, step5}
:return:
"""
params = {'symbol': symbol,
'type': type}
url = MARKET_URL + '/market/depth'
return http_get_request(url, params)
    # Get trade detail
def get_trade(symbol):
"""
:param symbol
:return:
"""
params = {'symbol': symbol}
url = MARKET_URL + '/market/trade'
return http_get_request(url, params)
    # Get merged ticker
def get_ticker(symbol):
"""
:param symbol:
:return:
"""
params = {'symbol': symbol}
url = MARKET_URL + '/market/detail/merged'
return http_get_request(url, params)
    # Get Market Detail: 24-hour trading volume data
def get_detail(symbol):
"""
:param symbol
:return:
"""
params = {'symbol': symbol}
url = MARKET_URL + '/market/detail'
return http_get_request(url, params)
    # Get the supported trading pairs
def get_symbols(long_polling=None):
"""
"""
params = {}
if long_polling:
params['long-polling'] = long_polling
path = '/v1/common/symbols'
return api_key_get(params, path)
'''
Trade/Account API
'''
def get_accounts(self):
"""
:return:
"""
path = "/v1/account/accounts"
params = {}
return api_key_get(params, path)
ACCOUNT_ID = None
    # Get the current account assets
def get_balance(self, acct_id=None):
"""
:param acct_id
:return:
"""
global ACCOUNT_ID
if not acct_id:
accounts = self.get_accounts()
acct_id = accounts['data'][0]['id'];
url = "/v1/account/accounts/{0}/balance".format(acct_id)
params = {"account-id": acct_id}
return api_key_get(params, url)
    # Place an order
    # Create and execute an order
def send_order(self, amount, source, symbol, _type, price=0):
"""
:param amount:
        :param source: when trading with borrowed (margin) assets, set the request parameter source to 'margin-api'
        :param symbol:
        :param _type: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param price:
:return:
"""
try:
accounts = self.get_accounts()
acct_id = accounts['data'][0]['id']
except BaseException as e:
print('get acct_id error.%s' % e)
            acct_id = self.ACCOUNT_ID
params = {"account-id": acct_id,
"amount": amount,
"symbol": symbol,
"type": _type,
"source": source}
if price:
params["price"] = price
url = '/v1/order/orders/place'
return api_key_post(params, url)
    # Cancel an order
def cancel_order(order_id):
"""
:param order_id:
:return:
"""
params = {}
url = "/v1/order/orders/{0}/submitcancel".format(order_id)
return api_key_post(params, url)
    # Query a single order
def order_info(order_id):
"""
:param order_id:
:return:
"""
params = {}
url = "/v1/order/orders/{0}".format(order_id)
return api_key_get(params, url)
    # Query the match results (fills) of an order
    def order_matchresults(self, order_id):
"""
:param order_id:
:return:
"""
params = {}
url = "/v1/order/orders/{0}/matchresults".format(order_id)
return api_key_get(params, url)
    # Query open orders and order history
def orders_list(symbol, states, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
"""
:param symbol:
        :param states: allowed values {pre-submitted, submitted, partial-filled, partial-canceled, filled, canceled}
        :param types: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
        :param start_date:
        :param end_date:
        :param _from:
        :param direct: allowed values {prev: page backward, next: page forward}
:param size:
:return:
"""
params = {'symbol': symbol,
'states': states}
if types:
params[types] = types
if start_date:
params['start-date'] = start_date
if end_date:
params['end-date'] = end_date
if _from:
params['from'] = _from
if direct:
params['direct'] = direct
if size:
params['size'] = size
url = '/v1/order/orders'
return api_key_get(params, url)
    # Query current and historical trades (match results)
def orders_matchresults(symbol, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
"""
:param symbol:
        :param types: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
        :param start_date:
        :param end_date:
        :param _from:
        :param direct: allowed values {prev: page backward, next: page forward}
:param size:
:return:
"""
params = {'symbol': symbol}
if types:
params[types] = types
if start_date:
params['start-date'] = start_date
if end_date:
params['end-date'] = end_date
if _from:
params['from'] = _from
if direct:
params['direct'] = direct
if size:
params['size'] = size
url = '/v1/order/matchresults'
return api_key_get(params, url)
    # Request a cryptocurrency withdrawal
def withdraw(address, amount, currency, fee=0, addr_tag=""):
"""
:param address_id:
:param amount:
        :param currency: btc, ltc, bcc, eth, etc ... (currencies supported by Huobi Pro)
:param fee:
:param addr-tag:
:return: {
"status": "ok",
"data": 700
}
"""
params = {'address': address,
'amount': amount,
"currency": currency,
"fee": fee,
"addr-tag": addr_tag}
url = '/v1/dw/withdraw/api/create'
return api_key_post(params, url)
    # Request cancellation of a cryptocurrency withdrawal
def cancel_withdraw(address_id):
"""
:param address_id:
:return: {
"status": "ok",
"data": 700
}
"""
params = {}
url = '/v1/dw/withdraw-virtual/{0}/cancel'.format(address_id)
return api_key_post(params, url)
'''
    Margin (loan) API
'''
    # Create and execute a margin order
def send_margin_order(self, amount, source, symbol, _type, price=0):
"""
:param amount:
:param source: 'margin-api'
:param symbol:
        :param _type: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param price:
:return:
"""
try:
accounts = self.get_accounts()
acct_id = accounts['data'][0]['id']
except BaseException as e:
print('get acct_id error.%s' % e)
            acct_id = self.ACCOUNT_ID
params = {"account-id": acct_id,
"amount": amount,
"symbol": symbol,
"type": _type,
"source": 'margin-api'}
if price:
params["price"] = price
url = '/v1/order/orders/place'
return api_key_post(params, url)
    # Transfer from the spot account into the margin account
def exchange_to_margin(symbol, currency, amount):
"""
:param amount:
:param currency:
:param symbol:
:return:
"""
params = {"symbol": symbol,
"currency": currency,
"amount": amount}
url = "/v1/dw/transfer-in/margin"
return api_key_post(params, url)
    # Transfer from the margin account back to the spot account
def margin_to_exchange(symbol, currency, amount):
"""
:param amount:
:param currency:
:param symbol:
:return:
"""
params = {"symbol": symbol,
"currency": currency,
"amount": amount}
url = "/v1/dw/transfer-out/margin"
return api_key_post(params, url)
    # Request a margin loan
def get_margin(symbol, currency, amount):
"""
:param amount:
:param currency:
:param symbol:
:return:
"""
params = {"symbol": symbol,
"currency": currency,
"amount": amount}
url = "/v1/margin/orders"
return api_key_post(params, url)
    # Repay a margin loan
def repay_margin(order_id, amount):
"""
:param order_id:
:param amount:
:return:
"""
params = {"order-id": order_id,
"amount": amount}
url = "/v1/margin/orders/{0}/repay".format(order_id)
return api_key_post(params, url)
    # Margin loan orders
def loan_orders(symbol, currency, start_date="", end_date="", start="", direct="", size=""):
"""
:param symbol:
:param currency:
        :param direct: prev = page backward, next = page forward
:return:
"""
params = {"symbol": symbol,
"currency": currency}
if start_date:
params["start-date"] = start_date
if end_date:
params["end-date"] = end_date
if start:
params["from"] = start
if direct and direct in ["prev", "next"]:
params["direct"] = direct
if size:
params["size"] = size
url = "/v1/margin/loan-orders"
return api_key_get(params, url)
    # Margin account details; supports querying a single currency
def margin_balance(symbol):
"""
:param symbol:
:return:
"""
params = {}
url = "/v1/margin/accounts/balance"
if symbol:
params['symbol'] = symbol
return api_key_get(params, url)
def moving_average(result, begin, end, size):
sum = 0.0
num = count = 0
while count < end:
if count >= begin:
sum += result[count]['close']
num += 1
count += size
sum /= num
return sum
def find_balance(self, symbol, type):
balance = self.get_balance()
for key in balance['data']['list']:
if key['currency'] == symbol and key['type'] == type:
return float(key['balance'])
return -1.0
def sell(self, percent, symbol):
number = round(self.find_balance('eos', 'trade') - 0.01, 2)
if number > 0:
order = self.send_order(number * percent, 'api', symbol, 'sell-market', 0)
if order['status'] == 'ok':
print('sell success')
return number * percent
return -1.0
def buy(self, percent, symbol):
money = round(self.find_balance('usdt', 'trade') - 0.0001, 4)
if money > 0:
order = self.send_order(money * percent, 'api', symbol, 'buy-market', 0)
if order['status'] == 'ok':
print('buy success')
detail = self.order_matchresults(order['data'])
return float(detail['data'][0]['price'])
return -1.0
def check_account(self):
usdt = self.find_balance('usdt', 'trade')
eos = self.find_balance('eos', 'trade')
if usdt < 0.1:
return 1.0
elif eos < 0.1:
return 0.0
else:
res = self.get_kline('eosusdt', '1min', 1)
if eos > (usdt / float(res['data'][0]['close'])):
return 0.75
else:
return 0.25
def price_difference(price1, price2):
if price1 * 0.9945 < price2:
return 1
return 0
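# Usage sketch (illustrative only; assumes valid API keys are configured in
# Service.Market_API.Utils and that the market-data methods accept `self` as
# their first argument):
#
#   client = huobi_rest_client()
#   kline = client.get_kline('btcusdt', '1min', size=10)  # market data
#   balance = client.get_balance()                        # account assets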
|
#!/usr/bin/env python3
# mypy: disallow_untyped_defs
# mypy: disallow_incomplete_defs
from __future__ import annotations
from processor.setting import Setting
from processor.display_settings import (
ResetSetting,
AdvancedSetting,
CurrentSetting,
CO2Setting,
)
from patient.rotary import Rotary, Dir
from patient.lcd import LCD, Align
from patient.backlight import Backlight
from patient.buzzer import Buzzer
from processor.config import config as _config
from patient.mac_address import get_box_name
import pigpio
from typing import Dict, Any, Optional
import time
import threading
class RotaryLCD(Rotary):
def __init__(
self,
config: Dict[str, Setting],
pi: pigpio.pi = None,
event: Optional[threading.Event] = None,
):
super().__init__(config, pi=pi)
shade = _config["patient"]["brightness"].get(int)
self.lcd = LCD(pi=pi)
self.backlight = Backlight(shade=shade, pi=pi)
self.buzzer = Buzzer(pi=pi)
self.buzzer_volume: int = _config["patient"]["buzzer-volume"].get(int)
self.lock = threading.Lock()
self.orig_timer_setting = _config["patient"]["silence-timeout"].get(int)
self.timer_setting: Optional[int] = None
self.waiter = threading.Event() if event is None else event
self.silence_holddown = _config["patient"]["silence-holddown"].get(float)
def external_update(self) -> None:
if (
isinstance(self.value(), (CurrentSetting, CO2Setting))
or self.time_left() > 0
and not self.pushed_in
):
with self.lock:
self.upper_display()
self.lower_display()
def __enter__(self) -> RotaryLCD:
self.lcd.__enter__()
self.backlight.__enter__()
self.buzzer.__enter__()
self.backlight.magenta()
self.lcd.upper("POVM Box name:")
self.lcd.lower(f"{get_box_name():<20}")
self.waiter.wait(3)
self.backlight.green(light=True)
self.lcd.upper("Turn to select alarm ")
self.lcd.lower("Push and turn to set ")
self.waiter.wait(2)
self.backlight.white()
super().__enter__()
# Start with a 10 second silence countdown - reset is unlikely to be needed
self.set_alarm_silence(10, reset=False)
return self
def __exit__(self, *exc: Any) -> None:
self.backlight.cyan()
self.lcd.clear()
self.lcd.upper("Princeton Open Vent")
self.lcd.lower("Patient loop closed")
super().__exit__(*exc)
self.buzzer.__exit__(*exc)
self.backlight.__exit__(*exc)
self.lcd.__exit__(*exc)
if self.pi is not None:
self.pi.stop()
self.pi = None
def reset(self) -> None:
for value in self.config.values():
value.reset()
def release(self) -> None:
value = self.value()
if isinstance(value, ResetSetting) and value.at_maximum():
self.reset()
self.lcd.lower("Reset complete")
super().release()
def pushed_turn(self, dir: Dir) -> None:
with self.lock:
# Top display keeps ID number!
super().pushed_turn(dir)
value = self.value()
if not value.STATIC_UPPER:
self.upper_display()
self.lower_display()
def turn(self, dir: Dir) -> None:
super().turn(dir)
with self.lock:
self.lower_display()
self.upper_display()
def extra_push(self) -> None:
self.timer_setting = self.orig_timer_setting
self.delayed_set_alarm_silence(999, delay=self.silence_holddown)
super().extra_push()
self.alert(False)
self.lcd.upper("Silence duration", pos=Align.CENTER, fill=" ")
self.lcd.lower(f"to {self.timer_setting} s", pos=Align.CENTER, fill=" ")
def extra_release(self) -> None:
if self.timer_setting is None:
return
with self.delay_lock:
if self._delay_timout_setter is None:
self.set_alarm_silence(self.timer_setting)
else:
self._delay_timout_setter.cancel()
self._delay_timout_setter = None
super().extra_release()
self.alert(False)
self.display()
if self.timer_setting is not None:
self.timer_setting = None
def extra_turn(self, dir: Dir) -> None:
if self.timer_setting is None:
return
if dir == Dir.CLOCKWISE and self.timer_setting < 995:
self.timer_setting += 5
elif dir == Dir.COUNTERCLOCKWISE and self.timer_setting > 0:
self.timer_setting -= 5
self.lcd.lower(f"to {self.timer_setting} s", pos=Align.CENTER, fill=" ")
def alert(self, full: bool = True) -> None:
with self.lock:
time_left = self.time_left()
if self.alarms and time_left < 0 and not self.extra_in:
self.backlight.red()
self.buzzer.buzz(self.buzzer_volume)
elif not self.alarms:
self.backlight.white()
self.buzzer.clear()
elif time_left > 0:
self.backlight.yellow()
self.buzzer.clear()
if full:
if time_left > 0:
self.set_alarm_silence(time_left, reset=False)
if isinstance(self.value(), AdvancedSetting):
self.upper_display()
else:
self.lower_display()
super().alert()
def _add_alarm_text(self, string: str) -> str:
time_left = self.time_left()
if time_left > 0:
char = "Q" if self.alarms else "S"
string = f"{string[:13]} {char}:{time_left:.0f}s"
string = f"{string:<20}"
elif self.alarms:
n = len(self.alarms)
if n == 1:
string = string[:14] + " ALARM"
else:
string = string[:13] + f" {n}ALRMS"
return string
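    # Illustration (made-up values): applied to the string "RR Max", 14 seconds
    # of silence left with an active alarm yields "RR Max Q:14s" padded to 20
    # columns; two active alarms with no silence timer yields "RR Max 2ALRMS".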
def upper_display(self) -> None:
if self.extra_in:
return
current_name = self.value().lcd_name
current_number = f"{self._current + 1}"
if isinstance(self.value(), AdvancedSetting):
current_number += chr(ord("a") + self.value()._value % 26)
length_available = 20 - len(current_number) - 2
if len(current_name) > length_available:
print(f"Warning: Truncating {current_name!r}")
current_name = current_name[:length_available]
string = f"{current_number}: {current_name:<{length_available}}"
if isinstance(self.value(), AdvancedSetting):
string = self._add_alarm_text(string)
self.lcd.upper(string)
def lower_display(self) -> None:
if self.extra_in:
return
current_item = self.value()
string = f"{current_item:<20}"
if len(string) > 20:
print(f"Warning: Truncating {string!r}")
string = string[:20]
if not isinstance(self.value(), AdvancedSetting):
string = self._add_alarm_text(string)
self.lcd.lower(string)
def display(self) -> None:
with self.lock:
self.lcd.clear()
self.lower_display()
self.upper_display()
if __name__ == "__main__":
from processor.settings import get_live_settings
with RotaryLCD(get_live_settings()) as rotary:
rotary.display()
while True:
time.sleep(1)
|
#! python3
import zipfile
newZip = zipfile.ZipFile('new.zip', 'w')
newZip.write('spam.txt', compress_type=zipfile.ZIP_DEFLATED)
newZip.close()
|
"""
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from copy import deepcopy
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
"""Base class for all scraped items."""
pass
class Field(dict):
"""Container of field metadata"""
class ItemMeta(ABCMeta):
def __new__(mcs, class_name, bases, attrs):
classcell = attrs.pop('__classcell__', None)
new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
_class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)
fields = getattr(_class, 'fields', {})
new_attrs = {}
for n in dir(_class):
v = getattr(_class, n)
if isinstance(v, Field):
fields[n] = v
elif n in attrs:
new_attrs[n] = attrs[n]
new_attrs['fields'] = fields
new_attrs['_class'] = _class
if classcell is not None:
new_attrs['__classcell__'] = classcell
return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
fields = {}
def __init__(self, *args, **kwargs):
self._values = {}
if args or kwargs: # avoid creating dict for most common case
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if key in self.fields:
self._values[key] = value
else:
raise KeyError("%s does not support field: %s" %
(self.__class__.__name__, key))
def __delitem__(self, key):
del self._values[key]
def __getattr__(self, name):
if name in self.fields:
raise AttributeError("Use item[%r] to get field value" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_'):
raise AttributeError("Use item[%r] = %r to set field value" %
(name, value))
super(DictItem, self).__setattr__(name, value)
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(self._values)
__hash__ = BaseItem.__hash__
def keys(self):
return self._values.keys()
def __repr__(self):
return pformat(dict(self))
def copy(self):
return self.__class__(self)
def deepcopy(self):
"""Return a `deep copy`_ of this item.
.. _deep copy: https://docs.python.org/library/copy.html#copy.deepcopy
"""
return deepcopy(self)
@six.add_metaclass(ItemMeta)
class Item(DictItem):
pass
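# Usage sketch (illustrative; follows the standard Scrapy item pattern):
#
#   class Product(Item):
#       name = Field()
#       price = Field(serializer=str)
#
#   p = Product(name='Widget', price=9.99)
#   p['name']        # 'Widget'
#   p['stock'] = 5   # KeyError: Product does not support field: stock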
|
__version__ = '0.2.7'
__author__ = 'chenjiandongx'
|
parallel = True
checkout_blocks_and_plots = False
|
import time
import json
import gzip
import boto3
import botocore.exceptions
import pandas as pd
import matplotlib.pyplot as plt
import util.notebook_utils
def wait_till_delete(callback, check_time = 5, timeout = None):
elapsed_time = 0
while timeout is None or elapsed_time < timeout:
try:
out = callback()
except botocore.exceptions.ClientError as e:
            # When given the resource-not-found exception, deletion has occurred
if e.response['Error']['Code'] == 'ResourceNotFoundException':
print('Successful delete')
return
else:
raise
time.sleep(check_time) # units of seconds
elapsed_time += check_time
raise TimeoutError( "Forecast resource deletion timed-out." )
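# Usage sketch (the client and ARN below are placeholders): poll until Amazon
# Forecast reports the dataset as deleted.
#
#   forecast = boto3.client('forecast')
#   forecast.delete_dataset(DatasetArn=dataset_arn)
#   wait_till_delete(lambda: forecast.describe_dataset(DatasetArn=dataset_arn))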
def wait(callback, time_interval = 10):
status_indicator = util.notebook_utils.StatusIndicator()
while True:
status = callback()['Status']
status_indicator.update(status)
if status in ('ACTIVE', 'CREATE_FAILED'): break
time.sleep(time_interval)
status_indicator.end()
return (status=="ACTIVE")
def load_exact_sol(fname, item_id, is_schema_perm=False):
exact = pd.read_csv(fname, header = None)
exact.columns = ['item_id', 'timestamp', 'target']
if is_schema_perm:
exact.columns = ['timestamp', 'target', 'item_id']
return exact.loc[exact['item_id'] == item_id]
def get_or_create_iam_role( role_name ):
iam = boto3.client("iam")
assume_role_policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "forecast.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
try:
create_role_response = iam.create_role(
RoleName = role_name,
AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)
)
role_arn = create_role_response["Role"]["Arn"]
print("Created", role_arn)
except iam.exceptions.EntityAlreadyExistsException:
print("The role " + role_name + " exists, ignore to create it")
role_arn = boto3.resource('iam').Role(role_name).arn
print("Attaching policies")
iam.attach_role_policy(
RoleName = role_name,
PolicyArn = "arn:aws:iam::aws:policy/AmazonForecastFullAccess"
)
iam.attach_role_policy(
RoleName=role_name,
PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess',
)
print("Waiting for a minute to allow IAM role policy attachment to propagate")
time.sleep(60)
print("Done.")
return role_arn
def delete_iam_role( role_name ):
iam = boto3.client("iam")
iam.detach_role_policy( PolicyArn = "arn:aws:iam::aws:policy/AmazonS3FullAccess", RoleName = role_name )
iam.detach_role_policy( PolicyArn = "arn:aws:iam::aws:policy/AmazonForecastFullAccess", RoleName = role_name )
iam.delete_role(RoleName=role_name)
def create_bucket(bucket_name, region=None):
"""Create an S3 bucket in a specified region
If a region is not specified, the bucket is created in the S3 default
region (us-east-1).
:param bucket_name: Bucket to create
:param region: String region to create bucket in, e.g., 'us-west-2'
:return: True if bucket created, else False
"""
# Create bucket
try:
if region is None:
s3_client = boto3.client('s3')
s3_client.create_bucket(Bucket=bucket_name)
else:
s3_client = boto3.client('s3', region_name=region)
location = {'LocationConstraint': region}
try:
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration=location)
except:
s3_client.create_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
print(e)
return False
return True
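# Usage sketch (bucket name and region are placeholders; S3 bucket names must
# be globally unique):
#
#   create_bucket('my-forecast-demo-bucket', region='us-west-2')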
def plot_forecasts(fcsts, exact, freq = '1H', forecastHorizon=24, time_back = 80):
p10 = pd.DataFrame(fcsts['Forecast']['Predictions']['p10'])
p50 = pd.DataFrame(fcsts['Forecast']['Predictions']['p50'])
p90 = pd.DataFrame(fcsts['Forecast']['Predictions']['p90'])
pred_int = p50['Timestamp'].apply(lambda x: pd.Timestamp(x))
fcst_start_date = pred_int.iloc[0]
fcst_end_date = pred_int.iloc[-1]
time_int = exact['timestamp'].apply(lambda x: pd.Timestamp(x))
plt.plot(time_int[-time_back:],exact['target'].values[-time_back:], color = 'r')
plt.plot(pred_int, p50['Value'].values, color = 'k')
plt.fill_between(pred_int,
p10['Value'].values,
p90['Value'].values,
color='b', alpha=0.3);
plt.axvline(x=pd.Timestamp(fcst_start_date), linewidth=3, color='g', ls='dashed')
plt.axvline(x=pd.Timestamp(fcst_end_date), linewidth=3, color='g', ls='dashed')
plt.xticks(rotation=30)
plt.legend(['Target', 'Forecast'], loc = 'lower left')
def extract_gz( src, dst ):
print( f"Extracting {src} to {dst}" )
with open(dst, 'wb') as fd_dst:
with gzip.GzipFile( src, 'rb') as fd_src:
data = fd_src.read()
fd_dst.write(data)
print("Done.")
|
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
import os
import yaml
from pkg_resources import resource_filename
PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_config',
'ansible_log_path',
'variant',
'variant_version',
'version',
]
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
pass
class OOConfigInvalidHostError(Exception):
""" Host in config is missing both ip and hostname. """
pass
class Host(object):
""" A system we will or have installed OpenShift on. """
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
self.preconfigured = kwargs.get('preconfigured', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
# Should this host run as an OpenShift node:
self.node = kwargs.get('node', False)
# Should this host run as an HAProxy:
self.master_lb = kwargs.get('master_lb', False)
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
raise OOConfigInvalidHostError("You must specify either an ip " \
"or hostname as 'connect_to'")
if self.master is False and self.node is False and self.master_lb is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
def __str__(self):
return self.connect_to
def __repr__(self):
return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
def is_etcd_member(self, all_hosts):
""" Will this host be a member of a standalone etcd cluster. """
if not self.master:
return False
masters = [host for host in all_hosts if host.master]
if len(masters) > 1:
return True
return False
def is_dedicated_node(self):
""" Will this host be a dedicated node. (not a master) """
return self.node and not self.master
def is_schedulable_node(self, all_hosts):
""" Will this host be a node marked as schedulable. """
if not self.node:
return False
if not self.master:
return True
masters = [host for host in all_hosts if host.master]
nodes = [host for host in all_hosts if host.node]
if len(masters) == len(nodes):
return True
return False
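# Usage sketch (values are placeholders): a host that acts as both a master and
# a node, as it would appear after parsing the installer config.
#
#   h = Host(connect_to='10.0.0.1', ip='10.0.0.1', hostname='master-0',
#            public_ip='203.0.113.10', public_hostname='master-0.example.com',
#            master=True, node=True)
#   h.to_dict()  # only truthy properties are exported to YAML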
class OOConfig(object):
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
os.environ['HOME'] + '/.config/') + '/openshift/')
default_file = '/installer.cfg.yml'
def __init__(self, config_path):
if config_path:
self.config_path = os.path.normpath(config_path)
else:
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
self.settings = {}
self._read_config()
self._set_defaults()
def _read_config(self):
self.hosts = []
try:
if os.path.exists(self.config_path):
cfgfile = open(self.config_path, 'r')
self.settings = yaml.safe_load(cfgfile.read())
cfgfile.close()
# Use the presence of a Description as an indicator this is
# a legacy config file:
if 'Description' in self.settings:
self._upgrade_legacy_config()
# Parse the hosts into DTO objects:
if 'hosts' in self.settings:
for host in self.settings['hosts']:
self.hosts.append(Host(**host))
                # Watch out for the variant_version coming in as a float:
if 'variant_version' in self.settings:
self.settings['variant_version'] = \
str(self.settings['variant_version'])
except IOError, ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
def _upgrade_legacy_config(self):
new_hosts = []
remove_settings = ['validated_facts', 'Description', 'Name',
'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
if 'validated_facts' in self.settings:
            for key, value in self.settings['validated_facts'].items():
value['connect_to'] = key
if 'masters' in self.settings and key in self.settings['masters']:
value['master'] = True
if 'nodes' in self.settings and key in self.settings['nodes']:
value['node'] = True
new_hosts.append(value)
self.settings['hosts'] = new_hosts
for s in remove_settings:
if s in self.settings:
del self.settings[s]
# A legacy config implies openshift-enterprise 3.0:
self.settings['variant'] = 'openshift-enterprise'
self.settings['variant_version'] = '3.0'
def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = \
self._default_ansible_inv_dir()
if not os.path.exists(self.settings['ansible_inventory_directory']):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
if 'version' not in self.settings:
self.settings['version'] = 'v1'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
self.settings['ansible_inventory_directory']
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''
self.settings['ansible_inventory_path'] = '{}/hosts'.format(self.settings['ansible_inventory_directory'])
# clean up any empty sets
        for setting in list(self.settings.keys()):
if not self.settings[setting]:
self.settings.pop(setting)
def _default_ansible_inv_dir(self):
return os.path.normpath(
os.path.dirname(self.config_path) + "/.ansible")
def calc_missing_facts(self):
"""
Determine which host facts are not defined in the config.
Returns a hash of host to a list of the missing facts.
"""
result = {}
for host in self.hosts:
missing_facts = []
if host.preconfigured:
required_facts = PRECONFIGURED_REQUIRED_FACTS
else:
required_facts = DEFAULT_REQUIRED_FACTS
for required_fact in required_facts:
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
out_file = open(self.config_path, 'w')
out_file.write(self.yaml())
out_file.close()
def persist_settings(self):
p_settings = {}
for setting in PERSIST_SETTINGS:
if setting in self.settings and self.settings[setting]:
p_settings[setting] = self.settings[setting]
p_settings['hosts'] = []
for host in self.hosts:
p_settings['hosts'].append(host.to_dict())
if self.settings['ansible_inventory_directory'] != \
self._default_ansible_inv_dir():
p_settings['ansible_inventory_directory'] = \
self.settings['ansible_inventory_directory']
return p_settings
def yaml(self):
return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
def __str__(self):
return self.yaml()
def get_host(self, name):
for host in self.hosts:
if host.connect_to == name:
return host
return None
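# A minimal usage sketch for the classes above (not part of the original
# module; the config path and host values are illustrative). It relies on the
# module-level imports and constants referenced earlier in this file
# (os, yaml, PERSIST_SETTINGS, DEFAULT_REQUIRED_FACTS, ...).
if __name__ == '__main__':
    cfg = OOConfig('/tmp/installer.cfg.yml')
    cfg.hosts.append(Host(connect_to='10.0.0.1', ip='10.0.0.1',
                          hostname='master1.example.com', master=True, node=True))
    print(cfg.calc_missing_facts())  # e.g. {'10.0.0.1': ['public_ip', 'public_hostname']}
    cfg.save_to_disk()               # writes OOConfig.yaml() to the config path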
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spvault.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='spvault.proto',
package='spvault',
syntax='proto3',
serialized_options=b'Z\007./proto\252\002\007SPVault',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\rspvault.proto\x12\x07spvault\"X\n\x0b\x41uthRequest\x12\x0f\n\x07siteUrl\x18\x01 \x01(\t\x12#\n\x08strategy\x18\x02 \x01(\x0e\x32\x11.spvault.Strategy\x12\x13\n\x0b\x63redentials\x18\x03 \x01(\t\"&\n\x10TokenAuthRequest\x12\x12\n\nvaultToken\x18\x01 \x01(\t\"Y\n\tAuthReply\x12\x11\n\tauthToken\x18\x01 \x01(\t\x12%\n\ttokenType\x18\x02 \x01(\x0e\x32\x12.spvault.TokenType\x12\x12\n\nexpiration\x18\x03 \x01(\x03\"K\n\nRegRequest\x12)\n\x0b\x61uthRequest\x18\x01 \x01(\x0b\x32\x14.spvault.AuthRequest\x12\x12\n\nvaultToken\x18\x02 \x01(\t\"\x1e\n\x08RegReply\x12\x12\n\nvaultToken\x18\x01 \x01(\t\"\"\n\x0c\x44\x65RegRequest\x12\x12\n\nvaultToken\x18\x01 \x01(\t\"\x07\n\x05\x45mpty*;\n\x08Strategy\x12\t\n\x05\x61\x64\x64in\x10\x00\x12\x08\n\x04\x61\x64\x66s\x10\x01\x12\x07\n\x03\x66\x62\x61\x10\x02\x12\x08\n\x04saml\x10\x03\x12\x07\n\x03tmg\x10\x04*/\n\tTokenType\x12\n\n\x06\x42\x65\x61rer\x10\x00\x12\n\n\x06\x43ookie\x10\x01\x12\n\n\x06\x43ustom\x10\x03\x32\x83\x02\n\x05Vault\x12\x43\n\x15\x41uthenticateWithCreds\x12\x14.spvault.AuthRequest\x1a\x12.spvault.AuthReply\"\x00\x12H\n\x15\x41uthenticateWithToken\x12\x19.spvault.TokenAuthRequest\x1a\x12.spvault.AuthReply\"\x00\x12\x34\n\x08Register\x12\x13.spvault.RegRequest\x1a\x11.spvault.RegReply\"\x00\x12\x35\n\nDeRegister\x12\x15.spvault.DeRegRequest\x1a\x0e.spvault.Empty\"\x00\x42\x13Z\x07./proto\xaa\x02\x07SPVaultb\x06proto3'
)
_STRATEGY = _descriptor.EnumDescriptor(
name='Strategy',
full_name='spvault.Strategy',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='addin', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='adfs', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='fba', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='saml', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='tmg', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=401,
serialized_end=460,
)
_sym_db.RegisterEnumDescriptor(_STRATEGY)
Strategy = enum_type_wrapper.EnumTypeWrapper(_STRATEGY)
_TOKENTYPE = _descriptor.EnumDescriptor(
name='TokenType',
full_name='spvault.TokenType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='Bearer', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='Cookie', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='Custom', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=462,
serialized_end=509,
)
_sym_db.RegisterEnumDescriptor(_TOKENTYPE)
TokenType = enum_type_wrapper.EnumTypeWrapper(_TOKENTYPE)
addin = 0
adfs = 1
fba = 2
saml = 3
tmg = 4
Bearer = 0
Cookie = 1
Custom = 3
_AUTHREQUEST = _descriptor.Descriptor(
name='AuthRequest',
full_name='spvault.AuthRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='siteUrl', full_name='spvault.AuthRequest.siteUrl', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='strategy', full_name='spvault.AuthRequest.strategy', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='credentials', full_name='spvault.AuthRequest.credentials', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=114,
)
_TOKENAUTHREQUEST = _descriptor.Descriptor(
name='TokenAuthRequest',
full_name='spvault.TokenAuthRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='vaultToken', full_name='spvault.TokenAuthRequest.vaultToken', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=154,
)
_AUTHREPLY = _descriptor.Descriptor(
name='AuthReply',
full_name='spvault.AuthReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='authToken', full_name='spvault.AuthReply.authToken', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tokenType', full_name='spvault.AuthReply.tokenType', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expiration', full_name='spvault.AuthReply.expiration', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=245,
)
_REGREQUEST = _descriptor.Descriptor(
name='RegRequest',
full_name='spvault.RegRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='authRequest', full_name='spvault.RegRequest.authRequest', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vaultToken', full_name='spvault.RegRequest.vaultToken', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=322,
)
_REGREPLY = _descriptor.Descriptor(
name='RegReply',
full_name='spvault.RegReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='vaultToken', full_name='spvault.RegReply.vaultToken', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=324,
serialized_end=354,
)
_DEREGREQUEST = _descriptor.Descriptor(
name='DeRegRequest',
full_name='spvault.DeRegRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='vaultToken', full_name='spvault.DeRegRequest.vaultToken', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=356,
serialized_end=390,
)
_EMPTY = _descriptor.Descriptor(
name='Empty',
full_name='spvault.Empty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=392,
serialized_end=399,
)
_AUTHREQUEST.fields_by_name['strategy'].enum_type = _STRATEGY
_AUTHREPLY.fields_by_name['tokenType'].enum_type = _TOKENTYPE
_REGREQUEST.fields_by_name['authRequest'].message_type = _AUTHREQUEST
DESCRIPTOR.message_types_by_name['AuthRequest'] = _AUTHREQUEST
DESCRIPTOR.message_types_by_name['TokenAuthRequest'] = _TOKENAUTHREQUEST
DESCRIPTOR.message_types_by_name['AuthReply'] = _AUTHREPLY
DESCRIPTOR.message_types_by_name['RegRequest'] = _REGREQUEST
DESCRIPTOR.message_types_by_name['RegReply'] = _REGREPLY
DESCRIPTOR.message_types_by_name['DeRegRequest'] = _DEREGREQUEST
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
DESCRIPTOR.enum_types_by_name['Strategy'] = _STRATEGY
DESCRIPTOR.enum_types_by_name['TokenType'] = _TOKENTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AuthRequest = _reflection.GeneratedProtocolMessageType('AuthRequest', (_message.Message,), {
'DESCRIPTOR' : _AUTHREQUEST,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.AuthRequest)
})
_sym_db.RegisterMessage(AuthRequest)
TokenAuthRequest = _reflection.GeneratedProtocolMessageType('TokenAuthRequest', (_message.Message,), {
'DESCRIPTOR' : _TOKENAUTHREQUEST,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.TokenAuthRequest)
})
_sym_db.RegisterMessage(TokenAuthRequest)
AuthReply = _reflection.GeneratedProtocolMessageType('AuthReply', (_message.Message,), {
'DESCRIPTOR' : _AUTHREPLY,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.AuthReply)
})
_sym_db.RegisterMessage(AuthReply)
RegRequest = _reflection.GeneratedProtocolMessageType('RegRequest', (_message.Message,), {
'DESCRIPTOR' : _REGREQUEST,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.RegRequest)
})
_sym_db.RegisterMessage(RegRequest)
RegReply = _reflection.GeneratedProtocolMessageType('RegReply', (_message.Message,), {
'DESCRIPTOR' : _REGREPLY,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.RegReply)
})
_sym_db.RegisterMessage(RegReply)
DeRegRequest = _reflection.GeneratedProtocolMessageType('DeRegRequest', (_message.Message,), {
'DESCRIPTOR' : _DEREGREQUEST,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.DeRegRequest)
})
_sym_db.RegisterMessage(DeRegRequest)
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), {
'DESCRIPTOR' : _EMPTY,
'__module__' : 'spvault_pb2'
# @@protoc_insertion_point(class_scope:spvault.Empty)
})
_sym_db.RegisterMessage(Empty)
DESCRIPTOR._options = None
_VAULT = _descriptor.ServiceDescriptor(
name='Vault',
full_name='spvault.Vault',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=512,
serialized_end=771,
methods=[
_descriptor.MethodDescriptor(
name='AuthenticateWithCreds',
full_name='spvault.Vault.AuthenticateWithCreds',
index=0,
containing_service=None,
input_type=_AUTHREQUEST,
output_type=_AUTHREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AuthenticateWithToken',
full_name='spvault.Vault.AuthenticateWithToken',
index=1,
containing_service=None,
input_type=_TOKENAUTHREQUEST,
output_type=_AUTHREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Register',
full_name='spvault.Vault.Register',
index=2,
containing_service=None,
input_type=_REGREQUEST,
output_type=_REGREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeRegister',
full_name='spvault.Vault.DeRegister',
index=3,
containing_service=None,
input_type=_DEREGREQUEST,
output_type=_EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_VAULT)
DESCRIPTOR.services_by_name['Vault'] = _VAULT
# @@protoc_insertion_point(module_scope)
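# Illustrative only (this file is generated, so the sketch is kept in comments
# and the values below are made up): the generated message classes accept
# their proto fields as keyword arguments and round-trip through the standard
# protobuf wire format.
#
#   req = AuthRequest(siteUrl="https://sp.example.com", strategy=adfs,
#                     credentials='{"user": "u", "pass": "p"}')
#   data = req.SerializeToString()
#   assert AuthRequest.FromString(data) == req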
|
from sklearn_explain.tests.skl_datasets_reg import skl_datasets_test as skltest
skltest.test_reg_dataset_and_model("freidman3" , "LGBMRegressor_15")
|
def LeftMax(array, i):
    # Tallest bar at or to the left of index i.
    left = array[i]
    for j in range(i):
        if left < array[j]:
            left = array[j]
    return left
def RightMax(array, i):
    # Tallest bar at or to the right of index i.
    right = array[i]
    for j in range(i + 1, len(array)):
        if right < array[j]:
            right = array[j]
    return right
def TrappingWater(array):
    # Water held above bar i is bounded by the shorter of the tallest bars
    # on its left and right, minus the bar's own height.
    totalwater = 0
    for i in range(1, len(array) - 1):
        leftMax = LeftMax(array, i)
        rightMax = RightMax(array, i)
        totalwater += min(leftMax, rightMax) - array[i]
    return totalwater
array = [2, 0, 2]
print(TrappingWater(array))
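# Note: LeftMax/RightMax above rescan the array for every index, so
# TrappingWater runs in O(n^2). A common O(n) variant of the same idea,
# sketched here (not part of the original), precomputes the running maxima
# from both ends once:
def TrappingWaterLinear(array):
    n = len(array)
    if n < 3:
        return 0
    left_max = [0] * n
    right_max = [0] * n
    left_max[0] = array[0]
    for i in range(1, n):
        left_max[i] = max(left_max[i - 1], array[i])
    right_max[n - 1] = array[n - 1]
    for i in range(n - 2, -1, -1):
        right_max[i] = max(right_max[i + 1], array[i])
    return sum(min(left_max[i], right_max[i]) - array[i] for i in range(1, n - 1))
print(TrappingWaterLinear([2, 0, 2]))        # 2, matches TrappingWater above
print(TrappingWaterLinear([3, 0, 2, 0, 4]))  # 7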
|
# ---------------------------------------------------------------------
# Cisco.SANOS.get_chassis_id
# ---------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetchassisid import IGetChassisID
from noc.core.mac import MAC
class Script(BaseScript):
name = "Cisco.SANOS.get_chassis_id"
cache = True
interface = IGetChassisID
rx_mac = re.compile(
r"\s+MAC\s+Addresses\s+:\s+(?P<base>\S+)\n" r"\s+Number\s+of\s+MACs\s+:\s+(?P<count>\d+)\n",
re.IGNORECASE | re.MULTILINE | re.DOTALL,
)
def execute(self):
try:
v = self.cli("show sprom sup | include MAC")
except self.CLISyntaxError:
raise self.NotSupportedError()
r = []
for match in self.rx_mac.finditer(v):
base = match.group("base")
count = int(match.group("count"))
if count == 0:
continue
r += [{"first_chassis_mac": base, "last_chassis_mac": MAC(base).shift(count - 1)}]
return r
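# A quick, self-contained illustration of what rx_mac expects from
# "show sprom sup | include MAC" (the sample text below is hypothetical,
# not captured from a real device):
if __name__ == "__main__":
    sample = (
        "  MAC Addresses      : 00-1b-2c-3d-4e-50\n"
        "  Number of MACs     : 16\n"
    )
    m = Script.rx_mac.search(sample)
    print(m.group("base"), m.group("count"))  # 00-1b-2c-3d-4e-50 16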
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import time
from datetime import datetime
from dateutil import tz, zoneinfo
from mock import mock
from jsonschema.exceptions import ValidationError
from c7n.filters import FilterValidationError
from c7n.resources import ec2
from c7n.resources.ec2 import actions, QueryFilter
from c7n import tags, utils
from .common import BaseTest
class TestTagAugmentation(BaseTest):
def test_tag_augment_empty(self):
session_factory = self.replay_flight_data(
'test_ec2_augment_tag_empty')
# recording was modified to be sans tags
ec2 = session_factory().client('ec2')
policy = self.load_policy({
'name': 'ec2-tags',
'resource': 'ec2'},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 0)
def test_tag_augment(self):
session_factory = self.replay_flight_data(
'test_ec2_augment_tags')
# recording was modified to be sans tags
ec2 = session_factory().client('ec2')
policy = self.load_policy({
'name': 'ec2-tags',
'resource': 'ec2',
'filters': [
{'tag:Env': 'Production'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestMetricFilter(BaseTest):
def test_metric_filter(self):
session_factory = self.replay_flight_data(
'test_ec2_metric')
ec2 = session_factory().client('ec2')
policy = self.load_policy({
'name': 'ec2-utilization',
'resource': 'ec2',
'filters': [
{'type': 'metrics',
'name': 'CPUUtilization',
'days': 3,
'value': 1.5}
]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestDisableApiTermination(BaseTest):
def test_term_prot_enabled(self):
session_factory = self.replay_flight_data(
'test_ec2_termination-protected_filter')
policy = self.load_policy({
'name': 'ec2-termination-enabled',
'resource': 'ec2',
'filters': [
{'type': 'termination-protected'}
]},
session_factory=session_factory
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['InstanceId'], 'i-092f500eaad726b71')
def test_term_prot_not_enabled(self):
session_factory = self.replay_flight_data(
'test_ec2_termination-protected_filter')
policy = self.load_policy({
'name': 'ec2-termination-NOT-enabled',
'resource': 'ec2',
'filters': [
{'not': [
{'type': 'termination-protected'}
]}
]},
session_factory=session_factory
)
resources = policy.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
sorted([x['InstanceId'] for x in resources]),
['i-02117c13e1d21b229', 'i-0718418de3bb4ae2a']
)
def test_policy_permissions(self):
session_factory = self.replay_flight_data(
'test_ec2_termination-protected_filter')
policy = self.load_policy({
'name': 'ec2-termination-enabled',
'resource': 'ec2',
'filters': [
{'type': 'termination-protected'}
]},
session_factory=session_factory
)
perms = policy.get_permissions()
self.assertEqual(
perms,
set(('ec2:DescribeInstances',
'ec2:DescribeTags',
'ec2:DescribeInstanceAttribute'))
)
class TestHealthEventsFilter(BaseTest):
def test_ec2_health_events_filter(self):
session_factory = self.replay_flight_data(
'test_ec2_health_events_filter')
policy = self.load_policy({
'name': 'ec2-health-events-filter',
'resource': 'ec2',
'filters': [
{'type': 'health-event'}
]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestTagTrim(BaseTest):
def test_ec2_tag_trim(self):
self.patch(tags.TagTrim, 'max_tag_count', 10)
session_factory = self.replay_flight_data(
'test_ec2_tag_trim')
ec2 = session_factory().client('ec2')
start_tags = {
t['Key']: t['Value'] for t in
ec2.describe_tags(
Filters=[{'Name': 'resource-id',
'Values': ['i-fdb01920']}])['Tags']}
policy = self.load_policy({
'name': 'ec2-tag-trim',
'resource': 'ec2',
'filters': [
{'type': 'tag-count', 'count': 10}],
'actions': [
{'type': 'tag-trim',
'space': 1,
'preserve': [
'Name',
'Env',
'Account',
'Platform',
'Classification',
'Planet'
]}
]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
end_tags = {
t['Key']: t['Value'] for t in
ec2.describe_tags(
Filters=[{'Name': 'resource-id',
'Values': ['i-fdb01920']}])['Tags']}
self.assertEqual(len(start_tags)-1, len(end_tags))
self.assertTrue('Containers' in start_tags)
self.assertFalse('Containers' in end_tags)
class TestVolumeFilter(BaseTest):
def test_ec2_attached_ebs_filter(self):
session_factory = self.replay_flight_data(
'test_ec2_attached_ebs_filter')
policy = self.load_policy({
'name': 'ec2-unencrypted-vol',
'resource': 'ec2',
'filters': [
{'type': 'ebs',
'key': 'Encrypted',
'value': False}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
# DISABLED / Re-record flight data on public account
def test_ec2_attached_volume_skip_block(self):
session_factory = self.replay_flight_data(
'test_ec2_attached_ebs_filter')
policy = self.load_policy({
'name': 'ec2-unencrypted-vol',
'resource': 'ec2',
'filters': [
{'type': 'ebs',
'skip-devices': ['/dev/sda1', '/dev/xvda', '/dev/sdb1'],
'key': 'Encrypted',
'value': False}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 0)
class TestResizeInstance(BaseTest):
def test_ec2_resize(self):
# preconditions - three instances (2 m4.4xlarge, 1 m4.1xlarge)
# one of the instances stopped
session_factory = self.replay_flight_data('test_ec2_resize')
policy = self.load_policy({
'name': 'ec2-resize',
'resource': 'ec2',
'filters': [
{'type': 'value',
'key': 'State.Name',
'value': ['running', 'stopped'],
'op': 'in'},
{'type': 'value',
'key': 'InstanceType',
'value': ['m4.2xlarge', 'm4.4xlarge'],
'op': 'in'},
],
'actions': [
{'type': 'resize',
'restart': True,
'default': 'm4.large',
'type-map': {
'm4.4xlarge': 'm4.2xlarge'}}]
}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 3)
stopped, running = [], []
for i in resources:
if i['State']['Name'] == 'running':
running.append(i['InstanceId'])
if i['State']['Name'] == 'stopped':
stopped.append(i['InstanceId'])
instances = utils.query_instances(
session_factory(),
InstanceIds=[r['InstanceId'] for r in resources])
cur_stopped, cur_running = [], []
for i in instances:
if i['State']['Name'] == 'running':
cur_running.append(i['InstanceId'])
if i['State']['Name'] == 'stopped':
cur_stopped.append(i['InstanceId'])
cur_running.sort()
running.sort()
self.assertEqual(cur_stopped, stopped)
self.assertEqual(cur_running, running)
instance_types = [i['InstanceType'] for i in instances]
instance_types.sort()
self.assertEqual(
instance_types,
list(sorted(['m4.large', 'm4.2xlarge', 'm4.2xlarge'])))
class TestStateTransitionAgeFilter(BaseTest):
def test_ec2_state_transition_age(self):
session_factory = self.replay_flight_data(
'test_ec2_state_transition_age_filter'
)
policy = self.load_policy({
'name': 'ec2-state-transition-age',
'resource': 'ec2',
'filters': [
{'State.Name': 'running'},
{'type': 'state-age',
'days': 30}]},
session_factory=session_factory)
resources = policy.run()
#compare stateTransition reason to expected
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['StateTransitionReason'], 'User initiated (2015-11-25 10:11:55 GMT)')
def test_date_parsing(self):
instance = ec2.StateTransitionAge(None)
# Missing key
self.assertIsNone(instance.get_resource_date({}))
# Bad date format
self.assertRaises(
ValueError,
instance.get_resource_date,
{'StateTransitionReason': "User initiated (201-02-06 17:77:00 GMT)"}
)
# Won't match regex
self.assertIsNone(
instance.get_resource_date({
'StateTransitionReason': "Server.InternalError"
}))
# Test for success
self.assertEqual(
instance.get_resource_date({
'StateTransitionReason': "User initiated (2017-02-06 17:57:00 GMT)"
}),
datetime(2017, 2, 6, 17, 57, tzinfo=tz.tzutc())
)
class TestImageAgeFilter(BaseTest):
def test_ec2_image_age(self):
session_factory = self.replay_flight_data(
'test_ec2_image_age_filter')
policy = self.load_policy({
'name': 'ec2-image-age',
'resource': 'ec2',
'filters': [
{'State.Name': 'running'},
{'type': 'image-age',
'days': 30}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestImageFilter(BaseTest):
def test_ec2_image(self):
session_factory = self.replay_flight_data(
'test_ec2_image_filter')
policy = self.load_policy({
'name': 'ec2-image',
'resource': 'ec2',
'filters': [
{'type': 'image', 'key': 'Public', 'value': True}
]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['InstanceId'], 'i-039628786cabe8c16')
class TestInstanceAge(BaseTest):
# placebo doesn't record tz information
def test_ec2_instance_age(self):
session_factory = self.replay_flight_data(
'test_ec2_instance_age_filter')
policy = self.load_policy({
'name': 'ec2-instance-age',
'resource': 'ec2',
'filters': [
{'State.Name': 'running'},
{'type': 'instance-age',
'days': 0}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestTag(BaseTest):
def test_ec2_tag(self):
session_factory = self.replay_flight_data(
'test_ec2_mark')
policy = self.load_policy({
'name': 'ec2-test-mark',
'resource': 'ec2',
'filters': [
{'State.Name': 'running'}],
'actions': [
{'type': 'tag',
'key': 'Testing',
'value': 'Testing123'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_ec2_tag_errors(self):
# Specifying both 'key' and 'tag' is an error
policy = {
'name': 'ec2-tag-error',
'resource': 'ec2',
'actions': [{
'type': 'tag',
'key': 'Testing',
'tag': 'foo',
'value': 'TestingError'
}]
}
self.assertRaises(FilterValidationError, self.load_policy, policy)
# Invalid op for 'mark-for-op' action
policy = {
'name': 'ec2-tag-error',
'resource': 'ec2',
'actions': [{
'type': 'mark-for-op',
'op': 'fake',
}]
}
self.assertRaises(FilterValidationError, self.load_policy, policy)
def test_ec2_untag(self):
session_factory = self.replay_flight_data(
'test_ec2_untag')
policy = self.load_policy({
'name': 'ec2-test-unmark',
'resource': 'ec2',
'filters': [
{'tag:Testing': 'not-null'}],
'actions': [
{'type': 'remove-tag',
'tags': ['Testing']}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_ec2_untag_array(self):
session_factory = self.replay_flight_data(
'test_ec2_untag_array')
policy = self.load_policy({
'name': 'ec2-test-unmark-array',
'resource': 'ec2',
'filters': [
{'tag:Testing': 'not-null'}],
'actions': [
{'type': 'remove-tag',
'tags': ['Testing', 'TestingTwo', 'TestingThree']}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_ec2_normalize_tag(self):
session_factory = self.replay_flight_data(
'test_ec2_normalize_tag')
policy = self.load_policy({
'name': 'ec2-test-normalize-tag-lower',
'resource': 'ec2',
'filters': [
{'tag:Testing-lower': 'not-null'}],
'actions': [
{'type': 'normalize-tag',
'key': 'Testing-lower',
'action': 'lower'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
policy = self.load_policy({
'name': 'ec2-test-normalize-tag-upper',
'resource': 'ec2',
'filters': [
{'tag:Testing-upper': 'not-null'}],
'actions': [
{'type': 'normalize-tag',
'key': 'Testing-upper',
'action': 'upper'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
policy = self.load_policy({
'name': 'ec2-test-normalize-tag-title',
'resource': 'ec2',
'filters': [
{'tag:Testing-title': 'not-null'}],
'actions': [
{'type': 'normalize-tag',
'key': 'Testing-title',
'action': 'title'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
policy = self.load_policy({
'name': 'ec2-test-normalize-tag-strip',
'resource': 'ec2',
'filters': [
{'tag:Testing-strip': 'not-null'}],
'actions': [
{'type': 'normalize-tag',
'key': 'Testing-strip',
'action': 'strip',
'value': 'blah'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_ec2_rename_tag(self):
session_factory = self.replay_flight_data(
'test_ec2_rename_tag')
policy = self.load_policy({
'name': 'ec2-rename-start',
'resource': 'ec2',
'filters': [
{'tag:Testing': 'present'}
]}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 3)
policy = self.load_policy({
'name': 'ec2-rename-tag',
'resource': 'ec2',
'actions': [{
'type': 'rename-tag',
'old_key': 'Testing',
'new_key': 'Testing1'}]}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 3)
policy = self.load_policy({
'name': 'ec2-rename-end',
'resource': 'ec2',
'filters': [
{'tag:Testing1': 'present'}
]}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 3)
def test_ec2_mark_zero(self):
localtz = zoneinfo.gettz('America/New_York')
dt = datetime.now(localtz)
dt = dt.replace(year=2017, month=11, day=24, hour=7, minute=00)
session_factory = self.replay_flight_data('test_ec2_mark_zero')
session = session_factory(region='us-east-1')
ec2 = session.client('ec2')
resource = ec2.describe_instances(
InstanceIds=['i-04d3e0630bd342566'])[
'Reservations'][0]['Instances'][0]
tags = [
t['Value'] for t in resource['Tags'] if t['Key'] == 'maid_status']
self.assertEqual(len(tags), 0)
policy = self.load_policy({
'name': 'ec2-mark-zero-days',
'resource': 'ec2',
'filters': [{'tag:CreatorName': 'joshuaroot'}],
'actions': [{
'type': 'mark-for-op',
'days': 0,
'op': 'terminate'}]
}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['InstanceId'], 'i-04d3e0630bd342566')
resource = ec2.describe_instances(
InstanceIds=['i-04d3e0630bd342566'])[
'Reservations'][0]['Instances'][0]
tags = [
t['Value'] for t in resource['Tags'] if t['Key'] == 'maid_status']
result = datetime.strptime(
tags[0].strip().split('@', 1)[-1], '%Y/%m/%d').replace(
tzinfo=localtz)
self.assertEqual(result.date(), dt.date())
class TestStop(BaseTest):
def test_ec2_stop(self):
session_factory = self.replay_flight_data(
'test_ec2_stop')
policy = self.load_policy({
'name': 'ec2-test-stop',
'resource': 'ec2',
'filters': [
{'tag:Testing': 'not-null'}],
'actions': [
{'type': 'stop'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestReboot(BaseTest):
def test_ec2_reboot(self):
session_factory = self.replay_flight_data(
'test_ec2_reboot')
policy = self.load_policy({
'name': 'ec2-test-reboot',
'resource': 'ec2',
'filters': [
{'tag:Testing': 'not-null'}],
'actions': [
{'type': 'reboot'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 2)
running = []
for i in resources:
if i['State']['Name'] == 'running':
running.append(i['InstanceId'])
if self.recording:
time.sleep(25)
instances = utils.query_instances(
session_factory(),
InstanceIds=[r['InstanceId'] for r in resources])
cur_running = []
for i in instances:
if i['State']['Name'] == 'running':
cur_running.append(i['InstanceId'])
cur_running.sort()
running.sort()
self.assertEqual(cur_running, running)
class TestStart(BaseTest):
def test_ec2_start(self):
session_factory = self.replay_flight_data(
'test_ec2_start')
policy = self.load_policy({
'name': 'ec2-test-start',
'resource': 'ec2',
'filters': [],
'actions': [
{'type': 'start'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 2)
def test_ec2_start_fails(self):
session_factory = self.replay_flight_data(
'test_ec2_start')
policy = self.load_policy({
'name': 'ec2-test-start',
'resource': 'ec2',
'filters': [],
'actions': [
{'type': 'start'}]},
session_factory=session_factory)
output = self.capture_logging('custodian.actions', level=logging.DEBUG)
with mock.patch.object(ec2.Start, 'process_instance_set', return_value=True):
try:
resources = policy.run()
except RuntimeError as e:
pass
else:
self.fail("should have raised error")
log_output = output.getvalue()
self.assertIn('Could not start 1 of 1 instances', log_output)
self.assertIn("t2.micro us-west-2c", log_output)
self.assertIn("i-08270b9cfb568a1c4", log_output)
class TestOr(BaseTest):
def test_ec2_or_condition(self):
session_factory = self.replay_flight_data(
'test_ec2_stop')
policy = self.load_policy({
'name': 'ec2-test-snapshot',
'resource': 'ec2',
'filters': [
{"or": [
{"tag:Name": "CompileLambda"},
{"tag:Name": "Spinnaker"}]}]
}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
sorted([r['InstanceId'] for r in resources]),
[u'i-13413bd7', u'i-1aebf7c0'])
class TestSnapshot(BaseTest):
def test_ec2_snapshot_no_copy_tags(self):
session_factory = self.replay_flight_data(
'test_ec2_snapshot')
policy = self.load_policy({
'name': 'ec2-test-snapshot',
'resource': 'ec2',
'filters': [
{'tag:Name': 'CompileLambda'}],
'actions': [
{'type': 'snapshot'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_ec2_snapshot_copy_tags(self):
session_factory = self.replay_flight_data(
'test_ec2_snapshot')
policy = self.load_policy({
'name': 'ec2-test-snapshot',
'resource': 'ec2',
'filters': [
{'tag:Name': 'CompileLambda'}],
'actions': [
                {'type': 'snapshot', 'copy-tags': ['ASV', 'Testing123']}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
class TestSetInstanceProfile(BaseTest):
def test_ec2_set_instance_profile_assocation(self):
session_factory = self.replay_flight_data(
'test_ec2_set_instance_profile_association')
policy = self.load_policy({
'name': 'ec2-test-set-instance-profile-association',
'resource': 'ec2',
'filters': [
{'tag:Name': 'MissingInstanceProfile'},
{'IamInstanceProfile': 'absent'}],
'actions': [
{'type': 'set-instance-profile',
'name': 'ec2-default'}]},
session_factory=session_factory)
resources = policy.run()
self.assertGreaterEqual(len(resources), 1)
ec2 = session_factory().client('ec2')
resources = ec2.describe_instances(
InstanceIds=[r['InstanceId'] for r in resources]
)
for r in resources['Reservations']:
for i in r['Instances']:
self.assertIn('IamInstanceProfile', i)
self.assertIn('Arn', i['IamInstanceProfile'])
self.assertIn(':instance-profile/ec2-default', i['IamInstanceProfile']['Arn'])
def test_ec2_set_instance_profile_disassocation(self):
session_factory = self.replay_flight_data(
'test_ec2_set_instance_profile_disassociation')
policy = self.load_policy({
'name': 'ec2-test-set-instance-profile-disassociation',
'resource': 'ec2',
'filters': [
{'tag:Name': 'MissingInstanceProfile'},
{'type': 'value',
'key': 'IamInstanceProfile.Arn',
'op': 'regex',
'value': '.*/ec2-default'}],
'actions': [
{'type': 'set-instance-profile'}]},
session_factory=session_factory)
resources = policy.run()
self.assertGreaterEqual(len(resources), 1)
ec2 = session_factory().client('ec2')
associations = ec2.describe_iam_instance_profile_associations(
Filters=[
{
'Name': 'instance-id',
'Values': [r['InstanceId'] for r in resources]
}
]
)
for a in associations['IamInstanceProfileAssociations']:
self.assertIn(a['State'], ('disassociating', 'disassociated'))
class TestEC2QueryFilter(unittest.TestCase):
def test_parse(self):
self.assertEqual(QueryFilter.parse([]), [])
x = QueryFilter.parse(
[{'instance-state-name': 'running'}])
self.assertEqual(
x[0].query(),
{'Name': 'instance-state-name', 'Values': ['running']})
self.assertTrue(
isinstance(
QueryFilter.parse(
[{'tag:ASV': 'REALTIMEMSG'}])[0],
QueryFilter))
self.assertRaises(
ValueError,
QueryFilter.parse,
[{'tag:ASV': None}])
class TestTerminate(BaseTest):
def test_ec2_terminate(self):
# Test conditions: single running instance, with delete protection
session_factory = self.replay_flight_data('test_ec2_terminate')
p = self.load_policy({
'name': 'ec2-term',
'resource': 'ec2',
'filters': [{'InstanceId': 'i-017cf4e2a33b853fe'}],
'actions': [
{'type': 'terminate',
'force': True}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
instances = utils.query_instances(
session_factory(), InstanceIds=['i-017cf4e2a33b853fe'])
self.assertEqual(instances[0]['State']['Name'], 'shutting-down')
class TestDefaultVpc(BaseTest):
def test_ec2_default_vpc(self):
session_factory = self.replay_flight_data('test_ec2_default_vpc')
p = self.load_policy(
{'name': 'ec2-default-filters',
'resource': 'ec2',
'filters': [
{'type': 'default-vpc'}]},
config={'region': 'us-west-2'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['InstanceId'], 'i-0bfe468063b02d018')
class TestSingletonFilter(BaseTest):
def test_ec2_singleton_filter(self):
session_factory = self.replay_flight_data('test_ec2_singleton')
p = self.load_policy(
{'name': 'ec2-singleton-filters',
'resource': 'ec2',
'filters': [
{'tag:Name': 'Singleton'},
{'type': 'singleton'}]},
config={'region': 'us-west-1'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['InstanceId'], 'i-00fe7967fb7167c62')
class TestActions(unittest.TestCase):
def test_action_construction(self):
self.assertIsInstance(
actions.factory('mark', None),
tags.Tag)
self.assertIsInstance(
actions.factory('stop', None),
ec2.Stop)
self.assertIsInstance(
actions.factory('terminate', None),
ec2.Terminate)
class TestModifySecurityGroupsActionSchema(BaseTest):
def test_remove_dependencies(self):
policy = {
'name': 'remove-with-no-isolation-or-add',
'resource': 'ec2',
'actions': [
{'type': 'modify-security-groups', 'remove': 'matched'}
]
}
self.assertRaises(
ValidationError, self.load_policy, data=policy, validate=True)
def test_invalid_remove_params(self):
# string invalid
policy = {
'name': 'remove-with-incorrect-param-string',
'resource': 'ec2',
'actions': [
{'type': 'modify-security-groups', 'remove': 'none'}
]
}
self.assertRaises(
ValidationError, self.load_policy, data=policy, validate=True)
# list - one valid, one invalid
policy = {
'name': 'remove-with-incorrect-param-list',
'resource': 'ec2',
'actions': [
{'type': 'modify-security-groups', 'remove': [
'invalid-sg', 'sg-abcd1234']}
]
}
self.assertRaises(
ValidationError, self.load_policy, policy, validate=True)
def test_invalid_add_params(self):
# string invalid
policy = {
'name': 'add-with-incorrect-param-string',
'resource': 'ec2',
'actions': [
{'type': 'modify-security-groups', 'add': 'none'},
{'type': 'modify-security-groups', 'add': [
'invalid-sg', 'sg-abcd1234']}
]
}
self.assertRaises(
ValidationError, self.load_policy, data=policy, validate=True)
def test_invalid_isolation_group_params(self):
policy = {
'name': 'isolation-group-with-incorrect-param-string',
'resource': 'ec2',
'actions': [
{'type': 'modify-security-groups', 'isolation-group': 'none'}
]
}
self.assertRaises(
ValidationError, self.load_policy, data=policy, validate=True)
# list - one valid, one invalid
policy = {
'name': 'isolation-group-with-incorrect-param-list',
'resource': 'ec2',
'actions': [
{'type': 'modify-security-groups',
'isolation-group': ['invalid-sg', 'sg-abcd1234']}
]
}
self.assertRaises(
ValidationError, self.load_policy, data=policy, validate=True)
class TestModifySecurityGroupAction(BaseTest):
def test_security_group_type(self):
# Test conditions:
# - running two instances; one with TestProductionInstanceProfile
# and one with none
# - security group named TEST-PROD-ONLY-SG exists in VPC and is
# attached to both test instances
session_factory = self.replay_flight_data(
'test_ec2_security_group_filter')
# Catch on anything that uses the *PROD-ONLY* security groups but isn't in a prod role
policy = self.load_policy({
'name': 'restrict-sensitive-sg',
'resource': 'ec2',
'filters': [
{'or': [
{'and': [
{'type': 'value', 'key': 'IamInstanceProfile.Arn',
'value': '(?!.*TestProductionInstanceProfile)(.*)',
'op': 'regex'},
{'type': 'value', 'key': 'IamInstanceProfile.Arn',
'value': 'not-null'}
]},
{'type': 'value', 'key': 'IamInstanceProfile',
'value': 'absent'}
]},
{'type': 'security-group', 'key': 'GroupName',
'value': '(.*PROD-ONLY.*)', 'op': 'regex'},
]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['InstanceId'], 'i-0dd3919bc5bac1ea8')
def test_security_group_modify_groups_action(self):
# Test conditions:
# - running two instances; one with TestProductionInstanceProfile
# and one with none
# - security group named TEST-PROD-ONLY-SG exists in VPC and is
# attached to both test instances
session_factory = self.replay_flight_data(
'test_ec2_modify_groups_action')
client = session_factory().client('ec2')
default_sg_id = client.describe_security_groups(
GroupNames=[
'default',
]
)['SecurityGroups'][0]['GroupId']
# Catch on anything that uses the *PROD-ONLY* security groups but isn't in a prod role
policy = self.load_policy({
'name': 'remove-sensitive-sg',
'resource': 'ec2',
'filters': [
{'or': [
{'and': [
{'type': 'value', 'key': 'IamInstanceProfile.Arn',
'value': '(?!.*TestProductionInstanceProfile)(.*)',
'op': 'regex'},
{'type': 'value', 'key': 'IamInstanceProfile.Arn',
'value': 'not-null'}
]},
{'type': 'value', 'key': 'IamInstanceProfile',
'value': 'absent'}
]},
{'type': 'security-group', 'key': 'GroupName',
'value': '(.*PROD-ONLY.*)', 'op': 'regex'}],
'actions': [
{'type': 'modify-security-groups', 'remove': 'matched',
'isolation-group': default_sg_id}]
},
session_factory=session_factory)
before_action_resources = policy.run()
after_action_resources = policy.run()
self.assertEqual(len(before_action_resources), 1)
self.assertEqual(
before_action_resources[0]['InstanceId'], 'i-0dd3919bc5bac1ea8')
self.assertEqual(len(after_action_resources), 0)
def test_invalid_modify_groups_schema(self):
policy = {
'name': 'invalid-modify-security-groups-action',
'resource': 'ec2',
'filters': [],
'actions': [
{'type': 'modify-security-groups', 'change': 'matched'}
]
}
self.assertRaises(
ValidationError, self.load_policy, policy, validate=True)
def test_ec2_add_security_groups(self):
# Test conditions:
# - running one instance with TestProductionInstanceProfile
# - security group named TEST-PROD-ONLY-SG exists in VPC and
# is attached to test instance
# - security group with id sg-8a4b64f7 exists in VPC and is selected
# in a policy to be attached
session_factory = self.replay_flight_data(
'test_ec2_add_security_groups')
policy = self.load_policy({
'name': 'add-sg-to-prod-instances',
'resource': 'ec2',
'filters': [
{'type': 'value', 'key': 'IamInstanceProfile.Arn',
'value': '(.*TestProductionInstanceProfile)', 'op': 'regex'}
],
'actions': [
{'type': 'modify-security-groups', 'add': 'sg-8a4b64f7'}
]
},
session_factory=session_factory)
first_resources = policy.run()
self.assertEqual(len(
first_resources[0]['NetworkInterfaces'][0]['Groups']), 1)
second_resources = policy.run()
self.assertEqual(len(
second_resources[0]['NetworkInterfaces'][0]['Groups']), 2)
class TestAutoRecoverAlarmAction(BaseTest):
def test_autorecover_alarm(self):
session_factory = self.replay_flight_data('test_ec2_autorecover_alarm')
p = self.load_policy(
{'name': 'ec2-autorecover-alarm',
'resource': 'ec2',
'filters': [
{'tag:c7n-test': 'autorecover-alarm'}],
'actions': [
{'type': 'autorecover-alarm'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0]['InstanceId'], 'i-0aaaaec4b77188b69')
try:
client = session_factory().client('cloudwatch')
result = client.describe_alarms(
AlarmNames=['recover-{}'.format(resources[0]['InstanceId'])])
self.assertTrue(result.get('MetricAlarms'))
except AssertionError:
self.fail('alarm not found')
class TestFilter(BaseTest):
def test_not_filter(self):
# This test is to get coverage for the `not` filter's process_set method
session_factory = self.replay_flight_data(
'test_ec2_not_filter')
policy = self.load_policy({
'name': 'list-ec2-test-not',
'resource': 'ec2',
'filters': [{
'not': [
{'InstanceId': 'i-036ee05e8c2ca83b3'}
]
}]
},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 2)
policy = self.load_policy({
'name': 'list-ec2-test-not',
'resource': 'ec2',
'filters': [{
'not': [{
'or': [
{'InstanceId': 'i-036ee05e8c2ca83b3'},
{'InstanceId': 'i-03d8207d8285cbf53'}
]
}]
}]
},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
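# The policies exercised by these tests can also be run standalone with the
# c7n CLI. A rough sketch of the YAML equivalent of TestStop.test_ec2_stop
# (file name and output directory are illustrative, not part of this suite):
#
#   policies:
#     - name: ec2-test-stop
#       resource: ec2
#       filters:
#         - "tag:Testing": not-null
#       actions:
#         - stop
#
# which would be run with something like: custodian run -s output policy.yml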
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : downloader.py
@Date : 2021/09/15
@Author : Yaronzz
@Version : 1.0
@Contact : yaronhuang@foxmail.com
@Desc :
"""
import time
from PyQt5.Qt import QThread
from tidal_gui.viewModel.taskModel import TaskModel
class DownloaderImp(QThread):
def __init__(self):
super(DownloaderImp, self).__init__()
self._taskModel = None
def run(self):
print('DownloadImp start...')
while not self.isInterruptionRequested():
if self._taskModel is not None:
item = self._taskModel.getWaitDownloadItem()
if item is not None:
item.download()
time.sleep(1)
print('DownloadImp stop...')
def setTaskModel(self, model: TaskModel):
self._taskModel = model
def stop(self):
self.requestInterruption()
self.wait()
downloadImp = DownloaderImp()
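# Minimal usage sketch (assumes a populated TaskModel instance exists
# elsewhere in the application; not part of the original module):
#
#   downloadImp.setTaskModel(taskModel)
#   downloadImp.start()   # QThread.start() invokes run() in a worker thread
#   ...
#   downloadImp.stop()    # requests interruption, then waits for run() to exit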
|
import codecs
import game
from hacktools import common
def run():
infolder = "data/extract/DATA/files/movie/"
outfile = "data/movie_output.txt"
common.logMessage("Extracting MOVIE to", outfile, "...")
with codecs.open(outfile, "w", "utf-8") as out:
files = common.getFiles(infolder, ".bin")
for file in common.showProgress(files):
common.logDebug("Processing", file, "...")
with common.Stream(infolder + file, "rb", False) as f:
strnum = f.readUInt()
if strnum == 0:
continue
out.write("!FILE:" + file + "\n")
for i in range(strnum):
f.seek(8 + i * 12)
substart = f.readUInt()
subend = f.readUInt()
subpointer = f.readUInt()
f.seek(subpointer)
substr, strlen = game.readUTFString(f)
out.write(str(substart) + ":" + str(subend) + ":" + substr + "=\n")
common.logMessage("Done! Extracted", len(files), "files")
|
# Generated by Django 2.2.2 on 2019-06-05 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SearchQuery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paperTitle', models.CharField(max_length=5000)),
('paperAuthors', models.CharField(max_length=5000)),
('paperDOI', models.IntegerField()),
('paperSubject', models.CharField(max_length=5000)),
],
),
]
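# For reference, the model this initial migration creates corresponds roughly
# to the following sketch (field names and types are taken from the operations
# above; the app's actual models.py is not shown here):
#
#   class SearchQuery(models.Model):
#       paperTitle = models.CharField(max_length=5000)
#       paperAuthors = models.CharField(max_length=5000)
#       paperDOI = models.IntegerField()
#       paperSubject = models.CharField(max_length=5000)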
|