id (string, 3-8 chars) | content (string, 100-981k chars)
---|---
117 |
from pytest import raises
from discopy.cartesian import *
def test_Box_repr():
f = Box('f', 1, 2, lambda x: (x, x))
assert "Box('f', 1, 2" in repr(f)
def test_Function_str():
f = Function(2, 1, lambda x, y: x + y)
assert 'Function(dom=2, cod=1,' in str(f)
def test_Function_call():
f = Swap(2, 1)
values = (2, 3)
with raises(TypeError) as err:
f(*values)
assert str(err.value) == messages.expected_input_length(f, values)
def test_Function_then():
f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
assert Function.id(2).then(*(f, g))(20, 21) == 42
def test_Function_then_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f >> g
assert str(err.value) == messages.type_err(Function, g)
g = Function.id(2)
with raises(AxiomError) as err:
f >> g
assert str(err.value) == messages.does_not_compose(f, g)
def test_Function_tensor():
assert Function.id(3)(1, 2, 3)\
== Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)
def test_Function_tensor_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f @ g
assert str(err.value) == messages.type_err(Function, g)
|
136 |
import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
from mkdist import bsp_copy_files
import rtconfig
library_dir = os.path.join(dist_dir, 'libraries')
print("=> copy nrf52 bsp libraries")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
bsp_copy_files(library_path, library_dir)
|
149 |
from itertools import groupby
class Solution:
def countAndSay(self, n):
def gen(s):
return "".join(str(len(list(g))) + k for k, g in groupby(s))
s, i = "1", 1
while i < n:
s = gen(s)
i += 1
return s
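# Hedged usage sketch: the count-and-say sequence begins "1", "11", "21",
# "1211", "111221", so a quick sanity check looks like this.
if __name__ == "__main__":
    solver = Solution()
    assert solver.countAndSay(1) == "1"
    assert solver.countAndSay(4) == "1211"
    assert solver.countAndSay(5) == "111221"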
|
157 |
import json
import aiohttp
async def request(url, payload=None, params=None, headers=None):
headers = {'content-type': 'application/json', **(headers or {})}
data = payload and json.dumps(payload)
async with aiohttp.ClientSession() as client:
async with client.post(
url, data=data, params=params, headers=headers) as resp:
# TODO: Check response status
json_response = await resp.json()
return json_response
async def get_updates(base_url, timeout, offset):
params = {
'timeout': timeout,
'offset': offset
}
return await request(f'{base_url}/getUpdates', params=params)
async def send_message(base_url, chat_id, text, reply_markup=None):
payload = {
'chat_id': chat_id,
'text': text
}
if reply_markup is not None:
payload['reply_markup'] = reply_markup
return await request(f'{base_url}/sendMessage', payload)
async def answer_callback_query(
base_url, callback_query_id, text, show_alert,
url=None, cache_time=None):
payload = {
'callback_query_id': callback_query_id,
'text': text,
'show_alert': show_alert
}
if url is not None:
payload['url'] = url
if cache_time is not None:
payload['cache_time'] = cache_time
return await request(f'{base_url}/answerCallbackQuery', payload)
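# Hedged usage sketch (assumptions: a Telegram-style Bot API base URL of the
# form 'https://api.telegram.org/bot<TOKEN>' and a real chat id; neither is
# defined in the snippet above).
import asyncio

async def _demo():
    base_url = 'https://api.telegram.org/bot<TOKEN>'  # hypothetical placeholder
    updates = await get_updates(base_url, timeout=30, offset=0)
    print(updates)
    await send_message(base_url, chat_id=123456, text='hello')

if __name__ == '__main__':
    asyncio.run(_demo())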
|
185 |
import traceback
from pprint import pformat
from threading import Thread
import itchat
import logging
from wxpy.chat import Chat
from wxpy.chats import Chats
from wxpy.friend import Friend
from wxpy.group import Group
from wxpy.message import MessageConfigs, Messages, Message, MessageConfig
from wxpy.mp import MP
from wxpy.response import ResponseError
from wxpy.user import User
from wxpy.utils.constants import SYSTEM
from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list
logger = logging.getLogger('wxpy')
class Robot(object):
"""
Robot object used to log in to and operate a WeChat account; covers most features of Web WeChat.
"""
def __init__(
self, save_path=None, console_qr=False, qr_path=None,
qr_callback=None, login_callback=None, logout_callback=None
):
"""
:param save_path:
| Path of the file used to save or load the login status, e.g. 'wxpy.pkl'; if empty, no attempt is made to load it.
| With this parameter set, the login status can be reloaded within a short time, avoiding repeated QR-code scans; when the status expires, a new login is requested.
:param console_qr: show the login QR code in the terminal (requires the Pillow module)
:param qr_path: path for saving the QR code image
:param qr_callback: callback invoked when the QR code is obtained; receives: uuid, status, qrcode
:param login_callback: callback invoked on login; receives the same parameters as above
:param logout_callback: callback invoked on logout; receives the same parameters as above
"""
self.core = itchat.Core()
itchat.instanceList.append(self)
self.core.auto_login(
hotReload=bool(save_path), statusStorageDir=save_path,
enableCmdQR=console_qr, picDir=qr_path, qrCallback=qr_callback,
loginCallback=login_callback, exitCallback=logout_callback
)
self.message_configs = MessageConfigs(self)
self.messages = Messages(robot=self)
self.file_helper = Chat(wrap_user_name('filehelper'))
self.file_helper.robot = self
self.file_helper.nick_name = '文件传输助手'
self.self = Chat(self.core.loginInfo['User'])
self.self.robot = self
self.save_path = save_path
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.self.name)
@handle_response()
def logout(self):
"""
Log out of the current account.
"""
return self.core.logout()
@property
def alive(self):
"""
Current login status.
:return: True if logged in, otherwise False
"""
return self.core.alive
@alive.setter
def alive(self, value):
self.core.alive = value
def dump_login_status(self, save_path=None):
return self.core.dump_login_status(save_path or self.save_path)
# chats
def except_self(self, chats_or_dicts):
"""
Exclude the robot's own account from a collection of chat objects or a list of user dicts.
:param chats_or_dicts: collection of chat objects or list of user dicts
:return: the list with the robot's own account removed
"""
return list(filter(lambda x: get_user_name(x) != self.self.user_name, chats_or_dicts))
def chats(self, update=False):
"""
Get all chat objects.
:param update: whether to refresh
:return: collection of chat objects
"""
return Chats(self.friends(update) + self.groups(update) + self.mps(update), self)
def friends(self, update=False):
"""
Get all friends.
:param update: whether to refresh
:return: collection of chat objects
"""
@handle_response(Friend)
def do():
return self.core.get_friends(update=update)
ret = do()
ret.source = self
return ret
@handle_response(Group)
def groups(self, update=False, contact_only=False):
"""
Get all group chats.
:param update: whether to refresh
:param contact_only: whether to restrict to group chats saved as contacts
:return: collection of group chats
"""
return self.core.get_chatrooms(update=update, contactOnly=contact_only)
@handle_response(MP)
def mps(self, update=False):
"""
Get all official (MP) accounts.
:param update: whether to refresh
:return: collection of chat objects
"""
return self.core.get_mps(update=update)
@handle_response(User)
def user_details(self, user_or_users, chunk_size=50):
"""
Get detailed information (region, gender, signature, etc.) for one user or a batch of users; not usable for group-chat members.
:param user_or_users: one or more user objects or user_name values
:param chunk_size: batch size used when splitting requests, currently 50
:return: detailed information for the user(s)
"""
def chunks():
total = ensure_list(user_or_users)
for i in range(0, len(total), chunk_size):
yield total[i:i + chunk_size]
@handle_response()
def process_one_chunk(_chunk):
return self.core.update_friend(userName=get_user_name(_chunk))
if isinstance(user_or_users, (list, tuple)):
ret = list()
for chunk in chunks():
chunk_ret = process_one_chunk(chunk)
if isinstance(chunk_ret, list):
ret += chunk_ret
else:
ret.append(chunk_ret)
return ret
else:
return process_one_chunk(user_or_users)
def search(self, name=None, **attributes):
"""
Search across chat objects of all types.
:param name: name (nickname, remark name, etc.)
:param attributes: attribute key/value pairs; keys can be sex, province, city, etc., e.g. province='广东'
:return: collection of matching chat objects
"""
return self.chats().search(name, **attributes)
# add / create
@handle_response()
def add_friend(self, user, verify_content=''):
"""
Add a user as a friend.
:param user: user object or user name
:param verify_content: verification message
"""
return self.core.add_friend(
userName=get_user_name(user),
status=2,
verifyContent=verify_content,
autoUpdate=True
)
@handle_response()
def accept_friend(self, user, verify_content=''):
"""
Accept a user as a friend.
:param user: user object or user name
:param verify_content: verification message
"""
# TODO: verify that this friend API still works, and return the new friend directly when accepting
return self.core.add_friend(
userName=get_user_name(user),
status=3,
verifyContent=verify_content,
autoUpdate=True
)
def create_group(self, users, topic=None):
"""
Create a new group chat.
:param users: list of users
:param topic: group name
:return: a new group chat object if creation succeeds
"""
@handle_response()
def request():
return self.core.create_chatroom(
memberList=wrap_user_name(users),
topic=topic or ''
)
ret = request()
user_name = ret.get('ChatRoomName')
if user_name:
return Group(self.core.update_chatroom(userName=user_name))
else:
raise ResponseError('Failed to create group:\n{}'.format(pformat(ret)))
# messages
def _process_message(self, msg):
"""
Process a received message.
"""
if not self.alive:
return
func, run_async = self.message_configs.get_func(msg)
if not func:
return
def process():
# noinspection PyBroadException
try:
ret = func(msg)
if ret is not None:
if isinstance(ret, (tuple, list)):
self.core.send(
msg=str(ret[0]),
toUserName=msg.chat.user_name,
mediaId=ret[1]
)
else:
self.core.send(
msg=str(ret),
toUserName=msg.chat.user_name
)
except:
logger.warning(
'An error occurred in registered function, '
'use `Robot().start(debug=True)` to show detailed information')
logger.debug(traceback.format_exc())
if run_async:
Thread(target=process).start()
else:
process()
def register(
self, chats=None, msg_types=None,
except_self=True, run_async=True, enabled=True
):
"""
Decorator: register a message configuration.
:param chats: one or a list of chat objects or chat types; matches all chats when empty
:param msg_types: one or a list of message types; matches all message types when empty (except SYSTEM messages)
:param except_self: exclude messages sent by the account itself from its phone
:param run_async: run the configured function asynchronously to improve responsiveness
:param enabled: default enabled state of this configuration; can be toggled dynamically later
"""
def register(func):
self.message_configs.append(MessageConfig(
robot=self, func=func, chats=chats, msg_types=msg_types,
except_self=except_self, run_async=run_async, enabled=enabled
))
return func
return register
def start(self, block=True):
"""
Start listening for and processing messages.
:param block: whether to block the current thread; when False, run the listener in a new thread
"""
def listen():
logger.info('{} Auto-reply started.'.format(self))
try:
while self.alive:
msg = Message(self.core.msgList.get(), self)
if msg.type is not SYSTEM:
self.messages.append(msg)
self._process_message(msg)
except KeyboardInterrupt:
logger.info('KeyboardInterrupt received, ending...')
self.alive = False
if self.core.useHotReload:
self.dump_login_status()
logger.info('Bye.')
if block:
listen()
else:
t = Thread(target=listen, daemon=True)
t.start()
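# Hedged usage sketch for the Robot class above (assumes an account that can
# complete the QR-code login; msg.text is assumed to hold the message text).
if __name__ == '__main__':
    robot = Robot(save_path='wxpy.pkl', console_qr=False)

    @robot.register(run_async=False)
    def echo(msg):
        # Returning a value sends it back to the chat the message came from.
        return 'Echo: {}'.format(msg.text)

    robot.start(block=True)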
|
201 |
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import DropboxOAuth2Provider
class DropboxOAuth2Adapter(OAuth2Adapter):
provider_id = DropboxOAuth2Provider.id
access_token_url = "https://api.dropbox.com/oauth2/token"
authorize_url = "https://www.dropbox.com/oauth2/authorize"
profile_url = "https://api.dropbox.com/2/users/get_current_account"
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
response = requests.post(
self.profile_url,
headers={"Authorization": "Bearer %s" % (token.token,)},
)
response.raise_for_status()
return self.get_provider().sociallogin_from_response(request, response.json())
oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
|
274 |
import logging
from collections import Counter
from django.core.management.base import BaseCommand
from django.db.models import Q
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
from TWLight.applications.signals import Reminder
from TWLight.users.models import Editor
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
# This is not DRY. Originally, this pulled the queryset from
# TWLight.applications.views.ListApplicationsView.get_queryset().
# But that now expects a request object. So, we did a copy/paste.
# We're actually getting apps with a status of PENDING or QUESTION
# or APPROVED, and their corresponding user preferences being True
# for partners with a status of AVAILABLE.
all_apps = (
Application.objects.filter(
Q(
partner__coordinator__editor__user__userprofile__pending_app_reminders=True
)
& Q(status=Application.PENDING)
| Q(
partner__coordinator__editor__user__userprofile__discussion_app_reminders=True
)
& Q(status=Application.QUESTION)
| Q(
partner__coordinator__editor__user__userprofile__approved_app_reminders=True
)
& Q(status=Application.APPROVED),
partner__status__in=[Partner.AVAILABLE],
editor__isnull=False,
)
.exclude(editor__user__groups__name="restricted")
.order_by("status", "partner", "date_created")
)
# A deduplicated dict of coordinators from the pending app queryset, along
# with a count of how many total pending apps they have
coordinators = Counter(
all_apps.values_list(
"partner__coordinator__editor",
"partner__coordinator__email",
"partner__coordinator__editor__user__userprofile__lang",
)
)
for coordinator, count in list(coordinators.items()):
try:
# We create a dictionary with the three status codes
# we'd want to send emails for, and their corresponding
# counts.
app_status_and_count = {
Application.PENDING: all_apps.filter(
status=Application.PENDING,
partner__coordinator__editor=coordinator[0],
).count(),
Application.QUESTION: all_apps.filter(
status=Application.QUESTION,
partner__coordinator__editor=coordinator[0],
).count(),
Application.APPROVED: all_apps.filter(
status=Application.APPROVED,
partner__coordinator__editor=coordinator[0],
).count(),
}
editor = Editor.objects.get(id=coordinator[0])
except Editor.DoesNotExist:
logger.info(
"Editor {} does not exist; skipping.".format(coordinator[0])
)
continue
# Only bother with the signal if we have a coordinator email.
if coordinator[1]:
Reminder.coordinator_reminder.send(
sender=self.__class__,
app_status_and_count=app_status_and_count,
coordinator_wp_username=editor.wp_username,
coordinator_email=coordinator[1],
coordinator_lang=coordinator[2],
)
|
330 |
from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
'''Datasets that raise errors (hollow shapes): han, jakobs1, jakobs2'''
'''Too many shapes, not handled yet: shapes, shirts, swim, trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("Start processing the",name[index],"dataset")
'''Width is not considered for now; everything is expressed by scaling'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("Scaling by a factor of",scale[index])
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons
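# Hedged usage sketch (assumes the CSV files referenced above exist under
# data/, e.g. data/ga.csv for index 0).
if __name__ == '__main__':
    polygons = getData(0)
    print(len(polygons), "polygons loaded")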
|
340 |
import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
## use b-tagging to distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
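## Hedged example (not from the original config): EDProducer configurations can
## typically be cloned with parameter overrides, e.g. to allow one more jet in
## the combination.
findTtSemiLepJetCombMaxSumPtWMass5Jets = findTtSemiLepJetCombMaxSumPtWMass.clone(
    maxNJets = cms.int32(5)
)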
|
351 |
from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
|
377 |
import os
from functools import wraps
from os.path import join as join_path
from dash import Dash
from flask import make_response, render_template_string, redirect
excluded_resources_endpoints = (
'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',
'/_user', '/auth')
def add_routes(app, authorizer):
"""Adds authentication endpoints to a flask app.
Decorates other endpoints to grant access.
The endpoints are:
* /login
* Method: GET
* /logout
* Method: GET
* Erases cookies
* /auth
* Method: GET
* Validates cookies if present or header authentication
* Header:
'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'
* Sets cookies on login
* Rejects unauthorized users
Parameters
----------
app: flask.Flask or dash.Dash
The flask or dash application
authorizer
Object providing validate() and clean_cookie(), used to authenticate requests.
Endpoints listed in the module-level excluded_resources_endpoints tuple are not wrapped.
"""
def login():
ok, _ = authorizer.validate()
if ok:
return make_response(redirect('/'), 307)
return render_template_string(login_template)
def logout():
_, response = authorizer.clean_cookie()
return response
def auth():
_, response = authorizer.validate()
return response
def authorize_endpoint(function):
@wraps(function)
def authorized_function(*args, **kwargs):
ok, response = authorizer.validate()
if ok:
return function(*args, **kwargs)
return response
return authorized_function
if isinstance(app, Dash):
app = app.server
login_template = load_template('login.html')
app.add_url_rule('/auth', '/auth', auth)
app.add_url_rule('/login', '/login', login)
app.add_url_rule('/logout', '/logout', logout)
for endpoint, function in app.view_functions.items():
if endpoint not in excluded_resources_endpoints:
app.view_functions[endpoint] = authorize_endpoint(function)
def load_template(filename):
"""Loads the login html template."""
pyfile_path = os.path.dirname(os.path.abspath(__file__))
path = join_path(pyfile_path, 'templates', filename)
with open(path, 'r') as f:
return f.read().strip()
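# Hedged usage sketch for add_routes above. The authorizer below is a
# hypothetical stand-in: add_routes only relies on validate() and
# clean_cookie(), each returning an (ok, flask_response) pair. A
# templates/login.html file next to this module is also assumed, since
# load_template reads it.
class _AllowAllAuthorizer:
    def validate(self):
        return True, make_response('ok', 200)

    def clean_cookie(self):
        return True, make_response('logged out', 200)

if __name__ == '__main__':
    app = Dash(__name__)
    add_routes(app, _AllowAllAuthorizer())
    # app.run_server(...) would now serve /auth, /login and /logout, with every
    # other endpoint wrapped by authorize_endpoint.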
|
432 |
from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
class FakeJoystick:
def __init__(self):
self._pressed = [False] * 2
def getRawButton(self, num):
return self._pressed[num]
def press(self, num):
self._pressed[num] = True
def release(self, num):
self._pressed[num] = False
def test_toggle():
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 0)
toggleButton2 = Toggle(joystick, 1)
assert toggleButton.off
joystick.press(0)
assert toggleButton.on
assert toggleButton2.off
joystick.release(0)
assert toggleButton.on
joystick.press(0)
assert toggleButton.off
joystick.release(0)
assert toggleButton.off
joystick.press(1)
assert toggleButton.off
assert toggleButton2.on
def test_toggle_debounce():
# TODO: use simulated time
delay = NotifierDelay(0.5)
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 1, 0.1)
assert toggleButton.off
joystick.press(1)
assert toggleButton.on
joystick.release(1)
joystick.press(1)
joystick.release(1)
assert toggleButton.on
delay.wait()
assert toggleButton.on
joystick.press(1)
assert toggleButton.off
|
505 |
from test_plus.test import TestCase
from ...administrative_units.factories import AdministrativeUnitFactory
from ...cases.factories import CaseFactory
from ...channels.factories import ChannelFactory
from ...events.factories import EventFactory
from ...features.factories import FeatureFactory, FeatureOptionFactory
from ...generic.tests.test_views import ReadOnlyViewSetMixin
from ...institutions.factories import InstitutionFactory
from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory
from ...search.tests.mixins import SearchQueryMixin
from ...tags.factories import TagFactory
from ...users.factories import UserFactory
class AdministrativeUnitAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_administrative_unit"
factory_class = AdministrativeUnitFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_case"
factory_class = CaseFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ChannelAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_channel"
factory_class = ChannelFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class DocumentTypeAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_document_type"
factory_class = DocumentTypeFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ReferenceNumberAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_reference_number"
factory_class = ReferenceNumberFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class EventAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_event"
factory_class = EventFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature"
factory_class = FeatureFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureOptionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature_option"
factory_class = FeatureOptionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class InstitutionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_institution"
factory_class = InstitutionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_tag"
factory_class = TagFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_user"
factory_class = UserFactory
initial_count = 1
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["username"], self.obj.username)
|
509 |
import json
import re
import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
class TestConsent:
def test_full_flow(self, satosa_config_dict, consent_module_config):
api_url = "https://consent.example.com/api"
redirect_url = "https://consent.example.com/redirect"
consent_module_config["config"]["api_url"] = api_url
consent_module_config["config"]["redirect_url"] = redirect_url
satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# incoming auth req
http_resp = test_client.get("/{}/{}/request".format(satosa_config_dict["BACKEND_MODULES"][0]["name"],
satosa_config_dict["FRONTEND_MODULES"][0]["name"]))
assert http_resp.status_code == 200
verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
with responses.RequestsMock() as rsps:
# fake no previous consent
consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
rsps.add(responses.GET, verify_url_re, status=401)
rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)
# incoming auth resp
http_resp = test_client.get("/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"]))
assert http_resp.status_code == 302
assert http_resp.headers["Location"].startswith(redirect_url)
with responses.RequestsMock() as rsps:
# fake consent
rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)
# incoming consent response
http_resp = test_client.get("/consent/handle_consent")
assert http_resp.status_code == 200
|
511 |
import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
|
513 |
import collections.abc
class ReadOnlyDict(collections.abc.MutableMapping):  # MutableMapping lives in collections.abc on Python 3
def __init__(self, store):
self.store = store
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
raise TypeError('Cannot modify ReadOnlyDict')
def __delitem__(self, key):
raise TypeError('Cannot modify ReadOnlyDict')
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __str__(self):
return 'ReadOnlyDict(%s)' % self.store
def __repr__(self):
return 'ReadOnlyDict(%r)' % self.store
|
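# Hedged usage sketch for ReadOnlyDict above: reads pass through, writes raise.
store = ReadOnlyDict({'a': 1})
assert store['a'] == 1
assert len(store) == 1
try:
    store['b'] = 2
except TypeError:
    pass  # expected: ReadOnlyDict cannot be modified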
522 |
import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
function, backend='gloo'):  # the legacy 'tcp' backend is no longer available in PyTorch; 'gloo' works on CPU
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank,
world_size=world_size)
function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
|
532 |
import datetime
import io
import json_tricks
import logging
import os
from os.path import (abspath, basename, dirname, exists, expanduser,
join, realpath, relpath, splitext)
import re
import shutil
import sys
from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long,
Str)
from whoosh import fields, qparser, query
from whoosh.util.times import datetime_to_long, long_to_datetime
from .common import get_project_dir
from .media import Media, MediaData, get_media_data
from .directory import Directory
from . import processor
logger = logging.getLogger(__name__)
if sys.version_info[0] > 2:
unicode = str
string_types = (str,)
import csv
else:
string_types = (basestring,)
import backports.csv as csv
INT = fields.NUMERIC(numtype=int)
FLOAT = fields.NUMERIC(numtype=float)
def get_file_saved_time(path):
dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
return dt.ctime()
def _get_sample(fname):
sample = ''
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
sample += fp.readline() + fp.readline()
return sample
def _get_csv_headers(fname):
sample = _get_sample(fname)
sniffer = csv.Sniffer()
has_header = sniffer.has_header(sample)
dialect = sniffer.sniff(sample)
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
header = next(reader)
return has_header, header, dialect
class TagInfo(HasTraits):
name = Str
type = Enum("string", "text", "int", "float", "bool")
default = Any
def __repr__(self):
return 'TagInfo(%r, %r)' % (self.name, self.type)
def _default_default(self):
map = {"string": "", "text": "", "int": 0, "float": 0.0,
"bool": False}
return map[self.type]
def open_file(fname_or_file, mode='rb'):
if hasattr(fname_or_file, 'read'):
return fname_or_file
else:
return open(fname_or_file, mode)
def sanitize_name(name):
name = name.lower()
name = re.sub(r'\s+', '_', name)
return re.sub(r'\W+', '', name)
def get_non_existing_filename(fname):
if exists(fname):
base, ext = splitext(basename(fname))
return join(dirname(fname), base + '_a' + ext)
else:
return fname
COMMON_TAGS = dict(
file_name='string', path='string', relpath='string',
ctime='string', mtime='string', size='int', type='string'
)
def _cleanup_query(q, tag_types):
type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
for term in q.leaves():
if isinstance(term, query.Term):
if isinstance(term.text, (str, unicode, bytes)):
fieldtype = tag_types[term.fieldname]
if fieldtype in type_map:
term.text = type_map[fieldtype](term.text)
else:
term.text = term.text.lower()
elif isinstance(term, query.Phrase):
term.words = [x.lower() for x in term.words]
def _check_value(value, expr):
if isinstance(expr, string_types):
return expr in value.lower()
else:
return expr == value
def _check_range(x, term):
result = True
if term.start is not None:
if term.startexcl:
result &= x > term.start
else:
result &= x >= term.start
if term.end is not None and result:
if term.endexcl:
result &= x < term.end
else:
result &= x <= term.end
return result
def _check_date_range(x, term):
result = True
if term.startdate is not None:
result &= x >= term.start
if term.enddate is not None and result:
result &= x <= term.end
return result
def _search_media(expr, m_key, get_tag):
"""Given search expression, index to media, and a getter to get the attribute
check if the media matches expression.
"""
if expr.is_leaf():
if isinstance(expr, query.Term):
attr = expr.fieldname
return _check_value(get_tag(m_key, attr), expr.text)
elif isinstance(expr, query.Phrase):
attr = expr.fieldname
text = " ".join(expr.words)
return _check_value(get_tag(m_key, attr), text)
elif isinstance(expr, query.DateRange):
if expr.fieldname == 'ctime':
value = get_tag(m_key, 'ctime_')
elif expr.fieldname == 'mtime':
value = get_tag(m_key, 'mtime_')
return _check_date_range(value, expr)
elif isinstance(expr, query.NumericRange):
attr = expr.fieldname
return _check_range(get_tag(m_key, attr), expr)
else:
print("Unsupported term: %r" % expr)
return False
else:
if isinstance(expr, query.And):
result = True
for child in expr.children():
result &= _search_media(child, m_key, get_tag)
if not result:
break
return result
elif isinstance(expr, query.Or):
result = False
for child in expr.children():
result |= _search_media(child, m_key, get_tag)
if result:
break
return result
elif isinstance(expr, query.Not):
subquery = list(expr.children())[0]
return not _search_media(subquery, m_key, get_tag)
else:
print("Unsupported term: %r" % expr)
return False
class Project(HasTraits):
name = Str
description = Str
path = Str
root = Instance(Directory)
tags = List(TagInfo)
_media = Dict(Str, Media)
extensions = List(Str)
processors = List(processor.FactoryBase)
number_of_files = Long
# Path where the project data is saved.
save_file = Str
last_save_time = Str
_data = Dict
_tag_data = Dict
_relpath2index = Dict()
_query_parser = Instance(qparser.QueryParser)
def add_tags(self, tags):
tags = list(self.tags) + tags
self.update_tags(tags)
def update_tags(self, new_tags):
old_tags = self.tags
new_tag_names = set(tag.name for tag in new_tags)
tag_info = dict((tag.name, tag.type) for tag in old_tags)
removed = []
added = []
for tag in new_tags:
if tag.name not in tag_info:
added.append(tag)
elif tag_info[tag.name] != tag.type:
removed.append(tag)
added.append(tag)
for tag in old_tags:
if tag.name not in new_tag_names:
removed.append(tag)
for tag in removed:
del self._tag_data[tag.name]
n_entries = len(self._relpath2index)
for tag in added:
self._tag_data[tag.name] = [tag.default]*n_entries
# The above can be the first time when self._tag_data is accessed, when
# creating a new project for example. In this case,
# self.__tag_data_default is called, so if self.tags is set then the
# removed tags will not exist in _tag_data causing an error. So we only
# set self.tags below.
self.tags = new_tags
# Update the cached media
for m in self._media.values():
for tag in removed:
del m.tags[tag.name]
for tag in added:
m.tags[tag.name] = tag.default
self._query_parser = self._make_query_parser()
def copy(self):
"""Make a copy of this project. This does not copy the data but only
the tags, extensions and the other settings of the project.
This will not copy any of the processor states but only their settings.
"""
name = self.name + ' copy'
p = Project(name=name)
traits = ['description', 'extensions', 'path', 'processors', 'tags']
p.copy_traits(self, traits, copy='deep')
# Clear out the _done information from the processors
for proc in p.processors:
proc._done.clear()
return p
# #### CRUD interface to the data ####
def update(self, media_data, tags=None):
"""Create/update the internal data given the media data and tags.
Parameters
----------
f: vixen.directory.File instance
tags: dict
"""
relpath = media_data.relpath
if not self.has_media(relpath):
index = len(self._relpath2index)
self._relpath2index[relpath] = index
for key in MediaData._fields:
self._data[key].append(None)
for tag in self.tags:
self._tag_data[tag.name].append(tag.default)
index = self._relpath2index[relpath]
for i, key in enumerate(MediaData._fields):
self._data[key][index] = media_data[i]
if tags:
for key, value in tags.items():
self._tag_data[key][index] = value
media = self._media.get(relpath)
if media is not None:
media.update(media_data, tags)
def get(self, relpath):
"""Given the relative path of some media, return a Media instance.
"""
if relpath in self._media:
return self._media[relpath]
else:
data = {}
index = self._relpath2index[relpath]
for key in MediaData._fields:
data[key] = self._data[key][index]
tags = {}
for key in self._tag_data:
tags[key] = self._tag_data[key][index]
media = Media.from_data(MediaData(**data), tags)
media.on_trait_change(self._media_tag_handler, 'tags_items')
self._media[relpath] = media
return media
def remove(self, relpaths):
"""Given a list of relative path of some media, remove them from the
database.
"""
relpath2index = self._relpath2index
indices = [(x, relpath2index[x]) for x in relpaths]
for relpath, index in sorted(indices, reverse=True):
last = len(relpath2index) - 1
if index == last:
self._delete_record(last, relpath)
else:
self._replace_with_last_record(index, last)
self._delete_record(last, relpath)
def has_media(self, relpath):
"""Returns True if the media data is available.
"""
return relpath in self._relpath2index
def keys(self):
"""Return all the keys for the media relative paths."""
return self._relpath2index.keys()
def _get_media_attr(self, index, attr):
"""Given an index to the media, return its value.
"""
if attr in self._data:
return self._data[attr][index]
elif attr in self._tag_data:
return self._tag_data[attr][index]
# #### End of CRUD interface to the data ####
def clean(self):
"""Scan the project and remove any dead entries.
This is useful when you remove or rename files. This does not refresh
the directory tree or set the number of files. It simply cleans up the
db of files that no longer exist.
"""
logger.info('Cleaning project: %s', self.name)
root_path = self.path
to_remove = []
relpath2index = self._relpath2index
for rpath in list(relpath2index.keys()):
fname = os.path.join(root_path, rpath)
if not os.path.exists(fname):
to_remove.append(rpath)
self.remove(to_remove)
def export_csv(self, fname, cols=None):
"""Export metadata to a csv file. If `cols` are not specified,
it writes out all the useful metadata.
Parameters
-----------
fname: str: a path to the csv file to dump.
cols: sequence: a sequence of columns to write.
"""
logger.info('Exporting CSV: %s', fname)
all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))
- set(('ctime_', 'mtime_')))
if cols is None:
cols = all_keys
cols = list(sorted(cols))
data_cols = set([x for x in cols if x in self._data])
with io.open(fname, 'w', newline='', encoding='utf-8') as of:
# Write the header.
writer = csv.writer(of)
writer.writerow(cols)
for i in range(len(self._relpath2index)):
line = []
for col in cols:
if col in data_cols:
elem = self._data[col][i]
else:
elem = self._tag_data[col][i]
line.append(elem)
writer.writerow(line)
def import_csv(self, fname):
"""Read tag information from given CSV filename.
Returns the success status and the error message if any. Note that this
only applies tags for column headers with known tags. Unknown tags are
not added.
Parameters
----------
fname : str Input filename.
"""
logger.info('Importing tags from: %s', fname)
has_header, header, dialect = _get_csv_headers(fname)
if not has_header:
return False, "The CSV file does not appear to have a header."
if 'path' not in header:
msg = "The CSV file does not have a 'path' column."
return False, msg
tags = {x: header.index(x.name) for x in self.tags if x.name in header}
path_idx = header.index('path')
TRUE = ('1', 't', 'true', 'y', 'yes')
type_map = {
'bool': lambda x: x.lower() in TRUE,
'string': lambda x: x,
'text': lambda x: x,
'int': int,
'float': float
}
count = 0
total = 0
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
next(reader) # Skip header
for record in reader:
total += 1
path = record[path_idx]
rpath = relpath(path, self.path)
index = self._relpath2index.get(rpath, None)
media = self._media.get(rpath)
if index is not None:
count += 1
for tag, header_index in tags.items():
data = record[header_index]
try:
value = type_map[tag.type](data)
if media is not None:
media.tags[tag.name] = value
else:
self._tag_data[tag.name][index] = value
except ValueError:
pass
msg = "Read tags for %d paths out of %d entries." % (count, total)
if count == 0 and total > 0:
msg += ("\nPlease check that your path column matches "
"the media paths.")
return False, msg
else:
msg += ("\nPlease check the imported tags and make sure you "
"save the project.")
return True, msg
def load(self, fp=None):
"""Load media info from opened file object.
"""
if fp is None:
if not exists(self.save_file):
return
fp = open_file(self.save_file, 'rb')
else:
fp = open_file(fp, 'rb')
data = json_tricks.load(
fp, preserve_order=False, ignore_comments=False
)
fp.close()
self.name = data.get('name', '')
self.description = data.get('description', '')
self.path = data.get('path')
self.tags = [TagInfo(name=x[0], type=x[1]) for x in data['tags']]
self.processors = [processor.load(x)
for x in data.get('processors', [])]
version = data.get('version')
if version == 1:
self._read_version1_media(data['media'])
else:
self._data = data['media_data']
self._tag_data = data['tag_data']
self._relpath2index = data['relpath2index']
root = Directory()
root.__setstate__(data.get('root'))
self.extensions = root.extensions
self.root = root
self.number_of_files = len(self._relpath2index)
def save(self):
"""Save current media info to a file object
"""
if len(self.save_file) > 0:
self.save_as(self.save_file)
self._update_last_save_time()
else:
raise IOError("No valid save file set.")
def save_as(self, fp):
"""Save copy to specified path.
"""
fp = open_file(fp, 'wb')
tags = [(t.name, t.type) for t in self.tags]
root = self.root.__getstate__()
processors = [processor.dump(x) for x in self.processors]
data = dict(
version=2, path=self.path, name=self.name,
description=self.description, tags=tags,
media_data=self._data, tag_data=self._tag_data,
relpath2index=self._relpath2index,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
def scan(self, refresh=False):
"""Find all the media recursively inside the root directory.
This will not clobber existing records but will add any new ones.
"""
self._setup_root()
def _scan(dir):
for f in dir.files:
if not self.has_media(f.relpath) or refresh:
data = get_media_data(f.path, f.relpath)
self.update(data)
for d in dir.directories:
if refresh:
d.refresh()
_scan(d)
if refresh:
self.root.refresh()
_scan(self.root)
self.number_of_files = len(self._relpath2index)
def search(self, q):
"""A generator which yields the (filename, relpath) for each file
satisfying the search query.
"""
logger.info('Searching for %s', q)
try:
parsed_q = self._query_parser.parse(q)
except Exception:
logger.warn("Invalid search expression: %s", q)
print("Invalid search expression: %s" % q)
return
tag_types = self._get_tag_types()
_cleanup_query(parsed_q, tag_types)
for key, index in self._relpath2index.items():
if _search_media(parsed_q, index, self._get_media_attr):
yield basename(key), key
def refresh(self):
logger.info('Refreshing project: %s', self.name)
self.clean()
self.scan(refresh=True)
# #### Private protocol ################################################
def _setup_root(self):
path = abspath(expanduser(self.path))
root = self.root
if root is None or realpath(root.path) != realpath(path):
self.root = Directory(path=path, extensions=self.extensions)
def _tags_default(self):
return [TagInfo(name='completed', type='bool')]
def _save_file_default(self):
if len(self.name) > 0:
fname = sanitize_name(self.name) + '.vxn'
d = get_project_dir()
return get_non_existing_filename(join(d, fname))
else:
return ''
def _update_last_save_time(self):
self.last_save_time = get_file_saved_time(self.save_file)
def _last_save_time_default(self):
if exists(self.save_file):
return get_file_saved_time(self.save_file)
else:
return ''
def _name_changed(self, name):
if len(name) > 0:
old_save_file = self.save_file
old_dir = dirname(old_save_file)
new_save_file = join(old_dir, sanitize_name(name) + '.vxn')
if new_save_file != old_save_file:
self.save_file = new_save_file
if exists(old_save_file):
shutil.move(old_save_file, self.save_file)
def _extensions_changed(self, ext):
if self.root is not None:
self.root.extensions = ext
def _extensions_items_changed(self):
if self.root is not None:
self.root.extensions = self.extensions
def _get_tag_types(self):
result = dict(COMMON_TAGS)
result.update(dict((t.name, t.type) for t in self.tags))
return result
def _make_schema(self):
from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema
kw = dict(
type=TEXT, file_name=TEXT, path=TEXT,
mtime=DATETIME, ctime=DATETIME, size=INT
)
type_to_field = dict(
string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN
)
for tag in self.tags:
kw[tag.name] = type_to_field[tag.type]
return Schema(**kw)
def _make_query_parser(self):
schema = self._make_schema()
qp = qparser.QueryParser('path', schema=schema)
qp.add_plugin(qparser.GtLtPlugin())
from whoosh.qparser.dateparse import DateParserPlugin
qp.add_plugin(DateParserPlugin())
return qp
def __query_parser_default(self):
return self._make_query_parser()
def __data_default(self):
data = {}
for key in MediaData._fields:
data[key] = []
return data
def __tag_data_default(self):
tags = {}
for key in self.tags:
tags[key.name] = []
return tags
def _media_tag_handler(self, obj, tname, old, new):
index = self._relpath2index[obj.relpath]
for tag in new.changed:
self._tag_data[tag][index] = obj.tags[tag]
def _read_version1_media(self, media):
data = self.__data_default()
tag_data = self.__tag_data_default()
relpath2index = {}
keymap = dict.fromkeys(MediaData._fields)
for k in keymap:
keymap[k] = k
keymap['_ctime'] = 'ctime_'
keymap['_mtime'] = 'mtime_'
for index, (key, m) in enumerate(media):
relpath2index[key] = index
tags = m.pop('tags')
for tname, v in tags.items():
tag_data[tname].append(v)
for k, v in m.items():
data[keymap[k]].append(v)
if 'file_name' not in m:
data['file_name'].append(basename(key))
data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]
data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]
self._data = data
self._tag_data = tag_data
self._relpath2index = relpath2index
def _delete_record(self, index, relpath):
for key in MediaData._fields:
del self._data[key][index]
for key in self._tag_data:
del self._tag_data[key][index]
if relpath in self._media:
del self._media[relpath]
del self._relpath2index[relpath]
def _replace_with_last_record(self, index, last):
_data = self._data
_tag_data = self._tag_data
for key in MediaData._fields:
_data[key][index] = _data[key][last]
for key in self._tag_data:
_tag_data[key][index] = _tag_data[key][last]
last_relpath = _data['relpath'][last]
self._relpath2index[last_relpath] = index
def _save_as_v1(self, fp):
"""Save copy to specified path.
This mainly exists for testing and making sure we still read the old
saved files.
"""
def _rewrite_dir(state):
"Rewrite directories in the old format."
state['files'] = [x[0] for x in state['files']]
state['directories'] = [_rewrite_dir(d)
for d in state['directories']]
state.pop('relpath')
state.pop('name')
return state
fp = open_file(fp, 'wb')
media = [(key, self.get(key).to_dict()) for key in self._relpath2index]
tags = [(t.name, t.type) for t in self.tags]
root = _rewrite_dir(self.root.__getstate__())
processors = [processor.dump(x) for x in self.processors]
for k, m in media:
m['_ctime'] = long_to_datetime(m['_ctime'])
m['_mtime'] = long_to_datetime(m['_mtime'])
data = dict(
version=1, path=self.path, name=self.name,
description=self.description, tags=tags, media=media,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
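# Hedged usage sketch for the Project class above (the path, extensions and
# search string are illustrative assumptions, not part of the original module).
if __name__ == '__main__':
    p = Project(name='holiday-photos', path='~/Pictures/holiday',
                extensions=['.jpg', '.png'])
    p.scan()                                  # index all media under the root directory
    for fname, rpath in p.search('beach'):    # substring match on the file path
        print(fname, rpath)
    p.save()                                  # writes to the default .vxn save file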
|
534 |
from io import StringIO
from unittest import TestCase
from dropSQL.parser.streams import *
class StreamTestCase(TestCase):
def test(self):
s = '12'
cs = Characters(StringIO(s))
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '2')
r = cs.next()
self.assertFalse(r)
self.assertTrue(r.err())
r = cs.next()
self.assertFalse(r)
cs.back()
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '2')
cs.back(2)
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '1')
|
538 |
MANIFEST = {
"hilt": {
"h1": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (141, 141, 141), # 8d8d8d
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal/Salvaged materials",
},
"h2": {
"offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
"colours": {
"primary": (112, 112, 112), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (212, 175, 55), # 000000
},
"length": 24,
"materials": "Alloy metal and carbon composite",
},
"h3": {
"offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
"colours": {
"primary": (157, 157, 157), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h4": {
"offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
"colours": {
"primary": (0, 0, 0), # 000000
"secondary": (157, 157, 157), # 9d9d9d
"tertiary": (180, 97, 19), # b46113
},
"length": 13,
"materials": "Alloy metal",
},
"h5": {
"offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
"colours": {
"primary": (111, 111, 111), # 6f6f6f
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h6": {
"offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
"colours": {
"primary": (120, 120, 120), # 787878
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 22,
"materials": "Alloy metal/Salvaged materials",
},
"h7": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
"colours": {
"primary": (192, 192, 192), # c0c0c0
"secondary": (255, 215, 0), # ffd700
"tertiary": (0, 0, 0), # 000000
},
"length": 22,
"materials": "Alloy metal and Gold",
},
"h8": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (180, 97, 19), # b46113
"tertiary": (0, 0, 0), # 000000
},
"length": 24,
"materials": "Alloy metal/Copper",
},
},
"blade": {
"b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
"b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
"b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
"b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
"b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
"b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
"b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
"b8": {
"colour": "Orange",
"crystal": ["Ilum crystal", "Ultima Pearl"],
"type": "Sith",
},
"b9": {
"colour": "Black",
"crystal": "Obsidian",
"type": ["Jedi", "Mandalorian"],
},
},
"pommel": {
"p1": {"length": 5,},
"p2": {"length": 14,},
"p3": {"length": 3,},
"p4": {"length": 8,},
"p5": {"length": 5,},
"p6": {"length": 5,},
"p7": {"length": 8,},
},
# These are lightsabers for a specific Jedi or Sith. Should use their name instead of
"unique_urls": {""},
}
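# Hedged usage sketch: MANIFEST is a plain nested dict, so component lookups
# are straightforward.
hilt = MANIFEST["hilt"]["h1"]
print(hilt["materials"])                   # Alloy metal/Salvaged materials
print(hilt["colours"]["primary"])          # (216, 216, 216)
blade = MANIFEST["blade"]["b2"]
print(blade["colour"], blade["crystal"])   # Blue Zophis crystal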
|
545 |
import json
from cisco_sdwan_policy.BaseObject import BaseObject
class Application(BaseObject):
def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs):
self.type = "appList"
self.id = id
self.name = name
self.references = reference
self.app_family=is_app_family
self._entries = app_list
self.url = "template/policy/list/app"
super().__init__(**kwargs)
self.modified=False
def get_entries(self):
return self._entries
def set_entries(self,entries):
self.modified=True
self._entries=entries
@classmethod
def from_json(cls,jsonfile,**kwargs):
id = jsonfile["listId"]
name = jsonfile["name"]
references = jsonfile.get("references")
if len(jsonfile["entries"])>0 and jsonfile["entries"][0].get("app"):
appFamily=False
entries = [i["app"] for i in jsonfile["entries"]]
else:
if not jsonfile["entries"][0].get("appFamily"):
return None
else:
appFamily=True
entries = [i["appFamily"] for i in jsonfile["entries"]]
return cls(name,entries,appFamily,id,references,**kwargs)
def to_json(self):
return {
"name":self.name,
"description":"Desc Not Required",
"type":"app",
"entries":[
{"appFamily" if self.app_family else "app":i} for i in self._entries]
}
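# Hedged usage sketch (BaseObject may expect session/connection kwargs that are
# not shown in this snippet; they are omitted here, so treat this as illustrative).
apps = Application("MyApps", ["youtube", "netflix"], is_app_family=False)
print(json.dumps(apps.to_json(), indent=2))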
|
552 |
from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.symbolsim.phono_align import Aligner
from corpustools.multiproc import filter_mp, score_mp
def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):
w_len = len(getattr(w, sequence_type))
query_len = len(getattr(query, sequence_type))
if w_len > query_len+max_distance:
return False
if w_len < query_len-max_distance:
return False
return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),
sequence_type, max_distance) <= max_distance
def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):
return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance
def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):
return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance
def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',
num_cores = -1, settable_attr = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of all words in the corpus and
adds them as attributes of the words.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
settable_attr: string
Name of attribute that neighbourhood density results will be assigned to
"""
function = partial(neighborhood_density, corpus_context,
tierdict = tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = neighborhood_density(corpus_context, w, tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
results[str(w)] = [getattr(r, output_format) for r in res[1]]
setattr(w.original, settable_attr.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = [getattr(r, output_format) for r in res[1]]
# setattr(w.original, settable_attr.name, res[0]-1)
# #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect
# #subtracting 1 here is easier than fixing the neighbourhood density algorithm
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),
#corpus_context.attribute.name, n[1][0])
settable_attr.name, n[1][0])
return results
def neighborhood_density(corpus_context, query, tierdict,
algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,
force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of a particular word in the corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose neighborhood density to calculate.
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor
force_quadratic : bool
        Force use of the less efficient quadratic algorithm even when finding
        neighborhoods at an edit distance of 1
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
tuple(int, set)
Tuple of the number of neighbors and the set of neighbor Words.
"""
matches = []
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors for {}...'.format(query))
call_back(0,len(corpus_context))
cur = 0
if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:
return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,
file_type=file_type, collapse_homophones=collapse_homophones)
if algorithm == 'edit_distance':
is_neighbor = partial(_is_edit_distance_neighbor,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'phono_edit_distance':
is_neighbor = partial(_is_phono_edit_distance_neighbor,
specifier = corpus_context.specifier,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'khorsi':
freq_base = corpus_context.get_frequency_base()
is_neighbor = partial(_is_khorsi_neighbor,
freq_base = freq_base,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
for w in corpus_context:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if not is_neighbor(w, query):
continue
matches.append(w)
neighbors = set(matches)-set([query])
return (len(neighbors), neighbors)
def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,
tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):
"""Generates all neighbors of edit distance <= 1 and searches
for them in corpus_context.
Will be faster than neighborhood_density when:
n > m * (1 + s), where
n: number of words in corpus
m: length of query
s: size of segment inventory
"""
neighbors = list()
query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)
for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):
if tier_type.att_type == 'tier':
cand_str = trans_delimiter.join(candidate)
else:
cand_str = ''.join(candidate)
if cand_str in tierdict:
for w in tierdict[cand_str]:
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):
continue
else:
neighbors.append(w)
return (len(neighbors), neighbors)
def generate_neighbor_candidates(corpus_context, query, sequence_type):
sequence = getattr(query, sequence_type)
yield [str(c) for c in sequence]
for i in range(len(sequence)):
yield [str(c) for c in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion
for char in corpus_context.inventory:
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] # insertion
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] # substitution
for char in corpus_context.inventory: # final pass to get insertion at len+1
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:]] + [str(char)] # insertion
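# Illustrative sketch of what generate_neighbor_candidates yields for a
# hypothetical query ['k', 'a', 't'] over an inventory {'k', 'a', 't', 'o'}:
# the unchanged sequence, each single-segment deletion (e.g. ['a', 't']),
# insertions of non-'#' segments before each position (e.g. ['o', 'k', 'a', 't']),
# substitutions at each position (e.g. ['k', 'o', 't']), and insertions after
# the final segment (e.g. ['k', 'a', 't', 'o']).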
def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,
stop_check = None, call_back = None):
function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)
if call_back is not None:
        call_back('Finding mutation minimal pairs...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = find_mutation_minpairs(corpus_context, w,
tier_type=tier_type, collapse_homophones = collapse_homophones)
results[str(w)] = res[1]
setattr(w.original, corpus_context.attribute.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = res[1]#[str(r) for r in res[1]]
# setattr(w.original, corpus_context.attribute.name, res[0])
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])
return results
def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Find all minimal pairs of the query word based only on segment
mutations (not deletions/insertions)
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose minimal pairs to find
stop_check : callable or None
Optional function to check whether to gracefully terminate early
call_back : callable or None
Optional function to supply progress information during the function
Returns
-------
list
The found minimal pairs for the queried word
"""
matches = []
sequence_type = corpus_context.sequence_type
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
        call_back('Finding minimal pairs...')
call_back(0,len(corpus_context))
cur = 0
al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)
for w in corpus_context:
w_sequence = getattr(w, sequence_type)
query_sequence = getattr(query, sequence_type)
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if (len(w_sequence) > len(query_sequence)+1 or
len(w_sequence) < len(query_sequence)-1):
continue
m = al.make_similarity_matrix(query_sequence, w_sequence)
if m[-1][-1]['f'] != 1:
continue
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):
continue
else:
#matches.append(str(w_sequence))
matches.append(w)
matches = [m.spelling for m in matches]
neighbors = list(set(matches)-set([str(query_sequence)]))
return (len(neighbors), neighbors)
def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):
if isinstance(query, Word):
query_word = query
else:
if tier_type.att_type == 'spelling':
if file_type == sequence_type:
query_word = Word(**{sequence_type: list(query)})
else:
query_word = query.replace(trans_delimiter, '')
query_word = Word(**{sequence_type: list(query_word)})
elif tier_type.att_type == 'tier':
if file_type == sequence_type:
                query_with_td = '.'.join(query) if '.' not in query else query
for entry in corpus:
corpus_word_with_td = str(getattr(entry, sequence_type))
                    if query_with_td == corpus_word_with_td: # if a word in corpus has the same transcription
return entry # that word in the corpus is to be referred to.
# the following should be run if no word found in corpus with the transcription
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: new_query})
else: # if file contains spelling
try:
query_word = corpus.corpus.find(query)
except KeyError:
# if the word in the file can't be found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: list(new_query)})
return query_word
def parse(word, delimiter):
return word.split(delimiter) if delimiter in word else list(word) |
571 | from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
            demisto.error('Failed extracting wait time, will use default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
    if res.status_code != 200:
        if res.status_code != 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
    Convert an HTML breach description into a human-readable string
    :param breach_description: Description of breach from API response
    :return: Description string with HTML links rewritten as clickable (markdown) links
        for better readability in the war-room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict() # dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
    If the http request was successful, the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
    Execute the pwned request for a list of emails. To support list input, the function returns 3 lists of outputs
:param args_dict: the demisto argument - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
    :param email_list: the email list needed for the http requests
:return: 2 arrays of http requests outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
def pwned_domain_command(args_dict):
"""
    Execute the pwned request for a list of domains. To support list input, the function returns 3 lists of
    outputs
:param args_dict: the demisto argument - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executing the http request
    :param domain_list: the domain list needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
    Execute the pwned request for a list of usernames. To support list input, the function returns 3 lists of
    outputs
:param args_dict: the demisto argument - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executing the http request
    :param username_list: the username list needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
|
596 | from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
|
600 | import hashlib
from typing import TypeVar, Union
import redis
from openff.toolkit.topology import Molecule
from openff.bespokefit.executor.services.qcgenerator import worker
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.bespokefit.utilities.molecule import canonical_order_atoms
_T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask)
def _canonicalize_task(task: _T) -> _T:
task = task.copy(deep=True)
# Ensure the SMILES has a canonical ordering to help ensure cache hits.
canonical_molecule = canonical_order_atoms(
Molecule.from_smiles(task.smiles, allow_undefined_stereo=True)
)
if isinstance(task, Torsion1DTask):
map_to_atom_index = {
j: i for i, j in canonical_molecule.properties["atom_map"].items()
}
central_atom_indices = sorted(
map_to_atom_index[task.central_bond[i]] for i in (0, 1)
)
canonical_molecule.properties["atom_map"] = {
atom_index: (i + 1) for i, atom_index in enumerate(central_atom_indices)
}
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
task.central_bond = (1, 2)
else:
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=False
)
task.smiles = canonical_smiles
return task
def cached_compute_task(
task: Union[HessianTask, OptimizationTask, Torsion1DTask],
redis_connection: redis.Redis,
) -> str:
"""Checks to see if a QC task has already been executed and if not send it to a
worker.
"""
if isinstance(task, Torsion1DTask):
compute = worker.compute_torsion_drive
elif isinstance(task, OptimizationTask):
compute = worker.compute_optimization
elif isinstance(task, HessianTask):
compute = worker.compute_hessian
else:
raise NotImplementedError()
# Canonicalize the task to improve the cache hit rate.
task = _canonicalize_task(task)
task_hash = hashlib.sha512(task.json().encode()).hexdigest()
task_id = redis_connection.hget("qcgenerator:task-ids", task_hash)
if task_id is not None:
return task_id.decode()
task_id = compute.delay(task_json=task.json()).id
redis_connection.hset("qcgenerator:types", task_id, task.type)
# Make sure to only set the hash after the type is set in case the connection
# goes down before this information is entered and subsequently discarded.
redis_connection.hset("qcgenerator:task-ids", task_hash, task_id)
return task_id
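# Minimal usage sketch (hypothetical connection details; task construction is
# defined elsewhere in the schema and elided here):
#
#   redis_connection = redis.Redis(host="localhost", port=6379)
#   task_id = cached_compute_task(task, redis_connection)
#
# Submitting an equivalent task again should return the same id, because the
# hash of the canonicalized task JSON is looked up in "qcgenerator:task-ids"
# before a new worker job is dispatched.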
|
605 | import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
@contextlib.contextmanager
def raises_if(exception, expected):
try:
yield
except exception:
if not expected:
raise
else:
if expected:
pytest.fail(f"Did not raise {exception}")
def check_validator(validate, value, expected_valid):
try:
validate(value)
except ValidationError:
if expected_valid:
pytest.fail(f'Unexpected validation error for value "{value}"')
except Exception:
raise
else:
if not expected_valid:
pytest.fail(f'Expected validation error for value "{value}"')
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def generate_test_import_xml(obj: dict) -> BytesIO:
xml = render_to_string(
template_name="workbaskets/taric/transaction_detail.xml",
context={
"envelope_id": next(_transaction_counter),
"tracked_models": [obj],
"transaction_id": next(_transaction_counter),
"message_counter": counter_generator(),
"counter_generator": counter_generator,
},
)
return BytesIO(xml.encode())
def taric_xml_record_codes(xml):
"""Yields tuples of (record_code, subrecord_code)"""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
def validate_taric_xml(
factory=None,
instance=None,
factory_kwargs=None,
check_order=True,
):
def decorator(func):
def wraps(
api_client,
taric_schema,
approved_transaction,
valid_user,
*args,
**kwargs,
):
if not factory and not instance:
raise AssertionError(
"Either a factory or an object instance need to be provided",
)
if factory and instance:
raise AssertionError(
"Either a factory or an object instance need to be provided - not both.",
)
current_instance = instance or factory.create(
transaction=approved_transaction, **factory_kwargs or {}
)
api_client.force_login(user=valid_user)
response = api_client.get(
reverse(
"workbaskets:workbasket-detail",
kwargs={"pk": approved_transaction.workbasket.pk},
),
{"format": "xml"},
)
assert response.status_code == 200
content = response.content
xml = etree.XML(content)
taric_schema.validate(xml)
assert not taric_schema.error_log, f"XML errors: {taric_schema.error_log}"
if check_order:
validate_taric_xml_record_order(xml)
kwargs = {"xml": xml, **kwargs}
func(
*args,
**kwargs,
)
return wraps
return decorator
class Dates:
deltas = {
"normal": (relativedelta(), relativedelta(months=+1)),
"earlier": (relativedelta(years=-1), relativedelta(years=-1, months=+1)),
"later": (
relativedelta(years=+1, months=+1, days=+1),
relativedelta(years=+1, months=+2),
),
"big": (relativedelta(years=-2), relativedelta(years=+2, days=+1)),
"adjacent": (relativedelta(days=+1), relativedelta(months=+1)),
"adjacent_earlier": (relativedelta(months=-1), relativedelta(days=-1)),
"adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
"adjacent_no_end": (relativedelta(months=+1, days=+1), None),
"adjacent_even_later": (
relativedelta(months=+2, days=+1),
relativedelta(months=+3),
),
"adjacent_earlier_big": (
relativedelta(years=-2, months=-2),
relativedelta(years=-2),
),
"adjacent_later_big": (
relativedelta(months=+1, days=+1),
relativedelta(years=+2, months=+2),
),
"overlap_normal": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1, years=+1),
),
"overlap_normal_earlier": (
relativedelta(months=-1, days=+14),
relativedelta(days=+14),
),
"overlap_normal_same_year": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1),
),
"overlap_big": (relativedelta(years=+1), relativedelta(years=+3, days=+2)),
"after_big": (
relativedelta(years=+3, months=+1),
relativedelta(years=+3, months=+2),
),
"backwards": (relativedelta(months=+1), relativedelta(days=+1)),
"starts_with_normal": (relativedelta(), relativedelta(days=+14)),
"ends_with_normal": (relativedelta(days=+14), relativedelta(months=+1)),
"current": (relativedelta(weeks=-4), relativedelta(weeks=+4)),
"future": (relativedelta(weeks=+10), relativedelta(weeks=+20)),
"no_end": (relativedelta(), None),
"normal_first_half": (relativedelta(), relativedelta(days=+14)),
}
@property
def now(self):
return self.datetime_now.date()
@property
def datetime_now(self):
return datetime.now(tz=UTC).replace(hour=0, minute=0, second=0, microsecond=0)
def __getattr__(self, name):
if name in self.deltas:
start, end = self.deltas[name]
start = self.now + start
if end is not None:
end = self.now + end
return TaricDateRange(start, end)
raise AttributeError(name)
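    # Illustrative use (a sketch, not exhaustive): Dates().normal resolves via
    # __getattr__ to a TaricDateRange from today until one month from today,
    # built from the "normal" entry in the deltas mapping above.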
@classmethod
def short_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-14),
)
@classmethod
def medium_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-1),
)
@classmethod
def short_after(cls, dt):
return TaricDateRange(
dt + relativedelta(days=+14),
dt + relativedelta(months=+1),
)
@classmethod
def short_overlap(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(months=+1),
)
@classmethod
def no_end_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
None,
)
def only_applicable_after(cutoff):
"""
    Decorator which asserts that a test passes now but fails before the specified cutoff date.
:param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
def decorator(fn):
@wraps(fn)
def do_test(*args, **kwargs):
# test should pass normally
fn(*args, **kwargs)
# test should fail before cutoff
with freeze_time(cutoff + relativedelta(days=-1)):
try:
fn(*args, **kwargs)
except pytest.fail.Exception:
pass
except Exception:
raise
else:
pytest.fail(f"Rule applied before {cutoff:%Y-%m-%d}")
return True
return do_test
return decorator
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
>>> datetime.date(2021, 1, 2),
>>> datetime.date(2022, 3, 4),
>>> )
{
"start_date_0": 1,
"start_date_1": 2,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
|
608 | import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestProfileIDM(unittest.TestCase):
"""
Profiling test for IDM agents.
"""
def setUp(self) -> None:
"""
Inherited, see super class.
"""
self.n_repeat_trials = 1
self.display_results = True
self.scenario = get_test_nuplan_scenario()
def test_profile_idm_agent_observation(self) -> None:
"""Profile IDMAgents."""
profiler = Profiler(interval=0.0001)
profiler.start()
# How many times to repeat runtime test
for _ in range(self.n_repeat_trials):
observation = IDMAgents(
target_velocity=10,
min_gap_to_lead_agent=0.5,
headway_time=1.5,
accel_max=1.0,
decel_max=2.0,
scenario=self.scenario,
)
for step in range(self.scenario.get_number_of_iterations() - 1):
iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step)
next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1)
buffer = SimulationHistoryBuffer.initialize_from_list(
1,
[self.scenario.get_ego_state_at_iteration(step)],
[self.scenario.get_tracked_objects_at_iteration(step)],
next_iteration.time_point.time_s - iteration.time_point.time_s,
)
observation.update_observation(iteration, next_iteration, buffer)
profiler.stop()
if self.display_results:
logger.info(profiler.output_text(unicode=True, color=True))
if __name__ == "__main__":
unittest.main()
|
623 | import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
    the ground truth horizon line, within the image's x-axis, normalized
    by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
if ground_truth_horizon is None or detected_horizon is None:
return None
def gt(x):
return ground_truth_horizon[0] * x + ground_truth_horizon[1]
def dt(x):
return detected_horizon[0] * x + detected_horizon[1]
width, height = image_dims
return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
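# Worked example with hypothetical numbers: for image_dims = (640, 480), a ground
# truth horizon y = 0.0 * x + 100 and a detection y = 0.05 * x + 90, the endpoint
# gaps are |100 - 90| = 10 and |100 - 122| = 22, giving an error of 22 / 480 ≈ 0.046.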
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
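# Sketch of the greedy matching above (hypothetical values): if two detections sit
# at 3 and 5 degrees from the same ground-truth VP, the 3-degree pair is claimed
# first, leaving the 5-degree detection free to match a different ground-truth VP.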
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
|
629 | import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
class SUN397EncodableDataset(Dataset):
"""SUN397 encodable dataset class"""
def __init__(self, train=True):
super().__init__()
path = 'data/SUN397/train/*/*.jpg' if train else 'data/SUN397/test/*/*.jpg'
self.data = list(glob.glob(path))
random.shuffle(self.data)
cats = list(set([path.split("/")[3] for path in self.data]))
cats.sort()
self.labels = torch.LongTensor([cats.index(path.split("/")[3]) for path in self.data])
self.preprocessor = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Initialize the cache of encoded features; raw images are served until it is filled.
        self.encoded_data = []
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if len(self.encoded_data) == 0:
return self.preprocessor(Image.open(self.data[idx]).convert('RGB')), self.labels[idx]
return self.encoded_data[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
def num_classes(self):
return int(max(self.labels) + 1)
|
643 | from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
def __init__(self, cfg_ana, cfg_comp, looperName ):
super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
#----------------------------------------
# DECLARATION OF HANDLES OF LEPTONS STUFF
#----------------------------------------
def declareHandles(self):
super(TauAnalyzer, self).declareHandles()
self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
def beginLoop(self, setup):
super(TauAnalyzer,self).beginLoop(setup)
self.counters.addCounter('events')
count = self.counters.counter('events')
count.register('all events')
count.register('has >=1 tau at preselection')
count.register('has >=1 selected taus')
count.register('has >=1 other taus')
#------------------
# MAKE LEPTON LISTS
#------------------
def makeTaus(self, event):
event.inclusiveTaus = []
event.selectedTaus = []
event.otherTaus = []
#get all
alltaus = map( Tau, self.handles['taus'].product() )
#make inclusive taus
for tau in alltaus:
tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0]
tau.lepVeto = False
tau.idDecayMode = tau.tauID("decayModeFinding")
tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs")
if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID):
continue
tau.inclusive_lepVeto = False
if self.cfg_ana.inclusive_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR:
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if self.cfg_ana.inclusive_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID):
tau.inclusive_lepVeto = True
if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID):
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if tau.pt() < self.cfg_ana.inclusive_ptMin: continue
if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue
if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue
def id3(tau,X):
"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""
return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
def id5(tau,X):
"""Create an integer equal to 1-2-3-4-5 for (very loose,
loose, medium, tight, very tight)"""
return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight")
def id6(tau,X):
"""Create an integer equal to 1-2-3-4-5-6 for (very loose,
loose, medium, tight, very tight, very very tight)"""
return id5(tau, X) + tau.tauID(X%"VVTight")
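            # Illustrative example (assuming the boolean working-point IDs return
            # 0 or 1): a tau passing Loose and Medium but failing Tight gives
            # id3 == 2; id5 and id6 extend the same sum with the V(V)Loose/Tight variants.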
tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT")
tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT")
tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3")
tau.idAntiE = id5(tau, "againstElectron%sMVA6")
#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
if tau.tauID(self.cfg_ana.inclusive_tauID):
event.inclusiveTaus.append(tau)
for tau in event.inclusiveTaus:
tau.loose_lepVeto = False
if self.cfg_ana.loose_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR:
tau.loose_lepVeto = True
if self.cfg_ana.loose_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID):
tau.loose_lepVeto = True
if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID):
tau.loose_lepVeto = True
if tau.tauID(self.cfg_ana.loose_decayModeID) and \
tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \
abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \
tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto:
event.selectedTaus.append(tau)
else:
event.otherTaus.append(tau)
event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True)
event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
event.otherTaus.sort(key = lambda l : l.pt(), reverse = True)
self.counters.counter('events').inc('all events')
if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection')
if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus')
def matchTaus(self, event):
match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5)
for lep in event.inclusiveTaus:
gen = match[lep]
lep.mcMatchId = 1 if gen else 0
lep.genp = gen
def process(self, event):
self.readCollections( event.input )
self.makeTaus(event)
if not self.cfg_comp.isMC:
return True
if hasattr(event, 'gentaus'):
self.matchTaus(event)
return True
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
|
650 | from django.db import models
from .query import BookQuerySet
class Book(models.Model):
objects = BookQuerySet.as_manager()
title = models.CharField(max_length=50)
publication_date = models.DateTimeField()
author = models.ForeignKey('Author')
genres = models.ManyToManyField('Genre')
class Author(models.Model):
name = models.CharField(max_length=50)
nationality = models.ForeignKey('Nation', null=True)
class Genre(models.Model):
name = models.CharField(max_length=50)
class Nation(models.Model):
name = models.CharField(max_length=50)
demonym = models.CharField(max_length=50)
|
666 | def ips_between(start, end):
calc = lambda n, m: (int(end.split(".")[n]) - int(start.split(".")[n])) * m
return calc(0, 256 * 256 * 256) + calc(1, 256 * 256) + calc(2, 256) + calc(3, 1)
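# Worked example: ips_between("10.0.0.0", "10.0.1.0") evaluates to
# 0 * 256**3 + 0 * 256**2 + 1 * 256 + 0 = 256, i.e. each octet difference is
# weighted by 256**3, 256**2, 256 and 1 respectively.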
|
667 | from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class FeatureExtractor(Executor):
"""
    FeatureExtractor takes in a list of assets, runs feature extraction on
    them, and returns a list of corresponding results. A FeatureExtractor must
    specify a unique type and version combination (by the TYPE and VERSION
    attributes), so that the Result generated by it can be identified.
    A derived class of FeatureExtractor must:
    1) Override TYPE and VERSION
    2) Override _generate_result(self, asset), which calls a
    command-line executable and generates feature scores in a log file.
    3) Override _get_feature_scores(self, asset), which reads the feature
    scores from the log file, and returns the scores in a dictionary format.
For an example, follow VmafFeatureExtractor.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def ATOM_FEATURES(self):
raise NotImplementedError
def _read_result(self, asset):
result = {}
result.update(self._get_feature_scores(asset))
executor_id = self.executor_id
return Result(asset, executor_id, result)
@classmethod
def get_scores_key(cls, atom_feature):
return "{type}_{atom_feature}_scores".format(
type=cls.TYPE, atom_feature=atom_feature)
@classmethod
def get_score_key(cls, atom_feature):
return "{type}_{atom_feature}_score".format(
type=cls.TYPE, atom_feature=atom_feature)
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
atom_feature_scores_dict = {}
atom_feature_idx_dict = {}
for atom_feature in self.ATOM_FEATURES:
atom_feature_scores_dict[atom_feature] = []
atom_feature_idx_dict[atom_feature] = 0
with open(log_file_path, 'rt') as log_file:
for line in log_file.readlines():
for atom_feature in self.ATOM_FEATURES:
re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
mo = re.match(re_template, line)
if mo:
cur_idx = int(mo.group(1))
assert cur_idx == atom_feature_idx_dict[atom_feature]
# parse value, allowing NaN and inf
val = float(mo.group(2))
if np.isnan(val) or np.isinf(val):
val = None
atom_feature_scores_dict[atom_feature].append(val)
atom_feature_idx_dict[atom_feature] += 1
continue
len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
assert len_score != 0
for atom_feature in self.ATOM_FEATURES[1:]:
assert len_score == len(atom_feature_scores_dict[atom_feature]), \
"Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {}
for atom_feature in self.ATOM_FEATURES:
scores_key = self.get_scores_key(atom_feature)
feature_result[scores_key] = atom_feature_scores_dict[atom_feature]
return feature_result
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VmafFeatureExtractor, cls)._post_process_result(result)
# adm2 =
# (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)
adm2_scores_key = cls.get_scores_key('adm2')
adm_num_scores_key = cls.get_scores_key('adm_num')
adm_den_scores_key = cls.get_scores_key('adm_den')
result.result_dict[adm2_scores_key] = list(
(np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /
(np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)
)
# vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3
vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')
vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')
vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')
vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')
vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')
vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')
vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')
vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')
vif_scale0_scores_key = cls.get_scores_key('vif_scale0')
vif_scale1_scores_key = cls.get_scores_key('vif_scale1')
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')
vif_scale3_scores_key = cls.get_scores_key('vif_scale3')
result.result_dict[vif_scale0_scores_key] = list(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key]))
)
result.result_dict[vif_scale1_scores_key] = list(
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key]))
)
result.result_dict[vif_scale2_scores_key] = list(
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key]))
)
result.result_dict[vif_scale3_scores_key] = list(
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
)
# vif2 =
# ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +
# (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0
vif_scores_key = cls.get_scores_key('vif2')
result.result_dict[vif_scores_key] = list(
(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key])) +
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key])) +
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key])) +
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
) / 4.0
)
# adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3
adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')
adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')
adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')
adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')
adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')
adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')
adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')
adm_scale0_scores_key = cls.get_scores_key('adm_scale0')
adm_scale1_scores_key = cls.get_scores_key('adm_scale1')
adm_scale2_scores_key = cls.get_scores_key('adm_scale2')
adm_scale3_scores_key = cls.get_scores_key('adm_scale3')
result.result_dict[adm_scale0_scores_key] = list(
(np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale1_scores_key] = list(
(np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale2_scores_key] = list(
(np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale3_scores_key] = list(
(np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
# adm3 = \
# (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0
adm3_scores_key = cls.get_scores_key('adm3')
result.result_dict[adm3_scores_key] = list(
(
((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))
) / 4.0
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
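# Illustrative sketch (not part of the original extractor): the derived scores
# above are plain elementwise ratios averaged over the four scales, with an
# optional stabilizing constant as in adm3. The helper below is hypothetical
# and only demonstrates that aggregation on made-up per-frame arrays.
def _example_scale_aggregation(nums, dens, constant=0.0):
    import numpy as np  # local import keeps the sketch self-contained
    scales = [
        (np.array(num) + constant) / (np.array(den) + constant)
        for num, den in zip(nums, dens)
    ]
    # elementwise average of the per-scale ratios over frames
    return sum(scales) / float(len(scales))
# e.g. _example_scale_aggregation([[1.0, 2.0]] * 4, [[2.0, 4.0]] * 4) -> array([0.5, 0.5])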
class VifFrameDifferenceFeatureExtractor(FeatureExtractor):
TYPE = "VifDiff_feature"
VERSION = '0.1'
ATOM_FEATURES = ['vifdiff',
'vifdiff_num', 'vifdiff_den',
'vifdiff_num_scale0', 'vifdiff_den_scale0',
'vifdiff_num_scale1', 'vifdiff_den_scale1',
'vifdiff_num_scale2', 'vifdiff_den_scale2',
'vifdiff_num_scale3', 'vifdiff_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
        yuv_type = self._get_workfile_yuv_type(asset)
        ref_path = asset.ref_workfile_path
        dis_path = asset.dis_workfile_path
        w = quality_width
        h = quality_height
logger = self.logger
ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)
# vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3
vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')
vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')
vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')
vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')
vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')
vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')
vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')
vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')
vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')
vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')
vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')
vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')
result.result_dict[vifdiff_scale0_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale0_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale0_scores_key]))
)
result.result_dict[vifdiff_scale1_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale1_scores_key]))
)
result.result_dict[vifdiff_scale2_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale2_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale2_scores_key]))
)
result.result_dict[vifdiff_scale3_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale3_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale3_scores_key]))
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class PsnrFeatureExtractor(FeatureExtractor):
TYPE = "PSNR_feature"
VERSION = "1.0"
ATOM_FEATURES = ['psnr']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
        yuv_type = self._get_workfile_yuv_type(asset)
        ref_path = asset.ref_workfile_path
        dis_path = asset.dis_workfile_path
        w = quality_width
        h = quality_height
logger = self.logger
ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MomentFeatureExtractor(FeatureExtractor):
TYPE = "Moment_feature"
# VERSION = "1.0" # call executable
VERSION = "1.1" # python only
ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_w, quality_h = asset.quality_width_height
ref_scores_mtx = None
with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:
scores_mtx_list = []
i = 0
for ref_yuv in ref_yuv_reader:
ref_y = ref_yuv[0]
firstm = ref_y.mean()
secondm = ref_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
ref_scores_mtx = np.vstack(scores_mtx_list)
dis_scores_mtx = None
with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:
scores_mtx_list = []
i = 0
for dis_yuv in dis_yuv_reader:
dis_y = dis_yuv[0]
firstm = dis_y.mean()
secondm = dis_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
dis_scores_mtx = np.vstack(scores_mtx_list)
assert ref_scores_mtx is not None and dis_scores_mtx is not None
log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),
'dis_scores_mtx': dis_scores_mtx.tolist()}
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'wt') as log_file:
log_file.write(str(log_dict))
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'rt') as log_file:
log_str = log_file.read()
log_dict = ast.literal_eval(log_str)
ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])
dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])
_, num_ref_features = ref_scores_mtx.shape
assert num_ref_features == 2 # ref1st, ref2nd
_, num_dis_features = dis_scores_mtx.shape
assert num_dis_features == 2 # dis1st, dis2nd
feature_result = {}
feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])
feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])
feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])
feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])
return feature_result
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(MomentFeatureExtractor, cls)._post_process_result(result)
# calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd
refvar_scores_key = cls.get_scores_key('refvar')
ref1st_scores_key = cls.get_scores_key('ref1st')
ref2nd_scores_key = cls.get_scores_key('ref2nd')
disvar_scores_key = cls.get_scores_key('disvar')
dis1st_scores_key = cls.get_scores_key('dis1st')
dis2nd_scores_key = cls.get_scores_key('dis2nd')
get_var = lambda m: m[1] - m[0] * m[0]
result.result_dict[refvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[ref1st_scores_key],
result.result_dict[ref2nd_scores_key])))
result.result_dict[disvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[dis1st_scores_key],
result.result_dict[dis2nd_scores_key])))
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
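# Illustrative sketch (not part of the original class): refvar/disvar above rely
# on the identity Var(X) = E[X^2] - (E[X])^2, applied per frame to the stored
# first and second moments. A hypothetical standalone check:
def _example_var_from_moments(values):
    import numpy as np
    first = np.mean(values)
    second = np.mean(np.square(values))
    # matches np.var(values) up to floating-point error
    return second - first * first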
class SsimFeatureExtractor(FeatureExtractor):
TYPE = "SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
        yuv_type = self._get_workfile_yuv_type(asset)
        ref_path = asset.ref_workfile_path
        dis_path = asset.dis_workfile_path
        w = quality_width
        h = quality_height
logger = self.logger
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MsSsimFeatureExtractor(FeatureExtractor):
TYPE = "MS_SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ms_ssim',
'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',
'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',
'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',
'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',
'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',
]
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
        yuv_type = self._get_workfile_yuv_type(asset)
        ref_path = asset.ref_workfile_path
        dis_path = asset.dis_workfile_path
        w = quality_width
        h = quality_height
logger = self.logger
ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
|
677 | from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
def regression_cparam(data_set_path, C_param):
X,y = prepare_data(data_set_path)
retain_reg = LogisticRegression( C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
retain_reg.fit(X, y)
c_ext = '_c{:.3f}'.format(C_param)
save_regression_summary(data_set_path,retain_reg,ext=c_ext)
save_regression_model(data_set_path,retain_reg,ext=c_ext)
save_dataset_predictions(data_set_path,retain_reg,X,ext=c_ext)
|
696 | from operator import attrgetter
import logging
import os
import shutil
import subprocess
import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils
# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs and inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.
def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True):
delta = f"{outfile}.tmp.delta"
delta_1 = f"{outfile}.tmp.1delta"
subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True)
maxmatch_opt = "--maxmatch" if maxmatch else ""
commands = [
f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}",
f"delta-filter -1 {delta} > {delta_1}",
f"show-snps -rlTHC {delta_1} > {outfile}",
]
for command in commands:
logging.info("Start run command: " + command)
subprocess.check_output(command, shell=True)
logging.info("Finish run command: " + command)
os.unlink(delta)
os.unlink(delta_1)
def _run_dnadiff(
ref_fasta,
query_fasta,
outfile,
split_query=False,
debug=False,
threads=1,
maxmatch=True,
):
if not split_query:
_run_dnadiff_one_split(
ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch
)
else:
tmp_snp_files = []
seq_reader = pyfastaq.sequences.file_reader(query_fasta)
for seq in seq_reader:
prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}"
tmp_fasta = f"{prefix}.fasta"
with open(tmp_fasta, "w") as f:
print(seq, file=f)
snp_file = f"{prefix}.snps"
_run_dnadiff_one_split(
ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch
)
os.unlink(tmp_fasta)
tmp_snp_files.append(snp_file)
with open(outfile, "wb") as f_out:
for snp_file in tmp_snp_files:
with open(snp_file, "rb") as f_in:
shutil.copyfileobj(f_in, f_out)
if not debug:
os.unlink(snp_file)
def _snps_file_to_vcf(snps_file, query_fasta, outfile):
"""Loads the .snps file made by dnadiff.
query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
vcf_records = {}
variants = pymummer.snp_file.get_all_variants(snps_file)
query_seqs = utils.file_to_dict_of_seqs(query_fasta)
for variant in variants:
# If the variant is reversed, it means that either the ref or query had to be
# reverse complemented when aligned by mummer. Need to do the appropriate
# reverse (complement) fixes so the VCF has the correct REF and ALT sequences
if variant.reverse:
qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
qry_seq.revcomp()
variant.qry_base = "".join(reversed(qry_seq.seq))
ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
ref_seq.revcomp()
variant.ref_base = ref_seq.seq
if variant.var_type == pymummer.variant.SNP:
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
variant.qry_base,
variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_SNP",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.DEL:
# The query has sequence missing, compared to the
# reference. We're making VCF records w.r.t. the
# query, so this is an insertion. So need to
# get the nucleotide before the insertion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
query_seqs[variant.qry_name][variant.qry_start],
query_seqs[variant.qry_name][variant.qry_start]
+ variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_INS",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.INS:
# The ref has sequence missing, compared to the
# query. We're making VCF records w.r.t. the
# query, so this is a deletion. So need to
# get the nucleotide before the deletion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start),
".",
query_seqs[variant.qry_name][variant.qry_start - 1]
+ variant.qry_base,
query_seqs[variant.qry_name][variant.qry_start - 1],
".",
".",
"SVTYPE=DNADIFF_DEL",
"GT",
"1/1",
]
)
)
else:
raise Exception("Unknown variant type: " + str(variant))
assert (
new_record.REF
== query_seqs[new_record.CHROM][
new_record.POS : new_record.POS + len(new_record.REF)
]
)
if new_record.CHROM not in vcf_records:
vcf_records[new_record.CHROM] = []
vcf_records[new_record.CHROM].append(new_record)
for vcf_list in vcf_records.values():
vcf_list.sort(key=attrgetter("POS"))
with open(outfile, "w") as f:
print("##fileformat=VCFv4.2", file=f)
for seq in query_seqs.values():
print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
for key, vcf_list in sorted(vcf_records.items()):
for record in vcf_list:
print(record, file=f)
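# Illustrative sketch (not from the original module): how the indel branches
# above anchor records on the query. The helper and the example values are
# hypothetical; it mirrors the DEL case, where the query is missing `ref_base`
# relative to the reference, so the VCF record w.r.t. the query is an insertion
# anchored on the query base at qry_start (0-based).
def _example_insertion_ref_alt(query_seq, qry_start, ref_base):
    ref = query_seq[qry_start]
    alt = query_seq[qry_start] + ref_base
    return ref, alt
# e.g. _example_insertion_ref_alt("ACGT", 1, "TT") == ("C", "CTT")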
def make_truth_vcf(
ref_fasta,
truth_fasta,
outfile,
debug=False,
split_ref=False,
threads=1,
maxmatch=True,
):
snps_file = f"{outfile}.tmp.snps"
_run_dnadiff(
truth_fasta,
ref_fasta,
snps_file,
split_query=split_ref,
debug=debug,
threads=threads,
maxmatch=maxmatch,
)
_snps_file_to_vcf(snps_file, ref_fasta, outfile)
if not debug:
os.unlink(snps_file)
|
747 | import os
import numpy as np
import tensorflow as tf
def get_train_data(train_dir, batch_size):
train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
print('train_images', train_images.shape, 'train_labels', train_labels.shape)
dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)
return dataset_train
def get_val_data(val_dir):
test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
return dataset_test
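# Illustrative usage sketch (not in the original script): the same
# repeat/shuffle/batch pipeline as get_train_data, exercised on made-up arrays
# so it can run without the .npy files on disk.
def _example_batched_dataset(batch_size=4):
    images = np.zeros((10, 28, 28), dtype=np.float32)
    labels = np.zeros((10,), dtype=np.int64)
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    return dataset.repeat().shuffle(10000).batch(batch_size)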
|
837 | import SimpleXMLRPCServer
import sys
import logging
from K8055Controller import K8055Controller
logging.basicConfig()
controller_log = logging.getLogger("Controller")
class Controller:
def __init__(self):
self.k8055 = K8055Controller()
controller_log.debug("initialized")
def reset(self):
self.k8055.reset()
controller_log.debug("reset")
return 0
def turn_on(self, i):
self.k8055.turn_on(i)
controller_log.debug('turned on %i' % (i))
return 0
def turn_off(self, i):
self.k8055.turn_off(i)
controller_log.debug('turned off %i' % (i))
return 0
def set_analog(self, i, level):
if (i == 1):
self.k8055.set_analog1(level)
else:
self.k8055.set_analog2(level)
return 0
controller = Controller()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000))
server.register_instance(controller)
server.serve_forever()
|
845 | from freight.api.serializer import serialize
from freight.testutils import TestCase
class UserSerializerTest(TestCase):
def test_simple(self):
user = self.create_user()
result = serialize(user)
assert result["id"] == str(user.id)
assert result["name"] == user.name
|
855 | import os
import pytest
import torch
from hivemind import RemoteExpert
from hivemind.moe.server import background_server
CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py")
@pytest.mark.forked
def test_custom_expert(hid_dim=16):
with background_server(
expert_cls="perceptron",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = torch.randn(batch_size, hid_dim)
output0 = expert0(batch)
output1 = expert1(batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
@pytest.mark.forked
def test_multihead_expert(hid_dim=16):
with background_server(
expert_cls="multihead",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = (
torch.randn(batch_size, hid_dim),
torch.randn(batch_size, 2 * hid_dim),
torch.randn(batch_size, 3 * hid_dim),
)
output0 = expert0(*batch)
output1 = expert1(*batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
|
905 | from typing import Dict, Optional, List, Any
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
@Model.register("text_classifier")
class TextClassifier(Model):
"""
Implements a basic text classifier:
1) Embed tokens using `text_field_embedder`
2) Seq2SeqEncoder, e.g. BiLSTM
3) Append the first and last encoder states
4) Final feedforward layer
Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
text_encoder: Seq2SeqEncoder,
classifier_feedforward: FeedForward,
                 verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(TextClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.text_encoder = text_encoder
self.classifier_feedforward = classifier_feedforward
        self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)
self.label_accuracy = CategoricalAccuracy()
self.label_f1_metrics = {}
self.verbose_metrics = verbose_metrics
for i in range(self.num_classes):
self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
self.loss = torch.nn.CrossEntropyLoss()
self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
initializer(self)
@overrides
def forward(self,
text: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
text : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text = self.text_field_embedder(text)
mask = util.get_text_field_mask(text)
encoded_text = self.text_encoder(embedded_text, mask)
pooled = self.pool(encoded_text, mask)
ff_hidden = self.classifier_feedforward(pooled)
logits = self.prediction_layer(ff_hidden)
class_probs = F.softmax(logits, dim=1)
output_dict = {"logits": logits}
if label is not None:
loss = self.loss(logits, label)
output_dict["loss"] = loss
# compute F1 per label
for i in range(self.num_classes):
metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")]
metric(class_probs, label)
self.label_accuracy(logits, label)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
class_probabilities = F.softmax(output_dict['logits'], dim=-1)
output_dict['class_probs'] = class_probabilities
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metric_dict = {}
sum_f1 = 0.0
for name, metric in self.label_f1_metrics.items():
metric_val = metric.get_metric(reset)
if self.verbose_metrics:
metric_dict[name + '_P'] = metric_val[0]
metric_dict[name + '_R'] = metric_val[1]
metric_dict[name + '_F1'] = metric_val[2]
sum_f1 += metric_val[2]
names = list(self.label_f1_metrics.keys())
total_len = len(names)
average_f1 = sum_f1 / total_len
metric_dict['average_F1'] = average_f1
metric_dict['accuracy'] = self.label_accuracy.get_metric(reset)
return metric_dict
|
907 | import stl_path
class MyNDRPlugin():
def __init__(self):
pass
def pre_iteration(self, finding_max_rate, run_results=None, **kwargs):
""" Function ran before each iteration.
:parameters:
finding_max_rate: boolean
                    Indicates whether we are running for the first time, trying to find the max rate. If this is the case, the run_results will be None.
run_results: dict
A dictionary that contains the following keys:
queue_full_percentage: Percentage of packets that are queued.
drop_rate_percentage: Percentage of packets that were dropped.
rate_tx_bps: TX rate in bps.
rate_rx_bps: RX rate in bps.
tx_util: TX utilization percentage.
latency: Latency groups.
cpu_util: CPU utilization percentage.
tx_pps: TX in pps.
rx_pps: RX in pps.
tx_bps: TX in bps.
rx_bps: RX in bps.
bw_per_core: Bandwidth per core.
rate_p: Running rate in percentage out of max.
total_tx_L1: Total TX L1.
total_rx_L1: Total RX L1.
iteration: Description of iteration (not necessarily a number)
Pay attention: The rate is of the upcoming iteration. All the rest are of the previous iteration.
kwargs: dict
List of tunables passed as parameters.
"""
# Pre iteration function. This function will run before TRex transmits to the DUT.
# Could use this to better prepare the DUT, for example define shapers, policers, increase buffers and queues.
# You can receive tunables in the command line, through the kwargs argument.
pass
def post_iteration(self, finding_max_rate, run_results, **kwargs):
""" Function ran after each iteration.
:parameters:
finding_max_rate: boolean
Indicates whether we are running for the first time, trying to find the max rate. If this is the case, some values of run_results (like iteration for example) are not relevant.
run_results: dict
A dictionary that contains the following keys:
queue_full_percentage: Percentage of packets that are queued.
drop_rate_percentage: Percentage of packets that were dropped.
rate_tx_bps: TX rate in bps.
rate_rx_bps: RX rate in bps.
tx_util: TX utilization percentage.
latency: Latency groups.
cpu_util: CPU utilization percentage.
tx_pps: TX in pps.
rx_pps: RX in pps.
tx_bps: TX in bps.
rx_bps: RX in bps.
bw_per_core: Bandwidth per core.
rate_p: Running rate in percentage out of max.
total_tx_L1: Total TX L1.
total_rx_L1: Total RX L1.
iteration: Description of iteration (not necessarily a number)
kwargs: dict
List of tunables passed as parameters.
:returns:
bool: should stop the benchmarking or not.
"""
# Post iteration function. This function will run after TRex transmits to the DUT.
# Could use this to decide if to continue the benchmark after querying the DUT post run. The DUT might be overheated or any other thing that might make you want to stop the run.
# You can receive tunables in the command line, through the kwargs argument.
should_stop = False
return should_stop
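# Illustrative sketch (not part of the plugin interface described above): a
# hypothetical run_results dict of the shape listed in the docstrings, used to
# exercise post_iteration locally. All values are made up.
def _example_post_iteration_call():
    plugin = MyNDRPlugin()
    fake_results = {
        'queue_full_percentage': 0.0,
        'drop_rate_percentage': 0.1,
        'rate_tx_bps': 1e9,
        'rate_rx_bps': 1e9,
        'iteration': 'example',
    }
    # returns False by default, i.e. keep benchmarking
    return plugin.post_iteration(finding_max_rate=False, run_results=fake_results)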
# dynamic load of python module
def register():
    return MyNDRPlugin()
|
1014 | from lemur import database
def rotate_certificate(endpoint, new_cert):
"""
Rotates a certificate on a given endpoint.
:param endpoint:
:param new_cert:
:return:
"""
# ensure that certificate is available for rotation
endpoint.source.plugin.update_endpoint(endpoint, new_cert)
endpoint.certificate = new_cert
database.update(endpoint)
|
1017 | from gtrain import Model
import numpy as np
import tensorflow as tf
class NetForHypinv(Model):
"""
    Implementation of the crucial function for the HypINV algorithm.
    Warning: Do not use this class directly; implement a subclass instead, for example see FCNetForHypinv.
"""
def __init__(self, weights):
self.eval_session = None
self.grad_session = None
self.initial_x = None
self.center = None
self.weights = weights
self.out_for_eval = None #(going to be filled in build_for_eval method)
self.boundary_out_for_eval = None
self.trained_x = None
self.training_class_index = None
self.x = None # tf variable for inversion (going to be filled in build method)
self.x_for_eval = None
self.out = None
        self.boundary_out = None  # list of tf tensors, one per class, with the softmax output of class vs. others
self.loss = None
self.boundary_loss = None
self.t = None #target
self.boundary_t = None
self.x1 = None # this attribute is used of purposes of modified loss function
def __del__(self):
        # close all sessions
if self.eval_session:
self.eval_session.close()
if self.grad_session:
self.grad_session.close()
def set_initial_x(self, initial_x):
# sets starting point for the search of the closest point
self.initial_x = initial_x
def set_center(self, center):
# sets center point
self.center = center / np.linalg.norm(center)
def set_x1(self, x1):
        # sets x1, the point to which we want to find the closest point x0
self.x1 = x1
def has_modified_loss(self):
        pass  # returns True if the modified loss is used
def set_initial_x_in_session(self, x, session=None):
# sets initial x in certain session
if session is None:
self.set_initial_x(x)
else:
            pass  # override this method
def eval(self, x):
if len(x.shape) == 1:
x = x.reshape((1,len(x)))
if not self.eval_session:
self.eval_session = tf.Session()
with self.eval_session.as_default():
self.build_for_eval()
self.eval_session.run(tf.global_variables_initializer())
return self.eval_session.run(self.out_for_eval, {self.x_for_eval: x})
def boundary_eval(self, x, class_index):
        # evaluates the binary classification of class_index vs. the other classes
if not self.eval_session:
self.eval_session = tf.Session()
with self.eval_session.as_default():
self.build_for_eval()
self.eval_session.run(tf.global_variables_initializer())
return self.eval_session.run(self.boundary_out_for_eval[class_index], {self.x_for_eval: x})
def get_boundary_gradient(self, x, class_index):
# computes gradient of the boundary for specified class_index
if not self.grad_session:
self.grad_session = tf.Session()
with self.grad_session.as_default():
self.build_for_eval()
self.grad = list()
for i in range(len(self.weights[0][-1][0])):
self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0])
self.grad_x = self.x_for_eval
return self.grad_session.run(self.grad[class_index], {self.grad_x: x})
def build_for_eval(self):
# build model for evaluation
pass #override this method (fill self.out_for_eval)
def train_ended(self, session):
self.trained_x = session.run(self.x)
def build(self):
# build model for training
pass #override this method (fill self.x, self.out)
def set_train_class(self, class_index):
# sets class of the x1
self.training_class_index = class_index
# overided methods from gtrain.Model
def get_loss(self):
if self.training_class_index is None:
return self.loss
else:
return self.boundary_loss[self.training_class_index]
def get_hits(self):
return self.get_loss()
def get_count(self):
return self.get_loss()
def get_train_summaries(self):
return []
def get_dev_summaries(self):
return []
def get_placeholders(self):
if self.training_class_index is None:
return [self.t]
else:
return [self.boundary_t]
#________________________________________EXAMPLES_OF_NetForHypinv_CLASS_____________________________________________
class FCNetForHypinv(NetForHypinv):
"""
    Implementation of a multilayer perceptron to be used in the HypINV rule extraction algorithm
"""
def __init__(self, weights, function=tf.sigmoid, use_modified_loss=False, mu = 0.01):
"""
:param weights: saved as [list of weights for layers][0 weight, 1 bias]
:param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan
        :param use_modified_loss: whether the modified loss should be used
        :param mu: factor of the penalty terms that specify the distance between x0 and x1 and
            the distance of x1 from the boundary
"""
super(FCNetForHypinv, self).__init__(weights)
self.function = function
self.layer_sizes = [len(self.weights[0][0])]
for bias in weights[1]:
self.layer_sizes.append(len(bias))
self.num_classes = self.layer_sizes[-1]
self.initial_x = np.zeros([1, self.layer_sizes[0]])
self.use_modified_loss = use_modified_loss
self.mu = mu
def build(self):
with tf.name_scope("Input"):
if self.center is not None:
self.point_weights = tf.Variable(self.center.reshape((1, len(self.center))),
dtype=tf.float64, trainable=False, name="Boundary_point")
init_factor = self.center
init_factor[init_factor!=0] = self.initial_x[init_factor!=0] / self.center[init_factor!=0]
self.factor = tf.Variable(init_factor.reshape((1, len(self.center))),
dtype=tf.float64, name="factor")
else:
self.point_weights = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))),
dtype=tf.float64, trainable=False, name="Boundary_point")
self.factor = tf.Variable(np.ones((1, len(self.center))),
dtype=tf.float64, name="factor")
self.x = self.point_weights * self.factor
with tf.name_scope("Target"):
if self.use_modified_loss:
x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64)
self.t = tf.placeholder(tf.float64, shape=[None, self.num_classes], name="Target_output")
self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output")
with tf.name_scope("FC_net"):
flowing_x = self.x
for i, _ in enumerate(self.weights[0]):
with tf.name_scope("layer_{}".format(i)):
W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64)
b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64)
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b))
y = flowing_x
self.out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out = list()
for i in range(self.num_classes):
                mask = np.ones(self.num_classes, dtype=bool)
mask[i] = False
x0 = self.out[:,i]
x1 = tf.reduce_max(tf.boolean_mask(self.out, mask, axis=1), axis=1)
s = x0+x1
out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out.append(out)
with tf.name_scope("Loss_functions"):
self.loss = tf.reduce_mean(
tf.nn.l2_loss(self.out-self.t),
name="loss")
with tf.name_scope("Binary_class_loss"):
self.boundary_loss = list()
if self.use_modified_loss:
for i in range(self.num_classes):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) +
self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant))
)
else:
for i in range(self.num_classes):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t))
)
def set_initial_x_in_session(self, x, session=None):
if session is None:
self.set_initial_x(x)
else:
if self.center is None:
session.run([
self.point_weights.assign(x.reshape((1, len(x)))),
self.factor.assign(np.ones((1, len(x))))
])
else:
init_factor = self.center
init_factor[init_factor!=0] = x[init_factor!=0] / self.center[init_factor!=0]
session.run(self.factor.assign(init_factor.reshape((1,len(init_factor)))))
def build_for_eval(self):
with tf.name_scope("eInput"):
self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point")
with tf.name_scope("eFC_net"):
flowing_x = self.x_for_eval
for i, _ in enumerate(self.weights[0]):
W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i))
b = tf.constant(self.weights[1][i], name="eBias_{}".format(i))
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i))
y = flowing_x
self.out_for_eval = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out_for_eval = list()
for i in range(self.num_classes):
                mask = np.ones(self.num_classes, dtype=bool)
mask[i] = False
x0 = self.out_for_eval[:, i]
x1 = tf.reduce_max(tf.boolean_mask(self.out_for_eval, mask, axis=1), axis=1)
s = x0+x1
out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out_for_eval.append(out)
def has_modified_loss(self):
return self.use_modified_loss
def name(self):
return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes]))
class FCNetForHypinvBinary(FCNetForHypinv):
"""
    Implementation of a multilayer perceptron to be used in the HypINV rule extraction algorithm.
    The task is simplified to the binary classification of base_class_index against the other classes.
"""
def __init__(self, weights, base_class_index, function=tf.sigmoid, use_modified_loss=False, mu = 0.01):
"""
:param weights: saved as [list of weights for layers][0 weight, 1 bias]
:param base_class_index: an index of the class which is used as the base class
:param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan
        :param use_modified_loss: whether the modified loss should be used
        :param mu: factor of the penalty terms that specify the distance between x0 and x1 and
            the distance of x1 from the boundary
"""
super(FCNetForHypinvBinary, self).__init__(weights)
self.base_class_index = base_class_index
self.function = function
self.layer_sizes = [len(self.weights[0][0])]
for bias in weights[1]:
self.layer_sizes.append(len(bias))
self.num_classes = self.layer_sizes[-1]
self.initial_x = np.zeros([1, self.layer_sizes[0]])
self.use_modified_loss = use_modified_loss
self.mu = mu
def build(self):
with tf.name_scope("Input"):
self.init_point = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))),
dtype=tf.float64, trainable=False, name="Boundary_point")
self.factor = tf.Variable(np.ones((1, len(self.initial_x))),
dtype=tf.float64, name="factor")
self.x = self.init_point * self.factor
with tf.name_scope("Target"):
if self.use_modified_loss:
x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64)
self.t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_output")
self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output")
with tf.name_scope("FC_net"):
flowing_x = self.x
for i, _ in enumerate(self.weights[0]):
with tf.name_scope("layer_{}".format(i)):
W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64)
b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64)
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b))
y = flowing_x
full_out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out = list()
            mask = np.ones(self.num_classes, dtype=bool)
mask[self.base_class_index] = False
x0 = full_out[:,self.base_class_index]
x1 = tf.reduce_max(tf.boolean_mask(full_out, mask, axis=1), axis=1)
s = x0+x1
self.out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out.append(self.out)
self.boundary_out.append(tf.stack([x1/s, x0/s], axis=1))
with tf.name_scope("Loss_functions"):
self.loss = tf.reduce_mean(
tf.nn.l2_loss(self.out-self.t),
name="loss")
with tf.name_scope("Binary_class_loss"):
self.boundary_loss = list()
if self.use_modified_loss:
for i in range(2):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) +
self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant))
)
else:
for i in range(2):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t))
)
def build_for_eval(self):
with tf.name_scope("eInput"):
self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point")
with tf.name_scope("eFC_net"):
flowing_x = self.x_for_eval
for i, _ in enumerate(self.weights[0]):
W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i))
b = tf.constant(self.weights[1][i], name="eBias_{}".format(i))
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i))
y = flowing_x
full_out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out_for_eval = list()
            mask = np.ones(self.num_classes, dtype=bool)
mask[self.base_class_index] = False
x0 = full_out[:, self.base_class_index]
x1 = tf.reduce_max(tf.boolean_mask(full_out, mask, axis=1), axis=1)
s = x0+x1
self.out_for_eval = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out_for_eval.append(self.out_for_eval)
self.boundary_out_for_eval.append(tf.stack([x1/s, x0/s], axis=1))
def get_boundary_gradient(self, x, class_index):
if not self.grad_session:
self.grad_session = tf.Session()
with self.grad_session.as_default():
self.build_for_eval()
self.grad = list()
for i in range(2):
self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0])
self.grad_x = self.x_for_eval
return self.grad_session.run(self.grad[class_index], {self.grad_x: x})
def has_modified_loss(self):
return self.use_modified_loss
def name(self):
return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes]))
|
1018 | from ..factory import Method
class getChatMember(Method):
chat_id = None # type: "int53"
user_id = None # type: "int32"
|
1023 | import numpy as np
from sklearn.metrics import roc_curve, auc
def compute_error_auc(op_str, gt, pred, prob):
# classification error
    pred_int = (pred > prob).astype(int)
class_acc = (pred_int == gt).mean() * 100.0
# ROC - area under curve
fpr, tpr, thresholds = roc_curve(gt, pred)
roc_auc = auc(fpr, tpr)
    print(op_str, ', class acc = %.3f, ROC AUC = %.3f' % (class_acc, roc_auc))
#return class_acc, roc_auc
def calc_average_precision(recall, precision):
precision[np.isnan(precision)] = 0
recall[np.isnan(recall)] = 0
# pascal'12 way
mprec = np.hstack((0, precision, 0))
mrec = np.hstack((0, recall, 1))
for ii in range(mprec.shape[0]-2, -1,-1):
mprec[ii] = np.maximum(mprec[ii], mprec[ii+1])
inds = np.where(np.not_equal(mrec[1:], mrec[:-1]))[0]+1
ave_prec = ((mrec[inds] - mrec[inds-1])*mprec[inds]).sum()
return ave_prec
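# Illustrative usage sketch (not in the original file): PASCAL'12-style AP on a
# tiny made-up curve. With recall [0.5, 1.0] and precision [1.0, 0.5] the
# interpolated average precision is 0.5*1.0 + 0.5*0.5 = 0.75.
def _example_average_precision():
    recall = np.array([0.5, 1.0])
    precision = np.array([1.0, 0.5])
    return calc_average_precision(recall, precision)  # -> 0.75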
def remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size):
# this filters out predictions and gt that are close to the end
# this is a bit messy because of the shapes of gt_pos_o
nms_pos = []
nms_prob = []
gt_pos = []
for ii in range(len(nms_pos_o)):
valid_time = durations[ii] - win_size
gt_cur = gt_pos_o[ii]
if gt_cur.shape[0] > 0:
gt_pos.append(gt_cur[:, 0][gt_cur[:, 0] < valid_time][..., np.newaxis])
else:
gt_pos.append(gt_cur)
valid_preds = nms_pos_o[ii] < valid_time
nms_pos.append(nms_pos_o[ii][valid_preds])
nms_prob.append(nms_prob_o[ii][valid_preds, 0][..., np.newaxis])
return nms_pos, nms_prob, gt_pos
def prec_recall_1d(nms_pos_o, nms_prob_o, gt_pos_o, durations, detection_overlap, win_size, remove_eof=True):
"""
nms_pos, nms_prob, and gt_pos are lists of numpy arrays specifying detection
position, detection probability and GT position.
Each list entry is a different file.
Each entry in nms_pos is an array of length num_entries. For nms_prob and
gt_pos its an array of size (num_entries, 1).
durations is a array of the length of the number of files with each entry
containing that file length in seconds.
detection_overlap determines if a prediction is counted as correct or not.
win_size is used to ignore predictions and ground truth at the end of an
audio file.
returns
precision: fraction of retrieved instances that are relevant.
recall: fraction of relevant instances that are retrieved.
"""
if remove_eof:
# filter out the detections in both ground truth and predictions that are too
# close to the end of the file - dont count them during eval
nms_pos, nms_prob, gt_pos = remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size)
else:
nms_pos = nms_pos_o
nms_prob = nms_prob_o
gt_pos = gt_pos_o
# loop through each file
true_pos = [] # correctly predicts the ground truth
false_pos = [] # says there is a detection but isn't
for ii in range(len(nms_pos)):
num_preds = nms_pos[ii].shape[0]
if num_preds > 0: # check to make sure it contains something
num_gt = gt_pos[ii].shape[0]
# for each set of predictions label them as true positive or false positive (i.e. 1-tp)
tp = np.zeros(num_preds)
distance_to_gt = np.abs(gt_pos[ii].ravel()-nms_pos[ii].ravel()[:, np.newaxis])
within_overlap = (distance_to_gt <= detection_overlap)
# remove duplicate detections - assign to valid detection with highest prob
for jj in range(num_gt):
inds = np.where(within_overlap[:, jj])[0] # get the indices of all valid predictions
if inds.shape[0] > 0:
max_prob = np.argmax(nms_prob[ii][inds])
selected_pred = inds[max_prob]
within_overlap[selected_pred, :] = False
tp[selected_pred] = 1 # set as true positives
true_pos.append(tp)
false_pos.append(1 - tp)
# calc precision and recall - sort confidence in descending order
# PASCAL style
conf = np.concatenate(nms_prob)[:, 0]
num_gt = np.concatenate(gt_pos).shape[0]
inds = np.argsort(conf)[::-1]
true_pos_cat = np.concatenate(true_pos)[inds].astype(float)
false_pos_cat = np.concatenate(false_pos)[inds].astype(float) # i.e. 1-true_pos_cat
if (conf == conf[0]).sum() == conf.shape[0]:
# all the probability values are the same therefore we will not sweep
# the curve and instead will return a single value
true_pos_sum = true_pos_cat.sum()
false_pos_sum = false_pos_cat.sum()
recall = np.asarray([true_pos_sum / float(num_gt)])
precision = np.asarray([(true_pos_sum / (false_pos_sum + true_pos_sum))])
elif inds.shape[0] > 0:
# otherwise produce a list of values
true_pos_cum = np.cumsum(true_pos_cat)
false_pos_cum = np.cumsum(false_pos_cat)
recall = true_pos_cum / float(num_gt)
precision = (true_pos_cum / (false_pos_cum + true_pos_cum))
return precision, recall
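# Illustrative usage sketch (not in the original file): one made-up audio file,
# two detections and one ground-truth event. The first detection lands within
# the 0.5 s overlap window, so precision ends at [1.0, 0.5] and recall at
# [1.0, 1.0].
def _example_prec_recall_1d():
    nms_pos = [np.array([1.0, 5.0])]        # predicted positions in seconds
    nms_prob = [np.array([[0.9], [0.4]])]   # (num_entries, 1) probabilities
    gt_pos = [np.array([[1.2]])]            # (num_entries, 1) ground truth
    durations = np.array([10.0])
    return prec_recall_1d(nms_pos, nms_prob, gt_pos, durations,
                          detection_overlap=0.5, win_size=1.0)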
|
1044 | from typing import List
from pybm import PybmConfig
from pybm.command import CLICommand
from pybm.config import get_reporter_class
from pybm.exceptions import PybmError
from pybm.reporters import BaseReporter
from pybm.status_codes import ERROR, SUCCESS
from pybm.util.path import get_subdirs
class CompareCommand(CLICommand):
"""
Report benchmark results from specified sources.
"""
usage = "pybm compare <run> <anchor-ref> <compare-refs> [<options>]\n"
def __init__(self):
super(CompareCommand, self).__init__(name="compare")
self.config = PybmConfig.load()
def add_arguments(self):
self.parser.add_argument(
"run",
type=str,
metavar="<run>",
help="Benchmark run to report results for. "
"To report the preceding run, use the "
'"latest" keyword. To report results '
"of the n-th preceding run "
"(i.e., n runs ago), "
'use the "latest^{n}" syntax.',
)
self.parser.add_argument(
"refs",
nargs="+",
metavar="<refs>",
help="Benchmarked refs to compare. The first "
"given ref will be treated as the "
"anchor ref, relative to which all "
"differences are reported. An error is "
"raised if any of the given "
"refs are not present in the run.",
)
reporter: BaseReporter = get_reporter_class(config=self.config)
reporter_args = reporter.additional_arguments()
if reporter_args:
reporter_name = self.config.get_value("reporter.name")
reporter_group_desc = (
f"Additional options from configured reporter class {reporter_name!r}"
)
reporter_group = self.parser.add_argument_group(reporter_group_desc)
# add builder-specific options into the group
for arg in reporter_args:
reporter_group.add_argument(arg.pop("flags"), **arg)
def run(self, args: List[str]) -> int:
if not args:
self.parser.print_help()
return ERROR
self.add_arguments()
options = self.parser.parse_args(args)
reporter: BaseReporter = get_reporter_class(config=self.config)
# TODO: Parse run to fit schema
run = options.run
refs: List[str] = options.refs
result_dir = reporter.result_dir
# TODO: Make this dynamic to support other run identifiers
result = sorted(get_subdirs(result_dir))[-1]
result_path = result_dir / result
if result_path.exists():
reporter.compare(
*refs,
result=result,
target_filter=options.target_filter,
benchmark_filter=options.benchmark_filter,
context_filter=options.context_filter,
)
else:
raise PybmError(
f"No benchmark results found for the requested run {run!r}."
)
return SUCCESS
|
1057 | import network
def conncb(task):
print("[{}] Connected".format(task))
def disconncb(task):
print("[{}] Disconnected".format(task))
def subscb(task):
print("[{}] Subscribed".format(task))
def pubcb(pub):
print("[{}] Published: {}".format(pub[0], pub[1]))
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="<PASSWORD>", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# secure connection requires more memory and may not work
# mqtts = network.mqtt("eclipse", "mqtts://iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb)
mqtt.start()
#mqtt.config(lwt_topic='status', lwt_msg='Disconected')
'''
# Wait until status is: (1, 'Connected')
mqtt.subscribe('test')
mqtt.publish('test', 'Hi from Micropython')
mqtt.stop()
'''
# ==================
# ThingSpeak example
# ==================
import network
import utime
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
# or secure connection
#thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
thingspeakChannelId = "123456" # enter Thingspeak Channel ID
thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey" # EDIT - enter Thingspeak Write API Key
thingspeakFieldNo = 1
thingSpeakChanelFormat = "json"
pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey)
pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChanelFormat, thingspeakChannelWriteApiKey)
subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
thing.start()
tmo = 0
while thing.status()[0] != 2:
utime.sleep_ms(100)
tmo += 1
if tmo > 80:
print("Not connected")
break
# subscribe to channel
thing.subscribe(subchan)
# subscribe to field
thing.subscribe(subfield)
# publish to channel
# Payload can include any of these fields, separated by ';':
# "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value"
thing.publish(pubchan, "field1=25.2;status=On line")
# Publish to field
thing.publish(pubfield, "24.5")
|
import math
import torch
from torch import autograd
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3]
)
grad, = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
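# Illustrative usage sketch (not part of the original snippet): the penalty
# needs fake_img to be differentiable w.r.t. latents, so this made-up example
# maps a (batch, n_latent, dim) latent tensor to a tiny 4D image tensor first.
def _example_path_regularize():
    latents = torch.randn(2, 4, 8, requires_grad=True)
    weight = torch.randn(32, 16)
    fake_img = (latents.reshape(2, 32) @ weight).view(2, 1, 4, 4)
    return g_path_regularize(fake_img, latents, mean_path_length=0.0)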
|
1104 | def init():
global brightness
global calibration_mode
brightness = 500
calibration_mode = False
|
1116 | def solution(A):
total = sum(A)
m = float('inf')
left_sum = 0
for n in A[:-1]:
left_sum += n
v = abs(total - 2*left_sum)
if v < m:
m = v
return m
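# Illustrative check (not in the original snippet): for [3, 1, 2, 4, 3] the best
# split is 3+1+2 = 6 against 4+3 = 7, so the minimal difference is 1.
assert solution([3, 1, 2, 4, 3]) == 1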
|
1148 | description = 'PGAA setup with XYZOmega sample table'
group = 'basic'
sysconfig = dict(
datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)
includes = [
'system',
'reactor',
'nl4b',
'pressure',
'sampletable',
'pilz',
'detector',
'collimation',
]
devices = dict(
mcasink = device('nicos_mlz.pgaa.devices.MCASink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
settypes = {'point'},
),
)
startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
printinfo("============================================================")
printinfo("Welcome to the NICOS PGAI demo setup.")
printinfo("============================================================")
"""
|
1160 | import io
import logging
import json
import numpy
import torch
import numpy as np
from tqdm import tqdm
from clie.inputters import constant
from clie.objects import Sentence
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
logger = logging.getLogger(__name__)
def load_word_embeddings(file):
embeddings_index = {}
fin = io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
for i, line in tqdm(enumerate(fin), total=n):
tokens = line.rstrip().split(' ')
v = numpy.array(tokens[1:], dtype=float)
embeddings_index[tokens[0]] = v
return embeddings_index
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def load_data(filename, src_lang, tgt_lang, knn_file,
knn_size, max_examples=-1):
examples = []
wrong_subj_pos, wrong_obj_pos = 0, 0
with open(filename) as f:
data = json.load(f)
knn_dict = None
if knn_file:
with open(knn_file) as f:
knn_dict = json.load(f)
for idx, ex in enumerate(tqdm(data, total=len(data))):
sentence = Sentence(ex['id'])
sentence.language = src_lang
sentence.words = ex['token']
sentence.pos = ex['stanford_pos']
sentence.ner = ex['stanford_ner']
sentence.deprel = ex['stanford_deprel']
sentence.head = [int(x) for x in ex['stanford_head']]
sentence.subj_type = ex['subj_type']
sentence.obj_type = ex['obj_type']
sentence.relation = ex['relation']
if ex['subj_end'] - ex['subj_start'] < 0:
# we swap the start and end index
wrong_subj_pos += 1
sentence.subject = [ex['subj_end'], ex['subj_start']]
else:
sentence.subject = [ex['subj_start'], ex['subj_end']]
if ex['obj_end'] - ex['obj_start'] < 0:
# we swap the start and end index
wrong_obj_pos += 1
sentence.object = [ex['obj_end'], ex['obj_start']]
else:
sentence.object = [ex['obj_start'], ex['obj_end']]
# store KNN word info
if knn_dict:
sentence.tgt_lang = tgt_lang
knn_words = []
for w in ex['token']:
w = '!{}_{}'.format(src_lang, w)
if w in knn_dict:
assert len(knn_dict[w]) == knn_size
knn_words.append(knn_dict[w])
else:
knn_words.append([constant.UNK_WORD] * knn_size)
sentence.knn_words = knn_words
examples.append(sentence)
if max_examples != -1 and len(examples) > max_examples:
break
if wrong_subj_pos > 0 or wrong_obj_pos > 0:
logger.info('{} and {} wrong subject and object positions found!'.format(
wrong_subj_pos, wrong_obj_pos))
return examples
def vectorize(ex, model, iseval):
"""Torchify a single example."""
words = ['!{}_{}'.format(ex.language, w) for w in ex.words]
words = [model.word_dict[w] for w in words]
knn_word = None
if ex.knn_words:
knn_word = [[model.word_dict[w] for w in knn]
for knn in ex.knn_words]
knn_word = torch.LongTensor(knn_word)
word = torch.LongTensor(words)
pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])
ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])
deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])
assert any([x == 0 for x in ex.head])
head = torch.LongTensor(ex.head)
subj_position = torch.LongTensor(ex.subj_position)
obj_position = torch.LongTensor(ex.obj_position)
type = [0] * len(ex.words)
ttype = model.type_dict[ex.subj_type]
start, end = ex.subject
type[start: end + 1] = [ttype] * (end - start + 1)
atype = model.type_dict[ex.obj_type]
start, end = ex.object
type[start: end + 1] = [atype] * (end - start + 1)
type = torch.LongTensor(type)
return {
'id': ex.id,
'language': ex.language,
'word': word,
'pos': pos,
'ner': ner,
'deprel': deprel,
'type': type,
'head': head,
'subject': ex.subj_text,
'object': ex.obj_text,
'subject_pos': subj_position,
'object_pos': obj_position,
'relation': model.label_dict[ex.relation],
'knn_word': knn_word
}
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
# batch is a list of vectorized examples
batch_size = len(batch)
ids = [ex['id'] for ex in batch]
language = [ex['language'] for ex in batch]
use_knn = batch[0]['knn_word'] is not None
# NOTE. batch[0]['knn_word'] is a 2d list
knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0
# --------- Prepare Code tensors ---------
max_len = max([ex['word'].size(0) for ex in batch])
# Batch Code Representations
len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)
word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
labels = torch.LongTensor(batch_size)
subject = []
object = []
knn_rep = None
if use_knn:
knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)
for i, ex in enumerate(batch):
len_rep[i] = ex['word'].size(0)
labels[i] = ex['relation']
word_rep[i, :len_rep[i]] = ex['word']
head_rep[i, :len_rep[i]] = ex['head']
subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']
object_pos_rep[i, :len_rep[i]] = ex['object_pos']
pos_rep[i, :len_rep[i]] = ex['pos']
ner_rep[i, :len_rep[i]] = ex['ner']
deprel_rep[i, :len_rep[i]] = ex['deprel']
type_rep[i, :len_rep[i]] = ex['type']
subject.append(ex['subject'])
object.append(ex['object'])
if use_knn:
knn_rep[i, :len_rep[i]] = ex['knn_word']
return {
'ids': ids,
'language': language,
'batch_size': batch_size,
'len_rep': len_rep,
'word_rep': word_rep,
'knn_rep': knn_rep,
'head_rep': head_rep,
'subject': subject,
'object': object,
'subject_pos_rep': subject_pos_rep,
'object_pos_rep': object_pos_rep,
'labels': labels,
'pos_rep': pos_rep,
'ner_rep': ner_rep,
'deprel_rep': deprel_rep,
'type_rep': type_rep
}
class ACE05Dataset(Dataset):
def __init__(self, examples, model, evaluation=False):
self.model = model
self.examples = examples
self.evaluation = evaluation
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.model,
iseval=self.evaluation)
def lengths(self):
return [len(ex.words) for ex in self.examples]
class SortedBatchSampler(Sampler):
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l, np.random.random()) for l in self.lengths],
dtype=[('l1', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
if self.shuffle:
np.random.shuffle(batches)
return iter([i for batch in batches for i in batch])
def __len__(self):
return len(self.lengths)
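# Added usage sketch (not part of the original pipeline): one way the pieces
# above could be wired together with a PyTorch DataLoader. `train_exs` and
# `model` are assumed to exist already; the batch size is illustrative.
#
#   from torch.utils.data import DataLoader
#
#   dataset = ACE05Dataset(train_exs, model, evaluation=False)
#   sampler = SortedBatchSampler(dataset.lengths(), batch_size=32, shuffle=True)
#   loader = DataLoader(dataset, batch_size=32, sampler=sampler,
#                       collate_fn=batchify)
#   for batch in loader:
#       word_rep, labels = batch['word_rep'], batch['labels']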
|
1161 | from distutils.extension import Extension
cmdclass = {}
try:
# with Cython
from Cython.Build import build_ext
cmdclass["build_ext"] = build_ext
module_src = "cgranges/python/cgranges.pyx"
except ImportError: # without Cython
module_src = "cgranges/python/cgranges.c"
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{
"ext_modules": [
Extension(
"cgranges",
sources=[module_src, "cgranges/cgranges.c"],
depends=[
"cgranges/cgranges.h",
"cgranges/khash.h",
"cgranges/python/cgranges.pyx"
],
include_dirs=["cgranges"]
)
],
"cmdclass": cmdclass
}
)
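# Added note (sketch): this follows the poetry-style build-script convention,
# where the build frontend imports this module and calls build() with the
# keyword arguments it will later pass to setuptools.setup(); the dict is
# mutated in place, e.g.
#
#   setup_kwargs = {}
#   build(setup_kwargs)
#   # setup_kwargs now contains "ext_modules" and "cmdclass"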
|
1206 | def unlock(m):
return m.lower().translate(
str.maketrans(
'abcdefghijklmnopqrstuvwxyz',
'22233344455566677778889999'
)
)
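# Added examples: each letter maps to its phone-keypad digit, any other
# character passes through unchanged, e.g.
#   unlock("FLOWERS")        -> "3569377"
#   unlock("1-800-Flowers")  -> "1-800-3569377"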
|
1319 | import ssg.utils
def preprocess(data, lang):
data["arg_name_value"] = data["arg_name"] + "=" + data["arg_value"]
if lang == "oval":
# escape dot, this is used in oval regex
data["escaped_arg_name_value"] = data["arg_name_value"].replace(".", "\\.")
# replace . with _, this is used in test / object / state ids
data["sanitized_arg_name"] = ssg.utils.escape_id(data["arg_name"])
return data
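# Added example (sketch): for data = {"arg_name": "net.ipv4.ip_forward",
# "arg_value": "0"} and lang == "oval", the returned dict gains
#   arg_name_value         == "net.ipv4.ip_forward=0"
#   escaped_arg_name_value == "net\.ipv4\.ip_forward=0"
#   sanitized_arg_name     == ssg.utils.escape_id("net.ipv4.ip_forward")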
|
1342 | from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
from beast.env.ReadEnvFile import read_env_file
from beast.util import Terminal
Terminal.CAN_CHANGE_COLOR = False
JSON = """
{
"FOO": "foo",
"BAR": "bar bar bar",
"CPPFLAGS": "-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT"
}"""
ENV = """
# An env file.
FOO=foo
export BAR="bar bar bar"
CPPFLAGS=-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT
# export BAZ=baz should be ignored.
"""
RESULT = {
'FOO': 'foo',
'BAR': 'bar bar bar',
'CPPFLAGS': '-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT',
}
BAD_ENV = ENV + """
This line isn't right.
NO SPACES IN NAMES="valid value"
"""
class test_ReadEnvFile(TestCase):
def test_read_json(self):
self.assertEqual(read_env_file(JSON), RESULT)
def test_read_env(self):
self.assertEqual(read_env_file(ENV), RESULT)
def test_read_env_error(self):
errors = []
self.assertEqual(read_env_file(BAD_ENV, errors.append), RESULT)
self.assertEqual(errors, [
"WARNING: Didn't understand the following environment file lines:",
"11. >>> This line isn't right.",
'12. >>> NO SPACES IN NAMES="valid value"'])
|
1436 | import requests
from bs4 import BeautifulSoup
import urllib.request
import os
import random
import time
def html(url):
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "]
user_agent = random.choice(user_agents)
headers = {
'User-Agent': user_agent,
'Accept-Encoding': 'gzip'}
req = requests.get(url=url, headers=headers)
html_doc = req.text
soup = BeautifulSoup(html_doc, "html.parser")
times = soup.select("time")
views = soup.select("p.label-key > b")
active_str = str(views[2])
active = active_str[active_str.find("title=\"") + 7:active_str.find("Z")]
answers = soup.select("#answers-header > div > h2 >span")
question_content = soup.select("div.post-text")
tags = soup.select("#question > div.post-layout > div.postcell.post-layout--right > "
"div.post-taglist.grid.gs4.gsy.fd-column > div >a")
title = soup.select("h1 >a")
tags_str = ""
item = []
for tag in tags:
tags_str += tag.get_text() + ","
answer_contetnts = []
for i in range(1, len(question_content)):
answer_contetnts.append(question_content[i])
for i in range(len(times)):
if len(times[i].get_text()) > 1:
asked_time = times[i].get("datetime").replace("T", " ")
item.append(title[
0].get_text()) # title views answersnum asked_time tag_str active_time quest_content_ text answer_content_list
item.append(views[1].get_text())
item.append(answers[0].get_text())
item.append(asked_time)
item.append(tags_str)
item.append(active)
item.append(question_content[0])
item.append(answer_contetnts)
print(item)
# updatetosql(item)
def updatetosql(item):
ansers_text = "[split]".join(item[7])
updatesql = "UPDATE `t_stackoverflow_question` " \
"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' " \
"WHERE (`question_id`='%s') " \
% (item[4], item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],)
pass
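# Added sketch (assumption): updatetosql() above only builds the SQL string.
# If it were actually executed, a parameterized query through a DB-API driver
# such as pymysql would avoid the quoting problems of plain string formatting:
#
#   sql = ("UPDATE `t_stackoverflow_question` "
#          "SET `tags`=%s, `views`=%s, `answers_num`=%s, `asked_time`=%s, "
#          "`last_active_time`=%s, `question_content`=%s, `answers_contetnt`=%s "
#          "WHERE `question_id`=%s")
#   cursor.execute(sql, (item[4], item[1], item[2], item[3],
#                        item[5], item[6], ansers_text, item[0]))
#   connection.commit()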
if __name__ == '__main__':
html("https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404")
|
1469 | import numpy as np
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
class OCROnObjects():
def __init__(self, license_plate):
character_objects = self.identify_boundary_objects(license_plate)
self.get_regions(character_objects, license_plate)
def identify_boundary_objects(self, a_license_plate):
labelImage = measure.label(a_license_plate)
character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
regionLists = regionprops(labelImage)
return regionLists
def get_regions(self, character_objects, a_license_plate):
"""
Maps out the regions where the license plate characters are,
using connected component analysis and labelling.
Parameters:
-----------
a_license_plate: 2D numpy binary image of the license plate
Returns:
--------
a dictionary with:
fullscale: 3D array containing a 2D array for each character
columnsVal: 1D array of the starting column of each character
coordinates: bounding-box coordinates of each character
"""
cord = []
counter=0
column_list = []
character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
for regions in character_objects:
minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox
character_height = maximumRow - minimumRow
character_width = maximumCol - minimumCol
roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol]
if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth:
if counter == 0:
samples = resize(roi, (20,20))
cord.append(regions.bbox)
counter += 1
elif counter == 1:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
counter+=1
else:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
column_list.append(minimumCol)
if len(column_list) == 0:
self.candidates = {}
else:
self.candidates = {
'fullscale': samples,
'coordinates': np.array(cord),
'columnsVal': column_list
}
return self.candidates
|
1485 | from paddle.vision.transforms import (
ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose,
HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *
import paddle
# supernet training, based on the PaddleSlim model compression toolkit
# https://github.com/PaddlePaddle/PaddleSlim (stars welcome)
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils
channel_list = []
for i in range(1, 21):
if 0 < i <= 7:
# channel_list.append(random.choice([ 4, 8, 12, 16]))
channel_list.append(16)
elif 7 < i <= 13:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
channel_list.append(32)
elif 13 < i <= 19:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
else:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
net = ResNet20(100, channel_list)
net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))
channel_optional = []
for i in range(0, 23):
if i <= 7:
channel_optional.append([4, 8, 12, 16])
# channel_optional.append([12, 16])
elif 7 < i <= 14:
channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
# channel_optional.append([20, 24, 28, 32])
elif 14 < i <= 21:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
else:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
distill_config = DistillConfig(teacher_model=net2)
sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)
ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')
model = paddle.Model(ofa_net)
MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'
model.prepare(
paddle.optimizer.Momentum(
learning_rate=LinearWarmup(
CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
momentum=MOMENTUM,
parameters=model.parameters(),
weight_decay=WEIGHT_DECAY),
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
transforms = Compose([
RandomCrop(32, padding=4),
RandomApply(BrightnessTransform(0.1)),
RandomApply(ContrastTransform(0.1)),
RandomHorizontalFlip(),
RandomRotation(15),
ToArray(),
Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), paddle.callbacks.VisualDL('vis_logs/ofa_resnet20')]
model.fit(
train_set,
test_set,
epochs=MAX_EPOCH,
batch_size=BATCH_SIZE,
save_dir='checkpoints',
save_freq=100,
shuffle=True,
num_workers=4,
verbose=1,
callbacks=callbacks,
)
|
1525 | import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
def Max(x, y):
if (x >= y):
return x
else:
return y
def polygons_to_mask(img_shape, polygons):
mask = np.zeros(img_shape, dtype=np.uint8)
mask = Image.fromarray(mask)
xy = list(map(tuple, polygons))
ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
mask = np.array(mask, dtype=bool)
return mask
def mask2box(mask):
index = np.argwhere(mask == 1)
rows = index[:, 0]
clos = index[:, 1]
left_top_r = np.min(rows)
left_top_c = np.min(clos)
right_bottom_r = np.max(rows)
right_bottom_c = np.max(clos)
return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]
def get_bbox(points, h, w):
polygons = points
mask = polygons_to_mask([h,w], polygons)
return mask2box(mask)
def get_scale(points, img, lowest):
bbox = get_bbox(points, img.shape[0], img.shape[1])
diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
len_per_pix = plate_diameter/float(diameter)
avg = 0
k = 0
for point in points:
avg += img[point[1]][point[0]]
k += 1
avg = avg/float(k)
depth = lowest - avg
depth_per_pix = plate_depth/depth
return len_per_pix, depth_per_pix
def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
volume = 0.0
bbox = get_bbox(points, img.shape[0], img.shape[1])
points = np.array(points)
shape = points.shape
points = points.reshape(shape[0], 1, shape[1])
for i in range(bbox[0], bbox[2]+1):
for j in range(bbox[1], bbox[3]+1):
if (cv2.pointPolygonTest(points, (i,j), False) >= 0):
volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
return volume
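# Added note on the arithmetic above (sketch): for each pixel (i, j) inside the
# food polygon, (lowest - img[j][i]) is its depth in depth-image units;
# depth_per_pix converts that to cm, plate_thickness is subtracted, and
# len_per_pix**2 is the area of one pixel in cm^2, so each term is a small
# column volume in cm^3. For example, with len_per_pix = 0.1 cm/px,
# depth_per_pix = 0.05 cm/unit and a depth of 30 units:
#   max(0, 30 * 0.05 - 0.2) * 0.1 * 0.1 = 1.3 * 0.01 = 0.013 cm^3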
def get_volume(img, json_path):
lowest = np.max(img)
vol_dict = {}
#print(lowest)
len_per_pix = 0.0
depth_per_pix = 0.0
with open(json_path, 'r') as json_file:
data = json.load(json_file)
for shape in data['shapes']:
if (shape['label'] == "plate"):
len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
#print(len_per_pix, depth_per_pix)
break
for shape in data['shapes']:
label = shape['label']
if (label == "plate"):
continue
points = shape['points']
volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
if (label in vol_dict):
vol_dict[label] += volume
else:
vol_dict[label] = volume
return vol_dict
img = cv2.imread("out.png",0)
print(get_volume(img, "test.json"))
|
1529 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
'CNTK\'s CPU version is not fully optimized,'
'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradients as a symbolic op; to hook up with the keras
# model we will create each gradient as a constant placeholder, and use this
# global map to keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def learning_phase():
# If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor
return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('CNTK Backend: Set learning phase '
'with value %s is not supported, '
'expected 0 or 1.' % value)
_LEARNING_PHASE = value
def clear_session():
"""Reset learning phase flag for cntk backend.
"""
global _LEARNING_PHASE
global _LEARNING_PHASE_PLACEHOLDER
_LEARNING_PHASE = -1
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def in_train_phase(x, alt, training=None):
global _LEARNING_PHASE
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
# CNTK currently doesn't support a cond op, so here we use the
# element_select approach as a workaround. It may have a perf
# issue; we will resolve it later with a cntk cond op.
if callable(x) and isinstance(x, C.cntk_py.Function) is False:
x = x()
if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
alt = alt()
if training is True:
x._uses_learning_phase = uses_learning_phase
return x
else:
# if _LEARNING_PHASE is static
if isinstance(training, int) or isinstance(training, bool):
result = x if training == 1 or training is True else alt
else:
result = C.element_select(training, x, alt)
result._uses_learning_phase = uses_learning_phase
return result
def in_test_phase(x, alt, training=None):
return in_train_phase(alt, x, training=training)
def _convert_string_dtype(dtype):
# cntk only support float32 and float64
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
else:
# cntk only runs with float, so try to
# cast to float to run the model
return np.float32
def _convert_dtype_string(dtype):
if dtype == np.float32:
return 'float32'
elif dtype == np.float64:
return 'float64'
else:
raise ValueError('CNTK Backend: Unsupported dtype: %s. '
'CNTK only supports float32 and '
'float64.' % dtype)
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
"""
if dtype is None:
dtype = floatx()
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
# we don't support initializing a parameter with a symbolic op, so eval it
# first as a workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# TODO: remove the conversion when cntk supports int32, int64
# https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
dtype = 'float32' if 'int' in str(dtype) else dtype
v = C.parameter(shape=shape,
init=value,
dtype=dtype,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
v.constraint = constraint
return v
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
dims = len(x.shape)
if dims > 0 and x.shape[0] == C.InferredDimension:
dims -= 1
bias_dims = len(bias.shape)
if bias_dims != 1 and bias_dims != dims:
raise ValueError('Unexpected bias dimensions %d, '
'expected 1 or %d dimensions' % (bias_dims, dims))
if dims == 4:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1, 1)
else:
shape = (bias.shape[3],) + bias.shape[:3]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 3:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1)
else:
shape = (bias.shape[2],) + bias.shape[:2]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 2:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1)
else:
shape = (bias.shape[1],) + bias.shape[:1]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, bias.shape[0])
else:
shape = bias.shape
else:
shape = bias.shape
return x + reshape(bias, shape)
def eval(x):
if isinstance(x, C.cntk_py.Function):
return x.eval()
elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):
return x.value
else:
raise ValueError('CNTK Backend: `eval` method on '
'`%s` type is not supported. '
'CNTK only supports `eval` with '
'`Function`, `Constant` or '
'`Parameter`.' % type(x))
def placeholder(
shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
dynamic_axis_num=1):
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension
cntk_shape = [dynamic_dimension if s is None else s for s in shape]
cntk_shape = tuple(cntk_shape)
if dynamic_axis_num > len(cntk_shape):
raise ValueError('CNTK backend: creating placeholder with '
'%d dimension is not supported, at least '
'%d dimensions are needed.'
% (len(cntk_shape), dynamic_axis_num))
if name is None:
name = ''
cntk_shape = cntk_shape[dynamic_axis_num:]
x = C.input(
shape=cntk_shape,
dtype=_convert_string_dtype(dtype),
is_sparse=sparse,
name=name)
x._keras_shape = shape
x._uses_learning_phase = False
x._cntk_placeholder = True
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder
def is_keras_tensor(x):
if not is_tensor(x):
raise ValueError('Unexpectedly found an instance of type `' +
str(type(x)) + '`. '
'Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
def is_tensor(x):
return isinstance(x, (C.variables.Constant,
C.variables.Variable,
C.variables.Parameter,
C.ops.functions.Function))
def shape(x):
shape = list(int_shape(x))
num_dynamic = _get_dynamic_axis_num(x)
non_dyn_shape = []
for i in range(len(x.shape)):
if shape[i + num_dynamic] is None:
non_dyn_shape.append(x.shape[i])
else:
non_dyn_shape.append(shape[i + num_dynamic])
return shape[:num_dynamic] + non_dyn_shape
def is_sparse(tensor):
return tensor.is_sparse
def int_shape(x):
if hasattr(x, '_keras_shape'):
return x._keras_shape
shape = x.shape
if hasattr(x, 'dynamic_axes'):
dynamic_shape = [None for a in x.dynamic_axes]
shape = tuple(dynamic_shape) + shape
return shape
def ndim(x):
shape = int_shape(x)
return len(shape)
def _prepare_name(name, default):
prefix = '_'.join(NAME_SCOPE_STACK)
if name is None or name == '':
return prefix + '/' + default
return prefix + '/' + name
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = floatx()
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = C.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = const.shape
const._uses_learning_phase = False
return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
# use numpy workaround now
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
np.random.seed(seed)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
size = 1
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
size *= _
binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
return variable(value=binomial, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high,
dtype=None, name=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
scale = (high - low) / 2
p = C.parameter(
shape,
init=C.initializer.uniform(
scale,
seed=seed),
dtype=dtype,
name=name)
return variable(value=p.value + low + scale)
def random_normal_variable(
shape,
mean,
scale,
dtype=None,
name=None,
seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
return C.parameter(
shape=shape,
init=C.initializer.normal(
scale=scale,
seed=seed),
dtype=dtype,
name=name)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
# how to apply mean and stddev
return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
return C.parameter(
shape, init=C.initializer.truncated_normal(
stddev, seed=seed), dtype=dtype)
def dtype(x):
return _convert_dtype_string(x.dtype)
def zeros(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)
def ones(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.ones(shape, ctype), dtype=dtype, name=name)
def eye(size, dtype=None, name=None):
if dtype is None:
dtype = floatx()
return variable(np.eye(size), dtype, name)
def zeros_like(x, dtype=None, name=None):
return x * 0
def ones_like(x, dtype=None, name=None):
return zeros_like(x) + 1
def count_params(x):
for _ in x.shape:
if _ == C.InferredDimension or _ == C.FreeDimension:
raise ValueError('CNTK backend: `count_params` with dynamic '
'shape is not supported. Please provide '
'fixed dimension instead of `None`.')
return np.prod(int_shape(x))
def cast(x, dtype):
# cntk calculates everything in float, so there is no need to cast from bool / int
return x
def dot(x, y):
if len(x.shape) > 2 or len(y.shape) > 2:
y_shape = int_shape(y)
if len(y_shape) > 2:
permutation = [len(y_shape) - 2]
permutation += list(range(len(y_shape) - 2))
permutation += [len(y_shape) - 1]
y = C.transpose(y, perm=permutation)
return C.times(x, y, len(y_shape) - 1)
else:
return C.times(x, y)
def batch_dot(x, y, axes=None):
x_shape = int_shape(x)
y_shape = int_shape(y)
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [len(x_shape) - 1, len(y_shape) - 2]
if b_any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
if len(x_shape) == 2 and len(y_shape) == 2:
if axes[0] == axes[1]:
result = sum(x * y, axis=axes[0], keepdims=True)
return result if axes[0] == 1 else transpose(result)
else:
return sum(x * transpose(y), axis=axes[0], keepdims=True)
else:
if len(y_shape) == 2:
y = expand_dims(y)
normalized_axis = []
normalized_axis.append(_normalize_axis(axes[0], x)[0])
normalized_axis.append(_normalize_axis(axes[1], y)[0])
# transpose
i = normalized_axis[0]
while i < len(x.shape) - 1:
x = C.swapaxes(x, i, i + 1)
i += 1
i = normalized_axis[1]
while i > 0:
y = C.swapaxes(y, i, i - 1)
i -= 1
result = C.times(x, y, output_rank=(len(y.shape) - 1)
if len(y.shape) > 1 else 1)
if len(y_shape) == 2:
result = squeeze(result, -1)
return result
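# Added shape example (sketch) for batch_dot above: with x of keras shape
# (batch, 3, 4), y of shape (batch, 4, 5) and axes=None, the default axes
# become [2, 1] (last axis of x against the second-to-last axis of y), so the
# result has shape (batch, 3, 5), matching tf.batch_matmul behaviour.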
def transpose(x):
return C.swapaxes(x, 0, 1)
def gather(reference, indices):
# There is a bug in the cntk gather op which may cause a crash.
# We have made a fix, but it is not included in the CNTK 2.1 release.
# Will switch to the gather op in the next release.
if _get_cntk_version() >= 2.2:
return C.ops.gather(reference, indices)
else:
num_classes = reference.shape[0]
one_hot_matrix = C.ops.one_hot(indices, num_classes)
return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1)
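# Added note (sketch) on the one-hot fallback above: gathering rows can be
# written as a matrix product with a one-hot selector. For
#   reference = [[1, 2], [3, 4], [5, 6]] and indices = [2, 0],
# one_hot(indices, 3) is [[0, 0, 1], [1, 0, 0]], and
# one_hot(indices, 3) @ reference == [[5, 6], [1, 2]], i.e. rows 2 and 0.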
def _remove_dims(x, axis, keepdims=False):
if keepdims is False and isinstance(axis, list):
# sequence axis is removed by default, so don't need reshape on it
reduce_axes = []
for a in axis:
if isinstance(a, C.Axis) is False:
reduce_axes.append(a)
return _reshape_dummy_dim(x, reduce_axes)
else:
if isinstance(axis, list):
has_seq = False
for a in axis:
if isinstance(a, C.Axis):
has_seq = True
break
if has_seq:
nones = _get_dynamic_axis_num(x)
x = expand_dims(x, nones)
return x
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_max')
return _remove_dims(output, axis, keepdims)
def min(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_min')
return _remove_dims(output, axis, keepdims)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_sum')
return _remove_dims(output, axis, keepdims)
def prod(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_prod')
return _remove_dims(output, axis, keepdims)
def logsumexp(x, axis=None, keepdims=False):
return log(sum(exp(x), axis=axis, keepdims=keepdims))
def var(x, axis=None, keepdims=False):
m = mean(x, axis, keepdims=True)
devs_squared = C.square(x - m)
return mean(devs_squared, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
return C.sqrt(var(x, axis=axis, keepdims=keepdims))
def expand_dims(x, axis=-1):
shape = list(int_shape(x))
nones = _get_dynamic_axis_num(x)
index = axis if axis >= 0 else len(shape) + 1
shape.insert(index, 1)
new_shape = shape[nones:]
new_shape = tuple(
[C.InferredDimension if _ is None else _ for _ in new_shape])
result = C.reshape(x, new_shape)
if index < nones:
result._keras_shape = shape
return result
def squeeze(x, axis):
if isinstance(axis, tuple):
axis = list(axis)
if not isinstance(axis, list):
axis = [axis]
shape = list(int_shape(x))
_axis = []
for _ in axis:
if isinstance(_, int):
_axis.append(_ if _ >= 0 else _ + len(shape))
if len(_axis) == 0:
return x
nones = _get_dynamic_axis_num(x)
for _ in sorted(_axis, reverse=True):
del shape[_]
new_shape = shape[nones:]
new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in new_shape])
return C.reshape(x, new_shape)
def tile(x, n):
if isinstance(n, int):
n = (n,)
elif isinstance(n, list):
n = tuple(n)
shape = int_shape(x)
num_dynamic_axis = _get_dynamic_axis_num(x)
# Padding the axis
if len(n) < len(shape):
n = tuple([1 for _ in range(len(shape) - len(n))]) + n
if len(n) != len(shape):
raise NotImplementedError
i = num_dynamic_axis
for i, rep in enumerate(n):
if i >= num_dynamic_axis and shape[i] is not None:
tmp = [x] * rep
x = C.splice(*tmp, axis=i - num_dynamic_axis)
i += 1
return x
def _normalize_axis(axis, x):
shape = int_shape(x)
ndim = len(shape)
nones = _get_dynamic_axis_num(x)
if nones > ndim:
raise ValueError('CNTK Backend: tensor with keras shape: `%s` has '
'%d cntk dynamic axis, this is not expected, please '
'double check the keras shape history.' % (str(shape), nones))
# Current cntk does not support shapes like (1, batch), so we use this
# workaround to map to the correct axis. We will remove this trick once
# native cntk ops add support.
cntk_axis = []
dynamic_axis_index = 0
for i in range(ndim):
if shape[i] is None and dynamic_axis_index < nones:
cntk_axis.append(x.dynamic_axes[dynamic_axis_index])
dynamic_axis_index += 1
else:
cntk_axis.append(i - dynamic_axis_index)
if dynamic_axis_index < nones:
i = 0
while dynamic_axis_index < nones:
cntk_axis[i] = x.dynamic_axes[dynamic_axis_index]
i += 1
dynamic_axis_index += 1
while i < len(cntk_axis):
cntk_axis[i] -= nones
i += 1
if isinstance(axis, tuple):
_axis = list(axis)
elif isinstance(axis, int):
_axis = [axis]
elif isinstance(axis, list):
_axis = list(axis)
else:
_axis = axis
if isinstance(_axis, list):
for i, a in enumerate(_axis):
if a is not None and a < 0:
_axis[i] = (a % ndim)
if _axis[i] is not None:
_axis[i] = cntk_axis[_axis[i]]
else:
if _axis is None:
_axis = C.Axis.all_axes()
return _axis
def _reshape_dummy_dim(x, axis):
shape = list(x.shape)
_axis = [_ + len(shape) if _ < 0 else _ for _ in axis]
if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1:
result = x
for index in sorted(_axis, reverse=True):
result = C.reshape(result,
shape=(),
begin_axis=index,
end_axis=index + 1)
return result
else:
for index in sorted(_axis, reverse=True):
del shape[index]
shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]
return C.reshape(x, shape)
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_mean')
return _remove_dims(output, axis, keepdims)
def any(x, axis=None, keepdims=False):
reduce_result = sum(x, axis, keepdims=keepdims)
any_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(any_matrix)
else:
return any_matrix
def all(x, axis=None, keepdims=False):
reduce_result = prod(x, axis, keepdims=keepdims)
all_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(all_matrix)
else:
return all_matrix
def classification_error(target, output, axis=-1):
return C.ops.reduce_mean(
C.equal(
argmax(
output,
axis=-1),
argmax(
target,
axis=-1)),
axis=C.Axis.all_axes())
def argmax(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmax(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def argmin(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmin(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def square(x):
return C.square(x)
def abs(x):
return C.abs(x)
def sqrt(x):
return C.sqrt(x)
def exp(x):
return C.exp(x)
def log(x):
return C.log(x)
def round(x):
return C.round(x)
def sigmoid(x):
return C.sigmoid(x)
def sign(x):
return x / C.abs(x)
def pow(x, a):
return C.pow(x, a)
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
if min_value is None:
min_value = -np.inf
return C.clip(x, min_value, max_value)
def binary_crossentropy(target, output, from_logits=False):
if from_logits:
output = C.sigmoid(output)
output = C.clip(output, epsilon(), 1.0 - epsilon())
output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output)
return output
def get_variable_shape(x):
return int_shape(x)
def update(x, new_x):
return C.assign(x, new_x)
def moving_average_update(variable, value, momentum):
return C.assign(variable, variable * momentum + value * (1. - momentum))
def update_add(x, increment):
result = x + increment
return C.assign(x, result)
def gradients(loss, variables):
# cntk does not support gradients as a symbolic op; to hook up with the
# keras model we will return a constant as a placeholder, and the cntk
# learner will apply the gradient during training.
global grad_parameter_dict
if isinstance(variables, list) is False:
variables = [variables]
grads = []
for v in variables:
g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
grads.append(g)
grad_parameter_dict[g] = v
return grads
def equal(x, y):
return C.equal(x, y)
def not_equal(x, y):
return C.not_equal(x, y)
def greater(x, y):
return C.greater(x, y)
def greater_equal(x, y):
return C.greater_equal(x, y)
def less(x, y):
return C.less(x, y)
def less_equal(x, y):
return C.less_equal(x, y)
def maximum(x, y):
return C.element_max(x, y)
def minimum(x, y):
return C.element_min(x, y)
def sin(x):
return C.sin(x)
def cos(x):
return C.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
if beta is None:
if gamma is None:
beta = zeros_like(x)
else:
beta = zeros_like(gamma)
mean, variant = _moments(x, _normalize_axis(reduction_axes, x))
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
normalized = batch_normalization(
x, mean, variant, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
x_shape = int_shape(x)
# skip the batch axis
for axis in range(1, ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
if ndim(gamma) > axis:
gamma = C.reduce_mean(gamma, axis - 1)
beta = C.reduce_mean(beta, axis - 1)
else:
target_shape.append(x_shape[axis])
broadcast_mean = C.reshape(mean, target_shape)
broadcast_var = C.reshape(variant, target_shape)
broadcast_gamma = C.reshape(gamma, target_shape)
broadcast_beta = C.reshape(beta, target_shape)
normalized = batch_normalization(
x,
broadcast_mean,
broadcast_var,
broadcast_beta,
broadcast_gamma,
epsilon)
return normalized, mean, variant
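# Added note (sketch) for _moments below: with shift = stop_gradient(E[x]) it
# uses the shifted-moments identity
#   Var[x] = E[(x - shift)^2] - (E[x - shift])^2
#   E[x]   = E[x - shift] + shift
# which is exactly what the reduce_mean / minus / square sequence computes.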
def _moments(x, axes=None, shift=None, keep_dims=False):
_axes = tuple(axes)
if shift is None:
shift = x
# Compute true mean while keeping the dims for proper broadcasting.
for axis in _axes:
shift = C.reduce_mean(shift, axis=axis)
shift = C.stop_gradient(shift)
shifted_mean = C.minus(x, shift)
for axis in _axes:
shifted_mean = C.reduce_mean(shifted_mean, axis=axis)
variance_mean = C.square(C.minus(x, shift))
for axis in _axes:
variance_mean = C.reduce_mean(variance_mean, axis=axis)
variance = C.minus(variance_mean, C.square(shifted_mean))
mean = C.plus(shifted_mean, shift)
if not keep_dims:
mean = squeeze(mean, _axes)
variance = squeeze(variance, _axes)
return mean, variance
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
# The mean / var / beta / gamma may have been broadcast, so they may have
# an extra batch axis of 1; that axis is not needed in cntk, so remove
# those dummy axes.
if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
mean = _reshape_dummy_dim(mean, [0])
if ndim(var) == ndim(x) and shape(var)[0] == 1:
var = _reshape_dummy_dim(var, [0])
if gamma is None:
gamma = ones_like(var)
elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
gamma = _reshape_dummy_dim(gamma, [0])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
beta = _reshape_dummy_dim(beta, [0])
return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta
def concatenate(tensors, axis=-1):
if len(tensors) == 0:
return None
axis = [axis]
axis = _normalize_axis(axis, tensors[0])
return C.splice(*tensors, axis=axis[0])
def flatten(x):
return reshape(x, (-1,))
def reshape(x, shape):
shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape])
if isinstance(x, C.variables.Parameter):
return C.reshape(x, shape)
else:
num_dynamic_axis = _get_dynamic_axis_num(x)
if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1:
# collapse axis with batch axis
if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(
_ == C.FreeDimension for _ in x.shape):
warnings.warn(
'Warning: CNTK backend does not support '
'collapse of batch axis with inferred dimension. '
'The reshape did not take place.')
return x
return _reshape_batch(x, shape)
else:
# no collapse, so first we need to pad the shape
if num_dynamic_axis >= len(shape):
i = 0
while i < len(shape):
if shape[i] is None or shape[i] == -1:
i += 1
else:
break
shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape
new_shape = list(shape)
new_shape = new_shape[num_dynamic_axis:]
new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape]
return C.reshape(x, new_shape)
def permute_dimensions(x, pattern):
dims = len(int_shape(x))
num_dynamic_axis = _get_dynamic_axis_num(x)
if isinstance(pattern, list):
current_layout = [i for i in range(dims)]
else:
current_layout = tuple([i for i in range(dims)])
if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]:
raise ValueError('CNTK backend: the permute pattern %s '
'requested permute on dynamic axis, '
'which is not supported. Please do permute '
'on static axis.' % pattern)
axis = list(pattern)
axis = axis[num_dynamic_axis:]
axis = _normalize_axis(axis, x)
return C.transpose(x, axis)
def resize_images(x, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def repeat_elements(x, rep, axis):
axis = _normalize_axis(axis, x)
axis = axis[0]
slices = []
shape = x.shape
i = 0
while i < shape[axis]:
tmp = C.ops.slice(x, axis, i, i + 1)
for _ in range(rep):
slices.append(tmp)
i += 1
return C.splice(*slices, axis=axis)
def repeat(x, n):
# this is a workaround for the recurrent layer:
# if n is an inferred dimension,
# we can't figure out how to repeat it in cntk now,
# so return the same x and rely on cntk's broadcast feature
# to make the recurrent layer work.
# This needs to be fixed in GA.
if n is C.InferredDimension or n is C.FreeDimension:
return x
index = 1 - _get_dynamic_axis_num(x)
if index < 0 or index > 1:
raise NotImplementedError
new_shape = list(x.shape)
new_shape.insert(index, 1)
new_shape = tuple(new_shape)
x = C.reshape(x, new_shape)
temp = [x] * n
return C.splice(*temp, axis=index)
def tanh(x):
return C.tanh(x)
def _static_rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
uses_learning_phase = False
if dims < 3:
raise ValueError('Input should be at least 3D.')
# if the second axis is a static axis, CNTK will unroll by default
if shape[1] is None:
raise ValueError('CNTK Backend: the input of static rnn '
'has shape `%s`, the second axis '
'is not static. If you want to run '
'rnn with non-static axis, please try '
'dynamic rnn with sequence axis.' % shape)
if constants is None:
constants = []
if mask is not None:
mask_shape = int_shape(mask)
if len(mask_shape) == dims - 1:
mask = expand_dims(mask)
nones = _get_dynamic_axis_num(inputs)
states = tuple(initial_states)
outputs = []
time_axis = 1 - nones if nones > 0 else 1
if go_backwards:
i = shape[1] - 1
while i >= 0:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, time_axis)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, time_axis)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states
i -= 1
else:
i = 0
while i < shape[1]:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, 1)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, 1)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states[:len(states)]
i += 1
i = 1
# add the time_step axis back
final_output = expand_dims(outputs[0], 1)
last_output = outputs[0]
while i < len(outputs):
# add the time_step axis back
output_slice = expand_dims(outputs[i], 1)
final_output = C.splice(final_output, output_slice, axis=time_axis)
last_output = outputs[i]
i += 1
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, states
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
global uses_learning_phase
uses_learning_phase = False
if dims < 3:
raise ValueError('CNTK Backend: the input of rnn has only rank %d '
'Need at least rank 3 to run RNN.' % dims)
if _get_dynamic_axis_num(inputs) == 0 or unroll:
return _static_rnn(
step_function,
inputs,
initial_states,
go_backwards,
mask,
constants,
unroll,
input_length)
if constants is None:
constants = []
num_time_step = shape[1]
if num_time_step is None and not has_seq_axis(inputs):
num_time_step = inputs.shape[0]
initial = []
for s in initial_states:
if _get_dynamic_axis_num(s) == 0:
if hasattr(C, 'to_batch'):
initial.append(C.to_batch(s))
else:
initial.append(C.user_function(ConvertToBatch(s)))
else:
initial.append(s)
need_convert = not has_seq_axis(inputs)
if go_backwards and need_convert is False:
raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with '
'variable-length sequences. Please specify a '
'static length for your sequences.')
rnn_inputs = inputs
if need_convert:
if go_backwards:
rnn_inputs = reverse(rnn_inputs, 1)
rnn_inputs = C.to_sequence(rnn_inputs)
rnn_constants = []
for constant in constants:
if isinstance(constant, list):
new_c = []
for c in constant:
if _get_dynamic_axis_num(c) == 1:
new_c.append(C.sequence.broadcast_as(c, rnn_inputs))
else:
new_c.append(c)
rnn_constants.append(new_c)
else:
if _get_dynamic_axis_num(constant) == 1:
rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs))
else:
rnn_constants.append(constant)
else:
rnn_constants = constants
if mask is not None and not has_seq_axis(mask):
if go_backwards:
mask = reverse(mask, 1)
if len(int_shape(mask)) == 2:
mask = expand_dims(mask)
mask = C.to_sequence_like(mask, rnn_inputs)
states = tuple(initial)
with C.default_options(axis_offset=1):
def _recurrence(x, states, m):
# create place holder
place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states]
past_values = []
for s, p in zip(states, place_holders):
past_values.append(C.sequence.past_value(p, s))
new_output, new_states = step_function(
x, tuple(past_values) + tuple(rnn_constants))
if getattr(new_output, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
if m is not None:
new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)]
n_s = []
for o, p in zip(new_states, place_holders):
n_s.append(o.replace_placeholders({p: o.output}))
if len(n_s) > 0:
new_output = n_s[0]
return new_output, n_s
final_output, final_states = _recurrence(rnn_inputs, states, mask)
last_output = C.sequence.last(final_output)
last_states = [C.sequence.last(s) for s in final_states]
if need_convert:
final_output = C.sequence.unpack(final_output, 0, no_mask_output=True)
if num_time_step is not None and num_time_step is not C.FreeDimension:
final_output = _reshape_sequence(final_output, num_time_step)
f_stats = []
for l_s, i_s in zip(last_states, initial_states):
if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1:
if hasattr(C, 'unpack_batch'):
f_stats.append(C.unpack_batch(l_s))
else:
f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0])))
else:
f_stats.append(l_s)
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, f_stats
def has_seq_axis(x):
return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1
def l2_normalize(x, axis=None):
axis = [axis]
axis = _normalize_axis(axis, x)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
return x / norm
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
x = C.clip(x, 0.0, 1.0)
return x
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel.shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
kernel = C.swapaxes(kernel, 0, 2)
padding = _preprocess_border_mode(padding)
strides = [strides]
x = C.convolution(
kernel,
x,
strides=tuple(strides),
auto_padding=[
False,
padding])
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
return x
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding])
else:
assert dilation_rate[0] == dilation_rate[1]
assert strides == (1, 1), 'Invalid strides for dilated convolution'
x = C.convolution(
kernel,
x,
strides=dilation_rate[0],
auto_padding=[
False,
padding,
padding])
return _postprocess_conv2d_output(x, data_format)
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
padding='valid', data_format=None, dilation_rate=1):
raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
return _postprocess_conv2d_output(x, data_format)
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding],
groups=x.shape[0])
return _postprocess_conv2d_output(x, data_format)
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = strides + (strides[0],)
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding])
return _postprocess_conv3d_output(x, data_format)
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
# in keras2 we need to handle the output shape in a different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[3]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
shape[3] = output_shape[2]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv3d_output(x, data_format)
def pool2d(x, pool_size, strides=(1, 1),
padding='valid', data_format=None,
pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
strides = strides
pool_size = pool_size
x = _preprocess_conv2d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv2d_output(x, data_format)
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv3d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv3d_output(x, data_format)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = C.relu(-x)
x = C.relu(x)
if max_value is not None:
x = C.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def dropout(x, level, noise_shape=None, seed=None):
if level < 0. or level >= 1:
raise ValueError('CNTK Backend: Invalid dropout level %s, '
'must be in interval [0, 1].' % level)
return C.dropout(x, level)
def batch_flatten(x):
# cntk's batch axis is not part of the shape,
# so just flatten all the dims in x.shape
dim = np.prod(x.shape)
x = C.reshape(x, (-1,))
x._keras_shape = (None, dim)
return x
def softmax(x, axis=-1):
return C.softmax(x, axis=axis)
def softplus(x):
return C.softplus(x)
def softsign(x):
return x / (1 + C.abs(x))
def categorical_crossentropy(target, output, from_logits=False):
if from_logits:
result = C.cross_entropy_with_softmax(output, target)
# cntk's result shape is (batch, 1), while keras expect (batch, )
return C.reshape(result, ())
else:
# scale preds so that the class probas of each sample sum to 1
output /= C.reduce_sum(output, axis=-1)
# avoid numerical instability with epsilon clipping
output = C.clip(output, epsilon(), 1.0 - epsilon())
return -sum(target * C.log(output), axis=-1)
def sparse_categorical_crossentropy(target, output, from_logits=False):
target = C.one_hot(target, output.shape[-1])
target = C.reshape(target, output.shape)
return categorical_crossentropy(target, output, from_logits)
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.placeholders = inputs
self.trainer = None
self.unrelated_updates = None
self.updates = updates
if len(updates) > 0:
assert len(outputs) > 0
self.loss = outputs[0]
# need group update by gradient place holder
u_ops = []
unrelated_updates = []
for update in updates:
if isinstance(update, tuple):
if len(update) != 2:
raise NotImplementedError
else:
u = C.assign(update[0], update[1])
else:
u = update
if len(u.arguments) == 0:
u_ops.append(u)
else:
unrelated_updates.append(u)
update_func = C.combine([u.output for u in u_ops])
grads = update_func.find_all_with_name('keras_grad_placeholder')
u_list = []
p_list = []
for g in grads:
if g in grad_parameter_dict:
p_list.append(grad_parameter_dict[g])
u_list.append(g)
else:
raise ValueError(
'CNTK backend: when constructing trainer, '
'found gradient node `%s` which is not '
'related to any parameters in the model. '
'Please double check how the gradient node '
'is constructed.' % g)
if len(u_list) > 0:
learner = C.cntk_py.universal_learner(p_list, u_list, update_func)
criterion = (
outputs[0],
outputs[1]) if len(outputs) > 1 else (
outputs[0],
)
self.trainer = C.trainer.Trainer(
outputs[0], criterion, [learner])
self.trainer_output = tuple([f.output for f in criterion])
elif len(u_ops) > 0:
unrelated_updates.extend(u_ops)
if len(unrelated_updates) > 0:
self.unrelated_updates = C.combine([_.output for _ in unrelated_updates])
if self.trainer is None:
self.metrics_outputs = [f.output for f in outputs]
self.metrics_func = C.combine(self.metrics_outputs)
        # CNTK's trainer can only handle the loss and one metric; when there
        # are more than two outputs, the extra metrics need manual evaluation
elif len(outputs) > 2:
self.metrics_outputs = [f.output for f in outputs[2:]]
self.metrics_func = C.combine(self.metrics_outputs)
else:
self.metrics_func = None
@staticmethod
def _is_input_shape_compatible(input, placeholder):
if hasattr(input, 'shape') and hasattr(placeholder, 'shape'):
num_dynamic = get_num_dynamic_axis(placeholder)
input_shape = input.shape[num_dynamic:]
placeholder_shape = placeholder.shape
for i, p in zip(input_shape, placeholder_shape):
if i != p and p != C.InferredDimension and p != C.FreeDimension:
return False
return True
def __call__(self, inputs):
global _LEARNING_PHASE_PLACEHOLDER
global _LEARNING_PHASE
assert isinstance(inputs, (list, tuple))
feed_dict = {}
for tensor, value in zip(self.placeholders, inputs):
            # CNTK only supports computation on floats, so auto-cast here
if (hasattr(value, 'dtype') and
value.dtype != np.float32 and
value.dtype != np.float64):
value = value.astype(np.float32)
if tensor == _LEARNING_PHASE_PLACEHOLDER:
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value)
else:
                # the current version of CNTK cannot handle variable-length
                # inputs; support is planned for a later release
if not self._is_input_shape_compatible(value, tensor):
raise ValueError('CNTK backend: The placeholder has been resolved '
'to shape `%s`, but input shape is `%s`. Currently '
'CNTK can not take variable length inputs. Please '
'pass inputs that have a static shape.'
% (str(tensor.shape), str(value.shape)))
feed_dict[tensor] = value
updated = []
if self.trainer is not None:
input_dict = {}
for argument in self.loss.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: argument %s is not found in inputs. '
'Please double check the model and inputs in '
'`train_function`.' % argument.name)
result = self.trainer.train_minibatch(
input_dict, self.trainer_output)
assert(len(result) == 2)
outputs = result[1]
for o in self.trainer_output:
updated.append(outputs[o])
if self.metrics_func is not None:
input_dict = {}
for argument in self.metrics_func.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: metrics argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
            # Some ops (like dropout) are not applied during "eval" in CNTK;
            # they are only evaluated in the training phase. To make them work,
            # call the "forward" method so CNTK knows we want to evaluate them.
            # However, assign ops are not executed in this mode, which is why
            # this check is needed.
if (self.unrelated_updates is None and
(_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)):
_, output_values = self.metrics_func.forward(
input_dict,
self.metrics_func.outputs,
(self.metrics_func.outputs[0],),
as_numpy=False)
else:
output_values = self.metrics_func.eval(input_dict, as_numpy=False)
if isinstance(output_values, dict):
for o in self.metrics_outputs:
value = output_values[o]
v = value.asarray()
updated.append(v)
else:
v = output_values.asarray()
for o in self.metrics_outputs:
updated.append(v)
if self.unrelated_updates is not None:
input_dict = {}
for argument in self.unrelated_updates.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: assign ops argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
self.unrelated_updates.eval(input_dict, as_numpy=False)
return updated
def function(inputs, outputs, updates=[], **kwargs):
return Function(inputs, outputs, updates=updates, **kwargs)
def temporal_padding(x, padding=(1, 1)):
assert len(padding) == 2
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if num_dynamic_axis > 0:
assert len(base_shape) == 2
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[padding, (0, 0)])
else:
x = _padding(x, padding, 0)
else:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[(0, 0), padding, (0, 0)])
else:
x = _padding(x, padding, 1)
return x
def _padding(x, pattern, axis):
base_shape = x.shape
if b_any([dim < 0 for dim in base_shape]):
raise ValueError('CNTK Backend: padding input tensor with '
'shape `%s` contains non-specified dimension, '
'which is not supported. Please give fixed '
'dimension to enable padding.' % base_shape)
if pattern[0] > 0:
prefix_shape = list(base_shape)
prefix_shape[axis] = pattern[0]
prefix_shape = tuple(prefix_shape)
x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
base_shape = x.shape
if pattern[1] > 0:
postfix_shape = list(base_shape)
postfix_shape[axis] = pattern[1]
postfix_shape = tuple(postfix_shape)
x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
return x
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
return x
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, num_classes):
return C.one_hot(indices, num_classes)
def get_value(x):
    if isinstance(x, (C.variables.Parameter, C.variables.Constant)):
        return x.value
    else:
        return eval(x)
def batch_get_value(xs):
result = []
for x in xs:
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
result.append(x.value)
else:
result.append(eval(x))
return result
def set_value(x, value):
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
if isinstance(value, (float, int)):
value = np.full(x.shape, value, dtype=floatx())
x.value = value
else:
raise NotImplementedError
def print_tensor(x, message=''):
return C.user_function(
LambdaFunc(x,
when=lambda x: True,
execute=lambda x: print(message)))
def batch_set_value(tuples):
for t in tuples:
x = t[0]
value = t[1]
        if not isinstance(value, np.ndarray):
value = np.asarray(value)
if isinstance(x, C.variables.Parameter):
x.value = value
else:
raise NotImplementedError
def stop_gradient(variables):
if isinstance(variables, (list, tuple)):
return map(C.stop_gradient, variables)
else:
return C.stop_gradient(variables)
def switch(condition, then_expression, else_expression):
ndim_cond = ndim(condition)
ndim_expr = ndim(then_expression)
if ndim_cond > ndim_expr:
raise ValueError('Rank of condition should be less'
' than or equal to rank of then and'
' else expressions. ndim(condition)=' +
str(ndim_cond) + ', ndim(then_expression)'
'=' + str(ndim_expr))
elif ndim_cond < ndim_expr:
shape_expr = int_shape(then_expression)
ndim_diff = ndim_expr - ndim_cond
for i in range(ndim_diff):
condition = expand_dims(condition)
condition = tile(condition, shape_expr[ndim_cond + i])
return C.element_select(condition,
then_expression,
else_expression)
def elu(x, alpha=1.):
res = C.elu(x)
if alpha == 1:
return res
else:
return C.element_select(C.greater(x, 0), res, alpha * res)
def in_top_k(predictions, targets, k):
_targets = C.one_hot(targets, predictions.shape[-1])
result = C.classification_error(predictions, _targets, topN=k)
return 1 - C.reshape(result, shape=())
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in Keras 2 the output shape is given in the data_format layout and
    # needs to be converted to channels_first for CNTK
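    # e.g. for channels_last an output_shape of (rows, cols, channels) is
    # reordered below to (channels, rows, cols) before being passed to CNTK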
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[2]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv2d_output(x, data_format)
def identity(x, name=None):
if name is None:
name = '%s_alias' % x.name
return C.alias(x, name=name)
def _preprocess_conv2d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = C.transpose(x, (2, 0, 1))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# CNTK expects `(depth, input_depth, rows, cols)`.
kernel = C.transpose(kernel, (3, 2, 0, 1))
return kernel
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
if data_format == 'channels_last':
x = C.transpose(x, (1, 2, 0))
return x
def _preprocess_conv3d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
# input_depth)
x = C.transpose(x, (3, 0, 1, 2))
return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
return kernel
def _postprocess_conv3d_output(x, dim_ordering):
if dim_ordering == 'channels_last':
x = C.transpose(x, (1, 2, 3, 0))
return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
if _get_dynamic_axis_num(x) > 1:
return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
else:
return False
def get_num_dynamic_axis(x):
return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
if isinstance(axis, list):
for a in axis:
if isinstance(a, C.Axis) \
and a != C.Axis.default_batch_axis() \
and hasattr(C.sequence, reduce_fun_name):
x = getattr(C.sequence, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, axis)
return x
def _reshape_sequence(x, time_step):
tmp_shape = list(int_shape(x))
tmp_shape[1] = time_step
return reshape(x, tmp_shape)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to output_filters first, to apply broadcast
weight = permute_dimensions(kernel, (2, 0, 1))
# Shape: (batch, filters, output_length, input_length * kernel_size)
output = x_aggregate * weight
# Shape: (batch, filters, output_length)
output = sum(output, axis=3)
# Shape: (batch, output_length, filters)
return permute_dimensions(output, (0, 2, 1))
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + kernel_size[1])
if data_format == 'channels_first':
xs.append(reshape(inputs[:, :, slice_row, slice_col],
(-1, 1, feature_dim)))
else:
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to put filters first
weight = permute_dimensions(kernel, (2, 0, 1))
# shape: batch, filters, output_length, input_length * kernel_size
output = x_aggregate * weight
# shape: batch, filters, output_length
output = sum(output, axis=3)
# shape: batch, filters, row, col
output = reshape(output,
(-1, filters, output_row, output_col))
if data_format == 'channels_last':
# shape: batch, row, col, filters
output = permute_dimensions(output, (0, 2, 3, 1))
return output
def reverse(x, axes):
if isinstance(axes, int):
axes = [axes]
cntk_axes = _normalize_axis(axes, x)
begin_index = [0 for _ in cntk_axes]
end_index = [0 for _ in cntk_axes]
strides = [-1 for _ in cntk_axes]
return C.slice(x, cntk_axes, begin_index, end_index, strides)
def _reshape_batch(x, shape):
# there is a bug in cntk 2.1's unpack_batch implementation
if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:
const_a = C.unpack_batch(x)
const_a = C.reshape(const_a, shape)
return C.to_batch(const_a)
else:
return C.user_function(ReshapeBatch(x, shape[1:]))
def _get_cntk_version():
version = C.__version__
if version.endswith('+'):
version = version[:-1]
    # hot fix: ignore all the dots except the first one so the remainder
    # can be parsed as a float
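    # e.g. '2.3.1' becomes '2.31'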
if len(version) > 2 and version[1] == '.':
version = version[:2] + version[2:].replace('.', '')
try:
return float(version)
except:
warnings.warn(
'CNTK backend warning: CNTK version not detected. '
            'Falling back to CNTK 2.0 GA as the default.')
return float(2.0)
class ReshapeBatch(C.ops.functions.UserFunction):
def __init__(self, input, shape, name='reshape_with_batch'):
super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
self.from_shape = input.shape
self.target_shape = shape
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
num_static_element = np.prod(np.asarray(self.target_shape))
num_batch = int(num_element / num_static_element)
result = arguments.data().as_shape((num_batch,) + self.target_shape)
return None, C.cntk_py.Value(result)
def backward(self, state, root_gradients):
grad_array_view = root_gradients.data()
num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
num_static_element = np.prod(np.asarray(self.from_shape))
num_old_batch = int(num_element / num_static_element)
return C.cntk_py.Value(
grad_array_view.as_shape(
(num_old_batch,) + self.from_shape))
class ConvertToBatch(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK batch axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk variable (parameter/constant)
name: name of this node
"""
def __init__(self, input, name='convert_to_batch'):
super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name)
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.inputs[0].shape[1:],
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class ConvertToStatic(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK static axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk tensor which has batch axis
batch_size: size of batch axis.
name: name of this node.
"""
def __init__(self, input, batch_size, name='convert_to_static'):
super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name)
self.target_shape = (batch_size,) + input.shape
def infer_outputs(self):
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class LambdaFunc(C.ops.functions.UserFunction):
def __init__(self,
arg,
when=lambda arg: True,
execute=lambda arg: print(arg),
name=''):
self.when = when
self.execute = execute
super(LambdaFunc, self).__init__([arg], name=name)
def infer_outputs(self):
return [
C.output_variable(
self.inputs[0].shape,
self.inputs[0].dtype,
self.inputs[0].dynamic_axes)]
def forward(self, argument, device=None, outputs_to_retain=None):
if self.when(argument):
self.execute(argument)
return None, argument
def backward(self, state, root_gradients):
return root_gradients
|
1534 | from contextlib import contextmanager
from django.db import DatabaseError
from ..core.tracing import traced_atomic_transaction
@contextmanager
def transaction_with_commit_on_errors():
"""Perform transaction and raise an error in any occurred."""
error = None
with traced_atomic_transaction():
try:
yield
except DatabaseError:
raise
except Exception as e:
error = e
if error:
raise error
|
1561 | import requests
import redis
import json
import ast
import sys
import time
import urllib
import re
import sys
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import argparse
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-b", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
return parser.parse_args()
def safe_print(content):
print("{0}".format(content))
class askRedis():
    def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
self._password = password
r = redis.Redis(host=ip, port=port, db=0, password=password)
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
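    # read() builds counter keys of the form
    # "s3:<resource>:<name>:storageUtilized:counter" and
    # "s3:<resource>:<name>:numberOfObjects:counter",
    # e.g. "s3:buckets:mybucket:numberOfObjects:counter".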
def read(self, resource, name):
r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
res = 's3:%s:%s:storageUtilized:counter' % (resource, name)
total_size = r.get(res)
res = 's3:%s:%s:numberOfObjects:counter' % (resource, name)
files = r.get(res)
try:
return {'files': int(files), "total_size": int(total_size)}
except Exception as e:
return {'files': 0, "total_size": 0}
class S3ListBuckets():
def __init__(self, host='127.0.0.1:9000'):
self.bucketd_host = host
def run(self):
docs = []
url = "%s/default/bucket/users..bucket" % self.bucketd_host
session = requests.Session()
r = session.get(url, timeout=30)
if r.status_code == 200:
payload = json.loads(r.text)
for keys in payload['Contents']:
key = keys["key"]
                    r1 = re.match(r"(\w+)..\|..(\w+.*)", key)
docs.append(r1.groups())
return docs
if __name__ == '__main__':
options = get_options()
redis_conf = dict(
ip=options.sentinel_ip,
port=options.sentinel_port,
sentinel_cluster_name=options.sentinel_cluster_name,
password=options.redis_password
)
P = S3ListBuckets(options.bucketd_addr)
listbuckets = P.run()
userids = set([x for x, y in listbuckets])
executor = ThreadPoolExecutor(max_workers=1)
for userid, bucket in listbuckets:
U = askRedis(**redis_conf)
data = U.read('buckets', bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
data = U.read('buckets', 'mpuShadowBucket'+bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, 'mpuShadowBucket'+bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
executor.submit(safe_print, "")
for userid in sorted(userids):
U = askRedis(**redis_conf)
data = U.read('accounts', userid)
content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, data["files"], data["total_size"])
        executor.submit(safe_print, content)
|
1569 | from pprint import pprint
import yaml
import netmiko
import paramiko
def send_cmd_with_prompt(device, command, *, wait_for, confirmation):
if type(wait_for) == str:
wait_for = [wait_for]
if type(confirmation) == str:
confirmation = [confirmation]
with netmiko.Netmiko(**device) as ssh:
ssh.enable()
result = ssh.send_command_timing(
command, strip_prompt=False, strip_command=False
)
for wait, confirm in zip(wait_for, confirmation):
if wait in result:
result += ssh.send_command_timing(
confirm, strip_prompt=False, strip_command=False
)
return result
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r1 = devices[0]
out = send_cmd_with_prompt(
r1, "copy run start", wait_for="Destination filename", confirmation="\n"
)
print(out)
"""
R1#copy run start
Destination filename [startup-config]?
Building configuration...
[OK]
R1#
"""
|
1588 | from dataclasses import dataclass
@dataclass
class PayloadSender:
phone: int
name: str
@dataclass
class PayloadBaseModel:
sender: PayloadSender
payload_id: str
|
1660 | import filters as f
from iota import TransactionHash, Address
from iota.commands import FilterCommand, RequestFilter, ResponseFilter
from iota.filters import Trytes
__all__ = [
'GetNodeInfoCommand',
]
class GetNodeInfoCommand(FilterCommand):
"""
Executes `getNodeInfo` command.
See :py:meth:`iota.api.StrictIota.get_node_info`.
"""
command = 'getNodeInfo'
def get_request_filter(self):
return GetNodeInfoRequestFilter()
def get_response_filter(self):
return GetNodeInfoResponseFilter()
class GetNodeInfoRequestFilter(RequestFilter):
def __init__(self) -> None:
# ``getNodeInfo`` does not accept any parameters.
# Using a filter here just to enforce that the request is empty.
super(GetNodeInfoRequestFilter, self).__init__({})
class GetNodeInfoResponseFilter(ResponseFilter):
def __init__(self) -> None:
super(GetNodeInfoResponseFilter, self).__init__({
'coordinatorAddress':
f.ByteString(encoding='ascii') | Trytes(Address),
'latestMilestone':
f.ByteString(encoding='ascii') | Trytes(TransactionHash),
'latestSolidSubtangleMilestone':
f.ByteString(encoding='ascii') | Trytes(TransactionHash),
})
|
1670 | try:
from public_config import *
except ImportError:
pass
PORT = 9028
SERVICE_NAME = 'interface'
|
1684 | from PyQt5 import QtWidgets, uic
from Factory import Factory
from Dialogs.DialogMacros import turn_into_free_point, free_point_checkbox
from Fill.ListWidget import fill_listWidget_with_data, set_selected_id_in_listWidget
import Constant as c
class RegularPolygonDialog(QtWidgets.QDialog):
def __init__(self, scene, data):
"""Construct RegularPolygonDialog."""
super(RegularPolygonDialog, self).__init__()
self.ui = uic.loadUi('regularpolygon.ui', self)
self.scene = scene
self.sides = 3
self.free_point = False
self.data = data
self.ui.buttonBox.accepted.connect(self.accepted)
self.ui.buttonBox.rejected.connect(self.rejected)
self.ui.sides_slider.valueChanged.connect(self.hslider_sides_func)
self.ui.checkBox.stateChanged.connect(lambda x: free_point_checkbox(self, x))
def hslider_sides_func(self, value):
"""Be slider callback function to set sides."""
self.sides = value
self.ui.sides_spin.setValue(value)
def accepted(self):
"""Create new regular polygon with settings."""
A, B = self.data
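        # rotate by the (negative) interior angle of a regular n-gon,
        # e.g. -60 for a triangle or -90 for a square, to place each new vertex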
angle = -(self.sides - 2) * 180 / self.sides
polygon = [A, B]
for _ in range(self.sides - 2):
item = Factory.create_empty_item('point', c.Point.Definition.ROTATION)
definition = {'A': A, 'B': B, 'angle': angle}
id_ = Factory.next_id(item, definition, self.scene.project_data.items)
item.item["id"] = id_
item.item["definition"] = definition
if self.free_point:
item = turn_into_free_point(item, self.scene)
self.scene.project_data.add(item)
A = B
B = item.item["id"]
polygon.append(item.item["id"])
item = Factory.create_empty_item('polygon', None)
definition = polygon
item.item["id"] = Factory.next_id(item, definition, self.scene.project_data.items)
item.item["definition"] = definition
self.scene.project_data.add(item)
self.scene.project_data.recompute_canvas(*self.scene.init_canvas_dims)
current_row_old = self.scene.ui.listWidget.currentRow()
fill_listWidget_with_data(self.scene.project_data, self.scene.ui.listWidget, self.scene.current_tab_idx)
set_selected_id_in_listWidget(self.scene, current_row_old)
self.scene.edit.add_undo_item(self.scene)
def rejected(self):
"""Add no new regular polygon."""
pass
|
1695 | import asyncio
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
class Cpe23Str(s_types.Str):
'''
CPE 2.3 Formatted String
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
(Section 6.2)
cpe:2.3: part : vendor : product : version : update : edition :
language : sw_edition : target_sw : target_hw : other
* = "any"
- = N/A
'''
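    # e.g. 'cpe:2.3:a:openbsd:openssh:7.4:*:*:*:*:*:*:*' splits into the 13
    # colon-separated parts listed above; a literal colon inside a field must
    # be escaped as "\:" and is kept with its field by _splitCpe23().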
def __init__(self, modl, name, info, opts):
opts['lower'] = True
s_types.Str.__init__(self, modl, name, info, opts)
def _splitCpe23(self, text):
part = ''
parts = []
genr = iter(text)
try:
while True:
c = next(genr)
if c == '\\':
c += next(genr)
if c == ':':
parts.append(part)
part = ''
continue
part += c
except StopIteration:
parts.append(part)
return parts
def _normPyStr(self, valu):
if not valu.startswith('cpe:2.3:'):
mesg = 'CPE 2.3 string is expected to start with "cpe:2.3:"'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
text, info = s_types.Str._normPyStr(self, valu)
parts = self._splitCpe23(text)
if len(parts) != 13:
mesg = f'CPE 2.3 string has {len(parts)} parts, expected 13.'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
subs = {
'part': parts[2],
'vendor': parts[3],
'product': parts[4],
'version': parts[5],
'update': parts[6],
'edition': parts[7],
'language': parts[8],
'sw_edition': parts[9],
'target_sw': parts[10],
'target_hw': parts[11],
'other': parts[12],
}
return ':'.join(parts), {'subs': subs}
class SemVer(s_types.Int):
'''
Provides support for parsing a semantic version string into its component
parts. This normalizes a version string into an integer to allow version
ordering. Prerelease information is disregarded for integer comparison
purposes, as we cannot map an arbitrary pre-release version into a integer
value
Major, minor and patch levels are represented as integers, with a max
width of 20 bits. The comparable integer value representing the semver
is the bitwise concatenation of the major, minor and patch levels.
Prerelease and build information will be parsed out and available as
strings if that information is present.
'''
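    # e.g. assuming the 20-bit packing described above, version 2.3.4
    # normalizes to (2 << 40) | (3 << 20) | 4 == 2199026401284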
def postTypeInit(self):
s_types.Int.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(int, self._normPyInt)
def _normPyStr(self, valu):
valu = valu.strip()
if not valu:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='No text left after stripping whitespace')
subs = s_version.parseSemver(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Unable to parse string as a semver.')
valu = s_version.packVersion(subs.get('major'), subs.get('minor'), subs.get('patch'))
return valu, {'subs': subs}
def _normPyInt(self, valu):
if valu < 0:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a negative integer as a semver.')
if valu > s_version.mask60:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
                                    mesg='Cannot norm an integer larger than 1152921504606846975 as a semver.')
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.packVersion(major, minor, patch)
subs = {'major': major,
'minor': minor,
'patch': patch}
return valu, {'subs': subs}
def repr(self, valu):
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.fmtVersion(major, minor, patch)
return valu
loglevels = (
(10, 'debug'),
(20, 'info'),
(30, 'notice'),
(40, 'warning'),
(50, 'err'),
(60, 'crit'),
(70, 'alert'),
(80, 'emerg'),
)
class ItModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.form('it:dev:str').onAdd(self._onFormItDevStr)
self.model.form('it:dev:pipe').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:mutex').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:regkey').onAdd(self._onFormMakeDevStr)
self.model.prop('it:prod:softver:arch').onSet(self._onPropSoftverArch)
self.model.prop('it:prod:softver:vers').onSet(self._onPropSoftverVers)
self.model.prop('it:prod:softver:software').onSet(self._onPropSoftverSoft)
def bruteVersionStr(self, valu):
'''
Brute force the version out of a string.
Args:
valu (str): String to attempt to get version information for.
Notes:
This first attempts to parse strings using the it:semver normalization
before attempting to extract version parts out of the string.
Returns:
int, dict: The system normalized version integer and a subs dictionary.
'''
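        # e.g. a strict string like '1.2.3' is normalized by it:semver directly;
        # a looser string falls back to s_version.parseVersionParts(), and any
        # missing minor/patch parts default to 0 when packing below.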
try:
valu, info = self.core.model.type('it:semver').norm(valu)
subs = info.get('subs')
return valu, subs
except s_exc.BadTypeValu:
# Try doing version part extraction by noming through the string
subs = s_version.parseVersionParts(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',
mesg='Unable to brute force version parts out of the string')
if subs:
valu = s_version.packVersion(subs.get('major'),
subs.get('minor', 0),
subs.get('patch', 0))
return valu, subs
async def _onFormItDevStr(self, node):
await node.set('norm', node.ndef[1])
async def _onFormMakeDevStr(self, node):
pprop = node.ndef[1]
await node.snap.addNode('it:dev:str', pprop)
async def _onPropSoftverSoft(self, node, oldv):
# Check to see if name is available and set it if possible
prop = node.get('software')
if prop:
opts = {'vars': {'soft': prop}}
nodes = await node.snap.nodes('it:prod:soft=$soft', opts=opts)
if nodes:
name = nodes[0].get('name')
if name:
await node.set('software:name', name)
async def _onPropSoftverArch(self, node, oldv):
# make it:dev:str for arch
prop = node.get('arch')
if prop:
await node.snap.addNode('it:dev:str', prop)
async def _onPropSoftverVers(self, node, oldv):
        # Set vers:norm and make its normed valu
prop = node.get('vers')
if not prop:
return
await node.set('vers:norm', prop)
# Make it:dev:str from version str
await node.snap.addNode('it:dev:str', prop)
# form the semver properly or bruteforce parts
try:
valu, subs = self.bruteVersionStr(prop)
await node.set('semver', valu)
for k, v in subs.items():
await node.set(f'semver:{k}', v)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception('Failed to brute force version string [%s]', prop)
def getModelDefs(self):
modl = {
'ctors': (
('it:semver', 'synapse.models.infotech.SemVer', {}, {
'doc': 'Semantic Version type.',
}),
('it:sec:cpe', 'synapse.models.infotech.Cpe23Str', {}, {
'doc': 'A NIST CPE 2.3 Formatted String',
}),
),
'types': (
('it:hostname', ('str', {'strip': True, 'lower': True}), {
'doc': 'The name of a host or system.',
}),
('it:host', ('guid', {}), {
'doc': 'A GUID that represents a host or system.'
}),
('it:log:event', ('guid', {}), {
'doc': 'A GUID representing an individual log event.',
'interfaces': ('it:host:activity',),
}),
('it:network', ('guid', {}), {
'doc': 'A GUID that represents a logical network.'
}),
('it:domain', ('guid', {}), {
'doc': 'A logical boundary of authentication and configuration such as a windows domain.'
}),
('it:account', ('guid', {}), {
'doc': 'A GUID that represents an account on a host or network.'
}),
('it:group', ('guid', {}), {
'doc': 'A GUID that represents a group on a host or network.'
}),
('it:logon', ('guid', {}), {
'doc': 'A GUID that represents an individual logon/logoff event.'
}),
('it:hosturl', ('comp', {'fields': (('host', 'it:host'), ('url', 'inet:url'))}), {
'doc': 'A url hosted on or served by a host or system.',
}),
('it:sec:cve', ('str', {'lower': True, 'regex': r'(?i)^CVE-[0-9]{4}-[0-9]{4,}$'}), {
'doc': 'A vulnerability as designated by a Common Vulnerabilities and Exposures (CVE) number.',
'ex': 'cve-2012-0158'
}),
('it:sec:cwe', ('str', {'regex': r'^CWE-[0-9]{1,8}$'}), {
'doc': 'NIST NVD Common Weaknesses Enumeration Specification',
'ex': 'CWE-120',
}),
('it:mitre:attack:status', ('str', {'enums': 'current,deprecated,withdrawn'}), {
'doc': 'A Mitre ATT&CK element status.',
'ex': 'current',
}),
('it:mitre:attack:group', ('str', {'regex': r'^G[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Group ID.',
'ex': 'G0100',
}),
('it:mitre:attack:tactic', ('str', {'regex': r'^TA[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Tactic ID.',
'ex': 'TA0040',
}),
('it:mitre:attack:technique', ('str', {'regex': r'^T[0-9]{4}(.[0-9]{3})?$'}), {
'doc': 'A Mitre ATT&CK Technique ID.',
'ex': 'T1548',
}),
('it:mitre:attack:mitigation', ('str', {'regex': r'^M[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Mitigation ID.',
'ex': 'M1036',
}),
('it:mitre:attack:software', ('str', {'regex': r'^S[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Software ID.',
'ex': 'S0154',
}),
('it:dev:str', ('str', {}), {
'doc': 'A developer-selected string.'
}),
('it:dev:pipe', ('str', {}), {
'doc': 'A string representing a named pipe.',
}),
('it:dev:mutex', ('str', {}), {
'doc': 'A string representing a mutex.',
}),
('it:dev:int', ('int', {}), {
'doc': 'A developer selected integer constant.',
}),
('it:dev:regkey', ('str', {}), {
'doc': 'A Windows registry key.',
'ex': 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
}),
('it:dev:regval', ('guid', {}), {
'doc': 'A Windows registry key/value pair.',
}),
('it:prod:soft', ('guid', {}), {
                    'doc': 'An arbitrary, unversioned software product.',
}),
('it:adid', ('str', {'lower': True, 'strip': True}), {
'doc': 'An advertising identification string.'}),
('it:os:windows:sid', ('str', {'regex': r'^S-1-[0-59]-\d{2}-\d{8,10}-\d{8,10}-\d{8,10}-[1-9]\d{3}$'}), {
'doc': 'A Microsoft Windows Security Identifier.',
'ex': 'S-1-5-21-1220945662-1202665555-839525555-5555',
}),
('it:os:ios:idfa', ('it:adid', {}), {
'doc': 'An iOS advertising identification string.'}),
('it:os:android:aaid', ('it:adid', {}), {
'doc': 'An android advertising identification string.'}),
('it:os:android:perm', ('str', {}), {
'doc': 'An android permission string.'}),
('it:os:android:intent', ('str', {}), {
'doc': 'An android intent string.'}),
('it:os:android:reqperm', ('comp', {'fields': (
('app', 'it:prod:soft'),
('perm', 'it:os:android:perm'))}), {
'doc': 'The given software requests the android permission.'}),
('it:os:android:ilisten', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent'))}), {
'doc': 'The given software listens for an android intent.'}),
('it:os:android:ibroadcast', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent')
)}), {
'doc': 'The given software broadcasts the given Android intent.'}),
('it:prod:softver', ('guid', {}), {
'doc': 'A specific version of a software product.'}),
('it:prod:softfile', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('file', 'file:bytes'))}), {
'doc': 'A file is distributed by a specific software version.'}),
('it:prod:softlib', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('lib', 'it:prod:softver'))}), {
'doc': 'A software version contains a library software version.'}),
('it:prod:softos', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('os', 'it:prod:softver'))}), {
'doc': 'The software version is known to be compatible with the given os software version.'}),
('it:hostsoft', ('comp', {'fields': (('host', 'it:host'), ('softver', 'it:prod:softver'))}), {
'doc': 'A version of a software product which is present on a given host.',
}),
('it:av:sig', ('comp', {'fields': (('soft', 'it:prod:soft'), ('name', ('str', {'lower': True})))}), {
'doc': 'A signature name within the namespace of an antivirus engine name.'
}),
('it:av:filehit', ('comp', {'fields': (('file', 'file:bytes'), ('sig', 'it:av:sig'))}), {
'doc': 'A file that triggered an alert on a specific antivirus signature.',
}),
('it:av:prochit', ('guid', {}), {
'doc': 'An instance of a process triggering an alert on a specific antivirus signature.'
}),
('it:auth:passwdhash', ('guid', {}), {
'doc': 'An instance of a password hash.',
}),
('it:exec:proc', ('guid', {}), {
'doc': 'A process executing on a host. May be an actual (e.g., endpoint) or virtual (e.g., malware sandbox) host.',
}),
('it:exec:thread', ('guid', {}), {
'doc': 'A thread executing in a process.',
}),
('it:exec:loadlib', ('guid', {}), {
'doc': 'A library load event in a process.',
}),
('it:exec:mmap', ('guid', {}), {
'doc': 'A memory mapped segment located in a process.',
}),
('it:cmd', ('str', {'strip': True}), {
'doc': 'A unique command-line string.',
'ex': 'foo.exe --dostuff bar',
}),
('it:exec:mutex', ('guid', {}), {
'doc': 'A mutex created by a process at runtime.',
}),
('it:exec:pipe', ('guid', {}), {
'doc': 'A named pipe created by a process at runtime.',
}),
('it:exec:url', ('guid', {}), {
'doc': 'An instance of a host requesting a URL.',
}),
('it:exec:bind', ('guid', {}), {
'doc': 'An instance of a host binding a listening port.',
}),
('it:fs:file', ('guid', {}), {
'doc': 'A file on a host.'
}),
('it:exec:file:add', ('guid', {}), {
'doc': 'An instance of a host adding a file to a filesystem.',
}),
('it:exec:file:del', ('guid', {}), {
'doc': 'An instance of a host deleting a file from a filesystem.',
}),
('it:exec:file:read', ('guid', {}), {
'doc': 'An instance of a host reading a file from a filesystem.',
}),
('it:exec:file:write', ('guid', {}), {
'doc': 'An instance of a host writing a file to a filesystem.',
}),
('it:exec:reg:get', ('guid', {}), {
'doc': 'An instance of a host getting a registry key.',
}),
('it:exec:reg:set', ('guid', {}), {
'doc': 'An instance of a host creating or setting a registry key.',
}),
('it:exec:reg:del', ('guid', {}), {
'doc': 'An instance of a host deleting a registry key.',
}),
('it:app:yara:rule', ('guid', {}), {
'doc': 'A YARA rule unique identifier.',
}),
('it:app:yara:match', ('comp', {'fields': (('rule', 'it:app:yara:rule'), ('file', 'file:bytes'))}), {
'doc': 'A YARA rule match to a file.',
}),
('it:app:yara:procmatch', ('guid', {}), {
'doc': 'An instance of a YARA rule match to a process.',
}),
('it:app:snort:rule', ('guid', {}), {
'doc': 'A snort rule unique identifier.',
}),
('it:app:snort:hit', ('guid', {}), {
'doc': 'An instance of a snort rule hit.',
}),
('it:reveng:function', ('guid', {}), {
'doc': 'A function inside an executable.',
}),
('it:reveng:filefunc', ('comp', {'fields': (('file', 'file:bytes'), ('function', 'it:reveng:function'))}), {
'doc': 'An instance of a function in an executable.',
}),
('it:reveng:funcstr', ('comp', {'fields': (('function', 'it:reveng:function'), ('string', 'str'))}), {
'deprecated': True,
'doc': 'A reference to a string inside a function.',
}),
('it:reveng:impfunc', ('str', {'lower': 1}), {
'doc': 'A function from an imported library.',
}),
),
'interfaces': (
('it:host:activity', {
'props': (
('exe', ('file:bytes', {}), {
'doc': 'The executable file which caused the activity.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The host process which caused the activity.'}),
('thread', ('it:exec:thread', {}), {
'doc': 'The host thread which caused the activity.'}),
('host', ('it:host', {}), {
'doc': 'The host on which the activity occurred.'}),
('time', ('time', {}), {
'doc': 'The time that the activity started.'}),
),
}),
),
'forms': (
('it:hostname', {}, ()),
('it:host', {}, (
('name', ('it:hostname', {}), {
'doc': 'The name of the host or system.',
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the host.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain that the host is a member of.',
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The last known ipv4 address for the host.'
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known location for the host.'
}),
('place', ('geo:place', {}), {
'doc': 'The place where the host resides.',
}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.',
}),
('os', ('it:prod:softver', {}), {
'doc': 'The operating system of the host.'
}),
('manu', ('str', {}), {
'doc': 'The manufacturer of the host.',
}),
('model', ('str', {}), {
'doc': 'The product model of the host.',
}),
('serial', ('str', {}), {
'doc': 'The serial number of the host.',
}),
('operator', ('ps:contact', {}), {
'doc': 'The operator of the host.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given host.',
}),
)),
('it:log:event', {}, (
('mesg', ('str', {}), {
                        'doc': 'The log message text.',
}),
('severity', ('int', {'enums': loglevels}), {
'doc': 'A log level integer that increases with severity.',
}),
('data', ('data', {}), {
'doc': 'A raw JSON record of the log event.',
}),
)),
('it:domain', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the domain.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the domain.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given domain.',
}),
)),
('it:network', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the network.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the network.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that owns/operates the network.',
}),
('net4', ('inet:net4', {}), {
'doc': 'The optional contiguous IPv4 address range of this network.',
}),
('net6', ('inet:net6', {}), {
'doc': 'The optional contiguous IPv6 address range of this network.',
}),
)),
('it:account', {}, (
('user', ('inet:user', {}), {
'doc': 'The username associated with the account',
}),
('contact', ('ps:contact', {}), {
'doc': 'Additional contact information associated with this account.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the account is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the account is registered.',
}),
('posix:uid', ('int', {}), {
'doc': 'The user ID of the account.',
'ex': '1001',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('posix:gecos', ('int', {}), {
'doc': 'The GECOS field for the POSIX account.',
}),
('posix:home', ('file:path', {}), {
'doc': "The path to the POSIX account's home directory.",
'ex': '/home/visi',
}),
('posix:shell', ('file:path', {}), {
'doc': "The path to the POSIX account's default shell.",
'ex': '/bin/bash',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the account.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'An array of groups that the account is a member of.',
}),
)),
('it:group', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the group.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the group.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the group is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the group is registered.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'Groups that are a member of this group.',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the group.',
}),
)),
('it:logon', {}, (
('time', ('time', {}), {
                        'doc': 'The time the logon occurred.',
}),
('success', ('bool', {}), {
'doc': 'Set to false to indicate an unsuccessful logon attempt.',
}),
('logoff:time', ('time', {}), {
'doc': 'The time the logon session ended.',
}),
('host', ('it:host', {}), {
'doc': 'The host that the account logged in to.',
}),
('account', ('it:account', {}), {
'doc': 'The account that logged in.',
}),
('creds', ('auth:creds', {}), {
'doc': 'The credentials that were used for the logon.',
}),
('duration', ('duration', {}), {
'doc': 'The duration of the logon session.',
}),
('client:host', ('it:host', {}), {
'doc': 'The host where the logon originated.',
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 where the logon originated.',
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 where the logon originated.',
}),
)),
('it:hosturl', {}, (
('host', ('it:host', {}), {
'ro': True,
'doc': 'Host serving a url.',
}),
('url', ('inet:url', {}), {
'ro': True,
'doc': 'URL available on the host.',
}),
)),
('it:dev:str', {}, (
('norm', ('str', {'lower': True}), {
'doc': 'Lower case normalized version of the it:dev:str.',
}),
)),
('it:sec:cve', {}, (
('desc', ('str', {}), {
'doc': 'A free-form description of the CVE vulnerability.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CVE to a full description.',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the CVE ID.',
}),
)),
('it:sec:cpe', {}, (
('part', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "part" field from the CPE 2.3 string.'}),
('vendor', ('ou:name', {}), {
'ro': True,
'doc': 'The "vendor" field from the CPE 2.3 string.'}),
('product', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "product" field from the CPE 2.3 string.'}),
('version', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "version" field from the CPE 2.3 string.'}),
('update', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "update" field from the CPE 2.3 string.'}),
('edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "edition" field from the CPE 2.3 string.'}),
('language', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "language" field from the CPE 2.3 string.'}),
('sw_edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "sw_edition" field from the CPE 2.3 string.'}),
('target_sw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_sw" field from the CPE 2.3 string.'}),
('target_hw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_hw" field from the CPE 2.3 string.'}),
('other', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "other" field from the CPE 2.3 string.'}),
)),
('it:sec:cwe', {}, (
('name', ('str', {}), {
'doc': 'The CWE description field.',
'ex': 'Buffer Copy without Checking Size of Input (Classic Buffer Overflow)',
}),
('desc', ('str', {}), {
'doc': 'The CWE description field.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CWE to a full description.',
}),
('parents', ('array', {'type': 'it:sec:cwe',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ChildOf CWE Relationships.'
}),
)),
('it:mitre:attack:group', {}, (
('org', ('ou:org', {}), {
'doc': 'Used to map an ATT&CK group to a synapse ou:org.',
}),
('name', ('ou:name', {}), {
'doc': 'The primary name for the ATT&CK group.',
}),
('names', ('array', {'type': 'ou:name', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the ATT&CK group.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK group.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK group.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK group ID.',
'ex': 'cno.mitre.g0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK group.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs used by the group.',
}),
('software', ('array', {'type': 'it:mitre:attack:software',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK software IDs used by the group.',
}),
)),
('it:mitre:attack:tactic', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK tactic.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK tactic.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK tactic.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK tactic.',
'ex': 'cno.mitre.ta0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK tactic.',
}),
)),
('it:mitre:attack:technique', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK technique.',
}),
('status', ('it:mitre:attack:status', {}), {
'doc': 'The status of this ATT&CK technique.',
}),
('isnow', ('it:mitre:attack:technique', {}), {
'doc': 'If deprecated, this field may contain the current value for the technique.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK technique.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK technique.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK technique.',
'ex': 'cno.mitre.t0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK technique.',
}),
('parent', ('it:mitre:attack:technique', {}), {
'doc': 'The parent ATT&CK technique on this sub-technique.',
}),
('tactics', ('array', {'type': 'it:mitre:attack:tactic',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK tactics that include this technique.',
}),
)),
('it:mitre:attack:software', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Used to map an ATT&CK software to a synapse it:prod:soft.',
}),
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK software.',
}),
('names', ('array', {'type': 'str', 'uniq': True, 'sorted': True}), {
'doc': 'Associated names for the ATT&CK software.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK software.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK software.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK software.',
'ex': 'cno.mitre.s0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK software.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of techniques used by the software.',
}),
)),
('it:mitre:attack:mitigation', {}, (
# TODO map to an eventual risk:mitigation
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK mitigation.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK mitigation.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK mitigation.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK mitigation.',
'ex': 'cno.mitre.m0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK mitigation.',
}),
('addresses', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs addressed by the mitigation.',
}),
)),
('it:dev:int', {}, ()),
('it:dev:pipe', {}, ()),
('it:dev:mutex', {}, ()),
('it:dev:regkey', {}, ()),
('it:dev:regval', {}, (
('key', ('it:dev:regkey', {}), {
'doc': 'The Windows registry key.',
}),
('str', ('it:dev:str', {}), {
'doc': 'The value of the registry key, if the value is a string.',
}),
('int', ('it:dev:int', {}), {
'doc': 'The value of the registry key, if the value is an integer.',
}),
('bytes', ('file:bytes', {}), {
'doc': 'The file representing the value of the registry key, if the value is binary data.',
}),
)),
('it:prod:soft', {}, (
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'Name of the software.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software.',
}),
('desc', ('str', {}), {
'doc': 'A description of the software.',
'disp': {'hint': 'text'},
}),
('desc:short', ('str', {'lower': True}), {
'doc': 'A short description of the software.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software.',
}),
('author', ('ps:contact', {}), {
'doc': 'The contact information of the org or person who authored the software.',
}),
('author:org', ('ou:org', {}), {
'deprecated': True,
'doc': 'Organization which authored the software.',
}),
('author:acct', ('inet:web:acct', {}), {
'deprecated': True,
'doc': 'Web account of the software author.',
}),
('author:email', ('inet:email', {}), {
'deprecated': True,
'doc': 'Email address of the software author.',
}),
('author:person', ('ps:person', {}), {
'deprecated': True,
'doc': 'Person who authored the software.',
}),
('url', ('inet:url', {}), {
'doc': 'URL relevant for the software.',
}),
('isos', ('bool', {}), {
'doc': 'Set to True if the software is an operating system.'}),
('islib', ('bool', {}), {
'doc': 'Set to True if the software is a library.'}),
)),
('it:adid', {}, ()),
('it:os:ios:idfa', {}, ()),
('it:os:android:aaid', {}, ()),
('it:os:android:perm', {}, ()),
('it:os:android:intent', {}, ()),
('it:os:android:reqperm', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The android app which requests the permission.'}),
('perm', ('it:os:android:perm', {}), {'ro': True,
'doc': 'The android permission requested by the app.'}),
)),
('it:prod:softos', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which can run on the operating system.'}),
('os', ('it:prod:softver', {}), {'ro': True,
'doc': 'The operating system which the software can run on.'}),
)),
('it:os:android:ilisten', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which listens for the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is listened for by the app.'}),
)),
('it:os:android:ibroadcast', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which broadcasts the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is broadcast by the app.'}),
)),
('it:prod:softver', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Software associated with this version instance.',
}),
('software:name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name of the software at a particular version.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software version.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software version.',
}),
('cves', ('array', {'type': 'it:sec:cve', 'uniq': True, 'sorted': True}), {
'doc': 'A list of CVEs that apply to this software version.',
}),
('vers', ('it:dev:str', {}), {
'doc': 'Version string associated with this version instance.',
}),
('vers:norm', ('str', {'lower': True}), {
'doc': 'Normalized version of the version string.',
}),
('arch', ('it:dev:str', {}), {
'doc': 'Software architecture.',
}),
('released', ('time', {}), {
'doc': 'Timestamp for when this version of the software was released.',
}),
('semver', ('it:semver', {}), {
'doc': 'System normalized semantic version number.',
}),
('semver:major', ('int', {}), {
'doc': 'Version major number.',
}),
('semver:minor', ('int', {}), {
'doc': 'Version minor number.',
}),
('semver:patch', ('int', {}), {
'doc': 'Version patch number.',
}),
('semver:pre', ('str', {}), {
'doc': 'Semver prerelease string.',
}),
('semver:build', ('str', {}), {
'doc': 'Semver build string.',
}),
('url', ('inet:url', {}), {
'doc': 'URL where a specific version of the software is available from.',
}),
)),
('it:prod:softlib', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software version that contains the library.'}),
('lib', ('it:prod:softver', {}), {'ro': True,
'doc': 'The library software version.'}),
)),
('it:prod:softfile', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which distributes the file.'}),
('file', ('file:bytes', {}), {'ro': True,
'doc': 'The file distributed by the software.'}),
('path', ('file:path', {}), {
'doc': 'The default installation path of the file.'}),
)),
('it:hostsoft', {}, (
('host', ('it:host', {}), {'ro': True,
'doc': 'Host with the software.'}),
('softver', ('it:prod:softver', {}), {'ro': True,
'doc': 'Software on the host.'})
)),
('it:av:sig', {}, (
('soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
('name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.'
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the signature.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A reference URL for information about the signature.',
})
)),
('it:av:filehit', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'ro': True,
'doc': 'The signature that the file triggered on.'
}),
('sig:name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.',
}),
('sig:soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
)),
('it:av:prochit', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'doc': 'The signature that the process triggered on.'
}),
('time', ('time', {}), {
'doc': 'The time that the AV engine detected the signature.'
}),
)),
('it:auth:passwdhash', {}, (
('salt', ('hex', {}), {
'doc': 'The (optional) hex encoded salt value used to calculate the password hash.',
}),
('hash:md5', ('hash:md5', {}), {
'doc': 'The MD5 password hash value.',
}),
('hash:sha1', ('hash:sha1', {}), {
'doc': 'The SHA1 password hash value.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'The SHA256 password hash value.',
}),
('hash:sha512', ('hash:sha512', {}), {
'doc': 'The SHA512 password hash value.',
}),
('hash:lm', ('hash:lm', {}), {
'doc': 'The LM password hash value.',
}),
('hash:ntlm', ('hash:ntlm', {}), {
'doc': 'The NTLM password hash value.',
}),
('passwd', ('inet:passwd', {}), {
'doc': 'The (optional) clear text password for this password hash.',
}),
)),
('it:cmd', {}, ()),
('it:exec:proc', {}, (
('host', ('it:host', {}), {
'doc': 'The host that executed the process. May be an actual or a virtual / notional host.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The file considered the "main" executable for the process. For example, rundll32.exe may be considered the "main" executable for DLLs loaded by that program.',
}),
('cmd', ('it:cmd', {}), {
'doc': 'The command string used to launch the process, including any command line parameters.',
'disp': {'hint': 'text'},
}),
('pid', ('int', {}), {
'doc': 'The process ID.',
}),
('time', ('time', {}), {
'doc': 'The start time for the process.',
}),
('exited', ('time', {}), {
'doc': 'The time the process exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code for the process.',
}),
('user', ('inet:user', {}), {
'doc': 'The user name of the process owner.',
}),
('path', ('file:path', {}), {
'doc': 'The path to the executable of the process.',
}),
('src:exe', ('file:path', {}), {
'doc': 'The path to the executable which started the process.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'The process which created the process.'
}),
('killedby', ('it:exec:proc', {}), {
'doc': 'The process which killed this process.',
}),
)),
('it:exec:thread', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process which contains the thread.',
}),
('created', ('time', {}), {
'doc': 'The time the thread was created.',
}),
('exited', ('time', {}), {
'doc': 'The time the thread exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code or return value for the thread.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'An external process which created the thread.',
}),
('src:thread', ('it:exec:thread', {}), {
'doc': 'The thread which created this thread.',
}),
)),
('it:exec:loadlib', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the library was loaded.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the library was loaded in the process.',
}),
('loaded', ('time', {}), {
'doc': 'The time the library was loaded.',
}),
('unloaded', ('time', {}), {
'doc': 'The time the library was unloaded.',
}),
('path', ('file:path', {}), {
'doc': 'The path that the library was loaded from.',
}),
('file', ('file:bytes', {}), {
'doc': 'The library file that was loaded.',
}),
)),
('it:exec:mmap', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the memory was mapped.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the map was created in the process.',
}),
('size', ('int', {}), {
'doc': 'The size of the memory map in bytes.',
}),
('perms:read', ('bool', {}), {
'doc': 'True if the mmap is mapped with read permissions.',
}),
('perms:write', ('bool', {}), {
'doc': 'True if the mmap is mapped with write permissions.',
}),
('perms:execute', ('bool', {}), {
'doc': 'True if the mmap is mapped with execute permissions.',
}),
('created', ('time', {}), {
'doc': 'The time the memory map was created.',
}),
('deleted', ('time', {}), {
'doc': 'The time the memory map was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The file path if the mmap is a mapped view of a file.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'A SHA256 hash of the memory map. Bytes may optionally be present in the axon.',
}),
)),
('it:exec:mutex', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the mutex.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the mutex was created.',
}),
('name', ('it:dev:mutex', {}), {
'doc': 'The mutex string.',
}),
)),
('it:exec:pipe', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the named pipe.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the named pipe was created.',
}),
('name', ('it:dev:pipe', {}), {
'doc': 'The named pipe string.',
}),
)),
('it:exec:url', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that requested the URL.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the URL was requested.',
}),
('url', ('inet:url', {}), {
'doc': 'The URL that was requested.',
}),
('client', ('inet:client', {}), {
'doc': 'The address of the client during the URL retrieval.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address of the client during the URL retrieval.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address of the client during the URL retrieval.'
}),
('client:port', ('inet:port', {}), {
'doc': 'The client port during the URL retrieval.'
}),
)),
('it:exec:bind', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that bound the listening port.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that bound the listening port. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that bound the listening port. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the port was bound.',
}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server when binding the port.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address specified to bind().'
}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address specified to bind().'
}),
('server:port', ('inet:port', {}), {
'doc': 'The bound (listening) TCP port.'
}),
)),
('it:fs:file', {}, (
('host', ('it:host', {}), {
'doc': 'The host containing the file.',
}),
('path', ('file:path', {}), {
'doc': 'The path for the file.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file on the host.',
}),
('ctime', ('time', {}), {
'doc': 'The file creation time.',
}),
('mtime', ('time', {}), {
'doc': 'The file modification time.',
}),
('atime', ('time', {}), {
'doc': 'The file access time.',
}),
('user', ('inet:user', {}), {
'doc': 'The owner of the file.',
}),
('group', ('inet:user', {}), {
'doc': 'The group owner of the file.',
}),
)),
('it:exec:file:add', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the new file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the new file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was created.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was created.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was created.',
}),
)),
('it:exec:file:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was deleted.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was deleted.',
}),
)),
('it:exec:file:read', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was read.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was read.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was read.',
}),
)),
('it:exec:file:write', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to / modified the existing file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was written to/modified.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was written to/modified.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was modified.',
}),
)),
('it:exec:reg:get', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was read.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was read.',
}),
)),
('it:exec:reg:set', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was written to.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was written to.',
}),
)),
('it:exec:reg:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted data from the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted data from the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted data from the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the data from the registry was deleted.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was deleted.',
}),
)),
('it:app:snort:rule', {}, (
('text', ('str', {}), {
'doc': 'The snort rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the snort rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
)),
('it:app:snort:hit', {}, (
('rule', ('it:app:snort:rule', {}), {
'doc': 'The snort rule that matched the flow.'}),
('flow', ('inet:flow', {}), {
'doc': 'The inet:flow that matched the snort rule.'}),
('src', ('inet:addr', {}), {
'doc': 'The source address of the flow that caused the hit.'}),
('src:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the flow that caused the hit.'}),
('src:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the flow that caused the hit.'}),
('src:port', ('inet:port', {}), {
'doc': 'The source port of the flow that caused the hit.'}),
('dst', ('inet:addr', {}), {
'doc': 'The destination address of the flow that caused the hit.'}),
('dst:ipv4', ('inet:ipv4', {}), {
'doc': 'The destination IPv4 address of the flow that caused the hit.'}),
('dst:ipv6', ('inet:ipv6', {}), {
'doc': 'The destination IPv6 address of the flow that caused the hit.'}),
('dst:port', ('inet:port', {}), {
'doc': 'The destination port of the flow that caused the hit.'}),
('time', ('time', {}), {
'doc': 'The time of the network flow that caused the hit.'}),
('sensor', ('it:host', {}), {
'doc': 'The sensor host node that produced the hit.'}),
('version', ('it:semver', {}), {
'doc': 'The version of the rule at the time of match.'}),
)),
('it:app:yara:rule', {}, (
('text', ('str', {}), {
'doc': 'The YARA rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the YARA rule.'}),
('author', ('ps:contact', {}), {
'doc': 'Contact info for the author of the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
('enabled', ('bool', {}), {
'doc': 'The rule enabled status to be used for YARA evaluation engines.'}),
)),
('it:app:yara:match', {}, (
('rule', ('it:app:yara:rule', {}), {
'ro': True,
'doc': 'The YARA rule that matched the file.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that matched the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:app:yara:procmatch', {}, (
('rule', ('it:app:yara:rule', {}), {
'doc': 'The YARA rule that matched the process.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The process that matched the YARA rule.'}),
('time', ('time', {}), {
'doc': 'The time that the YARA engine matched the process to the rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:reveng:function', {}, (
('name', ('str', {}), {
'doc': 'The name of the function.'}),
('description', ('str', {}), {
'doc': 'Notes concerning the function.'}),
('impcalls', ('array', {'type': 'it:reveng:impfunc'}), {
'doc': 'Calls to imported library functions within the scope of the function.',
}),
('strings', ('array', {'type': 'it:dev:str', 'uniq': True}), {
'doc': 'An array of strings referenced within the function.',
}),
)),
('it:reveng:filefunc', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that contains the function.'}),
('va', ('int', {}), {
'doc': 'The virtual address of the first codeblock of the function.'}),
('rank', ('int', {}), {
'doc': 'The function rank score used to evaluate if it exhibits interesting behavior.'}),
('complexity', ('int', {}), {
'doc': 'The complexity of the function.'}),
('funccalls', ('array', {'type': 'it:reveng:filefunc'}), {
'doc': 'Other function calls within the scope of the function.',
}),
)),
('it:reveng:funcstr', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('string', ('str', {}), {
'ro': True,
'doc': 'The string that the function references.'}),
)),
('it:reveng:impfunc', {}, ()),
),
}
name = 'it'
return ((name, modl), )
|
1718 | import mxnet as mx
def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True):
net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis)
hidden_all = []
for seq_index in range(seq_len):
hidden_all.append(net[seq_index])
net = hidden_all
return net
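# Hedged usage sketch (added for illustration; assumes MXNet 1.x with the classic
# symbolic API). It shows that the helper above turns one (batch, seq_len, feat)
# symbol into a list of seq_len per-timestep symbols.
if __name__ == '__main__':
    data = mx.sym.Variable('data')  # placeholder for a (batch, seq_len, feat) input
    steps = slice_symbol_to_seq_symobls(data, seq_len=5, axis=1)
    print(len(steps))  # -> 5 symbols, one per timestep, each (batch, feat)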
|
1776 | import logging
from ariadne import MutationType, convert_kwargs_to_snake_case
from config import clients, messages, queue
mutation = MutationType()
@mutation.field("createMessage")
@convert_kwargs_to_snake_case
async def resolve_create_message(obj, info, content, client_id):
try:
message = {"content": content, "client_id": client_id}
messages.append(message)
await queue.put(message)
return {"success": True, "message": message}
except Exception as error:
return {"success": False, "errors": [str(error)]}
@mutation.field("createClient")
@convert_kwargs_to_snake_case
async def resolve_create_client(obj, info, client_id):
try:
logging.info(f"Client id: {client_id}")
if not clients.get(client_id):
client = {"client_id": client_id}
clients[client_id] = client
return {"success": True, "client": client}
return {"success": False, "errors": ["Client is taken"]}
except Exception as error:
return {"success": False, "errors": [str(error)]}
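# Hedged wiring sketch (added for illustration): one way these resolvers could be
# bound into an executable schema. The SDL below is an assumption that mirrors the
# resolver and argument names used above; it is not the project's actual schema.
from ariadne import gql, make_executable_schema, snake_case_fallback_resolvers

type_defs = gql("""
    type Message { content: String clientId: String }
    type Client { clientId: String }
    type MessageResult { success: Boolean! errors: [String] message: Message }
    type ClientResult { success: Boolean! errors: [String] client: Client }
    type Query { ok: Boolean }
    type Mutation {
        createMessage(content: String!, clientId: String!): MessageResult
        createClient(clientId: String!): ClientResult
    }
""")

# snake_case_fallback_resolvers lets the camelCase fields above resolve from the
# snake_case dict keys returned by the resolvers.
schema = make_executable_schema(type_defs, mutation, snake_case_fallback_resolvers)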
|
1779 | from typing import Dict, List, Any
from ..df.types import Definition
from ..df.base import op
from ..util.data import traverse_get
MAPPING = Definition(name="mapping", primitive="map")
MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]")
MAPPING_KEY = Definition(name="key", primitive="str")
MAPPING_VALUE = Definition(name="value", primitive="generic")
@op(
name="dffml.mapping.extract",
inputs={"mapping": MAPPING, "traverse": MAPPING_TRAVERSE},
outputs={"value": MAPPING_VALUE},
)
def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):
"""
Extracts value from a given mapping.
Parameters
----------
mapping : dict
The mapping to extract the value from.
traverse : list[str]
A list of keys to traverse through the mapping dictionary and extract the values.
Returns
-------
dict
A dictionary containing the value of the keys.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle)
>>>
>>> dataflow.seed.append(
... Input(
... value=[mapping_extract_value.op.outputs["value"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value={"key1": {"key2": 42}},
... definition=mapping_extract_value.op.inputs["mapping"],
... ),
... Input(
... value=["key1", "key2"],
... definition=mapping_extract_value.op.inputs["traverse"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'value': 42}
"""
return {"value": traverse_get(mapping, *traverse)}
@op(
name="dffml.mapping.create",
inputs={"key": MAPPING_KEY, "value": MAPPING_VALUE},
outputs={"mapping": MAPPING},
)
def create_mapping(key: str, value: Any):
"""
Creates a mapping of a given key and value.
Parameters
----------
key : str
The key for the mapping.
value : Any
The value for the mapping.
Returns
-------
dict
A dictionary containing the mapping created.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(create_mapping, GetSingle)
>>> dataflow.seed.append(
... Input(
... value=[create_mapping.op.outputs["mapping"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value="key1", definition=create_mapping.op.inputs["key"],
... ),
... Input(
... value=42, definition=create_mapping.op.inputs["value"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'mapping': {'key1': 42}}
"""
return {"mapping": {key: value}}
|
1789 | import unittest
import base
class Test(base.BaseScriptTest, unittest.TestCase):
command_line = "./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8."
input_stdin = base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.bed")
output_stdout = base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.maf")
|
1793 | import svhn2mnist
import usps
import syn2gtrsb
import syndig2svhn
def Generator(source, target, pixelda=False):
if source == 'usps' or target == 'usps':
return usps.Feature()
elif source == 'svhn':
return svhn2mnist.Feature()
elif source == 'synth':
return syn2gtrsb.Feature()
def Classifier(source, target):
if source == 'usps' or target == 'usps':
return usps.Predictor()
if source == 'svhn':
return svhn2mnist.Predictor()
if source == 'synth':
return syn2gtrsb.Predictor()
|
1800 | from datetime import datetime
from kubernetes import client
from kubernetes.client.rest import ApiException
import os
import time
import yaml
from tests import config as conf
import tests.utils as ut
def remove_clusterrole_binding(shipper_name, crb_name):
# remove clusterrolebind
k8s_client = client.RbacAuthorizationV1Api()
try:
k8s_client.delete_cluster_role_binding(crb_name)
print(f"\nsuccessfully deleted: {crb_name}")
except Exception as e:
print(f"\n{shipper_name} cluster role binding deletion has failed, please manually delete {crb_name}:")
print(f"kubectl delete clusterrolebinding {crb_name}")
def filebeat_teardown(namespace):
# remove clusterrolebind
# TODO: find a solution for sharing the name both here and in the kube object
crb_name = f"filebeat-cluster-role-binding-{namespace}"
remove_clusterrole_binding("filebeat", crb_name)
def fluent_bit_teardown(namespace):
# remove clusterrolebind
# TODO: find a solution for sharing the name both here and in the kube object
crb_name = f"fluent-bit-clusterrole-binding-{namespace}"
remove_clusterrole_binding("fluent-bit", crb_name)
def add_elastic_cluster(namespace):
print("\nDeploying ElasticSearch\n")
add_deployment_dir(namespace, conf.ELASTIC_CONF_DIR)
def add_filebeat_cluster(namespace):
print("\nDeploying FileBeat\n")
add_deployment_dir(namespace, conf.FILEBEAT_CONF_DIR)
def add_fluent_bit_cluster(namespace):
print("\nDeploying Fluent-bit\n")
add_deployment_dir(namespace, conf.FLUENT_BIT_CONF_DIR)
def add_kibana_cluster(namespace):
print("\nDeploying Kibana\n")
add_deployment_dir(namespace, conf.KIBANA_CONF_DIR)
def add_logstash_cluster(namespace):
print("\nDeploying LogStash\n")
add_deployment_dir(namespace, conf.LOGSTASH_CONF_DIR)
def add_deployment_dir(namespace, dir_path, delete=False):
with open(os.path.join(dir_path, 'dep_order.txt')) as f:
dep_order = f.readline()
dep_lst = [x.strip() for x in dep_order.split(',')]
print(dep_lst)
phrases_to_replace = ["(?<!_)NAMESPACE", "REP_ES_USER", "REP_ES_PASS"]
values_for_replacement = [namespace, conf.ES_USER_LOCAL, conf.ES_PASS_LOCAL]
for filename in dep_lst:
# replace all phrases with the actual values if exists
modified_file_path, is_change = ut.duplicate_file_and_replace_phrases(
dir_path, filename, f"{namespace}_{filename}", phrases_to_replace, values_for_replacement
)
print(f"applying file: {filename}")
with open(modified_file_path) as f:
dep = yaml.safe_load(f)
if modified_file_path != os.path.join(dir_path, filename) and is_change:
# remove modified file
ut.delete_file(modified_file_path)
name = dep["metadata"]["name"]
if dep['kind'] == 'StatefulSet':
k8s_client = client.AppsV1Api()
if not delete:
k8s_client.create_namespaced_stateful_set(body=dep, namespace=namespace)
else:
k8s_client.delete_namespaced_stateful_set(name=name, namespace=namespace)
elif dep['kind'] == 'DaemonSet':
k8s_client = client.AppsV1Api()
k8s_client.create_namespaced_daemon_set(body=dep, namespace=namespace)
elif dep['kind'] == 'Deployment':
k8s_client = client.AppsV1Api()
k8s_client.create_namespaced_deployment(body=dep, namespace=namespace)
elif dep['kind'] == 'Service':
try:
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_service(body=dep, namespace=namespace)
except ApiException as e:
if e.status == 409:
print(f"Service exists: {dep['metadata']['name']}")
continue
raise e
elif dep['kind'] == 'PodDisruptionBudget':
k8s_client = client.PolicyV1beta1Api()
k8s_client.create_namespaced_pod_disruption_budget(body=dep, namespace=namespace)
elif dep["kind"] == 'Role':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.create_namespaced_role(body=dep, namespace=namespace)
elif dep["kind"] == 'ClusterRole':
try:
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.create_cluster_role(body=dep)
except ApiException as e:
if e.status == 409:
print(f"cluster role already exists")
continue
raise e
elif dep["kind"] == 'RoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
dep["subjects"][0]["namespace"] = namespace
k8s_client.create_namespaced_role_binding(body=dep, namespace=namespace)
elif dep["kind"] == 'ClusterRoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
try:
k8s_client.create_cluster_role_binding(body=dep)
except ApiException as e:
if e.status == 409:
print(f"cluster role binding already exists")
continue
raise e
elif dep["kind"] == 'ConfigMap':
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_config_map(body=dep, namespace=namespace)
elif dep["kind"] == 'ServiceAccount':
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_service_account(body=dep, namespace=namespace)
print("\nDone\n")
def remove_deployment_dir(namespace, dir_path):
with open(os.path.join(dir_path, 'dep_order.txt')) as f:
dep_order = f.readline()
dep_lst = [x.strip() for x in dep_order.split(',')]
print(dep_lst)
for filename in dep_lst:
print(f"deleting {filename}")
with open(os.path.join(dir_path, filename)) as f:
dep = yaml.safe_load(f)
name = dep["metadata"]["name"]
if dep['kind'] == 'StatefulSet':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_stateful_set(name=name, namespace=namespace)
elif dep['kind'] == 'DaemonSet':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_daemon_set(name=name, namespace=namespace)
elif dep['kind'] == 'Deployment':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_deployment(name=name, namespace=namespace)
elif dep['kind'] == 'Service':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_service(name=name, namespace=namespace, grace_period_seconds=0)
delete_func = k8s_client.delete_namespaced_service
list_func = k8s_client.list_namespaced_service
wait_for_namespaced_deletion(name, namespace, delete_func, list_func)
elif dep['kind'] == 'PodDisruptionBudget':
k8s_client = client.PolicyV1beta1Api()
k8s_client.delete_namespaced_pod_disruption_budget(name=name, namespace=namespace)
elif dep["kind"] == 'Role':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_namespaced_role(name=name, namespace=namespace)
elif dep["kind"] == 'RoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_namespaced_role_binding(name=name, namespace=namespace)
elif dep["kind"] == 'ClusterRoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_cluster_role_binding(name=name)
elif dep["kind"] == 'ConfigMap':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_config_map(name=name, namespace=namespace)
elif dep["kind"] == 'ServiceAccount':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_service_account(name=name, namespace=namespace)
print("\nDone\n")
def wait_for_namespaced_deletion(name, namespace, deletion_func, list_func, timeout=15):
deleted = False
orig_timeout = timeout
while not deleted:
# find by name and delete requested item
for item in list_func(namespace).items:
if item.metadata.name == name:
if timeout < 0:
raise TimeoutError(f"{orig_timeout} was not enough for deleting item:\n{item}\n")
deletion_func(name=name, namespace=namespace)
print(f"service {name} was not deleted, retrying")
time.sleep(1)
timeout -= 1
# validate item was deleted
for item in list_func(namespace).items:
deleted = True
if item.metadata.name == name:
deleted = False
return deleted
def wait_for_daemonset_to_be_ready(name, namespace, timeout=None):
wait_for_to_be_ready("daemonset", name, namespace, timeout=timeout)
def resolve_read_status_func(obj_name):
if obj_name == "daemonset":
return client.AppsV1Api().read_namespaced_daemon_set_status
else:
raise ValueError(f"resolve_read_status_func: {obj_name} is not a valid value")
def wait_for_to_be_ready(obj_name, name, namespace, timeout=None):
start = datetime.now()
while True:
read_func = resolve_read_status_func(obj_name)
resp = read_func(name=name, namespace=namespace)
total_sleep_time = (datetime.now()-start).total_seconds()
number_ready = resp.status.number_ready
updated_number_scheduled = resp.status.updated_number_scheduled
if number_ready and updated_number_scheduled and number_ready == updated_number_scheduled:
print("Total time waiting for {3} {0} [size: {1}]: {2} sec".format(name, number_ready, total_sleep_time,
obj_name))
break
print("{0}/{1} pods ready {2} sec ".format(number_ready, updated_number_scheduled, total_sleep_time), end="\r")
time.sleep(1)
if timeout and total_sleep_time > timeout:
raise Exception(f"Timeout waiting for {obj_name} to be ready")
|
1810 | import json
import inspect
import hashlib
from _plotly_utils.utils import PlotlyJSONEncoder
from dash.long_callback.managers import BaseLongCallbackManager
class CeleryLongCallbackManager(BaseLongCallbackManager):
def __init__(self, celery_app, cache_by=None, expire=None):
"""
Long callback manager that runs callback logic on a celery task queue,
and stores results using a celery result backend.
:param celery_app:
A celery.Celery application instance that must be configured with a
result backend. See the celery documentation for information on
configuration options.
:param cache_by:
A list of zero-argument functions. When provided, caching is enabled and
the return values of these functions are combined with the callback
function's input arguments and source code to generate cache keys.
:param expire:
If provided, a cache entry will be removed when it has not been accessed
for ``expire`` seconds. If not provided, the lifetime of cache entries
is determined by the default behavior of the celery result backend.
"""
try:
import celery # pylint: disable=import-outside-toplevel,import-error
from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error
DisabledBackend,
)
except ImportError as missing_imports:
raise ImportError(
"""\
CeleryLongCallbackManager requires extra dependencies which can be installed doing
$ pip install "dash[celery]"\n"""
) from missing_imports
if not isinstance(celery_app, celery.Celery):
raise ValueError("First argument must be a celery.Celery object")
if isinstance(celery_app.backend, DisabledBackend):
raise ValueError("Celery instance must be configured with a result backend")
super().__init__(cache_by)
self.handle = celery_app
self.expire = expire
def terminate_job(self, job):
if job is None:
return
self.handle.control.terminate(job)
def terminate_unhealthy_job(self, job):
task = self.get_task(job)
if task and task.status in ("FAILURE", "REVOKED"):
return self.terminate_job(job)
return False
def job_running(self, job):
future = self.get_task(job)
return future and future.status in (
"PENDING",
"RECEIVED",
"STARTED",
"RETRY",
"PROGRESS",
)
def make_job_fn(self, fn, progress, args_deps):
return _make_job_fn(fn, self.handle, progress, args_deps)
def get_task(self, job):
if job:
return self.handle.AsyncResult(job)
return None
def clear_cache_entry(self, key):
self.handle.backend.delete(key)
def call_job_fn(self, key, job_fn, args):
task = job_fn.delay(key, self._make_progress_key(key), args)
return task.task_id
def get_progress(self, key):
progress_key = self._make_progress_key(key)
progress_data = self.handle.backend.get(progress_key)
if progress_data:
return json.loads(progress_data)
return None
def result_ready(self, key):
return self.handle.backend.get(key) is not None
def get_result(self, key, job):
# Get result value
result = self.handle.backend.get(key)
if result is None:
return None
result = json.loads(result)
# Clear result if not caching
if self.cache_by is None:
self.clear_cache_entry(key)
else:
if self.expire:
# Set/update expiration time
self.handle.backend.expire(key, self.expire)
self.clear_cache_entry(self._make_progress_key(key))
self.terminate_job(job)
return result
def _make_job_fn(fn, celery_app, progress, args_deps):
cache = celery_app.backend
# Hash function source and module to create a unique (but stable) celery task name
fn_source = inspect.getsource(fn)
fn_str = fn_source
fn_hash = hashlib.sha1(fn_str.encode("utf-8")).hexdigest()
@celery_app.task(name=f"long_callback_{fn_hash}")
def job_fn(result_key, progress_key, user_callback_args, fn=fn):
def _set_progress(progress_value):
cache.set(progress_key, json.dumps(progress_value, cls=PlotlyJSONEncoder))
maybe_progress = [_set_progress] if progress else []
if isinstance(args_deps, dict):
user_callback_output = fn(*maybe_progress, **user_callback_args)
elif isinstance(args_deps, (list, tuple)):
user_callback_output = fn(*maybe_progress, *user_callback_args)
else:
user_callback_output = fn(*maybe_progress, user_callback_args)
cache.set(result_key, json.dumps(user_callback_output, cls=PlotlyJSONEncoder))
return job_fn
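# Hedged usage sketch (added for illustration, not part of the original module).
# The Redis URLs and the Dash wiring are assumptions; any Celery app configured
# with a result backend would work.
if __name__ == "__main__":
    import celery
    celery_app = celery.Celery(
        "long_callbacks",
        broker="redis://localhost:6379/0",   # assumed local Redis broker
        backend="redis://localhost:6379/1",  # a result backend is required
    )
    manager = CeleryLongCallbackManager(celery_app, expire=60)
    # The manager would then be handed to Dash, e.g.:
    #   app = dash.Dash(__name__, long_callback_manager=manager)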
|
1838 | import os
import os.path as osp
import numpy as np
from joblib import Parallel, delayed
from tensorflow.keras.utils import get_file
from tqdm import tqdm
from spektral.data import Dataset, Graph
from spektral.utils import label_to_one_hot, sparse
from spektral.utils.io import load_csv, load_sdf
ATOM_TYPES = [1, 6, 7, 8, 9]
BOND_TYPES = [1, 2, 3, 4]
class QM9(Dataset):
"""
The QM9 chemical data set of small molecules.
In this dataset, nodes represent atoms and edges represent chemical bonds.
There are 5 possible atom types (H, C, N, O, F) and 4 bond types (single,
double, triple, aromatic).
Node features represent the chemical properties of each atom and include:
- The atomic number, one-hot encoded;
- The atom's position in the X, Y, and Z dimensions;
- The atomic charge;
- The mass difference from the monoisotope;
The edge features represent the type of chemical bond between two atoms,
one-hot encoded.
Each graph has a 19-dimensional label for regression.
**Arguments**
- `amount`: int, load this many molecules instead of the full dataset
(useful for debugging).
- `n_jobs`: number of CPU cores to use for reading the data (-1, to use all
available cores).
"""
url = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"
def __init__(self, amount=None, n_jobs=1, **kwargs):
self.amount = amount
self.n_jobs = n_jobs
super().__init__(**kwargs)
def download(self):
get_file(
"qm9.tar.gz",
self.url,
extract=True,
cache_dir=self.path,
cache_subdir=self.path,
)
os.remove(osp.join(self.path, "qm9.tar.gz"))
def read(self):
print("Loading QM9 dataset.")
sdf_file = osp.join(self.path, "gdb9.sdf")
data = load_sdf(sdf_file, amount=self.amount) # Internal SDF format
def read_mol(mol):
x = np.array([atom_to_feature(atom) for atom in mol["atoms"]])
a, e = mol_to_adj(mol)
return x, a, e
data = Parallel(n_jobs=self.n_jobs)(
delayed(read_mol)(mol) for mol in tqdm(data, ncols=80)
)
x_list, a_list, e_list = list(zip(*data))
# Load labels
labels_file = osp.join(self.path, "gdb9.sdf.csv")
labels = load_csv(labels_file)
labels = labels.set_index("mol_id").values
if self.amount is not None:
labels = labels[: self.amount]
return [
Graph(x=x, a=a, e=e, y=y)
for x, a, e, y in zip(x_list, a_list, e_list, labels)
]
def atom_to_feature(atom):
atomic_num = label_to_one_hot(atom["atomic_num"], ATOM_TYPES)
coords = atom["coords"]
charge = atom["charge"]
iso = atom["iso"]
return np.concatenate((atomic_num, coords, [charge, iso]), -1)
def mol_to_adj(mol):
row, col, edge_features = [], [], []
for bond in mol["bonds"]:
start, end = bond["start_atom"], bond["end_atom"]
row += [start, end]
col += [end, start]
edge_features += [bond["type"]] * 2
a, e = sparse.edge_index_to_matrix(
edge_index=np.array((row, col)).T,
edge_weight=np.ones_like(row),
edge_features=label_to_one_hot(edge_features, BOND_TYPES),
)
return a, e
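# Quick-start sketch (added for illustration). Passing a small `amount` keeps the
# read fast while debugging; the full archive is still downloaded on first use.
if __name__ == "__main__":
    dataset = QM9(amount=100, n_jobs=1)
    print(dataset)  # e.g. QM9(n_graphs=100)
    graph = dataset[0]
    print(graph.x.shape, graph.e.shape, graph.y.shape)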
|
1851 | from copy import copy
try:
# Python 2 only:
from StringIO import StringIO
# create a variant that can serve as a context manager
class StringIO(StringIO):
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
except ImportError:
from io import StringIO
try: # python 3.5+
from typing import Dict, Any
from yamlable import Y
except ImportError:
pass
import pytest
from yaml import dump, load
from yamlable import YamlAble, yaml_info
def test_yamlable_incomplete_description():
""" Tests that if __yaml_tag_suffix__ is not provided a YamlAble subclass cannot be declared """
with pytest.raises(NotImplementedError) as err_info:
class Foo(YamlAble):
# __yaml_tag_suffix__ = 'foo'
def __to_yaml_dict__(self):
# type: (...) -> Dict[str, Any]
return copy(vars(self))
@classmethod
def __from_yaml_dict__(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
return Foo(**dct)
# instantiate
f = Foo()
# dump
f.dumps_yaml()
assert "does not seem to have a non-None '__yaml_tag_suffix__' field" in str(err_info.value)
def test_yamlable():
""" Tests that YamlAble works correctly """
@yaml_info(yaml_tag_ns='yaml.tests')
class Foo(YamlAble):
# __yaml_tag_suffix__ = 'foo' not needed: we used @yaml_info
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
def __to_yaml_dict__(self):
# type: (...) -> Dict[str, Any]
return copy(vars(self))
@classmethod
def __from_yaml_dict__(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
return Foo(**dct)
# instantiate
f = Foo(1, 'hello') # note:
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.Foo
a: 1
b: hello
"""
# dump io
class MemorizingStringIO(StringIO):
""" A StringIO object that memorizes its buffer when it is closed (as opposed to the standard StringIO) """
def close(self):
self.value = self.getvalue()
# super(StringIO, self).close() # this does not work with python 2 old-style classes (StringIO is one)
StringIO.close(self)
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == Foo.loads_yaml(y)
# load io
assert f == Foo.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
def test_yamlable_legacy_method_names():
""" Tests that YamlAbleMixIn works correctly """
global enc
global dec
enc, dec = False, False
@yaml_info(yaml_tag_ns='yaml.tests')
class FooLegacy(YamlAble):
# __yaml_tag_suffix__ = 'foo' not needed: we used @yaml_info
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
def to_yaml_dict(self):
# type: (...) -> Dict[str, Any]
global enc
enc = True
return copy(vars(self))
@classmethod
def from_yaml_dict(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
global dec
dec = True
return FooLegacy(**dct)
# instantiate
f = FooLegacy(1, 'hello')
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.FooLegacy
a: 1
b: hello
"""
# dump io
class MemorizingStringIO(StringIO):
""" A StringIO object that memorizes its buffer when it is closed (as opposed to the standard StringIO) """
def close(self):
self.value = self.getvalue()
# super(StringIO, self).close() # this does not work with python 2 old-style classes (StringIO is one)
StringIO.close(self)
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == FooLegacy.loads_yaml(y)
# load io
assert f == FooLegacy.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
assert enc
assert dec
# TODO override so that tag is not supported, to check error message
def test_yamlable_not_supported():
@yaml_info(yaml_tag_ns='yaml.tests')
class Foo_Err(YamlAble):
# __yaml_tag_suffix__ = 'foo' not needed: we used @yaml_info
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
def __to_yaml_dict__(self):
# type: (...) -> Dict[str, Any]
return copy(vars(self))
@classmethod
def __from_yaml_dict__(cls, # type: Type[Y]
dct, # type: Dict[str, Any]
yaml_tag # type: str
):
# type: (...) -> Y
return Foo_Err(**dct)
@classmethod
def is_yaml_tag_supported(cls,
yaml_tag # type: str
):
# type: (...) -> bool
# ALWAYS return false
return False
with pytest.raises(TypeError) as err_info:
Foo_Err.loads_yaml("!yamlable/yaml.tests.Foo_Err {a: 1, b: hello}\n")
assert "No YamlAble subclass found able to decode object" in str(err_info.value)
def test_yamlable_default_impl():
""" tests that the default implementation works """
@yaml_info(yaml_tag_ns='yaml.tests')
class Foo_Default(YamlAble):
def __init__(self, a, b):
self.a = a
self.b = b
f = Foo_Default(1, 'hello')
s = """!yamlable/yaml.tests.Foo_Default
a: 1
b: hello
"""
assert dump(f, default_flow_style=False) == s
assert dump(load(dump(load(s))), default_flow_style=False) == s
def test_help_yaml_info():
@yaml_info("com.example.MyFoo")
class Foo(YamlAble):
pass
assert Foo.__yaml_tag_suffix__ == "com.example.MyFoo"
@yaml_info(yaml_tag_ns="com.example")
class Foo(YamlAble):
pass
assert Foo.__yaml_tag_suffix__ == "com.example.Foo"
assert Foo().dumps_yaml() == """!yamlable/com.example.Foo {}
"""
def test_abstract_parent_error():
"""This tests that we can define an abstract parent class with the YamlAble behaviour and inherit it"""
class AbstractFooE(YamlAble):
pass
class FooError(AbstractFooE):
"""
This class inherits from the parent without redefining a yaml tag
"""
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
# instantiate
e = FooError(1, 'hello')
# dump
with pytest.raises(NotImplementedError):
e.dumps_yaml()
def test_abstract_parent():
"""This tests that we can define an abstract parent class with the YamlAble behaviour and inherit it"""
class AbstractFooV(YamlAble):
pass
@yaml_info(yaml_tag_ns='yaml.tests')
class FooValid(AbstractFooV):
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return vars(self) == vars(other)
# instantiate
f = FooValid(1, 'hello') # note:
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.FooValid
a: 1
b: hello
"""
# dump io
class MemorizingStringIO(StringIO):
""" A StringIO object that memorizes its buffer when it is closed (as opposed to the standard StringIO) """
def close(self):
self.value = self.getvalue()
# super(StringIO, self).close() # this does not work with python 2 old-style classes (StringIO is one)
StringIO.close(self)
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == FooValid.loads_yaml(y)
# load io
assert f == FooValid.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
|
1859 | import scene
class MyScene(scene.Scene):
def setup(self):
self.label_node = scene.LabelNode('A',
position=(100,400), parent=self)
self.start_flag = False
def update(self):
if self.start_flag:
x,y = self.label_node.position
if x < 340:
self.label_node.position = (x+2, y)
else:
self.start_flag = False
def touch_ended(self, touch):
self.start_flag = True
scene.run(MyScene())
|
1918 | import os
import sys
DIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )
def Settings( **kwargs ):
return {
'interpreter_path': sys.executable,
'sys_path': [ os.path.join( DIR_OF_THIS_SCRIPT, 'third_party' ) ]
}
|
1925 | import numpy as np
import xml.etree.ElementTree as ET
class Geom(object):
def __init__(self, geom):
self.xml = geom
self.params = []
def get_params(self):
return self.params.copy()
def set_params(self, new_params):
self.params = new_params
def update_point(self, p, new_params):
pass
def update_xml(self):
pass
def update(self, new_params):
self.set_params(new_params)
self.update_xml()
def get_smallest_z(self):
pass
def get_param_limits(self):
pass
def get_param_names(self):
pass
def get_volume(self):
pass
class Sphere(Geom):
min_radius = .05
max_radius = .4
def __init__(self, geom):
self.xml = geom
self.params = [float(self.xml.get('size'))] # radius
self.center = np.array([float(x) for x in self.xml.get('pos').split()])
def update_point(self, p, new_params):
return ((p - self.center) * new_params[0] / self.params[0]) + self.center
def update_xml(self):
self.xml.set('size', str(self.params[0]))
def get_smallest_z(self):
return self.center[2] - self.params[0]
def get_param_limits(self):
return [[self.min_radius], [self.max_radius]]
def get_param_names(self):
return ['radius']
def get_volume(self):
return 4./3. * np.pi * self.params[0] ** 3
class Capsule(Geom):
min_length = 0.175
max_length = 0.8
min_radius = 0.035
max_radius = 0.085
def __init__(self, geom):
self.xml = geom
fromto = [float(x) for x in self.xml.get('fromto').split()]
self.p1 = np.array(fromto[:3])
self.p2 = np.array(fromto[3:])
length = np.sqrt(np.sum((self.p2 - self.p1) ** 2))
radius = float(self.xml.get('size'))
self.params = [length, radius]
self.axis = (self.p2 - self.p1) / length
def update_point(self, p, new_params):
lfac = p.dot(self.axis) * self.axis
rfac = p - lfac
return p + lfac * (-1.0 + new_params[0] / self.params[0])# + rfac * (new_params[1] / self.params[1])
def update_xml(self):
self.xml.set('fromto', ' '.join([str(x) for x in np.concatenate([self.p1, self.p2])]))
self.xml.set('size', str(self.params[1])) # radius
def set_params(self, new_params):
p1 = self.update_point(self.p1, new_params)
p2 = self.update_point(self.p2, new_params)
# update only after computing p1, p2
self.p1 = p1
self.p2 = p2
super().set_params(new_params)
def get_smallest_z(self):
return min(self.p1[2], self.p2[2]) - self.params[1]
def get_param_limits(self):
return [[self.min_length, self.min_radius], [self.max_length, self.max_radius]]
def get_param_names(self):
return ['length','radius']
def get_volume(self):
return 4./3. * np.pi * self.params[1]**3 + self.params[0] * np.pi * self.params[1]**2
class Body:
geoms = {'sphere': Sphere, 'capsule': Capsule} # dictionary of legal geometry types
def __init__(self, body, worldbody=False):
self.xml = body
self.worldbody = worldbody
geom_xml = body.find('geom') # assume only one geometry per body
self.geom = self.geoms[geom_xml.get('type')](geom_xml)
self.joints = [j for j in body.findall('joint') if 'ignore' not in j.get('name')]
self.parts = [Body(b) for b in body.findall('body')]
pos = [b.get('pos') for b in body.findall('body')]
self.part_positions = [np.array([float(x) for x in p.split()]) for p in pos]
pos = [j.get('pos') for j in self.joints]
self.joint_positions = [np.array([float(x) for x in p.split()]) for p in pos]
self.n = len(self.geom.get_params())
self.n_all_params = len(self.get_params())
self.zmin = float(self.xml.get("pos").split()[2]) - self.get_height()
def get_height(self):
max_height = -self.geom.get_smallest_z()
for body, pos in zip(self.parts, self.part_positions):
max_height = max(max_height, body.get_height() - pos[2])
return max_height
def update_initial_position(self):
pos = self.xml.get("pos").split()
pos[2] = str(self.get_height() + self.zmin)
self.xml.set("pos", ' '.join(pos))
def update_xml(self):
for body, pos in zip(self.parts, self.part_positions):
body.xml.set('pos', ' '.join([str(x) for x in pos]))
for joint, pos in zip(self.joints, self.joint_positions):
joint.set('pos', ' '.join([str(x) for x in pos]))
def set_body_positions(self, new_params):
for i, pos in enumerate(self.part_positions):
self.part_positions[i] = self.geom.update_point(pos, new_params)
for i, pos in enumerate(self.joint_positions):
self.joint_positions[i] = self.geom.update_point(pos, new_params)
def update(self, new_params):
self.set_body_positions(new_params)
self.geom.update(new_params)
self.update_xml()
def get_params(self):
params = self.geom.get_params()
for body in self.parts:
params += body.get_params()
return params
def get_param_limits(self):
limits = self.geom.get_param_limits()
for body in self.parts:
body_limits = body.get_param_limits()
limits[0] += body_limits[0]
limits[1] += body_limits[1]
return limits
def get_param_names(self):
name = self.xml.get('name')
param_names = [name + '-' + p for p in self.geom.get_param_names()]
for body in self.parts:
param_names += body.get_param_names()
return param_names
def update_params(self, new_params):
if self.worldbody: assert len(new_params) == self.n_all_params, "Wrong number of parameters"
self.update(new_params[:self.n])
remaining_params = new_params[self.n:]
for body in self.parts:
remaining_params = body.update_params(remaining_params)
if self.worldbody:
self.update_initial_position()
else:
return remaining_params
def get_body_names(self):
names = [self.xml.get('name')]
for body in self.parts:
names += body.get_body_names()
return names
def get_joints(self):
joints = {}
for body,pos in zip(self.parts, self.part_positions):
for j in body.joints:
joints[j.get('name')] = (self.xml.get('name'), body.xml.get('name'), self.geom, body.geom, pos)
joints.update(body.get_joints())
return joints
def get_volumes(self):
volumes = {}
if len(self.joints) > 0:
for j in self.joints:
v1 = self.geom.get_volume()
v2 = sum([b.geom.get_volume() for b in self.parts])
volumes[j.get('name')] = np.array((v1, v2))
for body in self.parts:
volumes.update(body.get_volumes())
return volumes
class MuJoCoXmlRobot:
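# Loads a MuJoCo XML model and exposes flattened getters/setters for all body parameters (lengths/radii), writing updates back out to an XML file.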
def __init__(self, model_xml):
self.model_xml = model_xml
self.tree = ET.parse(self.model_xml)
worldbody = self.tree.getroot().find('worldbody')
self.body = Body(worldbody.find('body'), worldbody=True)
def get_params(self):
return self.body.get_params()
def get_param_limits(self):
return self.body.get_param_limits()
def get_param_names(self):
return self.body.get_param_names()
def get_height(self):
return self.body.get_height()
def get_joints(self):
return self.body.get_joints()
def get_volumes(self):
return self.body.get_volumes()
def update(self, params, xml_file=None):
if xml_file is None:
xml_file = self.model_xml
self.body.update_params(list(params))
self.tree.write(xml_file)
if __name__ == '__main__':
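# Smoke tests: rebuild each robot XML with known parameters, check that they round-trip, then render a short rollout of the modified hopper and encode it with ffmpeg.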
robot = MuJoCoXmlRobot('mujoco_assets/hopper.xml')
params = list(1.0 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/hopper_test.xml')
assert robot.get_params() == params
#assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/walker2d.xml')
params = [.4,.04,.5,.05,.55,.055,.6,.06,.5,.05,.55,.055,.6,.06]
robot.update(params, 'mujoco_assets/walker2d_test.xml')
assert robot.get_params() == params
assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/ant.xml')
params = [.2, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06]
robot.update(params, 'mujoco_assets/ant_test.xml')
assert robot.get_params() == params
assert robot.get_height() == .2
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/humanoid.xml')
params = list(.8 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/humanoid_test.xml')
assert robot.get_params() == params
print(robot.get_height())
#assert robot.get_height() == .6085
print(robot.get_param_limits())
print(robot.get_param_names())
import gym, roboschool
env = gym.make("RoboschoolHopper-v1")
env.unwrapped.model_xml = 'mujoco_assets/hopper_test.xml'
env.reset()
#env.render()
import os
from scipy.misc import imsave
import subprocess as sp
outdir = 'xml_vid'
os.makedirs(outdir, exist_ok=True)
i = 0
for _ in range(10):
env.reset()
for _ in range(100):
env.step(env.action_space.sample())
rgb = env.render('rgb_array')
imsave(os.path.join(outdir, '{:05d}.png'.format(i)), rgb)
i+=1
sp.call(['ffmpeg', '-r', '60', '-f', 'image2', '-i', os.path.join(outdir, '%05d.png'), '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', os.path.join(outdir, 'out.mp4')])
env.close()
|
1934 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic import TemplateView
urlpatterns = [
path('api-auth/', include('rest_framework.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('admin/', admin.site.urls),
path('api/', include('core.api.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
if not settings.DEBUG:
urlpatterns += [re_path(r'^.*',
TemplateView.as_view(template_name='index.html'))]
|
1946 | import h5py
import numpy as np
np.set_printoptions(threshold=np.nan)
from shutil import copyfile
copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained_bin.h5 using the data structure from dummy_lutnet.h5
bl = h5py.File("baseline_pruned.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_bin.h5", 'r+')
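# Copy weights, gammas, batch-norm statistics, residual-sign means and pruning masks from the pruned baseline network into the LUT-based parameter layout, layer by layer.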
# dense layer 1
bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
pret_w1[...] = np.array(bl_w1)
p_gamma[...] = np.array(bl_gamma)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 2
bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 3
bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 4
bl_w1 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 5
bl_w1 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# bn 1
bl_beta = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 2
bl_beta = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 3
bl_beta = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 4
bl_beta = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 5
bl_beta = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
pretrained.close()
|
1953 | from Ifc.ClassRegistry import ifc_class, ifc_abstract_class, ifc_fallback_class
@ifc_abstract_class
class IfcEntity:
"""
Generic IFC entity; intended only to be subclassed.
"""
def __init__(self, rtype, args):
"""
rtype: Resource type
args: Arguments in *reverse* order, so you can just args.pop() from it
"""
self.rtype = rtype
def __str__(self):
return self.rtype
def __json__(self):
return {'rtype': self.rtype}
@ifc_fallback_class
class IfcGenericEntity(IfcEntity):
"""
Generic IFC entity: type and args
"""
def __init__(self, rtype, args):
IfcEntity.__init__(self, rtype, args)
self.args = args
self.args.reverse()
def __str__(self):
return "Gen<{sup}>{a}".format(
sup=IfcEntity.__str__(self),
a=self.args)
@ifc_class
class IfcScalarValue(IfcEntity):
def __init__(self, rtype, args):
IfcEntity.__init__(self, rtype, args)
self.value = args.pop()
def __str__(self):
return str(self.value)
@ifc_class
class BOOLEAN(IfcScalarValue):
pass
@ifc_class
class REAL(IfcScalarValue):
pass
@ifc_class
class BINARY(IfcScalarValue):
pass
@ifc_class
class INTEGER(IfcScalarValue):
pass
@ifc_class
class NUMBER(IfcScalarValue):
pass
@ifc_class
class STRING(IfcScalarValue):
pass
@ifc_class
class LOGICAL(IfcScalarValue):
pass
class Omitted:
"""
Marked with '*', it indicates that a supertype defined the attribute, but in this subtype it is a derived
(calculated) value, so it no longer makes sense to explicitly assign a value to it.
"""
# TODO: Haven't tried if it can be handled 'just as expected'
def __init__(self):
pass
def __str__(self):
return "<omitted>"
def __json__(self):
return None
# class-level, enough to reference, no need to create multiple instances (doesn't hurt though)
omitted = Omitted()
class Reference:
"""
Refers to another entity by its index
"""
def __init__(self, index):
self.index = index
def __str__(self):
return "<#{idx}>".format(idx=self.index)
def __json__(self):
return {'ref': self.index}
class EnumValue:
"""
Item from some set of enumerated values.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return "<.{val}.>".format(val=self.value)
def __json__(self):
return self.value
@ifc_class
class STEPHeader(IfcEntity):
def __init__(self):
IfcEntity.__init__(self, "STEPHeader", [])
self.fields = {}
def add(self, e):
self.fields[e.rtype] = e
def __str__(self):
return "STEPHeader({f})".format(f=", ".join(map(lambda f: "{n}: {v}".format(n=f[0], v=str(f[1])), self.fields.iteritems())))
# vim: set sw=4 ts=4 et:
|
1995 | from feemodel.app.transient import TransientOnline
from feemodel.app.pools import PoolsOnlineEstimator
from feemodel.app.predict import Prediction
from feemodel.app.simonline import SimOnline
__all__ = [
'TransientOnline',
'PoolsOnlineEstimator',
'Prediction',
'SimOnline'
]
|
from typing import Any, Dict, List
from simulator.services.resources.directory import Directory
from simulator.services.services import Services
class Atlas(Directory):
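# Append-only collection backed by a directory: each object is saved under a numeric key and a metadata record tracks the next free index.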
def __init__(self, services: Services, name: str, parent: str, create: bool = False) -> None:
super().__init__(services, name, parent, create)
if create:
metadata: Dict[str, Any] = {
"next_index": 0,
}
self._save_metadata(metadata)
def append(self, obj: Any) -> None:
self.save(str(self._get_next_index()), obj)
self._increment_index()
def load_all(self, max_els: int = float("inf")) -> List[Any]:
ret: List[Any] = []
idx: int = 0
while idx < max_els:
obj: Any = self.load(str(idx))
if obj:
ret.append(obj)
idx += 1
else:
break
return ret
def _get_next_index(self) -> int:
metadata: Dict[str, Any] = self._get_metadata()
return metadata["next_index"]
def _increment_index(self) -> None:
metadata: Dict[str, Any] = self._get_metadata()
metadata["next_index"] += 1
self._save_metadata(metadata)
def _save_metadata(self, metadata: Dict[str, Any]) -> None:
super().save("metadata", metadata)
def _get_metadata(self) -> Dict[str, Any]:
return super().load("metadata")
|
2063 | from common.make_tx import make_swap_tx
from sol.handle_simple import handle_unknown_detect_transfers
def handle_metaplex(exporter, txinfo):
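# Exactly one inbound and one outbound transfer is recorded as a swap; anything else falls back to the generic transfer handler.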
transfers_in, transfers_out, _ = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
else:
handle_unknown_detect_transfers(exporter, txinfo)
def is_nft_mint(txinfo):
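# Heuristic: a MintTo instruction with one outbound transfer and either no inbound transfer or a single inbound transfer of amount 1 is treated as an NFT mint.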
log_instructions = txinfo.log_instructions
transfers_in, transfers_out, _ = txinfo.transfers_net
if "MintTo" in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 0:
return True
elif ("MintTo" in log_instructions
and len(transfers_out) == 1
and len(transfers_in) == 1
and transfers_in[0][0] == 1):
return True
else:
return False
def handle_nft_mint(exporter, txinfo):
transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
return
handle_unknown_detect_transfers(exporter, txinfo)
|
2086 | from flask import Flask
from flask_appconfig import HerokuConfig
def create_sample_app():
app = Flask('testapp')
HerokuConfig(app)
return app
def test_herokupostgres(monkeypatch):
monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri')
app = create_sample_app()
assert app.config['SQLALCHEMY_DATABASE_URI'] == 'heroku-db-uri'
|
2101 | import numpy as np
from keras import backend as K
import os
import sys
K.set_image_dim_ordering('tf')
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
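# Load the trained CNN video classifier and print predicted vs. actual labels for a shuffled set of UCF-101 clips.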
sys.path.append(patch_path('..'))
data_dir_path = patch_path('very_large_data')
model_dir_path = patch_path('models/UCF-101')
from keras_video_classifier.library.convolutional import CnnVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
config_file_path = CnnVideoClassifier.get_config_file_path(model_dir_path)
weight_file_path = CnnVideoClassifier.get_weight_file_path(model_dir_path)
np.random.seed(42)
load_ucf(data_dir_path)
predictor = CnnVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()])
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path)
print('predicted: ' + predicted_label + ' actual: ' + label)
if __name__ == '__main__':
main()
|
2116 | import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
from functools import partial
from inspect import isclass
from typing import Callable, Optional, Dict, Union
import numpy as np
import torch
import tqdm
from torch import Tensor, nn
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
For each label, generates one random target drawn from the (num_classes - 1) classes that differ from the
original label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
def get_all_targets(labels: Tensor, num_classes: int):
"""
Generates all possible targets that are different from the original labels.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
All possible targets for each label, shape: (len(labels), num_classes - 1).
"""
all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long)
all_classes = set(range(num_classes))
for i in range(len(labels)):
this_label = labels[i].item()
other_labels = list(all_classes.difference({this_label}))
all_possible_targets[i] = torch.tensor(other_labels)
return all_possible_targets
def run_attack(model: nn.Module,
inputs: Tensor,
labels: Tensor,
attack: Callable,
targets: Optional[Tensor] = None,
batch_size: Optional[int] = None) -> dict:
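# Run the attack in batches on samples that are not already adversarial, timing it with CUDA events and counting forward/backward passes per sample.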
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
targeted, adv_labels = False, labels
if targets is not None:
targeted, adv_labels = True, targets
batch_size = batch_size or len(inputs)
# run attack only on non already adversarial samples
already_adv = []
chunks = [tensor.split(batch_size) for tensor in [inputs, adv_labels]]
for (inputs_chunk, label_chunk) in zip(*chunks):
batch_chunk_d, label_chunk_d = [to_device(tensor) for tensor in [inputs_chunk, label_chunk]]
preds = model(batch_chunk_d).argmax(1)
is_adv = (preds == label_chunk_d) if targeted else (preds != label_chunk_d)
already_adv.append(is_adv.cpu())
not_adv = ~torch.cat(already_adv, 0)
start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
forward_counter, backward_counter = ForwardCounter(), BackwardCounter()
model.register_forward_pre_hook(forward_counter)
if LooseVersion(torch.__version__) >= LooseVersion('1.8'):
model.register_full_backward_hook(backward_counter)
else:
model.register_backward_hook(backward_counter)
average_forwards, average_backwards = [], [] # number of forward and backward calls per sample
advs_chunks = []
chunks = [tensor.split(batch_size) for tensor in [inputs[not_adv], adv_labels[not_adv]]]
total_time = 0
for (inputs_chunk, label_chunk) in tqdm.tqdm(zip(*chunks), ncols=80, total=len(chunks[0])):
batch_chunk_d, label_chunk_d = [to_device(tensor.clone()) for tensor in [inputs_chunk, label_chunk]]
start.record()
advs_chunk_d = attack(model, batch_chunk_d, label_chunk_d, targeted=targeted)
# performance monitoring
end.record()
torch.cuda.synchronize()
total_time += (start.elapsed_time(end)) / 1000 # times for cuda Events are in milliseconds
average_forwards.append(forward_counter.num_samples_called / len(batch_chunk_d))
average_backwards.append(backward_counter.num_samples_called / len(batch_chunk_d))
forward_counter.reset(), backward_counter.reset()
advs_chunks.append(advs_chunk_d.cpu())
if isinstance(attack, partial) and (callback := attack.keywords.get('callback')) is not None:
callback.reset_windows()
adv_inputs = inputs.clone()
adv_inputs[not_adv] = torch.cat(advs_chunks, 0)
data = {
'inputs': inputs,
'labels': labels,
'targets': adv_labels if targeted else None,
'adv_inputs': adv_inputs,
'time': total_time,
'num_forwards': sum(average_forwards) / len(chunks[0]),
'num_backwards': sum(average_backwards) / len(chunks[0]),
}
return data
_default_metrics = OrderedDict([
('linf', linf_distances),
('l0', l0_distances),
('l1', l1_distances),
('l2', l2_distances),
])
def compute_attack_metrics(model: nn.Module,
attack_data: Dict[str, Union[Tensor, float]],
batch_size: Optional[int] = None,
metrics: Dict[str, Callable] = _default_metrics) -> Dict[str, Union[Tensor, float]]:
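# Re-evaluate clean and adversarial inputs in batches, then gather accuracy, attack success, probabilities, logit margins, NLLs and distance metrics.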
inputs, labels, targets, adv_inputs = map(attack_data.get, ['inputs', 'labels', 'targets', 'adv_inputs'])
if adv_inputs.min() < 0 or adv_inputs.max() > 1:
warnings.warn('Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].')
adv_inputs.clamp_(min=0, max=1)
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
batch_size = batch_size or len(inputs)
chunks = [tensor.split(batch_size) for tensor in [inputs, labels, adv_inputs]]
all_predictions = [[] for _ in range(6)]
distances = {k: [] for k in metrics.keys()}
metrics = {k: v().to(device) if (isclass(v.func) if isinstance(v, partial) else False) else v for k, v in
metrics.items()}
append = lambda list, data: list.append(data.cpu())
for inputs_chunk, labels_chunk, adv_chunk in zip(*chunks):
inputs_chunk, adv_chunk = map(to_device, [inputs_chunk, adv_chunk])
clean_preds, adv_preds = [predict_inputs(model, chunk.to(device)) for chunk in [inputs_chunk, adv_chunk]]
list(map(append, all_predictions, [*clean_preds, *adv_preds]))
for metric, metric_func in metrics.items():
distances[metric].append(metric_func(adv_chunk, inputs_chunk).detach().cpu())
logits, probs, preds, logits_adv, probs_adv, preds_adv = [torch.cat(l) for l in all_predictions]
for metric in metrics.keys():
distances[metric] = torch.cat(distances[metric], 0)
accuracy_orig = (preds == labels).float().mean().item()
if targets is not None:
success = (preds_adv == targets)
labels = targets
else:
success = (preds_adv != labels)
prob_orig = probs.gather(1, labels.unsqueeze(1)).squeeze(1)
prob_adv = probs_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
labels_infhot = torch.zeros_like(logits_adv).scatter_(1, labels.unsqueeze(1), float('inf'))
real = logits_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
other = (logits_adv - labels_infhot).max(1).values
diff_vs_max_adv = (real - other)
nll = F.cross_entropy(logits, labels, reduction='none')
nll_adv = F.cross_entropy(logits_adv, labels, reduction='none')
data = {
'time': attack_data['time'],
'num_forwards': attack_data['num_forwards'],
'num_backwards': attack_data['num_backwards'],
'targeted': targets is not None,
'preds': preds,
'adv_preds': preds_adv,
'accuracy_orig': accuracy_orig,
'success': success,
'probs_orig': prob_orig,
'probs_adv': prob_adv,
'logit_diff_adv': diff_vs_max_adv,
'nll': nll,
'nll_adv': nll_adv,
'distances': distances,
}
return data
def print_metrics(metrics: dict) -> None:
np.set_printoptions(formatter={'float': '{:0.3f}'.format}, threshold=16, edgeitems=3,
linewidth=120) # To print arrays with less precision
print('Original accuracy: {:.2%}'.format(metrics['accuracy_orig']))
print('Attack done in: {:.2f}s with {:.4g} forwards and {:.4g} backwards.'.format(
metrics['time'], metrics['num_forwards'], metrics['num_backwards']))
success = metrics['success'].numpy()
fail = bool(success.mean() != 1)
print('Attack success: {:.2%}'.format(success.mean()) + fail * ' - {}'.format(success))
for distance, values in metrics['distances'].items():
data = values.numpy()
print('{}: {} - Average: {:.3f} - Median: {:.3f}'.format(distance, data, data.mean(), np.median(data)) +
fail * ' | Avg over success: {:.3f}'.format(data[success].mean()))
attack_type = 'targets' if metrics['targeted'] else 'correct'
print('Logit({} class) - max_Logit(other classes): {} - Average: {:.2f}'.format(
attack_type, metrics['logit_diff_adv'].numpy(), metrics['logit_diff_adv'].numpy().mean()))
print('NLL of target/pred class: {:.3f}'.format(metrics['nll_adv'].numpy().mean()))
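# A minimal usage sketch (not part of the original module), assuming `model` is a
# trained nn.Module and `attack_data` is the dict returned by the attack-evaluation
# routine above (keys: 'inputs', 'labels', 'targets', 'adv_inputs', 'time',
# 'num_forwards', 'num_backwards'):
#
#     metrics = compute_attack_metrics(model, attack_data, batch_size=256)
#     print_metrics(metrics)
#     success_rate = metrics['success'].float().mean().item()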
from dataclasses import dataclass
from typing import List
from greendoge.types.condition_opcodes import ConditionOpcode
from greendoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
"""
This structure is used to store parsed CLVM conditions
    Conditions in CLVM have the format (opcode, var1) or (opcode, var1, var2)
"""
opcode: ConditionOpcode
vars: List[bytes]
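# Illustrative sketch (not from the original file): a parsed CREATE_COIN condition
# could be represented as, e.g.
#     ConditionWithArgs(ConditionOpcode.CREATE_COIN, [puzzle_hash_bytes, amount_bytes])
# where `puzzle_hash_bytes` and `amount_bytes` are hypothetical placeholder byte values.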
hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']
from PyInstaller.hooks.hookutils import qt4_plugins_binaries
def hook(mod):
mod.binaries.extend(qt4_plugins_binaries('phonon_backend'))
return mod
import hashlib
import mimetypes
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField
from great_components.mixins import GA360Mixin
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.models import ClusterableModel, ParentalKey
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface,
)
from wagtail.contrib.redirects.models import Redirect
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core import blocks
from wagtail.core.blocks.stream_block import StreamBlockValidationError
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page
from wagtail.images import get_image_model_string
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from wagtailmedia.models import Media
from core import blocks as core_blocks, mixins
from core.case_study_index import delete_cs_index, update_cs_index
from core.constants import BACKLINK_QUERYSTRING_NAME, RICHTEXT_FEATURES__MINIMAL
from core.context import get_context_provider
from core.utils import PageTopicHelper, get_first_lesson
from exportplan.core.data import (
SECTION_SLUGS as EXPORTPLAN_SLUGS,
SECTIONS as EXPORTPLAN_URL_MAP,
)
# If we make a Redirect appear as a Snippet, we can sync it via Wagtail-Transfer
register_snippet(Redirect)
class GreatMedia(Media):
transcript = models.TextField(
        verbose_name=_('Transcript'), blank=False, null=True  # left nullable because it was an existing field
)
subtitles_en = models.TextField(
verbose_name=_('English subtitles'),
null=True,
blank=True,
help_text='English-language subtitles for this video, in VTT format',
)
admin_form_fields = Media.admin_form_fields + (
'transcript',
'subtitles_en',
)
@property
def sources(self):
return [
{
'src': self.url,
'type': mimetypes.guess_type(self.filename)[0] or 'application/octet-stream',
'transcript': self.transcript,
}
]
@property
def subtitles(self):
output = []
# TO COME: support for more than just English
if self.subtitles_en:
output.append(
{
'srclang': 'en',
'label': 'English',
'url': reverse('core:subtitles-serve', args=[self.id, 'en']),
'default': False,
},
)
return output
class AbstractObjectHash(models.Model):
class Meta:
abstract = True
content_hash = models.CharField(max_length=1000)
@staticmethod
def generate_content_hash(field_file):
filehash = hashlib.md5()
field_file.open()
filehash.update(field_file.read())
field_file.close()
return filehash.hexdigest()
class DocumentHash(AbstractObjectHash):
document = models.ForeignKey(
'wagtaildocs.Document', null=True, blank=True, on_delete=models.CASCADE, related_name='+'
)
class ImageHash(AbstractObjectHash):
image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.CASCADE, related_name='+')
class AltTextImage(AbstractImage):
alt_text = models.CharField(max_length=255, blank=True)
admin_form_fields = Image.admin_form_fields + ('alt_text',)
class Rendition(AbstractRendition):
image = models.ForeignKey(AltTextImage, on_delete=models.CASCADE, related_name='renditions')
class Meta:
unique_together = ('image', 'filter_spec', 'focal_point_key')
@property
def alt(self):
return self.image.alt_text
@register_snippet
class Tour(ClusterableModel):
page = models.OneToOneField('wagtailcore.Page', on_delete=models.CASCADE, related_name='tour')
title = models.CharField(max_length=255)
body = models.CharField(max_length=255)
button_text = models.CharField(max_length=255)
panels = [
PageChooserPanel('page'),
FieldPanel('title'),
FieldPanel('body'),
FieldPanel('button_text'),
MultiFieldPanel([InlinePanel('steps')], heading='Steps'),
]
def __str__(self):
return self.page.title
class TourStep(Orderable):
title = models.CharField(max_length=255)
body = models.CharField(max_length=255)
position = models.CharField(max_length=255)
selector = models.CharField(max_length=255)
tour = ParentalKey(Tour, on_delete=models.CASCADE, related_name='steps')
panels = [
FieldPanel('title'),
FieldPanel('body'),
FieldPanel('position'),
FieldPanel('selector'),
]
@register_snippet
class Product(models.Model):
name = models.CharField(max_length=255)
panels = [
FieldPanel('name'),
]
def __str__(self):
return self.name
@register_snippet
class Region(models.Model):
name = models.CharField(max_length=100, unique=True)
panels = [FieldPanel('name')]
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@register_snippet
class Country(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField(max_length=100, unique=True)
region = models.ForeignKey(Region, null=True, blank=True, on_delete=models.SET_NULL)
panels = [
FieldPanel('name'),
FieldPanel('region'),
]
class Meta:
verbose_name_plural = 'Countries'
ordering = ('name',)
def save(self, *args, **kwargs):
# Automatically set slug on save, if not already set
if not self.slug:
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
@register_snippet
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
panels = [FieldPanel('name')]
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@register_snippet
class IndustryTag(models.Model):
name = models.CharField(max_length=100, unique=True)
icon = models.ForeignKey(
AltTextImage,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
panels = [FieldPanel('name'), ImageChooserPanel('icon')]
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class TimeStampedModel(models.Model):
"""Modified version of django_extensions.db.models.TimeStampedModel
Unfortunately, because null=True needed to be added to create and
modified fields, inheritance causes issues with field clash.
"""
created = CreationDateTimeField('created', null=True)
modified = ModificationDateTimeField('modified', null=True)
def save(self, **kwargs):
self.update_modified = kwargs.pop('update_modified', getattr(self, 'update_modified', True))
super().save(**kwargs)
class Meta:
get_latest_by = 'modified'
ordering = (
'-modified',
'-created',
)
abstract = True
# Content models
class CMSGenericPage(
mixins.EnableTourMixin,
mixins.AuthenticatedUserRequired,
mixins.WagtailGA360Mixin,
GA360Mixin,
Page,
):
"""
Generic page, freely inspired by Codered page
"""
class Meta:
abstract = True
# Do not allow this page type to be created in wagtail admin
is_creatable = False
template_choices = []
###############
# Layout fields
###############
template = models.CharField(
max_length=255,
choices=None,
)
#########
# Panels
##########
layout_panels = [FieldPanel('template')]
settings_panels = [FieldPanel('slug')] + Page.settings_panels
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
field = self._meta.get_field('template')
field.choices = self.template_choices
field.required = True
@cached_classmethod
def get_edit_handler(cls): # NOQA N805
panels = [
ObjectList(cls.content_panels, heading='Content'),
ObjectList(cls.layout_panels, heading='Layout'),
ObjectList(cls.settings_panels, heading='Settings', classname='settings'),
]
return TabbedInterface(panels).bind_to(model=cls)
def get_template(self, request, *args, **kwargs):
return self.template
def get_context(self, request, *args, **kwargs):
context = super().get_context(request)
self.set_ga360_payload(
page_id=self.id,
business_unit=settings.GA360_BUSINESS_UNIT,
site_section=str(self.url or '/').split('/')[1],
)
self.add_ga360_data_to_payload(request)
context['ga360'] = self.ga360_payload
provider = get_context_provider(request=request, page=self)
if provider:
context.update(provider.get_context_data(request=request, page=self))
return context
class LandingPage(CMSGenericPage):
parent_page_types = [
'domestic.DomesticHomePage', # TODO: once we've restructured, remove this permission
'domestic.GreatDomesticHomePage',
]
subpage_types = [
'core.ListPage',
'core.InterstitialPage',
'domestic.DomesticDashboard',
]
template_choices = (
('learn/landing_page.html', 'Learn'),
('core/generic_page.html', 'Generic'),
)
################
# Content fields
################
description = RichTextField()
button = StreamField([('button', core_blocks.ButtonBlock(icon='cog'))], null=True, blank=True)
image = models.ForeignKey(
get_image_model_string(), null=True, blank=True, on_delete=models.SET_NULL, related_name='+'
)
body = StreamField(
[
('section', core_blocks.SectionBlock()),
('title', core_blocks.TitleBlock()),
('text', blocks.RichTextBlock(icon='openquote', helptext='Add a textblock')),
('image', core_blocks.ImageBlock()),
],
null=True,
blank=True,
)
components = StreamField(
[
('route', core_blocks.RouteSectionBlock()),
],
null=True,
blank=True,
)
#########
# Panels
#########
content_panels = CMSGenericPage.content_panels + [
FieldPanel('description'),
StreamFieldPanel('button'),
ImageChooserPanel('image'),
StreamFieldPanel('components'),
StreamFieldPanel('body'),
]
class InterstitialPage(CMSGenericPage):
parent_page_types = ['core.LandingPage']
template_choices = (('learn/interstitial.html', 'Learn'),)
################
# Content fields
################
button = StreamField([('button', core_blocks.ButtonBlock(icon='cog'))], null=True, blank=True)
#########
# Panels
#########
content_panels = CMSGenericPage.content_panels + [
StreamFieldPanel('button'),
]
class ListPage(CMSGenericPage):
parent_page_types = ['core.LandingPage']
subpage_types = ['core.CuratedListPage']
template_choices = (('learn/automated_list_page.html', 'Learn'),)
record_read_progress = models.BooleanField(
default=False,
help_text='Should we record when a user views a page in this collection?',
)
class Meta:
verbose_name = 'Automated list page'
verbose_name_plural = 'Automated list pages'
def get_context(self, request, *args, **kwargs):
from core.helpers import get_high_level_completion_progress
from domestic.helpers import get_lesson_completion_status
context = super().get_context(request)
if request.user.is_authenticated:
completion_status = get_lesson_completion_status(request.user)
context['high_level_completion_progress'] = get_high_level_completion_progress(
completion_status=completion_status,
)
return context
################
# Content fields
################
description = RichTextField()
button_label = models.CharField(max_length=100)
#########
# Panels
#########
settings_panels = CMSGenericPage.settings_panels + [FieldPanel('record_read_progress')]
content_panels = CMSGenericPage.content_panels + [FieldPanel('description'), FieldPanel('button_label')]
class CuratedListPage(CMSGenericPage):
parent_page_types = ['core.ListPage']
subpage_types = [
'core.TopicPage',
]
template_choices = (('learn/curated_list_page.html', 'Learn'),)
################
# Content fields
################
heading = RichTextField()
image = models.ForeignKey(
get_image_model_string(), null=True, blank=True, on_delete=models.SET_NULL, related_name='+'
)
########
# Panels
########
content_panels = CMSGenericPage.content_panels + [
FieldPanel('heading'),
ImageChooserPanel('image'),
]
def get_topics(self, live=True) -> models.QuerySet:
qs = TopicPage.objects.live().specific().descendant_of(self)
if live:
qs = qs.live()
return qs
@cached_property
def count_topics(self):
return self.get_topics().count()
@cached_property
def count_detail_pages(self):
count = 0
for topic in self.get_topics():
count += DetailPage.objects.live().descendant_of(topic).count()
return count
def get_context(self, request, *args, **kwargs):
from core.helpers import (
get_high_level_completion_progress,
get_module_completion_progress,
)
from domestic.helpers import get_lesson_completion_status
context = super().get_context(request)
# Give the template a simple way to link back to the parent
# learning module (ListPage)
context['parent_page_url'] = self.get_parent().url
if request.user.is_authenticated:
# get this once, so we don't waste the network call to get the data twice
completion_status = get_lesson_completion_status(request.user)
context['module_completion_progress'] = get_module_completion_progress(
completion_status=completion_status,
module_page=self,
)
context['high_level_completion_progress'] = get_high_level_completion_progress(
completion_status=completion_status,
)
return context
def hero_singular_validation(value):
if value and len(value) > 1:
raise StreamBlockValidationError(
non_block_errors=ValidationError('Only one image or video allowed in Hero section', code='invalid'),
)
class TopicPage(mixins.AuthenticatedUserRequired, Page):
"""Structural page to allow for cleaner mapping of lessons (`DetailPage`s)
to modules (`CuratedListPage`s).
    Not intended to be viewed by end users, so will redirect to the parent
module if accessed.
Also, for the above reason, mixins.WagtailGA360Mixin and GA360Mixin
are not used."""
parent_page_types = ['core.CuratedListPage']
subpage_types = [
'core.DetailPage',
'core.LessonPlaceholderPage',
]
# `title` comes from Page superclass and that's all we need here
def _redirect_to_parent_module(self):
return HttpResponseRedirect(self.get_parent().url)
def serve_preview(self, request, mode_name='dummy'):
# It doesn't matter what is passed as mode_name - we always redirect
return self._redirect_to_parent_module()
def serve(self, request):
return self._redirect_to_parent_module()
class LessonPlaceholderPage(mixins.AuthenticatedUserRequired, Page):
"""Structural page to allow for configuring and representing very simple
to modules (`CuratedListPage`s).
Not intented to be viewed by end users, so will redirect to the parent
module if accessed.
Also, for the above reason, mixins.WagtailGA360Mixin and GA360Mixin
are not used."""
parent_page_types = ['core.TopicPage']
subpage_types = [] # No child pages allowed for placeholders
# `title` comes from Page superclass and that's all we need here
def _redirect_to_parent_module(self):
dest = CuratedListPage.objects.ancestor_of(self).first().url
return HttpResponseRedirect(dest)
def serve_preview(self, request, mode_name='dummy'):
# It doesn't matter what is passed as mode_name - we always redirect
return self._redirect_to_parent_module()
def serve(self, request):
return self._redirect_to_parent_module()
class DetailPage(CMSGenericPage):
estimated_read_duration = models.DurationField(null=True, blank=True)
parent_page_types = [
'core.CuratedListPage', # TEMPORARY: remove after topics refactor migration has run
'core.TopicPage',
]
template_choices = (('learn/detail_page.html', 'Learn'),)
class Meta:
verbose_name = 'Detail page'
verbose_name_plural = 'Detail pages'
################
# Content fields
################
hero = StreamField(
[
('Image', core_blocks.ImageBlock(template='core/includes/_hero_image.html')),
('Video', core_blocks.SimpleVideoBlock(template='core/includes/_hero_video.html')),
],
null=True,
blank=True,
validators=[hero_singular_validation],
)
objective = StreamField(
[
(
'paragraph',
blocks.RichTextBlock(options={'class': 'objectives'}),
),
('ListItem', core_blocks.Item()),
]
)
body = StreamField(
[
(
'paragraph',
blocks.StructBlock(
[('paragraph', blocks.RichTextBlock())],
template='core/struct_paragraph_block.html',
icon='fa-font',
),
),
(
'video',
blocks.StructBlock(
[('video', core_blocks.VideoBlock())],
template='core/struct_video_block.html',
icon='fa-play',
),
),
('case_study', core_blocks.CaseStudyStaticBlock(icon='fa-book')),
(
'Step',
core_blocks.StepByStepBlock(icon='cog'),
),
(
'fictional_example',
blocks.StructBlock(
[('fiction_body', blocks.RichTextBlock(icon='openquote'))],
template='learn/fictional_company_example.html',
icon='fa-commenting-o',
),
),
(
'ITA_Quote',
core_blocks.ITAQuoteBlock(icon='fa-quote-left'),
),
(
'pros_cons',
blocks.StructBlock(
[
(
'pros',
blocks.StreamBlock(
[
(
'item',
core_blocks.Item(icon='fa-arrow-right'),
)
]
),
),
(
'cons',
blocks.StreamBlock(
[
(
'item',
core_blocks.Item(icon='fa-arrow-right'),
)
]
),
),
],
template='learn/pros_and_cons.html',
icon='fa-arrow-right',
),
),
('choose_do_not_choose', core_blocks.ChooseDoNotChooseBlock()),
(
'image',
core_blocks.ImageBlock(
template='core/includes/_image_full_width.html',
help_text='Image displayed within a full-page-width block',
),
),
(
'video',
core_blocks.SimpleVideoBlock(
template='core/includes/_video_full_width.html',
help_text='Video displayed within a full-page-width block',
),
),
]
)
recap = StreamField(
[
(
'recap_item',
blocks.StructBlock(
[
('title', blocks.CharBlock(icon='fa-header')),
(
'item',
blocks.StreamBlock(
[
(
'item',
core_blocks.Item(),
)
]
),
),
],
template='learn/recap.html',
icon='fa-commenting-o',
),
)
]
)
#########
# Panels
##########
content_panels = Page.content_panels + [
StreamFieldPanel('hero'),
StreamFieldPanel('objective'),
StreamFieldPanel('body'),
StreamFieldPanel('recap'),
]
def handle_page_view(self, request):
if request.user.is_authenticated:
# checking if the page should record read progress
# checking if the page is already marked as read
list_page = (
ListPage.objects.ancestor_of(self)
.filter(record_read_progress=True)
.exclude(page_views_list__sso_id=request.user.pk, page_views_list__page=self)
.first()
)
if list_page:
PageView.objects.get_or_create(
page=self,
list_page=list_page,
sso_id=request.user.pk,
)
def serve(self, request, *args, **kwargs):
self.handle_page_view(request)
        return super().serve(request, *args, **kwargs)
@cached_property
def topic_title(self):
return self.get_parent().title
@cached_property
def module(self):
"""Gets the learning module this lesson belongs to"""
return CuratedListPage.objects.live().specific().ancestor_of(self).first()
@cached_property
def _export_plan_url_map(self):
"""Return a lookup dictionary of URL Slugs->title for all the
Export Plan sections we have."""
return {url: values['title'] for url, values in EXPORTPLAN_URL_MAP.items()}
def _get_backlink(self, request):
"""Try to extract a backlink (used for a link to the export plan) from the
querystring on the request that brought us to this view.
Only accepts backlinks that we KNOW are for the export plan, else ignore it."""
backlink_path = request.GET.get(BACKLINK_QUERYSTRING_NAME, '')
if backlink_path is not None:
backlink_path = unquote(backlink_path)
            if len(backlink_path.split('/')) > 3 and (
backlink_path.split('/')[3] in EXPORTPLAN_SLUGS and '://' not in backlink_path
):
# The check for '://' will stop us accepting a backlink which
# features a full URL as its OWN querystring param (eg a crafted attack
# URL), but that's an acceptable limitation here and is very unlikely
# to happen.
return backlink_path
return None # safe default
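    # Illustrative sketch (not part of the original code): an unquoted backlink value
    # like '/export-plan/section/about-your-business/' is accepted only if its third
    # path segment (split('/')[3]) is a known entry in EXPORTPLAN_SLUGS, while any
    # value containing '://' (an absolute URL) is rejected and None is returned.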
def _get_backlink_title(self, backlink_path):
"""For a given backlink, see if we can get a title that goes with it.
For now, this is limited only to Export Plan pages/links.
"""
# We have to re-arrange EXPORT_PLAN_SECTION_TITLES_URLS after import
# because it features lazily-evaluated URLs that aren't ready when
# models are imported
if backlink_path and len(backlink_path.split('/')) > 3:
_path = backlink_path.split('/')[3]
return self._export_plan_url_map.get(_path)
def get_context(self, request, *args, **kwargs):
context = super().get_context(request)
context['refresh_on_market_change'] = True
# Prepare backlink to the export plan if we detect one and can validate it
_backlink = self._get_backlink(request)
if _backlink:
context['backlink'] = _backlink
context['backlink_title'] = self._get_backlink_title(_backlink)
if isinstance(self.get_parent().specific, TopicPage):
# In a conditional because a DetailPage currently MAY be used as
# a child of another page type...
page_topic_helper = PageTopicHelper(self)
next_lesson = page_topic_helper.get_next_lesson()
context['current_lesson'] = self
context['current_module'] = page_topic_helper.module
if page_topic_helper:
topic_page = page_topic_helper.get_page_topic()
if topic_page:
context['current_topic'] = topic_page
context['page_topic'] = topic_page.title
if next_lesson:
context['next_lesson'] = next_lesson
else:
next_module = self.module.get_next_sibling()
if not next_module:
return context
context['next_module'] = next_module.specific
context['next_lesson'] = get_first_lesson(next_module)
return context
class PageView(TimeStampedModel):
page = models.ForeignKey(DetailPage, on_delete=models.CASCADE, related_name='page_views')
list_page = models.ForeignKey(ListPage, on_delete=models.CASCADE, related_name='page_views_list')
sso_id = models.TextField()
class Meta:
ordering = ['page__pk']
unique_together = ['page', 'sso_id']
# TODO: deprecate and remove
class ContentModuleTag(TaggedItemBase):
content_object = ParentalKey('core.ContentModule', on_delete=models.CASCADE, related_name='tagged_items')
# TODO: deprecate and remove
@register_snippet
class ContentModule(ClusterableModel):
title = models.CharField(max_length=255)
content = RichTextField()
tags = TaggableManager(through=ContentModuleTag, blank=True)
panels = [
FieldPanel('title'),
FieldPanel('content'),
FieldPanel('tags'),
]
def __str__(self):
return self.title
class PersonalisationHSCodeTag(TagBase):
"""Custom tag for personalisation.
    Tag value will be an HS6, HS4 or HS2 code"""
# free_tagging = False # DISABLED until tag data only comes via data migration
class Meta:
verbose_name = 'HS Code tag for personalisation'
verbose_name_plural = 'HS Code tags for personalisation'
class PersonalisationCountryTag(TagBase):
"""Custom tag for personalisation.
Tag value will be an ISO-2 Country code ('DE')
"""
free_tagging = False
class Meta:
verbose_name = 'Country tag for personalisation'
verbose_name_plural = 'Country tags for personalisation'
class PersonalisationRegionTag(TagBase):
"""Custom tag for personalisation.
Tag value will be a geographical string ('Europe')
"""
free_tagging = False
class Meta:
verbose_name = 'Region tag for personalisation'
verbose_name_plural = 'Region tags for personalisation'
class PersonalisationTradingBlocTag(TagBase):
"""Custom tag for personalisation.
    Tag value will be a Trading bloc
"""
free_tagging = False
class Meta:
verbose_name = 'Trading bloc tag for personalisation'
verbose_name_plural = 'Trading bloc tags for personalisation'
# If you're wondering what's going on here:
# https://docs.wagtail.io/en/stable/reference/pages/model_recipes.html#custom-tag-models
class HSCodeTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationHSCodeTag, related_name='hscode_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(to='core.CaseStudy', on_delete=models.CASCADE, related_name='hs_code_tagged_items')
class CountryTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationCountryTag, related_name='country_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(to='core.CaseStudy', on_delete=models.CASCADE, related_name='country_tagged_items')
class RegionTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationRegionTag, related_name='region_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(to='core.CaseStudy', on_delete=models.CASCADE, related_name='region_tagged_items')
class TradingBlocTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationTradingBlocTag, related_name='trading_bloc_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(
to='core.CaseStudy', on_delete=models.CASCADE, related_name='trading_bloc_tagged_items'
)
def _high_level_validation(value, error_messages):
TEXT_BLOCK = 'text' # noqa N806
MEDIA_BLOCK = 'media' # noqa N806
QUOTE_BLOCK = 'quote' # noqa N806
# we need to be strict about presence and ordering of these nodes
if [node.block_type for node in value if node.block_type != QUOTE_BLOCK] != [MEDIA_BLOCK, TEXT_BLOCK]:
error_messages.append(
(
'This block must contain one Media section (with one or '
'two items in it) and/or a Quote section, then one Text section following it.'
)
)
return error_messages
def _low_level_validation(value, error_messages):
# Check content of media node, which should be present here
MEDIA_BLOCK = 'media' # noqa N806
VIDEO_BLOCK = 'video' # noqa N806
for node in value:
if node.block_type == MEDIA_BLOCK:
subnode_block_types = [subnode.block_type for subnode in node.value]
if len(subnode_block_types) == 2:
if set(subnode_block_types) == {VIDEO_BLOCK}:
# Two videos: not allowed
error_messages.append('Only one video may be used in a case study.')
elif subnode_block_types[1] == VIDEO_BLOCK:
# implicitly, [0] must be an image
# video after image: not allowed
error_messages.append('The video must come before a still image.')
return error_messages
def case_study_body_validation(value):
"""Ensure the case study has exactly both a media node and a text node
and that the media node has the following content:
* One image, only
* One video, only
* One video + One image
    * (the video must come first so that it is displayed first)
* Two images
"""
error_messages = []
if value:
error_messages = _high_level_validation(value, error_messages)
error_messages = _low_level_validation(value, error_messages)
if error_messages:
raise StreamBlockValidationError(
non_block_errors=ValidationError('; '.join(error_messages), code='invalid'),
)
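# Illustrative sketch (not part of the original code) of block sequences accepted or
# rejected by case_study_body_validation, written as ordered block_type lists:
#   accepted: ['media', 'text']                    (media holds one or two items)
#   accepted: ['media', 'quote', 'text']           (quote blocks may appear anywhere)
#   rejected: ['text', 'media']                    (wrong order)
#   rejected: media containing ['video', 'video']  (only one video allowed)
#   rejected: media containing ['image', 'video']  (the video must come before the image)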
class MagnaPageChooserPanel(PageChooserPanel):
show_label = False
field_template = 'admin/wagtailadmin/edit_handlers/field_panel_field.html'
def render_as_field(self):
instance_obj = self.get_chosen_item()
context = {
'field': self.bound_field,
self.object_type_name: instance_obj,
'is_chosen': bool(instance_obj), # DEPRECATED - passed to templates for backwards compatibility only
# Added obj_type on base class method render_as_field
'obj_type': instance_obj.specific.__class__.__name__ if instance_obj else None,
}
return mark_safe(render_to_string(self.field_template, context))
class CaseStudyRelatedPages(Orderable):
case_study = ParentalKey(
'core.CaseStudy',
related_name='related_pages',
on_delete=models.SET_NULL,
null=True,
blank=True,
)
page = models.ForeignKey(
'wagtailcore.Page',
on_delete=models.CASCADE,
related_name='+',
)
panels = [
MagnaPageChooserPanel('page', [DetailPage, CuratedListPage, TopicPage]),
]
class Meta:
unique_together = ['case_study', 'page']
@register_snippet
class CaseStudy(ClusterableModel):
"""Dedicated snippet for use as a case study. Supports personalised
selection via its tags.
The decision about the appropriate Case Study block to show will happen
when the page attempts to render the relevant CaseStudyBlock.
Note that this is rendered via Wagtail's ModelAdmin, so appears in the sidebar,
but we have to keep it registered as a Snippet to be able to transfer it
with Wagtail-Transfer
"""
title = models.CharField(
max_length=255,
blank=False,
verbose_name='Internal case study title',
)
# old name company_name
summary_context = models.CharField(max_length=255, blank=False, default='How we did it')
# old name summary
lead_title = models.TextField(blank=False) # Deliberately not rich-text / no formatting
body = StreamField(
[
(
'media',
blocks.StreamBlock(
[
('video', core_blocks.SimpleVideoBlock(template='core/includes/_case_study_video.html')),
('image', core_blocks.ImageBlock()),
],
min_num=1,
max_num=2,
),
),
(
'text',
blocks.RichTextBlock(
features=RICHTEXT_FEATURES__MINIMAL,
),
),
(
'quote',
core_blocks.CaseStudyQuoteBlock(),
),
],
validators=[case_study_body_validation],
help_text=(
'This block must contain one Media section (with one or two items in it) '
'and/or Quote sections, then one Text section.'
),
)
# We are keeping the personalisation-relevant tags in separate
# fields to aid lookup and make the UX easier for editors
hs_code_tags = ClusterTaggableManager(through='core.HSCodeTaggedCaseStudy', blank=True, verbose_name='HS-code tags')
country_code_tags = ClusterTaggableManager(
through='core.CountryTaggedCaseStudy', blank=True, verbose_name='Country tags'
)
region_code_tags = ClusterTaggableManager(
through='core.RegionTaggedCaseStudy', blank=True, verbose_name='Region tags'
)
trading_bloc_code_tags = ClusterTaggableManager(
through='core.TradingBlocTaggedCaseStudy', blank=True, verbose_name='Trading bloc tags'
)
created = CreationDateTimeField('created', null=True)
modified = ModificationDateTimeField('modified', null=True)
panels = [
MultiFieldPanel(
[
FieldPanel('title'),
FieldPanel('lead_title'),
FieldPanel('summary_context'),
StreamFieldPanel('body'),
],
heading='Case Study content',
),
MultiFieldPanel(
[
FieldPanel('hs_code_tags'),
FieldPanel('country_code_tags'),
FieldPanel('region_code_tags'),
FieldPanel('trading_bloc_code_tags'),
],
heading='Case Study tags for Personalisation',
),
MultiFieldPanel(
[
InlinePanel('related_pages', label='Related pages'),
],
heading='Related Lesson, Topic & Module, also used for Personalisation',
),
]
def __str__(self):
display_name = self.title if self.title else self.summary_context
return f'{display_name}'
def save(self, **kwargs):
        # When we create a new case study we need to save it first to obtain an ID for indexing
self.update_modified = kwargs.pop('update_modified', getattr(self, 'update_modified', True))
super().save(**kwargs)
update_cs_index(self)
def delete(self, **kwargs):
delete_cs_index(self.id)
super().delete(**kwargs)
def get_cms_standalone_view_url(self):
return reverse('cms_extras:case-study-view', args=[self.id])
class Meta:
verbose_name_plural = 'Case studies'
get_latest_by = 'modified'
ordering = (
'-modified',
'-created',
)
@register_setting
class CaseStudyScoringSettings(BaseSetting):
threshold = models.DecimalField(
help_text='This is the minimum score which a case study needs to have to be '
'considered before being presented to users. ',
default=10,
decimal_places=3,
max_digits=5,
)
lesson = models.DecimalField(
help_text="Score given when user's lesson is tagged in the case study.",
default=8,
decimal_places=3,
max_digits=5,
)
topic = models.DecimalField(
help_text="Score given when user's lesson's topic is tagged in the case study "
'unless there is also lesson match.',
default=4,
decimal_places=3,
max_digits=5,
)
module = models.DecimalField(
help_text="Score given when the user's lesson's module is tagged in the case study "
'unless there is also lesson or topic match.',
default=2,
decimal_places=3,
max_digits=5,
)
product_hs6 = models.DecimalField(
help_text='Score given when any case study HS6 tag matches the complete HS6 code of '
"any of the user's products",
default=8,
decimal_places=3,
max_digits=5,
)
product_hs4 = models.DecimalField(
help_text="Score given when any case study HS4 tag matches the first 4 digits of any of the user's products "
'unless there is an HS6 match.',
default=4,
decimal_places=3,
max_digits=5,
)
product_hs2 = models.DecimalField(
help_text="Score given when any case study HS2 tag matches the first 2 digits of any of the user's products "
'unless there is an HS6 or HS4 match.',
default=2,
decimal_places=3,
max_digits=5,
)
country_exact = models.DecimalField(
help_text="Score given when any case study country tag exactly matches one of the user's export markets.",
default=4,
decimal_places=3,
max_digits=5,
)
country_region = models.DecimalField(
help_text="Score given when any case study region tag matches the region of any of the user's export markets "
'unless there is an exact country match.',
default=2,
decimal_places=3,
max_digits=5,
)
trading_blocs = models.DecimalField(
        help_text='Score given when any case study trading bloc tag matches any trading bloc that any of '
"the user's export markets falls into unless there is an exact country or region match.",
default=2,
decimal_places=3,
max_digits=5,
)
product_tab = [MultiFieldPanel([FieldPanel('product_hs6'), FieldPanel('product_hs4'), FieldPanel('product_hs2')])]
market_tab = [
MultiFieldPanel([FieldPanel('country_exact'), FieldPanel('country_region'), FieldPanel('trading_blocs')])
]
lesson_tab = [MultiFieldPanel([FieldPanel('lesson'), FieldPanel('topic'), FieldPanel('module')])]
threshold_tab = [
MultiFieldPanel(
[
FieldPanel('threshold'),
]
)
]
edit_handler = TabbedInterface(
[
ObjectList(product_tab, heading='Product'),
ObjectList(market_tab, heading='Market'),
ObjectList(lesson_tab, heading='Lesson'),
ObjectList(threshold_tab, heading='Threshold'),
]
)
class Meta:
verbose_name = 'Case Study Scoring'
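# Hypothetical worked example (not part of the original code) of how the default
# weights combine: a case study whose HS6 tag matches a user's product exactly
# (product_hs6 = 8) and whose country tag matches one of their export markets
# (country_exact = 4) scores 8 + 4 = 12, which clears the default threshold of 10;
# an HS2-only match plus a region-only match (2 + 2 = 4) does not.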
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras import activations
def get_standard_values():
'''
These are just a set of floats used for testing the activation
functions, and are useful in multiple tests.
'''
return np.array([[0, 0.1, 0.5, 0.9, 1.0]], dtype=K.floatx())
def test_softmax():
'''
Test using a reference implementation of softmax
'''
def softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softmax(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_time_distributed_softmax():
x = K.placeholder(shape=(1, 1, 5))
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
f([test_values])[0]
def test_softplus():
'''
Test using a reference softplus implementation
'''
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softplus(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softplus(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_softsign():
'''
Test using a reference softsign implementation
'''
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softsign(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softsign(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_sigmoid():
'''
Test using a numerically stable reference sigmoid implementation
'''
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_hard_sigmoid():
'''
Test using a reference hard sigmoid implementation
'''
def ref_hard_sigmoid(x):
'''
Reference hard sigmoid with slope and shift values from theano, see
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
'''
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.hard_sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_relu():
'''
Relu implementation doesn't depend on the value being
a theano variable. Testing ints, floats and theano tensors.
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.relu(x)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
def test_elu():
x = K.placeholder(ndim=2)
f = K.function([x], [activations.elu(x, 0.5)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=K.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
assert_allclose(result, true_result)
def test_tanh():
test_values = get_standard_values()
x = K.placeholder(ndim=2)
exp = activations.tanh(x)
f = K.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_linear():
'''
This function does no input validation, it just returns the thing
that was passed in.
'''
xs = [1, 5, True, None, 'foo']
for x in xs:
assert(x == activations.linear(x))
if __name__ == '__main__':
pytest.main([__file__])
from arcapix.fs.gpfs.policy import PlacementPolicy
from arcapix.fs.gpfs.rule import MigrateRule
# load placement policy for mmfs1
policy = PlacementPolicy('mmfs1')
# create a new migrate rule for 'sata1'
r = MigrateRule(source='sata1', threshold=(90, 50))
# add rule to start of the policy
policy.rules.insert(r, 0)
# save changes
policy.save()
from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
from overrides import overrides
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
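# Minimal usage sketch (not part of the original file); the archive path and its
# contents are hypothetical, but the calls are standard AllenNLP API:
#
#     from allennlp.models.archival import load_archive
#     archive = load_archive('model.tar.gz')
#     predictor = Predictor.from_archive(archive, 'sentence_classifier')
#     output = predictor.predict('A very well-made, funny and entertaining picture.')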
from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import pandas as pd
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
class group_lasso(object):
def __init__(self,
loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso=True, # should lasso solver be used where applicable - defaults to True
perturb=None):
_check_groups(groups) # make sure groups looks sensible
# log likelihood : quadratic loss
self.loglike = loglike
self.nfeature = self.loglike.shape[0]
# ridge parameter
self.ridge_term = ridge_term
# group lasso penalty (from regreg)
# use regular lasso penalty if all groups are size 1
if use_lasso and groups.size == np.unique(groups).size:
            # need to provide weights as an np.array rather than a dictionary
weights_np = np.array([w[1] for w in sorted(weights.items())])
self.penalty = rr.weighted_l1norm(weights=weights_np,
lagrange=1.)
else:
self.penalty = rr.group_lasso(groups,
weights=weights,
lagrange=1.)
# store groups as a class variable since the non-group lasso doesn't
self.groups = groups
self._initial_omega = perturb
# gaussian randomization
self.randomizer = randomizer
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None):
# solve the randomized version of group lasso
(self.initial_soln,
self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
solve_args=solve_args)
# initialize variables
active_groups = [] # active group labels
active_dirs = {} # dictionary: keys are group labels, values are unit-norm coefficients
unpenalized = [] # selected groups with no penalty
        overall = np.ones(self.nfeature, bool)  # mask of active features
ordered_groups = [] # active group labels sorted by label
ordered_opt = [] # gamma's ordered by group labels
ordered_vars = [] # indices "ordered" by sorting group labels
tol = 1.e-20
_, self.randomizer_prec = self.randomizer.cov_prec
# now we are collecting the directions and norms of the active groups
for g in sorted(np.unique(self.groups)): # g is group label
group_mask = self.groups == g
soln = self.initial_soln # do not need to keep setting this
if norm(soln[group_mask]) > tol * norm(soln): # is group g appreciably nonzero
ordered_groups.append(g)
# variables in active group
ordered_vars.extend(np.flatnonzero(group_mask))
if self.penalty.weights[g] == 0:
unpenalized.append(g)
else:
active_groups.append(g)
active_dirs[g] = soln[group_mask] / norm(soln[group_mask])
ordered_opt.append(norm(soln[group_mask]))
else:
overall[group_mask] = False
self.selection_variable = {'directions': active_dirs,
'active_groups': active_groups} # kind of redundant with keys of active_dirs
self._ordered_groups = ordered_groups
# exception if no groups are selected
if len(self.selection_variable['active_groups']) == 0:
return np.sign(soln), soln
# otherwise continue as before
self.observed_opt_state = np.hstack(ordered_opt) # gammas as array
_beta_unpenalized = restricted_estimator(self.loglike, # refit OLS on E
overall,
solve_args=solve_args)
beta_bar = np.zeros(self.nfeature)
beta_bar[overall] = _beta_unpenalized # refit OLS beta with zeros
self._beta_full = beta_bar
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar)) # all 1's for LS
opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])
for i, var in enumerate(ordered_vars):
opt_linearNoU[var, i] += self.ridge_term
opt_offset = self.initial_subgrad
self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
self.observed_score_state[~overall] += self.loglike.smooth_objective(beta_bar, 'grad')[~overall]
active_signs = np.sign(self.initial_soln)
active = np.flatnonzero(active_signs)
self.active = active
def compute_Vg(ug):
pg = ug.size # figure out size of g'th group
if pg > 1:
Z = np.column_stack((ug, np.eye(pg, pg - 1)))
Q, _ = qr(Z)
Vg = Q[:, 1:] # drop the first column
else:
Vg = np.zeros((1, 0)) # if the group is size one, the orthogonal complement is empty
return Vg
def compute_Lg(g):
pg = active_dirs[g].size
Lg = self.penalty.weights[g] * np.eye(pg)
return Lg
sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))
Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
V = block_diag(*Vs) # unpack the list
Ls = [compute_Lg(g) for g in sorted_active_dirs]
L = block_diag(*Ls) # unpack the list
XE = X[:, ordered_vars] # changed to ordered_vars
Q = XE.T.dot(self._W[:, None] * XE)
QI = inv(Q)
C = V.T.dot(QI).dot(L).dot(V)
self.XE = XE
self.Q = Q
self.QI = QI
self.C = C
U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T
self.opt_linear = opt_linearNoU.dot(U)
self.active_dirs = active_dirs
self.opt_offset = opt_offset
self.ordered_vars = ordered_vars
self.linear_part = -np.eye(self.observed_opt_state.shape[0])
self.offset = np.zeros(self.observed_opt_state.shape[0])
return active_signs, soln
def _solve_randomized_problem(self,
perturb=None,
solve_args={'tol': 1.e-15, 'min_its': 100}):
# take a new perturbation if supplied
if perturb is not None:
self._initial_omega = perturb
if self._initial_omega is None:
self._initial_omega = self.randomizer.sample()
quad = rr.identity_quadratic(self.ridge_term,
0,
-self._initial_omega,
0)
problem = rr.simple_problem(self.loglike, self.penalty)
# if all groups are size 1, set up lasso penalty and run usual lasso solver... (see existing code)...
initial_soln = problem.solve(quad, **solve_args)
initial_subgrad = -(self.loglike.smooth_objective(initial_soln,
'grad') +
quad.objective(initial_soln, 'grad'))
return initial_soln, initial_subgrad
@staticmethod
def gaussian(X,
Y,
groups,
weights,
sigma=1.,
quadratic=None,
ridge_term=0.,
perturb=None,
use_lasso=True, # should lasso solver be used when applicable - defaults to True
randomizer_scale=None):
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
return group_lasso(loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso,
perturb)
def _setup_implied_gaussian(self):
_, prec = self.randomizer.cov_prec
if np.asarray(prec).shape in [(), (0,)]:
cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
else:
cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)
cond_mean = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
self.cond_mean = cond_mean
self.cond_cov = cond_cov
self.cond_precision = cond_precision
self.logdens_linear = logdens_linear
return cond_mean, cond_cov, cond_precision, logdens_linear
def selective_MLE(self,
solve_args={'tol': 1.e-12},
level=0.9,
useJacobian=True,
dispersion=None):
"""Do selective_MLE for group_lasso
Note: this masks the selective_MLE inherited from query
because that is not adapted for the group_lasso. Also, assumes
you have already run the fit method since this uses results
from that method.
        Parameters
        ----------
        solve_args: passed on to solver
        level: level of confidence intervals
        useJacobian: whether to include the Jacobian term (uses self.C defined in fitting)
        dispersion: dispersion passed on to selected_targets; estimated if None
        Notes
        -----
        Quantities used internally (already computed by `fit` and
        `_setup_implied_gaussian`):
        observed_target, target_cov, target_score_cov: from selected_targets
        init_soln: (opt_state) initial (observed) value of optimization variables
        cond_mean: conditional mean of optimization variables (from _setup_implied_gaussian)
        cond_cov: conditional variance of optimization variables (from _setup_implied_gaussian)
        logdens_linear: (from _setup_implied_gaussian)
        linear_part: like A_scaling (from lasso)
        offset: like b_scaling (from lasso)
        """
self._setup_implied_gaussian() # Calculate useful quantities
(observed_target, target_cov, target_score_cov, alternatives) = self.selected_targets(dispersion)
init_soln = self.observed_opt_state # just the gammas
cond_mean = self.cond_mean
cond_cov = self.cond_cov
logdens_linear = self.logdens_linear
linear_part = self.linear_part
offset = self.offset
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
observed_target = np.atleast_1d(observed_target)
prec_target = inv(target_cov)
prec_opt = self.cond_precision
score_offset = self.observed_score_state + self.opt_offset
# target_lin determines how the conditional mean of optimization variables
# vary with target
# logdens_linear determines how the argument of the optimization density
# depends on the score, not how the mean depends on score, hence the minus sign
target_linear = target_score_cov.T.dot(prec_target)
target_offset = score_offset - target_linear.dot(observed_target)
target_lin = - logdens_linear.dot(target_linear)
target_off = cond_mean - target_lin.dot(observed_target)
if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
prec_opt).dot(
target_lin)
else:
_P = target_linear.T.dot(self.randomizer_prec).dot(target_offset)
_prec = prec_target + (target_linear.T.dot(self.randomizer_prec).dot(target_linear)) - target_lin.T.dot(
prec_opt).dot(target_lin)
C = target_cov.dot(_P - target_lin.T.dot(prec_opt).dot(target_off))
conjugate_arg = prec_opt.dot(cond_mean)
val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
prec_opt,
init_soln,
linear_part,
offset,
self.C,
self.active_dirs,
useJacobian,
**solve_args)
final_estimator = target_cov.dot(_prec).dot(observed_target) \
+ target_cov.dot(target_lin.T.dot(prec_opt.dot(cond_mean - soln))) + C
unbiased_estimator = target_cov.dot(_prec).dot(observed_target) + target_cov.dot(
_P - target_lin.T.dot(prec_opt).dot(target_off))
L = target_lin.T.dot(prec_opt)
observed_info_natural = _prec + L.dot(target_lin) - L.dot(hess.dot(L.T))
observed_info_mean = target_cov.dot(observed_info_natural.dot(target_cov))
Z_scores = final_estimator / np.sqrt(np.diag(observed_info_mean))
pvalues = ndist.cdf(Z_scores)
pvalues = 2 * np.minimum(pvalues, 1 - pvalues)
alpha = 1 - level
quantile = ndist.ppf(1 - alpha / 2.)
intervals = np.vstack([final_estimator -
quantile * np.sqrt(np.diag(observed_info_mean)),
final_estimator +
quantile * np.sqrt(np.diag(observed_info_mean))]).T
log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.
result = pd.DataFrame({'MLE': final_estimator,
'SE': np.sqrt(np.diag(observed_info_mean)),
'Zvalue': Z_scores,
'pvalue': pvalues,
'lower_confidence': intervals[:, 0],
'upper_confidence': intervals[:, 1],
'unbiased': unbiased_estimator})
return result, observed_info_mean, log_ref
def selected_targets(self,
dispersion=None,
solve_args={'tol': 1.e-12, 'min_its': 50}):
X, y = self.loglike.data
n, p = X.shape
XE = self.XE
Q = self.Q
observed_target = restricted_estimator(self.loglike, self.ordered_vars, solve_args=solve_args)
_score_linear = -XE.T.dot(self._W[:, None] * X).T
alternatives = ['twosided'] * len(self.active)
if dispersion is None: # use Pearson's X^2
dispersion = ((y - self.loglike.saturated_loss.mean_function(
XE.dot(observed_target))) ** 2 / self._W).sum() / (n - XE.shape[1])
cov_target = self.QI * dispersion
crosscov_target_score = _score_linear.dot(self.QI).T * dispersion
return (observed_target,
cov_target,
crosscov_target_score,
alternatives)
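# Minimal usage sketch (not part of the original code), assuming the caller supplies
# a design matrix X, response y, an integer group label per column in `groups`, and a
# dict `weights` mapping each group label to its penalty weight:
#
#     conv = group_lasso.gaussian(X, y, groups, weights)
#     signs, soln = conv.fit()
#     result, observed_info_mean, log_ref = conv.selective_MLE(dispersion=None)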
class approximate_grid_inference(object):
def __init__(self,
query,
dispersion,
solve_args={'tol': 1.e-12},
useIP=True):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
query : `gaussian_query`
A Gaussian query which has information
to describe implied Gaussian.
        dispersion : float
            Dispersion passed on to `query.selected_targets`; the observed
            target estimate, target covariance and target/score covariance
            are extracted from the query internally.
        solve_args : dict, optional
            Arguments passed to solver.
        useIP : bool
            Whether to interpolate the log reference density from a coarse grid.
        """
self.solve_args = solve_args
result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]
self.linear_part = query.linear_part
self.offset = query.offset
self.logdens_linear = query.logdens_linear
self.cond_mean = query.cond_mean
self.prec_opt = np.linalg.inv(query.cond_cov)
self.cond_cov = query.cond_cov
self.C = query.C
self.active_dirs = query.active_dirs
(observed_target, target_cov, target_score_cov, alternatives) = query.selected_targets(dispersion)
self.observed_target = observed_target
self.target_score_cov = target_score_cov
self.target_cov = target_cov
self.init_soln = query.observed_opt_state
self.randomizer_prec = query.randomizer_prec
self.score_offset = query.observed_score_state + query.opt_offset
self.ntarget = ntarget = target_cov.shape[0]
_scale = 4 * np.sqrt(np.diag(inverse_info))
if useIP == False:
ngrid = 1000
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
else:
ngrid = 100
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
self.opt_linear = query.opt_linear
self.useIP = useIP
def summary(self,
alternatives=None,
parameter=None,
level=0.9):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
alternatives : [str], optional
Sequence of strings describing the alternatives,
should be values of ['twosided', 'less', 'greater']
parameter : np.array
Hypothesized value for parameter -- defaults to 0.
level : float
Confidence level.
"""
if parameter is not None:
pivots = self._approx_pivots(parameter,
alternatives=alternatives)
else:
pivots = None
pvalues = self._approx_pivots(np.zeros_like(self.observed_target),
alternatives=alternatives)
lower, upper = self._approx_intervals(level=level)
result = pd.DataFrame({'target': self.observed_target,
'pvalue': pvalues,
'lower_confidence': lower,
'upper_confidence': upper})
        if parameter is not None and not np.all(parameter == 0):
result.insert(4, 'pivot', pivots)
result.insert(5, 'parameter', parameter)
return result
def log_reference(self,
observed_target,
target_cov,
target_score_cov,
grid):
"""
Approximate the log of the reference density on a grid.
"""
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
prec_target = np.linalg.inv(target_cov)
target_lin = - self.logdens_linear.dot(target_score_cov.T.dot(prec_target))
ref_hat = []
for k in range(grid.shape[0]):
# in the usual D = N + Gamma theta.hat,
# target_lin is "something" times Gamma,
# where "something" comes from implied Gaussian
# cond_mean is "something" times D
# Gamma is target_score_cov.T.dot(prec_target)
num_opt = self.prec_opt.shape[0]
num_con = self.linear_part.shape[0]
cond_mean_grid = (target_lin.dot(np.atleast_1d(grid[k] - observed_target)) +
self.cond_mean)
#direction for decomposing o
eta = -self.prec_opt.dot(self.logdens_linear.dot(target_score_cov.T))
            implied_mean = eta.T.dot(cond_mean_grid).item()
            implied_cov = eta.T.dot(self.cond_cov).dot(eta).item()
implied_prec = 1./implied_cov
_A = self.cond_cov.dot(eta) * implied_prec
R = np.identity(num_opt) - _A.dot(eta.T)
A = self.linear_part.dot(_A).reshape((-1,))
b = self.offset-self.linear_part.dot(R).dot(self.init_soln)
conjugate_arg = implied_mean * implied_prec
val, soln, _ = solver(np.asarray([conjugate_arg]),
np.reshape(implied_prec, (1,1)),
eta.T.dot(self.init_soln),
A.reshape((A.shape[0],1)),
b,
**self.solve_args)
gamma_ = _A.dot(soln) + R.dot(self.init_soln)
log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs)
ref_hat.append(-val - ((conjugate_arg ** 2) * implied_cov)/ 2. + log_jacob[0])
return np.asarray(ref_hat)
def _construct_families(self):
self._construct_density()
self._families = []
for m in range(self.ntarget):
p = self.target_score_cov.shape[1]
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
var_target = 1. / ((self.precs[m])[0, 0])
log_ref = self.log_reference(observed_target_uni,
target_cov_uni,
target_score_cov_uni,
self.stat_grid[m])
if self.useIP == False:
logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(self.stat_grid[m],
np.exp(logW)))
else:
approx_fn = interp1d(self.stat_grid[m],
log_ref,
kind='quadratic',
bounds_error=False,
fill_value='extrapolate')
grid = np.linspace(self.stat_grid[m].min(), self.stat_grid[m].max(), 1000)
logW = (approx_fn(grid) -
0.5 * (grid - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(grid,
np.exp(logW)))
def _approx_pivots(self,
mean_parameter,
alternatives=None):
if not hasattr(self, "_families"):
self._construct_families()
if alternatives is None:
alternatives = ['twosided'] * self.ntarget
pivot = []
for m in range(self.ntarget):
family = self._families[m]
var_target = 1. / ((self.precs[m])[0, 0])
mean = self.S[m].dot(mean_parameter[m].reshape((1,))) + self.r[m]
_cdf = family.cdf((mean[0] - self.observed_target[m]) / var_target, x=self.observed_target[m])
print("variable completed ", m)
if alternatives[m] == 'twosided':
pivot.append(2 * min(_cdf, 1 - _cdf))
elif alternatives[m] == 'greater':
pivot.append(1 - _cdf)
elif alternatives[m] == 'less':
pivot.append(_cdf)
else:
raise ValueError('alternative should be in ["twosided", "less", "greater"]')
return pivot
def _approx_intervals(self,
level=0.9):
if not hasattr(self, "_families"):
self._construct_families()
lower, upper = [], []
for m in range(self.ntarget):
# construction of intervals from families follows `selectinf.learning.core`
family = self._families[m]
observed_target = self.observed_target[m]
l, u = family.equal_tailed_interval(observed_target,
alpha=1 - level)
var_target = 1. / ((self.precs[m])[0, 0])
lower.append(l * var_target + observed_target)
upper.append(u * var_target + observed_target)
return np.asarray(lower), np.asarray(upper)
### Private method
def _construct_density(self):
precs = {}
S = {}
r = {}
p = self.target_score_cov.shape[1]
for m in range(self.ntarget):
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
prec_target = 1. / target_cov_uni
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
target_linear = target_score_cov_uni.T.dot(prec_target)
target_offset = (self.score_offset - target_linear.dot(observed_target_uni)).reshape(
(target_linear.shape[0],))
target_lin = -self.logdens_linear.dot(target_linear)
target_off = (self.cond_mean - target_lin.dot(observed_target_uni)).reshape((target_lin.shape[0],))
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
self.prec_opt).dot(target_lin)
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_r = (1. / _prec).dot(target_lin.T.dot(self.prec_opt).dot(target_off) - _P)
_S = np.linalg.inv(_prec).dot(prec_target)
S[m] = _S
r[m] = _r
precs[m] = _prec
self.precs = precs
self.S = S
self.r = r
def solve_barrier_affine_jacobian_py(conjugate_arg,
precision,
feasible_point,
con_linear,
con_offset,
C,
active_dirs,
useJacobian=True,
step=1,
nstep=2000,
min_its=500,
tol=1.e-12):
"""
This needs to be updated to actually use the Jacobian information (in self.C)
arguments
conjugate_arg: \\bar{\\Sigma}^{-1} \\bar{\\mu}
precision: \\bar{\\Sigma}^{-1}
feasible_point: gamma's from fitting
con_linear: linear part of affine constraint used for barrier function
con_offset: offset part of affine constraint used for barrier function
C: V^T Q^{-1} \\Lambda V
active_dirs:
"""
scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
if feasible_point is None:
feasible_point = 1. / scaling
def objective(gs):
p1 = -gs.T.dot(conjugate_arg)
p2 = gs.T.dot(precision).dot(gs) / 2.
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[0]
else:
p3 = 0
p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).sum()
return p1 + p2 + p3 + p4
def grad(gs):
p1 = -conjugate_arg + precision.dot(gs)
p2 = -con_linear.T.dot(1. / (scaling + con_offset - con_linear.dot(gs)))
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[1]
else:
p3 = 0
p4 = 1. / (con_offset - con_linear.dot(gs))
return p1 + p2 + p3 + p4
def barrier_hessian(gs): # contribution of barrier and jacobian to hessian
p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.)
+ 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear)
if useJacobian:
p2 = - jacobian_grad_hess(gs, C, active_dirs)[2]
else:
p2 = 0
return p1 + p2
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
cur_grad = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * cur_grad
if np.all(con_offset - con_linear.dot(proposal) > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
count += 1
proposal = current - step * cur_grad
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
if count >= 20:
if not (np.isnan(proposed_value) or np.isnan(current_value)):
break
else:
raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= min_its:
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = inv(precision + barrier_hessian(current))
return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
"""Calculate Gamma^minus (as a function of gamma vector, active directions)
"""
to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]
return block_diag(*[i for gp in to_diag for i in gp])
def jacobian_grad_hess(gamma, C, active_dirs):
""" Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and hessian (gamma.size square matrix)
"""
if C.shape == (0, 0): # when all groups are size one, C will be an empty array
return 0, 0, 0
else:
GammaMinus = calc_GammaMinus(gamma, active_dirs)
# eigendecomposition
#evalues, evectors = eig(GammaMinus + C)
# log Jacobian
#J = log(evalues).sum()
J = np.log(np.linalg.det(GammaMinus + C))
# inverse
#GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T))
GpC_inv = np.linalg.inv(GammaMinus + C)
# summing matrix (gamma.size by C.shape[0])
S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
# gradient
grad_J = S.dot(GpC_inv.diagonal())
# hessian
hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
return J, grad_J, hess_J
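# A minimal sanity-check sketch (hypothetical helper): it verifies numerically
# that the gradient returned by `jacobian_grad_hess` matches a central finite
# difference of the log-Jacobian for a small, made-up `active_dirs`/`C` pair.
# It assumes this module's imports (numpy as np, scipy.linalg.block_diag) are
# in scope, as the functions above require.
def _finite_difference_check_jacobian(eps=1e-6, seed=0):
    rng = np.random.RandomState(seed)
    # two groups whose direction vectors have sizes 3 and 2; only the sizes
    # matter for calc_GammaMinus / jacobian_grad_hess
    active_dirs = {0: np.ones(3), 1: np.ones(2)}
    gamma = np.array([0.7, 1.3])
    dim = sum(ug.size - 1 for ug in active_dirs.values())
    A = rng.standard_normal((dim, dim))
    C = A.dot(A.T) + dim * np.identity(dim)  # symmetric positive definite
    _, grad_J, _ = jacobian_grad_hess(gamma, C, active_dirs)
    def log_jacobian(g):
        return np.log(np.linalg.det(calc_GammaMinus(g, active_dirs) + C))
    finite_diff = np.array([(log_jacobian(gamma + eps * e) - log_jacobian(gamma - eps * e)) / (2 * eps)
                            for e in np.identity(gamma.size)])
    return np.allclose(grad_J, finite_diff, atol=1e-4)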
def _check_groups(groups):
"""Make sure that the user-specific groups are ok
There are a number of assumptions that group_lasso makes about
how groups are specified. Specifically, we assume that
`groups` is a 1-d array_like of integers that are sorted in
increasing order, start at 0, and have no gaps (e.g., if there
is a group 2 and a group 4, there must also be at least one
feature in group 3).
This function checks the user-specified group scheme and
raises an exception if it finds any problems.
Sorting feature groups is potentially tedious for the user and
in future we might do this for them.
"""
# check array_like
agroups = np.array(groups)
# check dimension
if len(agroups.shape) != 1:
raise ValueError("Groups are not a 1D array_like")
# check sorted
if np.any(agroups[:-1] > agroups[1:]):
raise ValueError("Groups are not sorted")
# check integers
if not np.issubdtype(agroups.dtype, np.integer):
raise TypeError("Groups are not integers")
# check starts with 0
if not np.amin(agroups) == 0:
raise ValueError("First group is not 0")
# check for no skipped groups
if not np.all(np.diff(np.unique(agroups)) == 1):
raise ValueError("Some group is skipped")
|
2300 | import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
class ODOWriter(BaseWriter):
"""
Writes items to an odo destination. https://odo.readthedocs.org/en/latest/
Needed parameters:
- schema (object)
schema object.
- odo_uri (str)
ODO valid destination uri.
"""
requirements = {
'schema': {'type': object, 'required': True},
'odo_uri': {'type': six.string_types, 'required': True}
}
def __init__(self, options):
super(ODOWriter, self).__init__(options)
from flatson import Flatson
schema = self.read_option('schema', None)
self.odo_uri = self.read_option('odo_uri', None)
self.flatson = Flatson(schema)
self.logger.info('ODOWriter has been initiated. Writing to: {}'.format(self.odo_uri))
@retry_long
def write(self, dump_path, group_key=''):
from odo import odo, resource, discover
import pandas as pd
with gzip.open(dump_path) as f:
lines = [json.loads(line.replace('\n', '')) for line in f.readlines()]
flattened_lines = (self.flatson.flatten(line) for line in lines)
pf = pd.DataFrame(flattened_lines, columns=self.flatson.fieldnames)
dshape = discover(pf)
odo(pf, resource(self.odo_uri), dshape=dshape)
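# A hedged, illustrative sketch of the flatten-then-DataFrame step performed
# in write() above, using a hypothetical JSON schema and record. It assumes
# the optional `flatson` and `pandas` dependencies are installed.
def _example_flatten_to_frame():
    from flatson import Flatson
    import pandas as pd
    example_schema = {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'address': {'type': 'object',
                        'properties': {'city': {'type': 'string'}}},
        },
    }
    records = [{'name': 'Ada', 'address': {'city': 'London'}}]
    flattener = Flatson(example_schema)
    flattened = (flattener.flatten(record) for record in records)
    return pd.DataFrame(flattened, columns=flattener.fieldnames)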
|
2312 | import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
# each pack in the index is a directory, so copy the whole tree
shutil.copytree(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Print a message every minute so the job is not killed for producing no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
|
2321 | r"""Training and evaluating quantum kernels
===========================================
.. meta::
:property="og:description": Kernels and alignment training with Pennylane.
:property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png
.. related::
tutorial_kernel_based_training Kernel-based training with scikit-learn
tutorial_data_reuploading_classifier Classification with data reuploading
*Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Posted: 24 June 2021*
Kernel methods are one of the cornerstones of classical machine learning.
Here we are concerned with kernels that can be evaluated on quantum computers,
*quantum kernels* for short.
In this tutorial you will learn how to evaluate kernels, use them for classification
and train them with gradient-based optimization, and all that using the
functionality of PennyLane's
`kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__.
The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own
`QHack <https://qhack.ai/>`__ hackathon.
What are kernel methods?
------------------------
To understand what a kernel method does, let's first revisit
one of the simplest methods to assign binary labels to datapoints:
linear classification.
Imagine we want to discern two different classes of points that lie in
different corners of the plane. A linear classifier corresponds to
drawing a line and assigning different labels to the regions on opposing
sides of the line:
.. figure:: ../demonstrations/kernels_module/linear_classification.png
:align: center
:width: 30%
We can mathematically formalize this by assigning the label :math:`y`
via
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b).
The vector :math:`\boldsymbol{w}` points perpendicular to the line and
thus determines its slope. The independent term :math:`b` specifies the
position on the plane. In this form, linear classification can also be
extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a
line does not divide the entire space into two regions anymore. Instead
one needs a *hyperplane*. It is immediately clear that this method is
not very powerful, as datasets that are not separable by a hyperplane
can't be classified without error.
We can actually sneak around this limitation by performing a neat trick:
if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our
datapoints into a larger *feature space* and then perform linear
classification there, we could actually realise non-linear
classification in our original space!
.. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png
:align: center
:width: 65%
If we go back to the expression for our prediction and include the
embedding, we get
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b).
We will forgo one tiny step, but it can be shown that for the purpose
of optimal classification, we can choose the vector defining the
decision boundary as a linear combination of the embedded datapoints
:math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting
this into the formula yields
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right).
This rewriting might not seem useful at first, but notice the above
formula only contains inner products between vectors in the embedding
space:
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle.
We call this function the *kernel*. It provides the advantage that we can often
find an explicit formula for the kernel :math:`k` that makes it
superfluous to actually perform the (potentially expensive) embedding
:math:`\phi`. Consider for example the following embedding and the
associated kernel:
.. math::
\phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\
k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2.
This means by just replacing the regular scalar product in our linear
classification with the map :math:`k`, we can actually express much more
intricate decision boundaries!
This is very important, because in many interesting cases the embedding :math:`\phi`
will be much costlier to compute than the kernel :math:`k`.
In this demo, we will explore one particular kind of kernel
that can be realized on near-term quantum computers, namely *Quantum
Embedding Kernels (QEKs)*. These are kernels that arise from embedding
data into the space of quantum states. We formalize this by considering
a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps
a datapoint :math:`\boldsymbol{x}` to the state
.. math::
|\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle.
The kernel value is then given by the *overlap* of the associated
embedded quantum states
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2.
"""
##############################################################################
# A toy problem
# -------------
# In this demo, we will treat a toy problem that showcases the
# inner workings of classification with quantum embedding kernels,
# training variational embedding kernels and the available functionalities
# to do both in PennyLane. We of course need to start with some imports:
from pennylane import numpy as np
import matplotlib as mpl
np.random.seed(1359)
##############################################################################
# And we proceed right away to create a dataset to work with, the
# ``DoubleCake`` dataset. Firstly, we define two functions to enable us to
# generate the data.
# The details of these functions are not essential for understanding the demo,
# so don't mind them if they are confusing.
def _make_circular_data(num_sectors):
"""Generate datapoints arranged in an even circle."""
center_indices = np.array(range(0, num_sectors))
sector_angle = 2 * np.pi / num_sectors
angles = (center_indices + 0.5) * sector_angle
x = 0.7 * np.cos(angles)
y = 0.7 * np.sin(angles)
labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1
return x, y, labels
def make_double_cake_data(num_sectors):
x1, y1, labels1 = _make_circular_data(num_sectors)
x2, y2, labels2 = _make_circular_data(num_sectors)
# x and y coordinates of the datapoints
x = np.hstack([x1, 0.5 * x2])
y = np.hstack([y1, 0.5 * y2])
# Canonical form of dataset
X = np.vstack([x, y]).T
labels = np.hstack([labels1, -1 * labels2])
# Canonical form of labels
Y = labels.astype(int)
return X, Y
##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:
def plot_double_cake_data(X, Y, ax, num_sectors=None):
"""Plot double cake data and corresponding sectors."""
x, y = X.T
cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")
if num_sectors is not None:
sector_angle = 360 / num_sectors
for i in range(num_sectors):
color = ["#FF0000", "#0000FF"][(i % 2)]
other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
1,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=color,
alpha=0.1,
width=0.5,
)
)
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
0.5,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=other_color,
alpha=0.1,
)
)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
ax.axis("off")
return ax
##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:
import matplotlib.pyplot as plt
num_sectors = 3
X, Y = make_double_cake_data(num_sectors)
ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)
##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as a building block. Let's start by defining this layer:
import pennylane as qml
def layer(x, params, wires, i0=0, inc=1):
"""Building block of the embedding ansatz"""
i = i0
for j, wire in enumerate(wires):
qml.Hadamard(wires=[wire])
qml.RZ(x[i % len(x)], wires=[wire])
i += inc
qml.RY(params[0, j], wires=[wire])
qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])
##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.
def ansatz(x, params, wires):
"""The embedding ansatz"""
for j, layer_params in enumerate(params):
layer(x, layer_params, wires, i0=j * len(wires))
adjoint_ansatz = qml.adjoint(ansatz)
def random_params(num_wires, num_layers):
"""Generate random variational parameters in the shape for the ansatz."""
return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)
##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.
dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()
##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.
@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
ansatz(x1, params, wires=wires)
adjoint_ansatz(x2, params, wires=wires)
return qml.probs(wires=wires)
##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:
def kernel(x1, x2, params):
return kernel_circuit(x1, x2, params)[0]
##############################################################################
#
# .. note::
# An alternative way to set up the kernel circuit in PennyLane would be
# to use the observable type
# `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
# This is shown in the
# `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
# background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.
init_params = random_params(num_wires=5, num_layers=6)
##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:
kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")
##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of the symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.
init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)
with np.printoptions(precision=3, suppress=True):
print(K_init)
##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone cannot be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
from sklearn.svm import SVC
##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
# This step does *not* modify the variational parameters in our circuit
# ansatz. What it does is solve a different optimization task for the
# :math:`\alpha` and :math:`b` vectors we introduced in the beginning.
svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)
##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.
def accuracy(classifier, X, Y_target):
return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)
accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")
##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex data sets. To this end we will introduce a
# second helper method.
def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
_xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
_zz = np.zeros_like(_xx)
for idx in np.ndindex(*_xx.shape):
_zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])
plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
ax.contourf(
_xx,
_yy,
_zz,
cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
alpha=0.2,
levels=[-1, 0, 1],
)
plot_double_cake_data(X, Y, ax)
return plot_data
##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:
init_plot_data = plot_decision_boundaries(svm, plt.gca())
##############################################################################
# We see the outer points in the dataset can be correctly classified, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can give
# values to those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
# \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
# Seen from a more theoretical side, :math:`\operatorname{KA}`
# is nothing else than the cosine of the angle between the kernel
# matrices :math:`K_1` and :math:`K_2` if we see them as vectors
# in the space of matrices with the Hilbert-Schmidt (or
# Frobenius) scalar product
# :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
# reinforces the geometric picture of how this measure relates
# to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
# k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
# \operatorname{KTA}_{\boldsymbol{y}}(K)
# = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
# = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means that good performance
# guarantees good alignment, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:
kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")
##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step; here we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
# Currently, the function ``qml.kernels.target_alignment`` is not
# differentiable yet, making it unfit for gradient descent optimization.
# We therefore first define a differentiable version of this function.
def target_alignment(
X,
Y,
kernel,
assume_normalized_kernel=False,
rescale_class_labels=True,
):
"""Kernel-target alignment between kernel and labels."""
K = qml.kernels.square_kernel_matrix(
X,
kernel,
assume_normalized_kernel=assume_normalized_kernel,
)
if rescale_class_labels:
nplus = np.count_nonzero(np.array(Y) == 1)
nminus = len(Y) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
else:
_Y = np.array(Y)
T = np.outer(_Y, _Y)
inner_product = np.sum(K * T)
norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
inner_product = inner_product / norm
return inner_product
params = init_params
opt = qml.GradientDescentOptimizer(0.2)
for i in range(500):
# Choose subset of datapoints to compute the KTA on.
subset = np.random.choice(list(range(len(X))), 4)
# Define the cost function for optimization
cost = lambda _params: -target_alignment(
X[subset],
Y[subset],
lambda x1, x2: kernel(x1, x2, _params),
assume_normalized_kernel=True,
)
# Optimization step
params = opt.step(cost, params)
# Report the alignment on the full dataset every 50 steps.
if (i + 1) % 50 == 0:
current_alignment = target_alignment(
X,
Y,
lambda x1, x2: kernel(x1, x2, params),
assume_normalized_kernel=True,
)
print(f"Step {i+1} - Alignment = {current_alignment:.3f}")
##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:
# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)
# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)
# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)
##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:
accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")
##############################################################################
# We have now achieved perfect classification! 🎆
#
# Since SVMs are known to have good generalisation behavior, it is
# interesting to inspect the decision boundaries of
# our classifier:
trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())
##############################################################################
# Indeed, we see that now not only does every data instance fall within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach gets the best of both
# worlds: on the one hand it can adjust itself to the dataset, and on the
# other hand it is not expected to suffer from poor generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, and <NAME>.
# "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
# `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
# <NAME>, <NAME>, and <NAME>.
# "An overview of kernel alignment and its applications."
# `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
|