id | content
---|---
117 | from pytest import raises
from discopy.cartesian import *
def test_Box_repr():
f = Box('f', 1, 2, lambda x: (x, x))
assert "Box('f', 1, 2" in repr(f)
def test_Function_str():
f = Function(2, 1, lambda x, y: x + y)
assert 'Function(dom=2, cod=1,' in str(f)
def test_Function_call():
f = Swap(2, 1)
values = (2, 3)
with raises(TypeError) as err:
f(*values)
assert str(err.value) == messages.expected_input_length(f, values)
def test_Function_then():
f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
assert Function.id(2).then(*(f, g))(20, 21) == 42
def test_Function_then_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f >> g
assert str(err.value) == messages.type_err(Function, g)
g = Function.id(2)
with raises(AxiomError) as err:
f >> g
assert str(err.value) == messages.does_not_compose(f, g)
def test_Function_tensor():
assert Function.id(3)(1, 2, 3)\
== Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)
def test_Function_tensor_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f @ g
assert str(err.value) == messages.type_err(Function, g)
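# Added example (a sketch, not from the original test file): sequential
# composition with >> mirrors the .then() test above; the 1 -> 2 `copy`
# function here is hypothetical but built the same way as the other Functions.
def test_Function_composition_example():
    add = Function(2, 1, lambda x, y: x + y)
    copy = Function(1, 2, lambda x: (x, x))
    assert (copy >> add)(21) == 42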
|
136 | import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
from mkdist import bsp_copy_files
import rtconfig
library_dir = os.path.join(dist_dir, 'libraries')
print("=> copy nrf52 bsp libraries")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
bsp_copy_files(library_path, library_dir)
|
149 | from itertools import groupby
class Solution:
def countAndSay(self, n):
def gen(s):
return "".join(str(len(list(g))) + k for k, g in groupby(s))
s, i = "1", 1
while i < n:
s = gen(s)
i += 1
return s
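# Added example (not part of the original solution): quick sanity checks of the
# run-length step above -- "1211" reads "21" as "one 2, one 1".
if __name__ == "__main__":
    sol = Solution()
    assert sol.countAndSay(1) == "1"
    assert sol.countAndSay(4) == "1211"
    print(sol.countAndSay(5))  # -> "111221"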
|
157 | import json
import aiohttp
async def request(url, payload=None, params=None, headers=None):
headers = {'content-type': 'application/json', **(headers or {})}
data = payload and json.dumps(payload)
async with aiohttp.ClientSession() as client:
async with client.post(
url, data=data, params=params, headers=headers) as resp:
# TODO: Check response status
json_response = await resp.json()
return json_response
async def get_updates(base_url, timeout, offset):
params = {
'timeout': timeout,
'offset': offset
}
return await request(f'{base_url}/getUpdates', params=params)
async def send_message(base_url, chat_id, text, reply_markup=None):
payload = {
'chat_id': chat_id,
'text': text
}
if reply_markup is not None:
payload['reply_markup'] = reply_markup
return await request(f'{base_url}/sendMessage', payload)
async def answer_callback_query(
base_url, callback_query_id, text, show_alert,
url=None, cache_time=None):
payload = {
'callback_query_id': callback_query_id,
'text': text,
'show_alert': show_alert
}
if url is not None:
payload['url'] = url
if cache_time is not None:
payload['cache_time'] = cache_time
return await request(f'{base_url}/answerCallbackQuery', payload)
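# Added usage sketch (not part of the original module): a minimal long-polling
# echo loop built on the helpers above. The endpoints follow the Telegram Bot
# API; BOT_TOKEN is a placeholder and error handling is omitted.
import asyncio

async def echo_forever(token="BOT_TOKEN"):
    base_url = f'https://api.telegram.org/bot{token}'
    offset = 0
    while True:
        updates = await get_updates(base_url, timeout=30, offset=offset)
        for update in updates.get('result', []):
            offset = update['update_id'] + 1
            message = update.get('message') or {}
            if 'text' in message:
                await send_message(base_url, message['chat']['id'], message['text'])

# asyncio.run(echo_forever("<real token>"))  # uncomment to run against a real bot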
|
185 | import traceback
from pprint import pformat
from threading import Thread
import itchat
import logging
from wxpy.chat import Chat
from wxpy.chats import Chats
from wxpy.friend import Friend
from wxpy.group import Group
from wxpy.message import MessageConfigs, Messages, Message, MessageConfig
from wxpy.mp import MP
from wxpy.response import ResponseError
from wxpy.user import User
from wxpy.utils.constants import SYSTEM
from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list
logger = logging.getLogger('wxpy')
class Robot(object):
"""
A robot object used to log in to and operate a WeChat account; covers most Web WeChat features
"""
def __init__(
self, save_path=None, console_qr=False, qr_path=None,
qr_callback=None, login_callback=None, logout_callback=None
):
"""
:param save_path:
| File path used to save or load the login status, e.g. 'wxpy.pkl'; if empty, no attempt is made to load it.
| With this parameter set, the login status can be reloaded within a short time, avoiding repeated QR scans; a new login is requested once it expires.
:param console_qr: Show the login QR code in the terminal (requires the Pillow module)
:param qr_path: Path for saving the QR code
:param qr_callback: Callback invoked when the QR code is obtained; receives: uuid, status, qrcode
:param login_callback: Callback invoked on login; receives the same arguments as above
:param logout_callback: Callback invoked on logout; receives the same arguments as above
"""
self.core = itchat.Core()
itchat.instanceList.append(self)
self.core.auto_login(
hotReload=bool(save_path), statusStorageDir=save_path,
enableCmdQR=console_qr, picDir=qr_path, qrCallback=qr_callback,
loginCallback=login_callback, exitCallback=logout_callback
)
self.message_configs = MessageConfigs(self)
self.messages = Messages(robot=self)
self.file_helper = Chat(wrap_user_name('filehelper'))
self.file_helper.robot = self
self.file_helper.nick_name = '文件传输助手'
self.self = Chat(self.core.loginInfo['User'])
self.self.robot = self
self.save_path = save_path
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.self.name)
@handle_response()
def logout(self):
"""
Log out of the current account
"""
return self.core.logout()
@property
def alive(self):
"""
Current login status
:return: True if logged in, otherwise False
"""
return self.core.alive
@alive.setter
def alive(self, value):
self.core.alive = value
def dump_login_status(self, save_path=None):
return self.core.dump_login_status(save_path or self.save_path)
# chats
def except_self(self, chats_or_dicts):
"""
Exclude the bot itself from a collection of chat objects or a list of user dicts
:param chats_or_dicts: Collection of chat objects or list of user dicts
:return: The list with the bot itself excluded
"""
return list(filter(lambda x: get_user_name(x) != self.self.user_name, chats_or_dicts))
def chats(self, update=False):
"""
Get all chat objects
:param update: Whether to refresh the data
:return: Collection of chat objects
"""
return Chats(self.friends(update) + self.groups(update) + self.mps(update), self)
def friends(self, update=False):
"""
Get all friends
:param update: Whether to refresh the data
:return: Collection of chat objects
"""
@handle_response(Friend)
def do():
return self.core.get_friends(update=update)
ret = do()
ret.source = self
return ret
@handle_response(Group)
def groups(self, update=False, contact_only=False):
"""
Get all group chats
:param update: Whether to refresh the data
:param contact_only: Whether to limit results to groups saved as contacts
:return: Collection of group chats
"""
return self.core.get_chatrooms(update=update, contactOnly=contact_only)
@handle_response(MP)
def mps(self, update=False):
"""
Get all official (MP) accounts
:param update: Whether to refresh the data
:return: Collection of chat objects
"""
return self.core.get_mps(update=update)
@handle_response(User)
def user_details(self, user_or_users, chunk_size=50):
"""
Get detailed information (region, sex, signature, etc.) for one user or a batch of users; not usable for group-chat members
:param user_or_users: One or more user objects or user_name values
:param chunk_size: Batch size used when splitting requests, currently 50
:return: Detailed information for the user(s)
"""
def chunks():
total = ensure_list(user_or_users)
for i in range(0, len(total), chunk_size):
yield total[i:i + chunk_size]
@handle_response()
def process_one_chunk(_chunk):
return self.core.update_friend(userName=get_user_name(_chunk))
if isinstance(user_or_users, (list, tuple)):
ret = list()
for chunk in chunks():
chunk_ret = process_one_chunk(chunk)
if isinstance(chunk_ret, list):
ret += chunk_ret
else:
ret.append(chunk_ret)
return ret
else:
return process_one_chunk(user_or_users)
def search(self, name=None, **attributes):
"""
Search across all types of chat objects
:param name: Name (can be a nickname, remark name, etc.)
:param attributes: Attribute key/value pairs; keys can be sex, province, city, etc. For example, province='广东'
:return: Collection of matching chat objects
"""
return self.chats().search(name, **attributes)
# add / create
@handle_response()
def add_friend(self, user, verify_content=''):
"""
Add a user as a friend
:param user: User object or user name
:param verify_content: Verification message
"""
return self.core.add_friend(
userName=get_user_name(user),
status=2,
verifyContent=verify_content,
autoUpdate=True
)
@handle_response()
def accept_friend(self, user, verify_content=''):
"""
Accept a user as a friend
:param user: User object or user name
:param verify_content: Verification message
"""
# Todo: verify that the friend-acceptance API is usable, and return the new friend directly when accepting
return self.core.add_friend(
userName=get_user_name(user),
status=3,
verifyContent=verify_content,
autoUpdate=True
)
def create_group(self, users, topic=None):
"""
Create a new group chat
:param users: List of users
:param topic: Group name
:return: A new group chat object if creation succeeds
"""
@handle_response()
def request():
return self.core.create_chatroom(
memberList=wrap_user_name(users),
topic=topic or ''
)
ret = request()
user_name = ret.get('ChatRoomName')
if user_name:
return Group(self.core.update_chatroom(userName=user_name))
else:
raise ResponseError('Failed to create group:\n{}'.format(pformat(ret)))
# messages
def _process_message(self, msg):
"""
Process a received message
"""
if not self.alive:
return
func, run_async = self.message_configs.get_func(msg)
if not func:
return
def process():
# noinspection PyBroadException
try:
ret = func(msg)
if ret is not None:
if isinstance(ret, (tuple, list)):
self.core.send(
msg=str(ret[0]),
toUserName=msg.chat.user_name,
mediaId=ret[1]
)
else:
self.core.send(
msg=str(ret),
toUserName=msg.chat.user_name
)
except:
logger.warning(
'An error occurred in registered function, '
'use `Robot().start(debug=True)` to show detailed information')
logger.debug(traceback.format_exc())
if run_async:
Thread(target=process).start()
else:
process()
def register(
self, chats=None, msg_types=None,
except_self=True, run_async=True, enabled=True
):
"""
Decorator: register a message configuration
:param chats: One chat object or chat type, or a list of them; matches all chats when empty
:param msg_types: One message type, or a list of them; matches all message types when empty (except SYSTEM messages)
:param except_self: Exclude messages sent by yourself from your phone
:param run_async: Run the configured function asynchronously to improve responsiveness
:param enabled: Default enabled state of this configuration; can be toggled dynamically later
"""
def register(func):
self.message_configs.append(MessageConfig(
robot=self, func=func, chats=chats, msg_types=msg_types,
except_self=except_self, run_async=run_async, enabled=enabled
))
return func
return register
def start(self, block=True):
"""
Start listening for and processing messages
:param block: Whether to block the current thread; when False, run in a new thread
"""
def listen():
logger.info('{} Auto-reply started.'.format(self))
try:
while self.alive:
msg = Message(self.core.msgList.get(), self)
if msg.type is not SYSTEM:
self.messages.append(msg)
self._process_message(msg)
except KeyboardInterrupt:
logger.info('KeyboardInterrupt received, ending...')
self.alive = False
if self.core.useHotReload:
self.dump_login_status()
logger.info('Bye.')
if block:
listen()
else:
t = Thread(target=listen, daemon=True)
t.start()
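# Added usage sketch (not part of the original module; the TEXT constant and
# msg.text attribute are assumed from wxpy's public API): a minimal echo bot
# using register() and start() defined above.
#
#     robot = Robot(save_path='wxpy.pkl')
#
#     @robot.register(msg_types=TEXT)   # TEXT assumed importable from wxpy.utils.constants
#     def reply(msg):
#         return 'Received: {}'.format(msg.text)
#
#     robot.start(block=True)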
|
201 | import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import DropboxOAuth2Provider
class DropboxOAuth2Adapter(OAuth2Adapter):
provider_id = DropboxOAuth2Provider.id
access_token_url = "https://api.dropbox.com/oauth2/token"
authorize_url = "https://www.dropbox.com/oauth2/authorize"
profile_url = "https://api.dropbox.com/2/users/get_current_account"
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
response = requests.post(
self.profile_url,
headers={"Authorization": "Bearer %s" % (token.token,)},
)
response.raise_for_status()
return self.get_provider().sociallogin_from_response(request, response.json())
oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
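# Added configuration sketch (not part of the original module): typical
# django-allauth settings to enable this provider; the client id/secret are
# placeholders, and the 'APP' settings form is one of allauth's supported ways
# to supply credentials.
#
#     INSTALLED_APPS += [
#         "allauth",
#         "allauth.socialaccount",
#         "allauth.socialaccount.providers.dropbox",
#     ]
#     SOCIALACCOUNT_PROVIDERS = {
#         "dropbox": {"APP": {"client_id": "<app-key>", "secret": "<app-secret>"}},
#     }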
|
274 | import logging
from collections import Counter
from django.core.management.base import BaseCommand
from django.db.models import Q
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
from TWLight.applications.signals import Reminder
from TWLight.users.models import Editor
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
# This is not DRY. Originally, this pulled the queryset from
# TWLight.applications.views.ListApplicationsView.get_queryset().
# But that now expects a request object. So, we did a copy/paste.
# We're actually getting apps with a status of PENDING, QUESTION or APPROVED
# whose coordinators have the corresponding reminder preference enabled,
# for partners with a status of AVAILABLE.
all_apps = (
Application.objects.filter(
Q(
partner__coordinator__editor__user__userprofile__pending_app_reminders=True
)
& Q(status=Application.PENDING)
| Q(
partner__coordinator__editor__user__userprofile__discussion_app_reminders=True
)
& Q(status=Application.QUESTION)
| Q(
partner__coordinator__editor__user__userprofile__approved_app_reminders=True
)
& Q(status=Application.APPROVED),
partner__status__in=[Partner.AVAILABLE],
editor__isnull=False,
)
.exclude(editor__user__groups__name="restricted")
.order_by("status", "partner", "date_created")
)
# A deduplicated Counter of coordinators from the filtered app queryset, along
# with a count of how many total apps each of them has
coordinators = Counter(
all_apps.values_list(
"partner__coordinator__editor",
"partner__coordinator__email",
"partner__coordinator__editor__user__userprofile__lang",
)
)
for coordinator, count in list(coordinators.items()):
try:
# We create a dictionary with the three status codes
# we'd want to send emails for, and their corresponding
# counts.
app_status_and_count = {
Application.PENDING: all_apps.filter(
status=Application.PENDING,
partner__coordinator__editor=coordinator[0],
).count(),
Application.QUESTION: all_apps.filter(
status=Application.QUESTION,
partner__coordinator__editor=coordinator[0],
).count(),
Application.APPROVED: all_apps.filter(
status=Application.APPROVED,
partner__coordinator__editor=coordinator[0],
).count(),
}
editor = Editor.objects.get(id=coordinator[0])
except Editor.DoesNotExist:
logger.info(
"Editor {} does not exist; skipping.".format(coordinator[0])
)
continue
# Only bother with the signal if we have a coordinator email.
if coordinator[1]:
Reminder.coordinator_reminder.send(
sender=self.__class__,
app_status_and_count=app_status_and_count,
coordinator_wp_username=editor.wp_username,
coordinator_email=coordinator[1],
coordinator_lang=coordinator[2],
)
|
330 | from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
'''Datasets that raise errors (hollow shapes): han, jakobs1, jakobs2'''
'''Too many shapes, not handled yet: shapes, shirts, swim, trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("Start processing dataset", name[index])
'''Width is not considered for now; everything is represented via scaling'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("Scaling by a factor of", scale[index])
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons
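# Added usage sketch (not from the original file): load the "fu" dataset
# (index 6 in the name list above); requires the corresponding CSV under data/.
if __name__ == "__main__":
    polygons = getData(6)
    print(len(polygons), "polygons loaded")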
|
340 | import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
## use b-tagging to distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
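## Added example (not part of the original cfi): configurations like this are
## usually specialised by cloning; an electron-channel copy could look like the
## following (the electron collection name is assumed to be the standard PAT one).
findTtSemiLepJetCombMaxSumPtWMassElectrons = findTtSemiLepJetCombMaxSumPtWMass.clone(
    leps = cms.InputTag("selectedPatElectrons")
)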
|
351 | from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
|
377 | import os
from functools import wraps
from os.path import join as join_path
from dash import Dash
from flask import make_response, render_template_string, redirect
excluded_resources_endpoints = (
'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',
'/_user', '/auth')
def add_routes(app, authorizer):
"""Adds authentication endpoints to a flask app.
Decorates other endpoints to grant access.
The endpoints are:
* /login
* Method: GET
* /logout
* Method: GET
* Erases cookies
* /auth
* Method: GET
* Validates cookies if present or header authentication
* Header:
'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'
* Sets cookies on login
* Rejects unauthorized users
Parameters
----------
app: flask.Flask or dash.Dash
The flask or dash application
authorizer:
Object providing validate() and clean_cookie(), used to check credentials
and manage the auth cookies. Endpoints listed in the module-level
excluded_resources_endpoints tuple are never wrapped.
"""
def login():
ok, _ = authorizer.validate()
if ok:
return make_response(redirect('/'), 307)
return render_template_string(login_template)
def logout():
_, response = authorizer.clean_cookie()
return response
def auth():
_, response = authorizer.validate()
return response
def authorize_endpoint(function):
@wraps(function)
def authorized_function(*args, **kwargs):
ok, response = authorizer.validate()
if ok:
return function(*args, **kwargs)
return response
return authorized_function
if isinstance(app, Dash):
app = app.server
login_template = load_template('login.html')
app.add_url_rule('/auth', '/auth', auth)
app.add_url_rule('/login', '/login', login)
app.add_url_rule('/logout', '/logout', logout)
for endpoint, function in app.view_functions.items():
if endpoint not in excluded_resources_endpoints:
app.view_functions[endpoint] = authorize_endpoint(function)
def load_template(filename):
"""Loads the login html template."""
pyfile_path = os.path.dirname(os.path.abspath(__file__))
path = join_path(pyfile_path, 'templates', filename)
with open(path, 'r') as f:
return f.read().strip()
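# Added usage sketch (not part of the original module): wiring the routes into
# a Dash app. `DummyAuthorizer` is hypothetical; any object exposing
# validate() -> (ok, response) and clean_cookie() -> (ok, response) works.
#
#     import dash
#     app = dash.Dash(__name__)
#     add_routes(app, DummyAuthorizer(users={'admin': 'secret'}))
#     app.run_server()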
|
432 | from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
class FakeJoystick:
def __init__(self):
self._pressed = [False] * 2
def getRawButton(self, num):
return self._pressed[num]
def press(self, num):
self._pressed[num] = True
def release(self, num):
self._pressed[num] = False
def test_toggle():
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 0)
toggleButton2 = Toggle(joystick, 1)
assert toggleButton.off
joystick.press(0)
assert toggleButton.on
assert toggleButton2.off
joystick.release(0)
assert toggleButton.on
joystick.press(0)
assert toggleButton.off
joystick.release(0)
assert toggleButton.off
joystick.press(1)
assert toggleButton.off
assert toggleButton2.on
def test_toggle_debounce():
# TODO: use simulated time
delay = NotifierDelay(0.5)
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 1, 0.1)
assert toggleButton.off
joystick.press(1)
assert toggleButton.on
joystick.release(1)
joystick.press(1)
joystick.release(1)
assert toggleButton.on
delay.wait()
assert toggleButton.on
joystick.press(1)
assert toggleButton.off
|
505 | from test_plus.test import TestCase
from ...administrative_units.factories import AdministrativeUnitFactory
from ...cases.factories import CaseFactory
from ...channels.factories import ChannelFactory
from ...events.factories import EventFactory
from ...features.factories import FeatureFactory, FeatureOptionFactory
from ...generic.tests.test_views import ReadOnlyViewSetMixin
from ...institutions.factories import InstitutionFactory
from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory
from ...search.tests.mixins import SearchQueryMixin
from ...tags.factories import TagFactory
from ...users.factories import UserFactory
class AdministrativeUnitAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_administrative_unit"
factory_class = AdministrativeUnitFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_case"
factory_class = CaseFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ChannelAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_channel"
factory_class = ChannelFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class DocumentTypeAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_document_type"
factory_class = DocumentTypeFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ReferenceNumberAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_reference_number"
factory_class = ReferenceNumberFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class EventAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_event"
factory_class = EventFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature"
factory_class = FeatureFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureOptionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature_option"
factory_class = FeatureOptionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class InstitutionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_institution"
factory_class = InstitutionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_tag"
factory_class = TagFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_user"
factory_class = UserFactory
initial_count = 1
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["username"], self.obj.username)
|
509 | import json
import re
import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
class TestConsent:
def test_full_flow(self, satosa_config_dict, consent_module_config):
api_url = "https://consent.example.com/api"
redirect_url = "https://consent.example.com/redirect"
consent_module_config["config"]["api_url"] = api_url
consent_module_config["config"]["redirect_url"] = redirect_url
satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# incoming auth req
http_resp = test_client.get("/{}/{}/request".format(satosa_config_dict["BACKEND_MODULES"][0]["name"],
satosa_config_dict["FRONTEND_MODULES"][0]["name"]))
assert http_resp.status_code == 200
verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
with responses.RequestsMock() as rsps:
# fake no previous consent
consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
rsps.add(responses.GET, verify_url_re, status=401)
rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)
# incoming auth resp
http_resp = test_client.get("/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"]))
assert http_resp.status_code == 302
assert http_resp.headers["Location"].startswith(redirect_url)
with responses.RequestsMock() as rsps:
# fake consent
rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)
# incoming consent response
http_resp = test_client.get("/consent/handle_consent")
assert http_resp.status_code == 200
|
511 | import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
|
import collections.abc
class ReadOnlyDict(collections.abc.MutableMapping):
def __init__(self, store):
self.store = store
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
raise TypeError('Cannot modify ReadOnlyDict')
def __delitem__(self, key):
raise TypeError('Cannot modify ReadOnlyDict')
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __str__(self):
return 'ReadOnlyDict(%s)' % self.store
def __repr__(self):
return 'ReadOnlyDict(%r)' % self.store
|
522 | import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
function, backend='gloo'):  # the old 'tcp' backend is no longer available in torch.distributed
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank,
world_size=world_size)
function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
|
532 | import datetime
import io
import json_tricks
import logging
import os
from os.path import (abspath, basename, dirname, exists, expanduser,
join, realpath, relpath, splitext)
import re
import shutil
import sys
from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long,
Str)
from whoosh import fields, qparser, query
from whoosh.util.times import datetime_to_long, long_to_datetime
from .common import get_project_dir
from .media import Media, MediaData, get_media_data
from .directory import Directory
from . import processor
logger = logging.getLogger(__name__)
if sys.version_info[0] > 2:
unicode = str
string_types = (str,)
import csv
else:
string_types = (basestring,)
import backports.csv as csv
INT = fields.NUMERIC(numtype=int)
FLOAT = fields.NUMERIC(numtype=float)
def get_file_saved_time(path):
dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
return dt.ctime()
def _get_sample(fname):
sample = ''
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
sample += fp.readline() + fp.readline()
return sample
def _get_csv_headers(fname):
sample = _get_sample(fname)
sniffer = csv.Sniffer()
has_header = sniffer.has_header(sample)
dialect = sniffer.sniff(sample)
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
header = next(reader)
return has_header, header, dialect
class TagInfo(HasTraits):
name = Str
type = Enum("string", "text", "int", "float", "bool")
default = Any
def __repr__(self):
return 'TagInfo(%r, %r)' % (self.name, self.type)
def _default_default(self):
map = {"string": "", "text": "", "int": 0, "float": 0.0,
"bool": False}
return map[self.type]
def open_file(fname_or_file, mode='rb'):
if hasattr(fname_or_file, 'read'):
return fname_or_file
else:
return open(fname_or_file, mode)
def sanitize_name(name):
name = name.lower()
name = re.sub(r'\s+', '_', name)
return re.sub(r'\W+', '', name)
def get_non_existing_filename(fname):
if exists(fname):
base, ext = splitext(basename(fname))
return join(dirname(fname), base + '_a' + ext)
else:
return fname
COMMON_TAGS = dict(
file_name='string', path='string', relpath='string',
ctime='string', mtime='string', size='int', type='string'
)
def _cleanup_query(q, tag_types):
type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
for term in q.leaves():
if isinstance(term, query.Term):
if isinstance(term.text, (str, unicode, bytes)):
fieldtype = tag_types[term.fieldname]
if fieldtype in type_map:
term.text = type_map[fieldtype](term.text)
else:
term.text = term.text.lower()
elif isinstance(term, query.Phrase):
term.words = [x.lower() for x in term.words]
def _check_value(value, expr):
if isinstance(expr, string_types):
return expr in value.lower()
else:
return expr == value
def _check_range(x, term):
result = True
if term.start is not None:
if term.startexcl:
result &= x > term.start
else:
result &= x >= term.start
if term.end is not None and result:
if term.endexcl:
result &= x < term.end
else:
result &= x <= term.end
return result
def _check_date_range(x, term):
result = True
if term.startdate is not None:
result &= x >= term.start
if term.enddate is not None and result:
result &= x <= term.end
return result
def _search_media(expr, m_key, get_tag):
"""Given search expression, index to media, and a getter to get the attribute
check if the media matches expression.
"""
if expr.is_leaf():
if isinstance(expr, query.Term):
attr = expr.fieldname
return _check_value(get_tag(m_key, attr), expr.text)
elif isinstance(expr, query.Phrase):
attr = expr.fieldname
text = " ".join(expr.words)
return _check_value(get_tag(m_key, attr), text)
elif isinstance(expr, query.DateRange):
if expr.fieldname == 'ctime':
value = get_tag(m_key, 'ctime_')
elif expr.fieldname == 'mtime':
value = get_tag(m_key, 'mtime_')
return _check_date_range(value, expr)
elif isinstance(expr, query.NumericRange):
attr = expr.fieldname
return _check_range(get_tag(m_key, attr), expr)
else:
print("Unsupported term: %r" % expr)
return False
else:
if isinstance(expr, query.And):
result = True
for child in expr.children():
result &= _search_media(child, m_key, get_tag)
if not result:
break
return result
elif isinstance(expr, query.Or):
result = False
for child in expr.children():
result |= _search_media(child, m_key, get_tag)
if result:
break
return result
elif isinstance(expr, query.Not):
subquery = list(expr.children())[0]
return not _search_media(subquery, m_key, get_tag)
else:
print("Unsupported term: %r" % expr)
return False
class Project(HasTraits):
name = Str
description = Str
path = Str
root = Instance(Directory)
tags = List(TagInfo)
_media = Dict(Str, Media)
extensions = List(Str)
processors = List(processor.FactoryBase)
number_of_files = Long
# Path where the project data is saved.
save_file = Str
last_save_time = Str
_data = Dict
_tag_data = Dict
_relpath2index = Dict()
_query_parser = Instance(qparser.QueryParser)
def add_tags(self, tags):
tags = list(self.tags) + tags
self.update_tags(tags)
def update_tags(self, new_tags):
old_tags = self.tags
new_tag_names = set(tag.name for tag in new_tags)
tag_info = dict((tag.name, tag.type) for tag in old_tags)
removed = []
added = []
for tag in new_tags:
if tag.name not in tag_info:
added.append(tag)
elif tag_info[tag.name] != tag.type:
removed.append(tag)
added.append(tag)
for tag in old_tags:
if tag.name not in new_tag_names:
removed.append(tag)
for tag in removed:
del self._tag_data[tag.name]
n_entries = len(self._relpath2index)
for tag in added:
self._tag_data[tag.name] = [tag.default]*n_entries
# The above can be the first time when self._tag_data is accessed, when
# creating a new project for example. In this case,
# self.__tag_data_default is called, so if self.tags is set then the
# removed tags will not exist in _tag_data causing an error. So we only
# set self.tags below.
self.tags = new_tags
# Update the cached media
for m in self._media.values():
for tag in removed:
del m.tags[tag.name]
for tag in added:
m.tags[tag.name] = tag.default
self._query_parser = self._make_query_parser()
def copy(self):
"""Make a copy of this project. This does not copy the data but only
the tags, extensions and the other settings of the project.
This will not copy any of the processor states but only their settings.
"""
name = self.name + ' copy'
p = Project(name=name)
traits = ['description', 'extensions', 'path', 'processors', 'tags']
p.copy_traits(self, traits, copy='deep')
# Clear out the _done information from the processors
for proc in p.processors:
proc._done.clear()
return p
# #### CRUD interface to the data ####
def update(self, media_data, tags=None):
"""Create/update the internal data given the media data and tags.
Parameters
----------
f: vixen.directory.File instance
tags: dict
"""
relpath = media_data.relpath
if not self.has_media(relpath):
index = len(self._relpath2index)
self._relpath2index[relpath] = index
for key in MediaData._fields:
self._data[key].append(None)
for tag in self.tags:
self._tag_data[tag.name].append(tag.default)
index = self._relpath2index[relpath]
for i, key in enumerate(MediaData._fields):
self._data[key][index] = media_data[i]
if tags:
for key, value in tags.items():
self._tag_data[key][index] = value
media = self._media.get(relpath)
if media is not None:
media.update(media_data, tags)
def get(self, relpath):
"""Given the relative path of some media, return a Media instance.
"""
if relpath in self._media:
return self._media[relpath]
else:
data = {}
index = self._relpath2index[relpath]
for key in MediaData._fields:
data[key] = self._data[key][index]
tags = {}
for key in self._tag_data:
tags[key] = self._tag_data[key][index]
media = Media.from_data(MediaData(**data), tags)
media.on_trait_change(self._media_tag_handler, 'tags_items')
self._media[relpath] = media
return media
def remove(self, relpaths):
"""Given a list of relative path of some media, remove them from the
database.
"""
relpath2index = self._relpath2index
indices = [(x, relpath2index[x]) for x in relpaths]
for relpath, index in sorted(indices, reverse=True):
last = len(relpath2index) - 1
if index == last:
self._delete_record(last, relpath)
else:
self._replace_with_last_record(index, last)
self._delete_record(last, relpath)
def has_media(self, relpath):
"""Returns True if the media data is available.
"""
return relpath in self._relpath2index
def keys(self):
"""Return all the keys for the media relative paths."""
return self._relpath2index.keys()
def _get_media_attr(self, index, attr):
"""Given an index to the media, return its value.
"""
if attr in self._data:
return self._data[attr][index]
elif attr in self._tag_data:
return self._tag_data[attr][index]
# #### End of CRUD interface to the data ####
def clean(self):
"""Scan the project and remove any dead entries.
This is useful when you remove or rename files. This does not refresh
the directory tree or set the number of files. It simply cleans up the
db of files that no longer exist.
"""
logger.info('Cleaning project: %s', self.name)
root_path = self.path
to_remove = []
relpath2index = self._relpath2index
for rpath in list(relpath2index.keys()):
fname = os.path.join(root_path, rpath)
if not os.path.exists(fname):
to_remove.append(rpath)
self.remove(to_remove)
def export_csv(self, fname, cols=None):
"""Export metadata to a csv file. If `cols` are not specified,
it writes out all the useful metadata.
Parameters
-----------
fname: str: a path to the csv file to dump.
cols: sequence: a sequence of columns to write.
"""
logger.info('Exporting CSV: %s', fname)
all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))
- set(('ctime_', 'mtime_')))
if cols is None:
cols = all_keys
cols = list(sorted(cols))
data_cols = set([x for x in cols if x in self._data])
with io.open(fname, 'w', newline='', encoding='utf-8') as of:
# Write the header.
writer = csv.writer(of)
writer.writerow(cols)
for i in range(len(self._relpath2index)):
line = []
for col in cols:
if col in data_cols:
elem = self._data[col][i]
else:
elem = self._tag_data[col][i]
line.append(elem)
writer.writerow(line)
def import_csv(self, fname):
"""Read tag information from given CSV filename.
Returns the success status and the error message if any. Note that this
only applies tags for column headers with known tags. Unknown tags are
not added.
Parameters
----------
fname : str Input filename.
"""
logger.info('Importing tags from: %s', fname)
has_header, header, dialect = _get_csv_headers(fname)
if not has_header:
return False, "The CSV file does not appear to have a header."
if 'path' not in header:
msg = "The CSV file does not have a 'path' column."
return False, msg
tags = {x: header.index(x.name) for x in self.tags if x.name in header}
path_idx = header.index('path')
TRUE = ('1', 't', 'true', 'y', 'yes')
type_map = {
'bool': lambda x: x.lower() in TRUE,
'string': lambda x: x,
'text': lambda x: x,
'int': int,
'float': float
}
count = 0
total = 0
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
next(reader) # Skip header
for record in reader:
total += 1
path = record[path_idx]
rpath = relpath(path, self.path)
index = self._relpath2index.get(rpath, None)
media = self._media.get(rpath)
if index is not None:
count += 1
for tag, header_index in tags.items():
data = record[header_index]
try:
value = type_map[tag.type](data)
if media is not None:
media.tags[tag.name] = value
else:
self._tag_data[tag.name][index] = value
except ValueError:
pass
msg = "Read tags for %d paths out of %d entries." % (count, total)
if count == 0 and total > 0:
msg += ("\nPlease check that your path column matches "
"the media paths.")
return False, msg
else:
msg += ("\nPlease check the imported tags and make sure you "
"save the project.")
return True, msg
def load(self, fp=None):
"""Load media info from opened file object.
"""
if fp is None:
if not exists(self.save_file):
return
fp = open_file(self.save_file, 'rb')
else:
fp = open_file(fp, 'rb')
data = json_tricks.load(
fp, preserve_order=False, ignore_comments=False
)
fp.close()
self.name = data.get('name', '')
self.description = data.get('description', '')
self.path = data.get('path')
self.tags = [TagInfo(name=x[0], type=x[1]) for x in data['tags']]
self.processors = [processor.load(x)
for x in data.get('processors', [])]
version = data.get('version')
if version == 1:
self._read_version1_media(data['media'])
else:
self._data = data['media_data']
self._tag_data = data['tag_data']
self._relpath2index = data['relpath2index']
root = Directory()
root.__setstate__(data.get('root'))
self.extensions = root.extensions
self.root = root
self.number_of_files = len(self._relpath2index)
def save(self):
"""Save current media info to a file object
"""
if len(self.save_file) > 0:
self.save_as(self.save_file)
self._update_last_save_time()
else:
raise IOError("No valid save file set.")
def save_as(self, fp):
"""Save copy to specified path.
"""
fp = open_file(fp, 'wb')
tags = [(t.name, t.type) for t in self.tags]
root = self.root.__getstate__()
processors = [processor.dump(x) for x in self.processors]
data = dict(
version=2, path=self.path, name=self.name,
description=self.description, tags=tags,
media_data=self._data, tag_data=self._tag_data,
relpath2index=self._relpath2index,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
def scan(self, refresh=False):
"""Find all the media recursively inside the root directory.
This will not clobber existing records but will add any new ones.
"""
self._setup_root()
def _scan(dir):
for f in dir.files:
if not self.has_media(f.relpath) or refresh:
data = get_media_data(f.path, f.relpath)
self.update(data)
for d in dir.directories:
if refresh:
d.refresh()
_scan(d)
if refresh:
self.root.refresh()
_scan(self.root)
self.number_of_files = len(self._relpath2index)
def search(self, q):
"""A generator which yields the (filename, relpath) for each file
satisfying the search query.
"""
logger.info('Searching for %s', q)
try:
parsed_q = self._query_parser.parse(q)
except Exception:
logger.warn("Invalid search expression: %s", q)
print("Invalid search expression: %s" % q)
return
tag_types = self._get_tag_types()
_cleanup_query(parsed_q, tag_types)
for key, index in self._relpath2index.items():
if _search_media(parsed_q, index, self._get_media_attr):
yield basename(key), key
def refresh(self):
logger.info('Refreshing project: %s', self.name)
self.clean()
self.scan(refresh=True)
# #### Private protocol ################################################
def _setup_root(self):
path = abspath(expanduser(self.path))
root = self.root
if root is None or realpath(root.path) != realpath(path):
self.root = Directory(path=path, extensions=self.extensions)
def _tags_default(self):
return [TagInfo(name='completed', type='bool')]
def _save_file_default(self):
if len(self.name) > 0:
fname = sanitize_name(self.name) + '.vxn'
d = get_project_dir()
return get_non_existing_filename(join(d, fname))
else:
return ''
def _update_last_save_time(self):
self.last_save_time = get_file_saved_time(self.save_file)
def _last_save_time_default(self):
if exists(self.save_file):
return get_file_saved_time(self.save_file)
else:
return ''
def _name_changed(self, name):
if len(name) > 0:
old_save_file = self.save_file
old_dir = dirname(old_save_file)
new_save_file = join(old_dir, sanitize_name(name) + '.vxn')
if new_save_file != old_save_file:
self.save_file = new_save_file
if exists(old_save_file):
shutil.move(old_save_file, self.save_file)
def _extensions_changed(self, ext):
if self.root is not None:
self.root.extensions = ext
def _extensions_items_changed(self):
if self.root is not None:
self.root.extensions = self.extensions
def _get_tag_types(self):
result = dict(COMMON_TAGS)
result.update(dict((t.name, t.type) for t in self.tags))
return result
def _make_schema(self):
from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema
kw = dict(
type=TEXT, file_name=TEXT, path=TEXT,
mtime=DATETIME, ctime=DATETIME, size=INT
)
type_to_field = dict(
string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN
)
for tag in self.tags:
kw[tag.name] = type_to_field[tag.type]
return Schema(**kw)
def _make_query_parser(self):
schema = self._make_schema()
qp = qparser.QueryParser('path', schema=schema)
qp.add_plugin(qparser.GtLtPlugin())
from whoosh.qparser.dateparse import DateParserPlugin
qp.add_plugin(DateParserPlugin())
return qp
def __query_parser_default(self):
return self._make_query_parser()
def __data_default(self):
data = {}
for key in MediaData._fields:
data[key] = []
return data
def __tag_data_default(self):
tags = {}
for key in self.tags:
tags[key.name] = []
return tags
def _media_tag_handler(self, obj, tname, old, new):
index = self._relpath2index[obj.relpath]
for tag in new.changed:
self._tag_data[tag][index] = obj.tags[tag]
def _read_version1_media(self, media):
data = self.__data_default()
tag_data = self.__tag_data_default()
relpath2index = {}
keymap = dict.fromkeys(MediaData._fields)
for k in keymap:
keymap[k] = k
keymap['_ctime'] = 'ctime_'
keymap['_mtime'] = 'mtime_'
for index, (key, m) in enumerate(media):
relpath2index[key] = index
tags = m.pop('tags')
for tname, v in tags.items():
tag_data[tname].append(v)
for k, v in m.items():
data[keymap[k]].append(v)
if 'file_name' not in m:
data['file_name'].append(basename(key))
data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]
data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]
self._data = data
self._tag_data = tag_data
self._relpath2index = relpath2index
def _delete_record(self, index, relpath):
for key in MediaData._fields:
del self._data[key][index]
for key in self._tag_data:
del self._tag_data[key][index]
if relpath in self._media:
del self._media[relpath]
del self._relpath2index[relpath]
def _replace_with_last_record(self, index, last):
_data = self._data
_tag_data = self._tag_data
for key in MediaData._fields:
_data[key][index] = _data[key][last]
for key in self._tag_data:
_tag_data[key][index] = _tag_data[key][last]
last_relpath = _data['relpath'][last]
self._relpath2index[last_relpath] = index
def _save_as_v1(self, fp):
"""Save copy to specified path.
This mainly exists for testing and making sure we still read the old
saved files.
"""
def _rewrite_dir(state):
"Rewrite directories in the old format."
state['files'] = [x[0] for x in state['files']]
state['directories'] = [_rewrite_dir(d)
for d in state['directories']]
state.pop('relpath')
state.pop('name')
return state
fp = open_file(fp, 'wb')
media = [(key, self.get(key).to_dict()) for key in self._relpath2index]
tags = [(t.name, t.type) for t in self.tags]
root = _rewrite_dir(self.root.__getstate__())
processors = [processor.dump(x) for x in self.processors]
for k, m in media:
m['_ctime'] = long_to_datetime(m['_ctime'])
m['_mtime'] = long_to_datetime(m['_mtime'])
data = dict(
version=1, path=self.path, name=self.name,
description=self.description, tags=tags, media=media,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
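# Added usage sketch (not part of the original module; the path and tag name
# are placeholders): create a project over a media directory, scan, tag a file
# and search.
#
#     p = Project(name='photos', path='~/Pictures',
#                 tags=[TagInfo(name='rating', type='int')])
#     p.scan()
#     media = p.get(next(iter(p.keys())))
#     media.tags['rating'] = 5
#     for fname, relpath in p.search('rating:5'):
#         print(fname, relpath)
#     p.save()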
|
534 | from io import StringIO
from unittest import TestCase
from dropSQL.parser.streams import *
class StreamTestCase(TestCase):
def test(self):
s = '12'
cs = Characters(StringIO(s))
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '2')
r = cs.next()
self.assertFalse(r)
self.assertTrue(r.err())
r = cs.next()
self.assertFalse(r)
cs.back()
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '2')
cs.back(2)
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '1')
|
538 | MANIFEST = {
"hilt": {
"h1": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (141, 141, 141), # 8d8d8d
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal/Salvaged materials",
},
"h2": {
"offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
"colours": {
"primary": (112, 112, 112), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (212, 175, 55), # 000000
},
"length": 24,
"materials": "Alloy metal and carbon composite",
},
"h3": {
"offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
"colours": {
"primary": (157, 157, 157), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h4": {
"offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
"colours": {
"primary": (0, 0, 0), # 000000
"secondary": (157, 157, 157), # 9d9d9d
"tertiary": (180, 97, 19), # b46113
},
"length": 13,
"materials": "Alloy metal",
},
"h5": {
"offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
"colours": {
"primary": (111, 111, 111), # 6f6f6f
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h6": {
"offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
"colours": {
"primary": (120, 120, 120), # 787878
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 22,
"materials": "Alloy metal/Salvaged materials",
},
"h7": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
"colours": {
"primary": (192, 192, 192), # c0c0c0
"secondary": (255, 215, 0), # ffd700
"tertiary": (0, 0, 0), # 000000
},
"length": 22,
"materials": "Alloy metal and Gold",
},
"h8": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (180, 97, 19), # b46113
"tertiary": (0, 0, 0), # 000000
},
"length": 24,
"materials": "Alloy metal/Copper",
},
},
"blade": {
"b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
"b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
"b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
"b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
"b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
"b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
"b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
"b8": {
"colour": "Orange",
"crystal": ["Ilum crystal", "Ultima Pearl"],
"type": "Sith",
},
"b9": {
"colour": "Black",
"crystal": "Obsidian",
"type": ["Jedi", "Mandalorian"],
},
},
"pommel": {
"p1": {"length": 5,},
"p2": {"length": 14,},
"p3": {"length": 3,},
"p4": {"length": 8,},
"p5": {"length": 5,},
"p6": {"length": 5,},
"p7": {"length": 8,},
},
# These are lightsabers for a specific Jedi or Sith. Should use their name instead of
"unique_urls": {""},
}
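# Added example (not part of the original file): assembling a description from
# the manifest entries above.
def describe_saber(hilt_id, blade_id):
    hilt = MANIFEST["hilt"][hilt_id]
    blade = MANIFEST["blade"][blade_id]
    return "{}cm {} hilt with a {} {} blade".format(
        hilt["length"], hilt["materials"], blade["colour"], blade["type"])

# describe_saber("h1", "b2")
# -> '24cm Alloy metal/Salvaged materials hilt with a Blue Jedi blade'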
|
545 | import json
from cisco_sdwan_policy.BaseObject import BaseObject
class Application(BaseObject):
def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs):
self.type = "appList"
self.id = id
self.name = name
self.references = reference
self.app_family=is_app_family
self._entries = app_list
self.url = "template/policy/list/app"
super().__init__(**kwargs)
self.modified=False
def get_entries(self):
return self._entries
def set_entries(self,entries):
self.modified=True
self._entries=entries
@classmethod
def from_json(cls,jsonfile,**kwargs):
id = jsonfile["listId"]
name = jsonfile["name"]
references = jsonfile.get("references")
if len(jsonfile["entries"])>0 and jsonfile["entries"][0].get("app"):
appFamily=False
entries = [i["app"] for i in jsonfile["entries"]]
else:
if not jsonfile["entries"][0].get("appFamily"):
return None
else:
appFamily=True
entries = [i["appFamily"] for i in jsonfile["entries"]]
return cls(name,entries,appFamily,id,references,**kwargs)
def to_json(self):
return {
"name":self.name,
"description":"Desc Not Required",
"type":"app",
"entries":[
{"appFamily" if self.app_family else "app":i} for i in self._entries]
}
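# Added usage sketch (not part of the original module): building an app list
# from a vManage-style payload and dumping it back. Any extra keyword arguments
# required by BaseObject (e.g. an authenticated session) are omitted here.
#
#     payload = {"listId": "abc123", "name": "social", "references": [],
#                "entries": [{"app": "facebook"}, {"app": "youtube"}]}
#     app_list = Application.from_json(payload)
#     app_list.get_entries()          # ['facebook', 'youtube']
#     app_list.to_json()["entries"]   # [{'app': 'facebook'}, {'app': 'youtube'}]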
|
552 | from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.symbolsim.phono_align import Aligner
from corpustools.multiproc import filter_mp, score_mp
def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):
w_len = len(getattr(w, sequence_type))
query_len = len(getattr(query, sequence_type))
if w_len > query_len+max_distance:
return False
if w_len < query_len-max_distance:
return False
return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),
sequence_type, max_distance) <= max_distance
def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):
return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance
def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):
return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance
def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',
num_cores = -1, settable_attr = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of all words in the corpus and
adds them as attributes of the words.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
settable_attr: string
Name of attribute that neighbourhood density results will be assigned to
"""
function = partial(neighborhood_density, corpus_context,
tierdict = tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = neighborhood_density(corpus_context, w, tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
results[str(w)] = [getattr(r, output_format) for r in res[1]]
setattr(w.original, settable_attr.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = [getattr(r, output_format) for r in res[1]]
# setattr(w.original, settable_attr.name, res[0]-1)
# #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect
# #subtracting 1 here is easier than fixing the neighbourhood density algorithm
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),
#corpus_context.attribute.name, n[1][0])
settable_attr.name, n[1][0])
return results
def neighborhood_density(corpus_context, query, tierdict,
algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,
force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of a particular word in the corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose neighborhood density to calculate.
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor
force_quadratic : bool
Force use of the less efficient quadratic algorithm even when finding edit
distance of 1 neighborhoods
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
tuple(int, set)
Tuple of the number of neighbors and the set of neighbor Words.
"""
matches = []
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors for {}...'.format(query))
call_back(0,len(corpus_context))
cur = 0
if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:
return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,
file_type=file_type, collapse_homophones=collapse_homophones)
if algorithm == 'edit_distance':
is_neighbor = partial(_is_edit_distance_neighbor,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'phono_edit_distance':
is_neighbor = partial(_is_phono_edit_distance_neighbor,
specifier = corpus_context.specifier,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'khorsi':
freq_base = corpus_context.get_frequency_base()
is_neighbor = partial(_is_khorsi_neighbor,
freq_base = freq_base,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
for w in corpus_context:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if not is_neighbor(w, query):
continue
matches.append(w)
neighbors = set(matches)-set([query])
return (len(neighbors), neighbors)
def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,
tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):
"""Generates all neighbors of edit distance <= 1 and searches
for them in corpus_context.
Will be faster than neighborhood_density when:
n > m * (1 + s), where
n: number of words in corpus
m: length of query
s: size of segment inventory
"""
neighbors = list()
query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)
for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):
if tier_type.att_type == 'tier':
cand_str = trans_delimiter.join(candidate)
else:
cand_str = ''.join(candidate)
if cand_str in tierdict:
for w in tierdict[cand_str]:
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):
continue
else:
neighbors.append(w)
return (len(neighbors), neighbors)
def generate_neighbor_candidates(corpus_context, query, sequence_type):
sequence = getattr(query, sequence_type)
yield [str(c) for c in sequence]
for i in range(len(sequence)):
yield [str(c) for c in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion
for char in corpus_context.inventory:
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] # insertion
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] # substitution
for char in corpus_context.inventory: # final pass to get insertion at len+1
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:]] + [str(char)] # insertion
def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,
stop_check = None, call_back = None):
function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)
if call_back is not None:
        call_back('Finding mutation minimal pairs...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = find_mutation_minpairs(corpus_context, w,
tier_type=tier_type, collapse_homophones = collapse_homophones)
results[str(w)] = res[1]
setattr(w.original, corpus_context.attribute.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = res[1]#[str(r) for r in res[1]]
# setattr(w.original, corpus_context.attribute.name, res[0])
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])
return results
def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Find all minimal pairs of the query word based only on segment
mutations (not deletions/insertions)
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose minimal pairs to find
stop_check : callable or None
Optional function to check whether to gracefully terminate early
call_back : callable or None
Optional function to supply progress information during the function
Returns
-------
list
The found minimal pairs for the queried word
"""
matches = []
sequence_type = corpus_context.sequence_type
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors...')
call_back(0,len(corpus_context))
cur = 0
al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)
for w in corpus_context:
w_sequence = getattr(w, sequence_type)
query_sequence = getattr(query, sequence_type)
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if (len(w_sequence) > len(query_sequence)+1 or
len(w_sequence) < len(query_sequence)-1):
continue
m = al.make_similarity_matrix(query_sequence, w_sequence)
if m[-1][-1]['f'] != 1:
continue
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):
continue
else:
#matches.append(str(w_sequence))
matches.append(w)
matches = [m.spelling for m in matches]
neighbors = list(set(matches)-set([str(query_sequence)]))
return (len(neighbors), neighbors)
def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):
if isinstance(query, Word):
query_word = query
else:
if tier_type.att_type == 'spelling':
if file_type == sequence_type:
query_word = Word(**{sequence_type: list(query)})
else:
query_word = query.replace(trans_delimiter, '')
query_word = Word(**{sequence_type: list(query_word)})
elif tier_type.att_type == 'tier':
if file_type == sequence_type:
                query_with_td = '.'.join(query) if '.' not in query else query
for entry in corpus:
corpus_word_with_td = str(getattr(entry, sequence_type))
                    if query_with_td == corpus_word_with_td: # if a word in corpus has the same transcription
return entry # that word in the corpus is to be referred to.
# the following should be run if no word found in corpus with the transcription
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: new_query})
else: # if file contains spelling
try:
query_word = corpus.corpus.find(query)
except KeyError:
# if the word in the file can't be found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: list(new_query)})
return query_word
def parse(word, delimiter):
    return word.split(delimiter) if delimiter in word else list(word)
571
from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
            demisto.error('Failed to extract wait time; using default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
if not res.status_code == 200:
if not res.status_code == 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
    Convert an HTML breach description into a human-readable one.
    :param breach_description: Description of a breach from the API response
    :return: Description string with HTML anchor tags converted to clickable markdown links
        for better readability in the war-room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict() # dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
If the http request was successful the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
    Executing the pwned request for an email list. In order to support list input, the function returns 3 lists of outputs.
:param args_dict: the demisto argument - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
    :param email_list: the email list needed for the http requests
    :return: 2 arrays of http request outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
def pwned_domain_command(args_dict):
"""
    Executing the pwned request for a domain list. In order to support list input, the function returns 3 lists of
    outputs
:param args_dict: the demisto argument - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executing the http request
    :param domain_list: the domain list needed for the http requests
    :return: an array of http request outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
    Executing the pwned request for a username list. In order to support list input, the function returns 3 lists of
    outputs
:param args_dict: the demisto argument - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executing the http request
    :param username_list: the username list needed for the http requests
    :return: an array of http request outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
596
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
600
import hashlib
from typing import TypeVar, Union
import redis
from openff.toolkit.topology import Molecule
from openff.bespokefit.executor.services.qcgenerator import worker
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.bespokefit.utilities.molecule import canonical_order_atoms
_T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask)
def _canonicalize_task(task: _T) -> _T:
task = task.copy(deep=True)
# Ensure the SMILES has a canonical ordering to help ensure cache hits.
canonical_molecule = canonical_order_atoms(
Molecule.from_smiles(task.smiles, allow_undefined_stereo=True)
)
if isinstance(task, Torsion1DTask):
map_to_atom_index = {
j: i for i, j in canonical_molecule.properties["atom_map"].items()
}
central_atom_indices = sorted(
map_to_atom_index[task.central_bond[i]] for i in (0, 1)
)
canonical_molecule.properties["atom_map"] = {
atom_index: (i + 1) for i, atom_index in enumerate(central_atom_indices)
}
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
task.central_bond = (1, 2)
else:
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=False
)
task.smiles = canonical_smiles
return task
def cached_compute_task(
task: Union[HessianTask, OptimizationTask, Torsion1DTask],
redis_connection: redis.Redis,
) -> str:
"""Checks to see if a QC task has already been executed and if not send it to a
worker.
"""
if isinstance(task, Torsion1DTask):
compute = worker.compute_torsion_drive
elif isinstance(task, OptimizationTask):
compute = worker.compute_optimization
elif isinstance(task, HessianTask):
compute = worker.compute_hessian
else:
raise NotImplementedError()
# Canonicalize the task to improve the cache hit rate.
task = _canonicalize_task(task)
task_hash = hashlib.sha512(task.json().encode()).hexdigest()
task_id = redis_connection.hget("qcgenerator:task-ids", task_hash)
if task_id is not None:
return task_id.decode()
task_id = compute.delay(task_json=task.json()).id
redis_connection.hset("qcgenerator:types", task_id, task.type)
# Make sure to only set the hash after the type is set in case the connection
# goes down before this information is entered and subsequently discarded.
redis_connection.hset("qcgenerator:task-ids", task_hash, task_id)
return task_id
605
import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
@contextlib.contextmanager
def raises_if(exception, expected):
try:
yield
except exception:
if not expected:
raise
else:
if expected:
pytest.fail(f"Did not raise {exception}")
def check_validator(validate, value, expected_valid):
try:
validate(value)
except ValidationError:
if expected_valid:
pytest.fail(f'Unexpected validation error for value "{value}"')
except Exception:
raise
else:
if not expected_valid:
pytest.fail(f'Expected validation error for value "{value}"')
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def generate_test_import_xml(obj: dict) -> BytesIO:
xml = render_to_string(
template_name="workbaskets/taric/transaction_detail.xml",
context={
"envelope_id": next(_transaction_counter),
"tracked_models": [obj],
"transaction_id": next(_transaction_counter),
"message_counter": counter_generator(),
"counter_generator": counter_generator,
},
)
return BytesIO(xml.encode())
def taric_xml_record_codes(xml):
"""Yields tuples of (record_code, subrecord_code)"""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
def validate_taric_xml(
factory=None,
instance=None,
factory_kwargs=None,
check_order=True,
):
def decorator(func):
def wraps(
api_client,
taric_schema,
approved_transaction,
valid_user,
*args,
**kwargs,
):
if not factory and not instance:
raise AssertionError(
"Either a factory or an object instance need to be provided",
)
if factory and instance:
raise AssertionError(
"Either a factory or an object instance need to be provided - not both.",
)
current_instance = instance or factory.create(
transaction=approved_transaction, **factory_kwargs or {}
)
api_client.force_login(user=valid_user)
response = api_client.get(
reverse(
"workbaskets:workbasket-detail",
kwargs={"pk": approved_transaction.workbasket.pk},
),
{"format": "xml"},
)
assert response.status_code == 200
content = response.content
xml = etree.XML(content)
taric_schema.validate(xml)
assert not taric_schema.error_log, f"XML errors: {taric_schema.error_log}"
if check_order:
validate_taric_xml_record_order(xml)
kwargs = {"xml": xml, **kwargs}
func(
*args,
**kwargs,
)
return wraps
return decorator
class Dates:
deltas = {
"normal": (relativedelta(), relativedelta(months=+1)),
"earlier": (relativedelta(years=-1), relativedelta(years=-1, months=+1)),
"later": (
relativedelta(years=+1, months=+1, days=+1),
relativedelta(years=+1, months=+2),
),
"big": (relativedelta(years=-2), relativedelta(years=+2, days=+1)),
"adjacent": (relativedelta(days=+1), relativedelta(months=+1)),
"adjacent_earlier": (relativedelta(months=-1), relativedelta(days=-1)),
"adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
"adjacent_no_end": (relativedelta(months=+1, days=+1), None),
"adjacent_even_later": (
relativedelta(months=+2, days=+1),
relativedelta(months=+3),
),
"adjacent_earlier_big": (
relativedelta(years=-2, months=-2),
relativedelta(years=-2),
),
"adjacent_later_big": (
relativedelta(months=+1, days=+1),
relativedelta(years=+2, months=+2),
),
"overlap_normal": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1, years=+1),
),
"overlap_normal_earlier": (
relativedelta(months=-1, days=+14),
relativedelta(days=+14),
),
"overlap_normal_same_year": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1),
),
"overlap_big": (relativedelta(years=+1), relativedelta(years=+3, days=+2)),
"after_big": (
relativedelta(years=+3, months=+1),
relativedelta(years=+3, months=+2),
),
"backwards": (relativedelta(months=+1), relativedelta(days=+1)),
"starts_with_normal": (relativedelta(), relativedelta(days=+14)),
"ends_with_normal": (relativedelta(days=+14), relativedelta(months=+1)),
"current": (relativedelta(weeks=-4), relativedelta(weeks=+4)),
"future": (relativedelta(weeks=+10), relativedelta(weeks=+20)),
"no_end": (relativedelta(), None),
"normal_first_half": (relativedelta(), relativedelta(days=+14)),
}
@property
def now(self):
return self.datetime_now.date()
@property
def datetime_now(self):
return datetime.now(tz=UTC).replace(hour=0, minute=0, second=0, microsecond=0)
def __getattr__(self, name):
if name in self.deltas:
start, end = self.deltas[name]
start = self.now + start
if end is not None:
end = self.now + end
return TaricDateRange(start, end)
raise AttributeError(name)
@classmethod
def short_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-14),
)
@classmethod
def medium_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-1),
)
@classmethod
def short_after(cls, dt):
return TaricDateRange(
dt + relativedelta(days=+14),
dt + relativedelta(months=+1),
)
@classmethod
def short_overlap(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(months=+1),
)
@classmethod
def no_end_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
None,
)
def only_applicable_after(cutoff):
"""
    Decorator which asserts that a test passes normally but fails when run before a specified cutoff date.
    :param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
def decorator(fn):
@wraps(fn)
def do_test(*args, **kwargs):
# test should pass normally
fn(*args, **kwargs)
# test should fail before cutoff
with freeze_time(cutoff + relativedelta(days=-1)):
try:
fn(*args, **kwargs)
except pytest.fail.Exception:
pass
except Exception:
raise
else:
pytest.fail(f"Rule applied before {cutoff:%Y-%m-%d}")
return True
return do_test
return decorator
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
>>> datetime.date(2021, 1, 2),
>>> datetime.date(2022, 3, 4),
>>> )
{
"start_date_0": 1,
"start_date_1": 2,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
608
import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestProfileIDM(unittest.TestCase):
"""
Profiling test for IDM agents.
"""
def setUp(self) -> None:
"""
Inherited, see super class.
"""
self.n_repeat_trials = 1
self.display_results = True
self.scenario = get_test_nuplan_scenario()
def test_profile_idm_agent_observation(self) -> None:
"""Profile IDMAgents."""
profiler = Profiler(interval=0.0001)
profiler.start()
# How many times to repeat runtime test
for _ in range(self.n_repeat_trials):
observation = IDMAgents(
target_velocity=10,
min_gap_to_lead_agent=0.5,
headway_time=1.5,
accel_max=1.0,
decel_max=2.0,
scenario=self.scenario,
)
for step in range(self.scenario.get_number_of_iterations() - 1):
iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step)
next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1)
buffer = SimulationHistoryBuffer.initialize_from_list(
1,
[self.scenario.get_ego_state_at_iteration(step)],
[self.scenario.get_tracked_objects_at_iteration(step)],
next_iteration.time_point.time_s - iteration.time_point.time_s,
)
observation.update_observation(iteration, next_iteration, buffer)
profiler.stop()
if self.display_results:
logger.info(profiler.output_text(unicode=True, color=True))
if __name__ == "__main__":
unittest.main()
623
import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
the ground truth horizon line, within the image's x-axis, and
normalized by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
if ground_truth_horizon is None or detected_horizon is None:
return None
def gt(x):
return ground_truth_horizon[0] * x + ground_truth_horizon[1]
def dt(x):
return detected_horizon[0] * x + detected_horizon[1]
width, height = image_dims
return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
    # Sort so that the greedy matching below pairs the closest points first.
    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
629
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
class SUN397EncodableDataset(Dataset):
"""SUN397 encodable dataset class"""
def __init__(self, train=True):
super().__init__()
path = 'data/SUN397/train/*/*.jpg' if train else 'data/SUN397/test/*/*.jpg'
self.data = list(glob.glob(path))
random.shuffle(self.data)
cats = list(set([path.split("/")[3] for path in self.data]))
cats.sort()
self.labels = torch.LongTensor([cats.index(path.split("/")[3]) for path in self.data])
self.preprocessor = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Initialise the cache consulted by __getitem__; external code is expected to
        # populate it with encoded features, and while it is empty the dataset falls back
        # to returning preprocessed raw images.
        self.encoded_data = []
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if len(self.encoded_data) == 0:
return self.preprocessor(Image.open(self.data[idx]).convert('RGB')), self.labels[idx]
return self.encoded_data[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
def num_classes(self):
return int(max(self.labels) + 1)
643
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
def __init__(self, cfg_ana, cfg_comp, looperName ):
super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
#----------------------------------------
# DECLARATION OF HANDLES OF LEPTONS STUFF
#----------------------------------------
def declareHandles(self):
super(TauAnalyzer, self).declareHandles()
self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
def beginLoop(self, setup):
super(TauAnalyzer,self).beginLoop(setup)
self.counters.addCounter('events')
count = self.counters.counter('events')
count.register('all events')
count.register('has >=1 tau at preselection')
count.register('has >=1 selected taus')
count.register('has >=1 other taus')
#------------------
# MAKE LEPTON LISTS
#------------------
def makeTaus(self, event):
event.inclusiveTaus = []
event.selectedTaus = []
event.otherTaus = []
#get all
alltaus = map( Tau, self.handles['taus'].product() )
#make inclusive taus
for tau in alltaus:
tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0]
tau.lepVeto = False
tau.idDecayMode = tau.tauID("decayModeFinding")
tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs")
if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID):
continue
tau.inclusive_lepVeto = False
if self.cfg_ana.inclusive_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR:
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if self.cfg_ana.inclusive_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID):
tau.inclusive_lepVeto = True
if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID):
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if tau.pt() < self.cfg_ana.inclusive_ptMin: continue
if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue
if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue
def id3(tau,X):
"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""
return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
def id5(tau,X):
"""Create an integer equal to 1-2-3-4-5 for (very loose,
loose, medium, tight, very tight)"""
return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight")
def id6(tau,X):
"""Create an integer equal to 1-2-3-4-5-6 for (very loose,
loose, medium, tight, very tight, very very tight)"""
return id5(tau, X) + tau.tauID(X%"VVTight")
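            # Illustrative reading (assuming tauID returns 1/0 for pass/fail, which the sums
            # below rely on): a tau passing the Loose and Medium working points but failing
            # Tight gives id3(tau, ...) == 2.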
tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT")
tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT")
tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3")
tau.idAntiE = id5(tau, "againstElectron%sMVA6")
#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
if tau.tauID(self.cfg_ana.inclusive_tauID):
event.inclusiveTaus.append(tau)
for tau in event.inclusiveTaus:
tau.loose_lepVeto = False
if self.cfg_ana.loose_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR:
tau.loose_lepVeto = True
if self.cfg_ana.loose_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID):
tau.loose_lepVeto = True
if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID):
tau.loose_lepVeto = True
if tau.tauID(self.cfg_ana.loose_decayModeID) and \
tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \
abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \
tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto:
event.selectedTaus.append(tau)
else:
event.otherTaus.append(tau)
event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True)
event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
event.otherTaus.sort(key = lambda l : l.pt(), reverse = True)
self.counters.counter('events').inc('all events')
if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection')
if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus')
def matchTaus(self, event):
match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5)
for lep in event.inclusiveTaus:
gen = match[lep]
lep.mcMatchId = 1 if gen else 0
lep.genp = gen
def process(self, event):
self.readCollections( event.input )
self.makeTaus(event)
if not self.cfg_comp.isMC:
return True
if hasattr(event, 'gentaus'):
self.matchTaus(event)
return True
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
650
from django.db import models
from .query import BookQuerySet
class Book(models.Model):
objects = BookQuerySet.as_manager()
title = models.CharField(max_length=50)
publication_date = models.DateTimeField()
author = models.ForeignKey('Author')
genres = models.ManyToManyField('Genre')
class Author(models.Model):
name = models.CharField(max_length=50)
nationality = models.ForeignKey('Nation', null=True)
class Genre(models.Model):
name = models.CharField(max_length=50)
class Nation(models.Model):
name = models.CharField(max_length=50)
demonym = models.CharField(max_length=50)
666
def ips_between(start, end):
calc = lambda n, m: (int(end.split(".")[n]) - int(start.split(".")[n])) * m
return calc(0, 256 * 256 * 256) + calc(1, 256 * 256) + calc(2, 256) + calc(3, 1)
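# Quick sanity check (illustrative): ips_between("10.0.0.0", "10.0.1.0") evaluates to
# (0 * 16777216) + (0 * 65536) + (1 * 256) + (0 * 1) == 256.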
667
from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class FeatureExtractor(Executor):
"""
    FeatureExtractor takes in a list of assets, runs feature extraction on
    them, and returns a list of corresponding results. A FeatureExtractor must
specify a unique type and version combination (by the TYPE and VERSION
attribute), so that the Result generated by it can be identified.
A derived class of FeatureExtractor must:
1) Override TYPE and VERSION
    2) Override _generate_result(self, asset), which calls a
    command-line executable and generates feature scores in a log file.
    3) Override _get_feature_scores(self, asset), which reads the feature
    scores from the log file, and returns the scores in a dictionary format.
For an example, follow VmafFeatureExtractor.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def ATOM_FEATURES(self):
raise NotImplementedError
def _read_result(self, asset):
result = {}
result.update(self._get_feature_scores(asset))
executor_id = self.executor_id
return Result(asset, executor_id, result)
@classmethod
def get_scores_key(cls, atom_feature):
return "{type}_{atom_feature}_scores".format(
type=cls.TYPE, atom_feature=atom_feature)
@classmethod
def get_score_key(cls, atom_feature):
return "{type}_{atom_feature}_score".format(
type=cls.TYPE, atom_feature=atom_feature)
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
atom_feature_scores_dict = {}
atom_feature_idx_dict = {}
for atom_feature in self.ATOM_FEATURES:
atom_feature_scores_dict[atom_feature] = []
atom_feature_idx_dict[atom_feature] = 0
with open(log_file_path, 'rt') as log_file:
for line in log_file.readlines():
for atom_feature in self.ATOM_FEATURES:
re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
mo = re.match(re_template, line)
if mo:
cur_idx = int(mo.group(1))
assert cur_idx == atom_feature_idx_dict[atom_feature]
# parse value, allowing NaN and inf
val = float(mo.group(2))
if np.isnan(val) or np.isinf(val):
val = None
atom_feature_scores_dict[atom_feature].append(val)
atom_feature_idx_dict[atom_feature] += 1
continue
len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
assert len_score != 0
for atom_feature in self.ATOM_FEATURES[1:]:
assert len_score == len(atom_feature_scores_dict[atom_feature]), \
"Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {}
for atom_feature in self.ATOM_FEATURES:
scores_key = self.get_scores_key(atom_feature)
feature_result[scores_key] = atom_feature_scores_dict[atom_feature]
return feature_result
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VmafFeatureExtractor, cls)._post_process_result(result)
# adm2 =
# (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)
adm2_scores_key = cls.get_scores_key('adm2')
adm_num_scores_key = cls.get_scores_key('adm_num')
adm_den_scores_key = cls.get_scores_key('adm_den')
result.result_dict[adm2_scores_key] = list(
(np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /
(np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)
)
# vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3
vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')
vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')
vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')
vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')
vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')
vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')
vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')
vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')
vif_scale0_scores_key = cls.get_scores_key('vif_scale0')
vif_scale1_scores_key = cls.get_scores_key('vif_scale1')
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')
vif_scale3_scores_key = cls.get_scores_key('vif_scale3')
result.result_dict[vif_scale0_scores_key] = list(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key]))
)
result.result_dict[vif_scale1_scores_key] = list(
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key]))
)
result.result_dict[vif_scale2_scores_key] = list(
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key]))
)
result.result_dict[vif_scale3_scores_key] = list(
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
)
# vif2 =
# ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +
# (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0
vif_scores_key = cls.get_scores_key('vif2')
result.result_dict[vif_scores_key] = list(
(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key])) +
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key])) +
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key])) +
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
) / 4.0
)
# adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3
adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')
adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')
adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')
adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')
adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')
adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')
adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')
adm_scale0_scores_key = cls.get_scores_key('adm_scale0')
adm_scale1_scores_key = cls.get_scores_key('adm_scale1')
adm_scale2_scores_key = cls.get_scores_key('adm_scale2')
adm_scale3_scores_key = cls.get_scores_key('adm_scale3')
result.result_dict[adm_scale0_scores_key] = list(
(np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale1_scores_key] = list(
(np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale2_scores_key] = list(
(np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale3_scores_key] = list(
(np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
# adm3 = \
# (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0
adm3_scores_key = cls.get_scores_key('adm3')
result.result_dict[adm3_scores_key] = list(
(
((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))
) / 4.0
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
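# Note on the derived features above: vif2 and adm3 are just the per-frame means of the four
# per-scale ratios; e.g. (hypothetical numbers) scale ratios [0.90, 0.92, 0.95, 0.97] give
# vif2 = 0.935 for that frame. ADM_SCALE_CONSTANT is added to both the numerator and the
# denominator of each adm ratio, presumably to guard against tiny denominators.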
class VifFrameDifferenceFeatureExtractor(FeatureExtractor):
TYPE = "VifDiff_feature"
VERSION = '0.1'
ATOM_FEATURES = ['vifdiff',
'vifdiff_num', 'vifdiff_den',
'vifdiff_num_scale0', 'vifdiff_den_scale0',
'vifdiff_num_scale1', 'vifdiff_den_scale1',
'vifdiff_num_scale2', 'vifdiff_den_scale2',
'vifdiff_num_scale3', 'vifdiff_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)
# vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3
vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')
vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')
vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')
vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')
vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')
vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')
vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')
vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')
vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')
vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')
vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')
vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')
result.result_dict[vifdiff_scale0_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale0_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale0_scores_key]))
)
result.result_dict[vifdiff_scale1_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale1_scores_key]))
)
result.result_dict[vifdiff_scale2_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale2_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale2_scores_key]))
)
result.result_dict[vifdiff_scale3_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale3_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale3_scores_key]))
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class PsnrFeatureExtractor(FeatureExtractor):
TYPE = "PSNR_feature"
VERSION = "1.0"
ATOM_FEATURES = ['psnr']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MomentFeatureExtractor(FeatureExtractor):
TYPE = "Moment_feature"
# VERSION = "1.0" # call executable
VERSION = "1.1" # python only
ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_w, quality_h = asset.quality_width_height
ref_scores_mtx = None
with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:
scores_mtx_list = []
i = 0
for ref_yuv in ref_yuv_reader:
ref_y = ref_yuv[0]
firstm = ref_y.mean()
secondm = ref_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
ref_scores_mtx = np.vstack(scores_mtx_list)
dis_scores_mtx = None
with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:
scores_mtx_list = []
i = 0
for dis_yuv in dis_yuv_reader:
dis_y = dis_yuv[0]
firstm = dis_y.mean()
secondm = dis_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
dis_scores_mtx = np.vstack(scores_mtx_list)
assert ref_scores_mtx is not None and dis_scores_mtx is not None
log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),
'dis_scores_mtx': dis_scores_mtx.tolist()}
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'wt') as log_file:
log_file.write(str(log_dict))
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'rt') as log_file:
log_str = log_file.read()
log_dict = ast.literal_eval(log_str)
ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])
dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])
_, num_ref_features = ref_scores_mtx.shape
assert num_ref_features == 2 # ref1st, ref2nd
_, num_dis_features = dis_scores_mtx.shape
assert num_dis_features == 2 # dis1st, dis2nd
feature_result = {}
feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])
feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])
feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])
feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])
return feature_result
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(MomentFeatureExtractor, cls)._post_process_result(result)
# calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd
refvar_scores_key = cls.get_scores_key('refvar')
ref1st_scores_key = cls.get_scores_key('ref1st')
ref2nd_scores_key = cls.get_scores_key('ref2nd')
disvar_scores_key = cls.get_scores_key('disvar')
dis1st_scores_key = cls.get_scores_key('dis1st')
dis2nd_scores_key = cls.get_scores_key('dis2nd')
get_var = lambda m: m[1] - m[0] * m[0]
result.result_dict[refvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[ref1st_scores_key],
result.result_dict[ref2nd_scores_key])))
result.result_dict[disvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[dis1st_scores_key],
result.result_dict[dis2nd_scores_key])))
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
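# Note: refvar/disvar above follow from the raw moments gathered in _generate_result via
# Var(X) = E[X^2] - (E[X])^2; e.g. (hypothetical numbers) ref1st = 100.0 and ref2nd = 10016.0
# give refvar = 10016.0 - 100.0**2 = 16.0 for that frame.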
class SsimFeatureExtractor(FeatureExtractor):
TYPE = "SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MsSsimFeatureExtractor(FeatureExtractor):
TYPE = "MS_SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ms_ssim',
'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',
'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',
'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',
'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',
'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',
]
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
|
677 | from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
def regression_cparam(data_set_path, C_param):
X,y = prepare_data(data_set_path)
retain_reg = LogisticRegression( C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
retain_reg.fit(X, y)
c_ext = '_c{:.3f}'.format(C_param)
save_regression_summary(data_set_path,retain_reg,ext=c_ext)
save_regression_model(data_set_path,retain_reg,ext=c_ext)
save_dataset_predictions(data_set_path,retain_reg,X,ext=c_ext)
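# Hypothetical usage sketch: sweep a few regularization strengths. 'churn_data.csv' is an
# assumed placeholder path, not a file shipped with the listings.
if __name__ == '__main__':
    for c_value in (0.03, 0.01, 0.003):
        regression_cparam('churn_data.csv', c_value)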
|
696 | from operator import attrgetter
import logging
import os
import shutil
import subprocess
import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils
# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs and inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.
def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True):
delta = f"{outfile}.tmp.delta"
delta_1 = f"{outfile}.tmp.1delta"
subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True)
maxmatch_opt = "--maxmatch" if maxmatch else ""
commands = [
f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}",
f"delta-filter -1 {delta} > {delta_1}",
f"show-snps -rlTHC {delta_1} > {outfile}",
]
for command in commands:
logging.info("Start run command: " + command)
subprocess.check_output(command, shell=True)
logging.info("Finish run command: " + command)
os.unlink(delta)
os.unlink(delta_1)
def _run_dnadiff(
ref_fasta,
query_fasta,
outfile,
split_query=False,
debug=False,
threads=1,
maxmatch=True,
):
if not split_query:
_run_dnadiff_one_split(
ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch
)
else:
tmp_snp_files = []
seq_reader = pyfastaq.sequences.file_reader(query_fasta)
for seq in seq_reader:
prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}"
tmp_fasta = f"{prefix}.fasta"
with open(tmp_fasta, "w") as f:
print(seq, file=f)
snp_file = f"{prefix}.snps"
_run_dnadiff_one_split(
ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch
)
os.unlink(tmp_fasta)
tmp_snp_files.append(snp_file)
with open(outfile, "wb") as f_out:
for snp_file in tmp_snp_files:
with open(snp_file, "rb") as f_in:
shutil.copyfileobj(f_in, f_out)
if not debug:
os.unlink(snp_file)
def _snps_file_to_vcf(snps_file, query_fasta, outfile):
"""Loads the .snps file made by dnadiff.
query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
vcf_records = {}
variants = pymummer.snp_file.get_all_variants(snps_file)
query_seqs = utils.file_to_dict_of_seqs(query_fasta)
for variant in variants:
# If the variant is reversed, it means that either the ref or query had to be
# reverse complemented when aligned by mummer. Need to do the appropriate
# reverse (complement) fixes so the VCF has the correct REF and ALT sequences
if variant.reverse:
qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
qry_seq.revcomp()
variant.qry_base = "".join(reversed(qry_seq.seq))
ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
ref_seq.revcomp()
variant.ref_base = ref_seq.seq
if variant.var_type == pymummer.variant.SNP:
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
variant.qry_base,
variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_SNP",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.DEL:
# The query has sequence missing, compared to the
# reference. We're making VCF records w.r.t. the
# query, so this is an insertion. So need to
# get the nucleotide before the insertion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
query_seqs[variant.qry_name][variant.qry_start],
query_seqs[variant.qry_name][variant.qry_start]
+ variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_INS",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.INS:
# The ref has sequence missing, compared to the
# query. We're making VCF records w.r.t. the
# query, so this is a deletion. So need to
# get the nucleotide before the deletion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start),
".",
query_seqs[variant.qry_name][variant.qry_start - 1]
+ variant.qry_base,
query_seqs[variant.qry_name][variant.qry_start - 1],
".",
".",
"SVTYPE=DNADIFF_DEL",
"GT",
"1/1",
]
)
)
else:
raise Exception("Unknown variant type: " + str(variant))
assert (
new_record.REF
== query_seqs[new_record.CHROM][
new_record.POS : new_record.POS + len(new_record.REF)
]
)
if new_record.CHROM not in vcf_records:
vcf_records[new_record.CHROM] = []
vcf_records[new_record.CHROM].append(new_record)
for vcf_list in vcf_records.values():
vcf_list.sort(key=attrgetter("POS"))
with open(outfile, "w") as f:
print("##fileformat=VCFv4.2", file=f)
for seq in query_seqs.values():
print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
for key, vcf_list in sorted(vcf_records.items()):
for record in vcf_list:
print(record, file=f)
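# Worked example of the coordinate handling above (hypothetical bases and positions):
# a dnadiff DEL row with qry_start=9, query base 'A' at that position and ref_base='G'
# becomes the query-relative insertion POS=10, REF='A', ALT='AG' (SVTYPE=DNADIFF_INS),
# while the INS case is emitted as a deletion anchored on the preceding query base:
# POS=qry_start, REF=<previous base>+qry_base, ALT=<previous base>.
# For reversed alignments, ref_base is reverse-complemented while qry_base ends up
# complemented in its original orientation (revcomp followed by reversed above).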
def make_truth_vcf(
ref_fasta,
truth_fasta,
outfile,
debug=False,
split_ref=False,
threads=1,
maxmatch=True,
):
snps_file = f"{outfile}.tmp.snps"
_run_dnadiff(
truth_fasta,
ref_fasta,
snps_file,
split_query=split_ref,
debug=debug,
threads=threads,
maxmatch=maxmatch,
)
_snps_file_to_vcf(snps_file, ref_fasta, outfile)
if not debug:
os.unlink(snps_file)
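# Hypothetical usage sketch (file names are placeholders): build the truth VCF for ref.fa
# against truth.fa, keeping the intermediate .snps file around for inspection:
#     make_truth_vcf("ref.fa", "truth.fa", "truth.vcf", debug=True, threads=4)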
|
747 | import os
import numpy as np
import tensorflow as tf
def get_train_data(train_dir, batch_size):
train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
print('train_images', train_images.shape, 'train_labels', train_labels.shape)
dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)
return dataset_train
def get_val_data(val_dir):
test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
return dataset_test
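# Hypothetical usage sketch ('data' is a placeholder directory holding the .npy files, and
# `model` an assumed tf.keras model). Because get_train_data() repeats indefinitely,
# model.fit needs an explicit steps_per_epoch:
#     train_ds = get_train_data('data', batch_size=32)
#     val_ds = get_val_data('data').batch(32)
#     model.fit(train_ds, epochs=5, steps_per_epoch=1000, validation_data=val_ds)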
|
837 | import SimpleXMLRPCServer
import sys
import logging
from K8055Controller import K8055Controller
logging.basicConfig()
controller_log = logging.getLogger("Controller")
class Controller:
def __init__(self):
self.k8055 = K8055Controller()
controller_log.debug("initialized")
def reset(self):
self.k8055.reset()
controller_log.debug("reset")
return 0
def turn_on(self, i):
self.k8055.turn_on(i)
controller_log.debug('turned on %i' % (i))
return 0
def turn_off(self, i):
self.k8055.turn_off(i)
controller_log.debug('turned off %i' % (i))
return 0
def set_analog(self, i, level):
if (i == 1):
self.k8055.set_analog1(level)
else:
self.k8055.set_analog2(level)
return 0
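# Client-side usage sketch (Python 2, matching SimpleXMLRPCServer; the URL must match the
# server binding below):
#     import xmlrpclib
#     proxy = xmlrpclib.ServerProxy("http://d6349.mysql.zone.ee:7000")
#     proxy.turn_on(1)
#     proxy.set_analog(1, 128)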
controller = Controller()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000))
server.register_instance(controller)
server.serve_forever()
|
845 | from freight.api.serializer import serialize
from freight.testutils import TestCase
class UserSerializerTest(TestCase):
def test_simple(self):
user = self.create_user()
result = serialize(user)
assert result["id"] == str(user.id)
assert result["name"] == user.name
|
855 | import os
import pytest
import torch
from hivemind import RemoteExpert
from hivemind.moe.server import background_server
CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py")
@pytest.mark.forked
def test_custom_expert(hid_dim=16):
with background_server(
expert_cls="perceptron",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = torch.randn(batch_size, hid_dim)
output0 = expert0(batch)
output1 = expert1(batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
@pytest.mark.forked
def test_multihead_expert(hid_dim=16):
with background_server(
expert_cls="multihead",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = (
torch.randn(batch_size, hid_dim),
torch.randn(batch_size, 2 * hid_dim),
torch.randn(batch_size, 3 * hid_dim),
)
output0 = expert0(*batch)
output1 = expert1(*batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
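# Note: both tests assume the server imports CUSTOM_EXPERTS_PATH so that the expert classes
# named "perceptron" and "multihead" defined there are registered under those names before
# the experts are instantiated; that is what the custom_module_path argument is for here.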
|
905 | from typing import Dict, Optional, List, Any
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
@Model.register("text_classifier")
class TextClassifier(Model):
"""
Implements a basic text classifier:
1) Embed tokens using `text_field_embedder`
2) Seq2SeqEncoder, e.g. BiLSTM
3) Append the first and last encoder states
4) Final feedforward layer
Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
text_encoder: Seq2SeqEncoder,
classifier_feedforward: FeedForward,
                 verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(TextClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.text_encoder = text_encoder
self.classifier_feedforward = classifier_feedforward
self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim() , self.num_classes)
self.label_accuracy = CategoricalAccuracy()
self.label_f1_metrics = {}
self.verbose_metrics = verbose_metrics
for i in range(self.num_classes):
self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
self.loss = torch.nn.CrossEntropyLoss()
self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
initializer(self)
@overrides
def forward(self,
text: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
text : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            Metadata about the input text (for example its original tokenization), if provided.
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text = self.text_field_embedder(text)
mask = util.get_text_field_mask(text)
encoded_text = self.text_encoder(embedded_text, mask)
pooled = self.pool(encoded_text, mask)
ff_hidden = self.classifier_feedforward(pooled)
logits = self.prediction_layer(ff_hidden)
class_probs = F.softmax(logits, dim=1)
output_dict = {"logits": logits}
if label is not None:
loss = self.loss(logits, label)
output_dict["loss"] = loss
# compute F1 per label
for i in range(self.num_classes):
metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")]
metric(class_probs, label)
self.label_accuracy(logits, label)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
class_probabilities = F.softmax(output_dict['logits'], dim=-1)
output_dict['class_probs'] = class_probabilities
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metric_dict = {}
sum_f1 = 0.0
for name, metric in self.label_f1_metrics.items():
metric_val = metric.get_metric(reset)
if self.verbose_metrics:
metric_dict[name + '_P'] = metric_val[0]
metric_dict[name + '_R'] = metric_val[1]
metric_dict[name + '_F1'] = metric_val[2]
sum_f1 += metric_val[2]
names = list(self.label_f1_metrics.keys())
total_len = len(names)
average_f1 = sum_f1 / total_len
metric_dict['average_F1'] = average_f1
metric_dict['accuracy'] = self.label_accuracy.get_metric(reset)
return metric_dict
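# Dimension note (describing the assumed wiring, not enforced anywhere above): the pooling
# lambda concatenates the final forward and backward encoder states, so the pooled vector has
# the encoder's output dimension (2 x hidden size for a BiLSTM), which classifier_feedforward
# is expected to accept as its input dimension.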
|
907 | import stl_path
class MyNDRPlugin():
def __init__(self):
pass
def pre_iteration(self, finding_max_rate, run_results=None, **kwargs):
""" Function ran before each iteration.
:parameters:
finding_max_rate: boolean
                Indicates whether we are running for the first time, trying to find the max rate. If this is the case, run_results will be None.
run_results: dict
A dictionary that contains the following keys:
queue_full_percentage: Percentage of packets that are queued.
drop_rate_percentage: Percentage of packets that were dropped.
rate_tx_bps: TX rate in bps.
rate_rx_bps: RX rate in bps.
tx_util: TX utilization percentage.
latency: Latency groups.
cpu_util: CPU utilization percentage.
tx_pps: TX in pps.
rx_pps: RX in pps.
tx_bps: TX in bps.
rx_bps: RX in bps.
bw_per_core: Bandwidth per core.
rate_p: Running rate in percentage out of max.
total_tx_L1: Total TX L1.
total_rx_L1: Total RX L1.
iteration: Description of iteration (not necessarily a number)
Pay attention: The rate is of the upcoming iteration. All the rest are of the previous iteration.
kwargs: dict
List of tunables passed as parameters.
"""
# Pre iteration function. This function will run before TRex transmits to the DUT.
# Could use this to better prepare the DUT, for example define shapers, policers, increase buffers and queues.
# You can receive tunables in the command line, through the kwargs argument.
pass
def post_iteration(self, finding_max_rate, run_results, **kwargs):
""" Function ran after each iteration.
:parameters:
finding_max_rate: boolean
Indicates whether we are running for the first time, trying to find the max rate. If this is the case, some values of run_results (like iteration for example) are not relevant.
run_results: dict
A dictionary that contains the following keys:
queue_full_percentage: Percentage of packets that are queued.
drop_rate_percentage: Percentage of packets that were dropped.
rate_tx_bps: TX rate in bps.
rate_rx_bps: RX rate in bps.
tx_util: TX utilization percentage.
latency: Latency groups.
cpu_util: CPU utilization percentage.
tx_pps: TX in pps.
rx_pps: RX in pps.
tx_bps: TX in bps.
rx_bps: RX in bps.
bw_per_core: Bandwidth per core.
rate_p: Running rate in percentage out of max.
total_tx_L1: Total TX L1.
total_rx_L1: Total RX L1.
iteration: Description of iteration (not necessarily a number)
kwargs: dict
List of tunables passed as parameters.
:returns:
bool: should stop the benchmarking or not.
"""
# Post iteration function. This function will run after TRex transmits to the DUT.
# Could use this to decide if to continue the benchmark after querying the DUT post run. The DUT might be overheated or any other thing that might make you want to stop the run.
# You can receive tunables in the command line, through the kwargs argument.
should_stop = False
return should_stop
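# Example stop criterion for post_iteration (a sketch only; 'max_queue_full' is a hypothetical
# tunable passed through kwargs, not something the NDR bench defines):
#     max_queue_full = kwargs.get('max_queue_full', 2.0)
#     should_stop = (run_results['drop_rate_percentage'] > 0
#                    or run_results['queue_full_percentage'] > max_queue_full)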
# dynamic load of python module
def register():
    return MyNDRPlugin()
|
1014 | from lemur import database
def rotate_certificate(endpoint, new_cert):
"""
Rotates a certificate on a given endpoint.
:param endpoint:
:param new_cert:
:return:
"""
# ensure that certificate is available for rotation
endpoint.source.plugin.update_endpoint(endpoint, new_cert)
endpoint.certificate = new_cert
database.update(endpoint)
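# Hypothetical usage sketch (the `endpoints` relationship on the old certificate is an
# assumption about the data model):
#     for endpoint in old_cert.endpoints:
#         rotate_certificate(endpoint, new_cert)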
|